1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2005 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of the Lustre file system, http://www.lustre.org
14 * Lustre is a trademark of Cluster File Systems, Inc.
16 * You may have signed or agreed to another license before downloading
17 * this software. If so, you are bound by the terms and conditions
18 * of that agreement, and the following does not apply to you. See the
19 * LICENSE file included with this distribution for more information.
21 * If you did not agree to a different license, then this copy of Lustre
22 * is open source software; you can redistribute it and/or modify it
23 * under the terms of version 2 of the GNU General Public License as
24 * published by the Free Software Foundation.
26 * In either case, Lustre is distributed in the hope that it will be
27 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
28 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * license text for more details.
33 # define EXPORT_SYMTAB
35 #define DEBUG_SUBSYSTEM S_MDS
37 #include <lustre_mds.h>
38 #include <linux/module.h>
39 #include <linux/init.h>
40 #include <linux/random.h>
42 #include <linux/jbd.h>
43 #include <linux/ext3_fs.h>
44 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
45 # include <linux/smp_lock.h>
46 # include <linux/buffer_head.h>
47 # include <linux/workqueue.h>
48 # include <linux/mount.h>
50 # include <linux/locks.h>
53 #include <obd_class.h>
54 #include <lustre_dlm.h>
56 #include <lustre_fsfilt.h>
57 #include <lprocfs_status.h>
58 #include <lustre_commit_confd.h>
59 #include <lustre_quota.h>
60 #include <lustre_disk.h>
61 #include <lustre_param.h>
63 #include "mds_internal.h"
/* Module parameter: thread count for the MDS service; 0444 = read-only
 * via sysfs after load. */
66 CFS_MODULE_PARM(mds_num_threads, "i", int, 0444,
67 "number of MDS service threads to start");
/* Forward declarations for handlers defined later in this file. */
69 static int mds_intent_policy(struct ldlm_namespace *ns,
70 struct ldlm_lock **lockp, void *req_cookie,
71 ldlm_mode_t mode, int flags, void *data);
72 static int mds_postsetup(struct obd_device *obd);
73 static int mds_cleanup(struct obd_device *obd);
/* mds_sendpage(): push @count bytes of directory data from @file (starting
 * at @offset) to the client via a bulk PUT.  Allocates one page per
 * CFS_PAGE_SIZE chunk, fills each page with fsfilt_readpage(), starts the
 * bulk transfer, and waits (bounded by obd_timeout/4) for completion.  On
 * bulk failure the client export is evicted via class_fail_export().
 * NOTE(review): this excerpt is elided (gaps in the embedded line numbers);
 * several if-conditions, braces and labels (out/out_free/cleanup_buf/
 * abort_bulk) are not visible here — do not assume syntactic completeness. */
75 /* Assumes caller has already pushed into the kernel filesystem context */
76 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
77 loff_t offset, int count)
79 struct ptlrpc_bulk_desc *desc;
80 struct l_wait_info lwi;
82 int rc = 0, npages, i, tmpcount, tmpsize = 0;
85 LASSERT((offset & ~CFS_PAGE_MASK) == 0); /* I'm dubious about this */
/* Round the byte count up to whole pages. */
87 npages = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
88 OBD_ALLOC(pages, sizeof(*pages) * npages);
90 GOTO(out, rc = -ENOMEM);
92 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
95 GOTO(out_free, rc = -ENOMEM);
/* First pass: allocate and register one page per chunk. */
97 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
98 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
100 pages[i] = alloc_pages(GFP_KERNEL, 0);
101 if (pages[i] == NULL)
102 GOTO(cleanup_buf, rc = -ENOMEM);
104 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* Second pass: read directory contents into the pages. */
107 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
108 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
109 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
110 tmpsize, offset, file->f_dentry->d_inode->i_ino,
111 file->f_dentry->d_inode->i_size);
113 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
114 kmap(pages[i]), tmpsize, &offset);
118 GOTO(cleanup_buf, rc = -EIO);
121 LASSERT(desc->bd_nob == count);
123 rc = ptlrpc_start_bulk_transfer(desc);
125 GOTO(cleanup_buf, rc);
/* Fault-injection hook for testing bulk failure handling. */
127 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
128 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
129 OBD_FAIL_MDS_SENDPAGE, rc);
130 GOTO(abort_bulk, rc);
/* Wait for the bulk to drain; timeout is a quarter of obd_timeout. */
133 lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
134 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), &lwi);
135 LASSERT (rc == 0 || rc == -ETIMEDOUT);
138 if (desc->bd_success &&
139 desc->bd_nob_transferred == count)
140 GOTO(cleanup_buf, rc);
142 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
145 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
146 (rc == -ETIMEDOUT) ? "timeout" : "network error",
147 desc->bd_nob_transferred, count,
148 req->rq_export->exp_client_uuid.uuid,
149 req->rq_export->exp_connection->c_remote_uuid.uuid);
/* A failed bulk means the client is unreachable/broken: evict it. */
151 class_fail_export(req->rq_export);
155 ptlrpc_abort_bulk (desc);
157 for (i = 0; i < npages; i++)
159 __free_pages(pages[i], 0);
161 ptlrpc_free_bulk(desc);
163 OBD_FREE(pages, sizeof(*pages) * npages);
/* mds_fid2locked_dentry(): resolve @fid to a dentry (via mds_fid2dentry)
 * and take a local LDLM inodebits lock on it, keyed by ino/generation, with
 * the bit mask @lockpart and mode @lock_mode.  Returns the locked dentry or
 * ERR_PTR on failure; the lock handle is returned through @lockh.
 * NOTE(review): excerpt is elided — the ENTRY/error-path/return lines are
 * not visible here.  @name/@namelen appear unused in the visible lines;
 * presumably consumed by elided code — confirm against the full source. */
168 /* only valid locked dentries or errors should be returned */
169 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
170 struct vfsmount **mnt, int lock_mode,
171 struct lustre_handle *lockh,
172 char *name, int namelen, __u64 lockpart)
174 struct mds_obd *mds = &obd->u.mds;
175 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
176 struct ldlm_res_id res_id = { .name = {0} };
177 int flags = LDLM_FL_ATOMIC_CB, rc;
178 ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
/* DLM resource is named by the inode number + generation. */
184 res_id.name[0] = de->d_inode->i_ino;
185 res_id.name[1] = de->d_inode->i_generation;
186 rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id,
187 LDLM_IBITS, &policy, lock_mode, &flags,
188 ldlm_blocking_ast, ldlm_completion_ast,
189 NULL, NULL, 0, NULL, lockh);
190 if (rc != ELDLM_OK) {
192 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
/* mds_fid2dentry(): translate an {ino, generation} fid into a dget'd dentry
 * by looking up "0x<ino>" under the special fid directory (mds_fid_de).
 * Rejects inodes with zero generation/nlink (possible disk corruption) and
 * stale fids whose generation no longer matches; both return -ENOENT.
 * If @mnt is non-NULL the MDS vfsmount is returned through it.
 * NOTE(review): excerpt is elided — the ino==0 check, IS_ERR(result) check
 * and final RETURN are among the lines not visible here. */
198 /* Look up an entry by inode number. */
199 /* this function ONLY returns valid dget'd dentries with an initialized inode
201 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
202 struct vfsmount **mnt)
205 unsigned long ino = fid->id;
206 __u32 generation = fid->generation;
208 struct dentry *result;
211 RETURN(ERR_PTR(-ESTALE));
/* Fid-directory entries are named by hex inode number. */
213 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
215 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino/gen %lu/%u, sb %p\n",
216 ino, generation, mds->mds_obt.obt_sb);
218 /* under ext3 this is neither supposed to return bad inodes
220 result = ll_lookup_one_len(fid_name, mds->mds_fid_de, strlen(fid_name));
224 inode = result->d_inode;
226 RETURN(ERR_PTR(-ENOENT));
228 if (inode->i_generation == 0 || inode->i_nlink == 0) {
229 LCONSOLE_WARN("Found inode with zero generation or link -- this"
230 " may indicate disk corruption (inode: %lu/%u, "
231 "link %lu, count %d)\n", inode->i_ino,
232 inode->i_generation,(unsigned long)inode->i_nlink,
233 atomic_read(&inode->i_count));
235 RETURN(ERR_PTR(-ENOENT));
/* generation == 0 in the request means "don't check generation". */
238 if (generation && inode->i_generation != generation) {
239 /* we didn't find the right inode.. */
240 CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
241 "count: %d, generation %u/%u\n", inode->i_ino,
242 (unsigned long)inode->i_nlink,
243 atomic_read(&inode->i_count), inode->i_generation,
246 RETURN(ERR_PTR(-ENOENT));
250 *mnt = mds->mds_vfsmnt;
/* mds_connect_internal(): negotiate connect flags between client and MDS.
 * Masks the client-proposed flags/inodebits down to what this MDS supports,
 * drops IBITS entirely if no bits survive (compat with plain locks), strips
 * ACL/XATTR flags when those features are disabled locally, and records the
 * result on the export.  Shared by mds_connect() and mds_reconnect().
 * NOTE(review): excerpt is elided — the "data == NULL" guard and return
 * statement are not visible here. */
257 static int mds_connect_internal(struct obd_export *exp,
258 struct obd_connect_data *data)
260 struct obd_device *obd = exp->exp_obd;
262 data->ocd_connect_flags &= MDS_CONNECT_SUPPORTED;
263 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
265 /* If no known bits (which should not happen, probably,
266 as everybody should support LOOKUP and UPDATE bits at least)
267 revert to compat mode with plain locks. */
268 if (!data->ocd_ibits_known &&
269 data->ocd_connect_flags & OBD_CONNECT_IBITS)
270 data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
272 if (!obd->u.mds.mds_fl_acl)
273 data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
275 if (!obd->u.mds.mds_fl_user_xattr)
276 data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
278 exp->exp_connect_flags = data->ocd_connect_flags;
279 data->ocd_version = LUSTRE_VERSION_CODE;
280 exp->exp_mds_data.med_ibits_known = data->ocd_ibits_known;
/* Server mandates ACLs but the client can't do them: warn (and,
 * presumably, refuse in elided code — confirm against full source). */
283 if (obd->u.mds.mds_fl_acl &&
284 ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
285 CWARN("%s: MDS requires ACL support but client does not\n",
/* mds_reconnect(): re-validate an existing export on client reconnect.
 * Only re-runs the connect-flag negotiation; no new export is created.
 * NOTE(review): the error return for NULL args and the final return are
 * elided from this excerpt. */
292 static int mds_reconnect(struct obd_export *exp, struct obd_device *obd,
293 struct obd_uuid *cluuid,
294 struct obd_connect_data *data)
299 if (exp == NULL || obd == NULL || cluuid == NULL)
302 rc = mds_connect_internal(exp, data);
/* mds_connect(): full client connect path.  Aborts recovery if flagged,
 * creates the export via class_connect(), negotiates flags, allocates the
 * per-client mds_client_data and registers it in the last_rcvd file via
 * mds_client_add().  Error paths free the mcd and disconnect the export.
 * NOTE(review): excerpt is elided — several guards, GOTO labels and the
 * final return are not visible here. */
307 /* Establish a connection to the MDS.
309 * This will set up an export structure for the client to hold state data
310 * about that client, like open files, the last operation number it did
311 * on the server, etc.
313 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
314 struct obd_uuid *cluuid, struct obd_connect_data *data)
316 struct obd_export *exp;
317 struct mds_export_data *med;
318 struct mds_client_data *mcd = NULL;
319 int rc, abort_recovery;
322 if (!conn || !obd || !cluuid)
325 /* Check for aborted recovery. */
326 spin_lock_bh(&obd->obd_processing_task_lock);
327 abort_recovery = obd->obd_abort_recovery;
328 spin_unlock_bh(&obd->obd_processing_task_lock);
330 target_abort_recovery(obd);
332 /* XXX There is a small race between checking the list and adding a
333 * new connection for the same UUID, but the real threat (list
334 * corruption when multiple different clients connect) is solved.
336 * There is a second race between adding the export to the list,
337 * and filling in the client data below. Hence skipping the case
338 * of NULL mcd above. We should already be controlling multiple
339 * connects at the client, and we can't hold the spinlock over
340 * memory allocations without risk of deadlocking.
342 rc = class_connect(conn, obd, cluuid);
345 exp = class_conn2export(conn);
347 med = &exp->exp_mds_data;
349 rc = mds_connect_internal(exp, data);
353 OBD_ALLOC(mcd, sizeof(*mcd));
355 GOTO(out, rc = -ENOMEM);
357 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
/* -1: let mds_client_add pick a free slot in the last_rcvd file. */
360 rc = mds_client_add(obd, exp, -1);
366 OBD_FREE(mcd, sizeof(*mcd));
369 class_disconnect(exp);
371 class_export_put(exp);
/* mds_init_export(): initialize the per-export MDS state — the open-file
 * list and its spinlock — and mark the export as connecting.
 * NOTE(review): the return statement is elided from this excerpt. */
377 int mds_init_export(struct obd_export *exp)
379 struct mds_export_data *med = &exp->exp_mds_data;
381 INIT_LIST_HEAD(&med->med_open_head);
382 spin_lock_init(&med->med_open_lock);
383 exp->exp_connecting = 1;
/* mds_destroy_export(): tear down a client export.  Force-closes every
 * file handle still on med_open_head (which may trigger orphan unlinking
 * and OST object destruction), then frees the client slot.  Self-exports
 * (UUID equal to the obd's own) skip the file-close work.  lmm/logcookies
 * are scratch buffers sized to the current max EA/cookie sizes.
 * NOTE(review): excerpt is elided — several braces, the FAILOVER valid-bit
 * setup and the final return are not visible here. */
388 static int mds_destroy_export(struct obd_export *export)
389 struct mds_export_data *med;
390 struct obd_device *obd = export->exp_obd;
391 struct mds_obd *mds = &obd->u.mds;
392 struct lvfs_run_ctxt saved;
393 struct lov_mds_md *lmm;
394 struct llog_cookie *logcookies;
398 med = &export->exp_mds_data;
399 target_destroy_export(export);
/* The MDS's own export has no client files to clean up. */
401 if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
404 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
405 /* Close any open files (which may also cause orphan unlinking). */
407 OBD_ALLOC(lmm, mds->mds_max_mdsize);
409 CWARN("%s: allocation failure during cleanup; can not force "
410 "close file handles on this service.\n", obd->obd_name);
411 GOTO(out, rc = -ENOMEM);
414 OBD_ALLOC(logcookies, mds->mds_max_cookiesize);
415 if (logcookies == NULL) {
416 CWARN("%s: allocation failure during cleanup; can not force "
417 "close file handles on this service.\n", obd->obd_name);
418 OBD_FREE(lmm, mds->mds_max_mdsize);
419 GOTO(out, rc = -ENOMEM);
/* Drain the open-file list; the lock is dropped around each close
 * because mds_mfd_close can block. */
422 spin_lock(&med->med_open_lock);
423 while (!list_empty(&med->med_open_head)) {
424 struct list_head *tmp = med->med_open_head.next;
425 struct mds_file_data *mfd =
426 list_entry(tmp, struct mds_file_data, mfd_list);
427 int lmm_size = mds->mds_max_mdsize;
428 umode_t mode = mfd->mfd_dentry->d_inode->i_mode;
431 /* Remove mfd handle so it can't be found again.
432 * We are consuming the mfd_list reference here. */
433 mds_mfd_unlink(mfd, 0);
434 spin_unlock(&med->med_open_lock);
436 /* If you change this message, be sure to update
437 * replay_single:test_46 */
438 CDEBUG(D_INODE|D_IOCTL, "%s: force closing file handle for "
439 "%.*s (ino %lu)\n", obd->obd_name,
440 mfd->mfd_dentry->d_name.len,mfd->mfd_dentry->d_name.name,
441 mfd->mfd_dentry->d_inode->i_ino);
443 rc = mds_get_md(obd, mfd->mfd_dentry->d_inode, lmm,&lmm_size,1);
445 CWARN("mds_get_md failure, rc=%d\n", rc);
447 valid |= OBD_MD_FLEASIZE;
449 /* child orphan sem protects orphan_dec_test and
450 * is_orphan race, mds_mfd_close drops it */
451 MDS_DOWN_WRITE_ORPHAN_SEM(mfd->mfd_dentry->d_inode);
453 rc = mds_mfd_close(NULL, REQ_REC_OFF, obd, mfd,
454 !(export->exp_flags & OBD_OPT_FAILOVER),
455 lmm, lmm_size, logcookies,
456 mds->mds_max_cookiesize,
460 CDEBUG(D_INODE|D_IOCTL, "Error closing file: %d\n", rc);
/* A cookie back from the close means OST objects must be
 * destroyed for the orphan. */
462 if (valid & OBD_MD_FLCOOKIE) {
463 rc = mds_osc_destroy_orphan(obd, mode, lmm,
464 lmm_size, logcookies, 1);
466 CDEBUG(D_INODE, "%s: destroy of orphan failed,"
467 " rc = %d\n", obd->obd_name, rc);
470 valid &= ~OBD_MD_FLCOOKIE;
473 spin_lock(&med->med_open_lock);
476 OBD_FREE(logcookies, mds->mds_max_cookiesize);
477 OBD_FREE(lmm, mds->mds_max_mdsize);
479 spin_unlock(&med->med_open_lock);
481 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
482 mds_client_free(export);
/* mds_disconnect(): detach a client.  Disconnects the export first so no
 * new requests can use it, cancels all its DLM locks, then completes any
 * outstanding "difficult" replies so their reply state can be reclaimed.
 * The extra get/put pair keeps the export alive across the teardown.
 * NOTE(review): excerpt is elided — ENTRY/RETURN and some braces are not
 * visible here. */
488 static int mds_disconnect(struct obd_export *exp)
494 class_export_get(exp);
496 /* Disconnect early so that clients can't keep using export */
497 rc = class_disconnect(exp);
498 if (exp->exp_obd->obd_namespace != NULL)
499 ldlm_cancel_locks_for_export(exp);
501 /* complete all outstanding replies */
502 spin_lock(&exp->exp_lock);
503 while (!list_empty(&exp->exp_outstanding_replies)) {
504 struct ptlrpc_reply_state *rs =
505 list_entry(exp->exp_outstanding_replies.next,
506 struct ptlrpc_reply_state, rs_exp_list);
507 struct ptlrpc_service *svc = rs->rs_service;
/* svc->srv_lock nests inside exp->exp_lock here. */
509 spin_lock(&svc->srv_lock);
510 list_del_init(&rs->rs_exp_list);
511 ptlrpc_schedule_difficult_reply(rs);
512 spin_unlock(&svc->srv_lock);
514 spin_unlock(&exp->exp_lock);
516 class_export_put(exp);
/* mds_getstatus(): reply with the filesystem root fid so the client can
 * start pathname resolution.  Reply layout: [ptlrpc_body][mds_body].
 * NOTE(review): the final return is elided from this excerpt. */
520 static int mds_getstatus(struct ptlrpc_request *req)
522 struct mds_obd *mds = mds_req2mds(req);
523 struct mds_body *body;
524 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
527 rc = lustre_pack_reply(req, 2, size, NULL);
528 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
529 CERROR("mds: out of memory for message\n");
530 req->rq_status = -ENOMEM; /* superfluous? */
534 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
535 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
537 /* the last_committed and last_xid fields are filled in for all
538 * replies already - no need to do so here also.
/* mds_get_md(): read the "lov" extended attribute from @inode into @md
 * under i_mutex, then convert it to the current LOV EA format.
 * NOTE(review): excerpt is elided — the @size/@lock parameters, the
 * "lock" conditional around the mutex, and the return are not all visible
 * here; the header comment below documents the full contract. */
543 /* get the LOV EA from @inode and store it into @md. It can be at most
544 * @size bytes, and @size is updated with the actual EA size.
545 * The EA size is also returned on success, and -ve errno on failure.
546 * If there is no EA then 0 is returned. */
547 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
554 LOCK_INODE_MUTEX(inode);
555 rc = fsfilt_get_md(obd, inode, md, *size, "lov");
558 CERROR("Error %d reading eadata for ino %lu\n",
562 rc = mds_convert_lov_ea(obd, inode, md, lmm_size);
574 UNLOCK_INODE_MUTEX(inode);
/* mds_pack_md(): fetch the LOV EA for @inode into the reply buffer at
 * @offset and set the matching valid bits (FLDIREA for directories,
 * FLEASIZE for files) plus eadatasize in @body.  The reply buffer length
 * caps the read; a request larger than mds_max_mdsize is only warned
 * about, since over-reserving space is harmless.
 * NOTE(review): excerpt is elided — the lmm==NULL guard body and the
 * rc-handling around mds_get_md are not fully visible here. */
580 /* Call with lock=1 if you want mds_pack_md to take the i_mutex.
581 * Call with lock=0 if the caller has already taken the i_mutex. */
582 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
583 struct mds_body *body, struct inode *inode, int lock)
585 struct mds_obd *mds = &obd->u.mds;
591 lmm = lustre_msg_buf(msg, offset, 0);
593 /* Some problem with getting eadata when I sized the reply
595 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
599 lmm_size = lustre_msg_buflen(msg, offset);
601 /* I don't really like this, but it is a sanity check on the client
602 * MD request. However, if the client doesn't know how much space
603 * to reserve for the MD, it shouldn't be bad to have too much space.
605 if (lmm_size > mds->mds_max_mdsize) {
606 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
607 inode->i_ino, lmm_size, mds->mds_max_mdsize);
611 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock);
613 if (S_ISDIR(inode->i_mode))
614 body->valid |= OBD_MD_FLDIREA;
616 body->valid |= OBD_MD_FLEASIZE;
617 body->eadatasize = lmm_size;
/* POSIX ACL packing.  With CONFIG_FS_POSIX_ACL, mds_pack_posix_acl() reads
 * the access-ACL xattr straight into the reply buffer at @repoff via the
 * inode's getxattr op (using a stack dentry wrapper), records its size in
 * repbody->aclsize and sets OBD_MD_FLACL.  -ENODATA (no ACL set) is not an
 * error.  Without the config option it compiles to a 0-returning macro.
 * mds_pack_acl() is the thin dispatch wrapper used by getattr.
 * NOTE(review): excerpt is elided — buflen<=0 early-out and the #else/
 * #endif lines are not all visible here. */
624 #ifdef CONFIG_FS_POSIX_ACL
626 int mds_pack_posix_acl(struct inode *inode, struct lustre_msg *repmsg,
627 struct mds_body *repbody, int repoff)
629 struct dentry de = { .d_inode = inode };
633 LASSERT(repbody->aclsize == 0);
634 LASSERT(lustre_msg_bufcount(repmsg) > repoff);
636 buflen = lustre_msg_buflen(repmsg, repoff);
640 if (!inode->i_op || !inode->i_op->getxattr)
644 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
645 lustre_msg_buf(repmsg, repoff, buflen),
650 repbody->aclsize = rc;
651 else if (rc != -ENODATA) {
652 CERROR("buflen %d, get acl: %d\n", buflen, rc);
657 repbody->valid |= OBD_MD_FLACL;
661 #define mds_pack_posix_acl(inode, repmsg, repbody, repoff) 0
664 int mds_pack_acl(struct mds_export_data *med, struct inode *inode,
665 struct lustre_msg *repmsg, struct mds_body *repbody,
668 return mds_pack_posix_acl(inode, repmsg, repbody, repoff);
/* mds_getattr_internal(): fill the getattr reply body for @dentry's inode.
 * Always packs fid + basic attributes; then, driven by reqbody->valid:
 *   - FLEASIZE/FLDIREA: attach the LOV EA (mds_pack_md) and shrink the
 *     reply to the actual EA size; without an EA the MDS itself is
 *     authoritative for size/blocks/atime/mtime, so those bits are set;
 *   - OBD_MD_LINKNAME on symlinks: read the target into the reply;
 *   - FLFLAGS + MDS_BFLAG_EXT_FLAGS: fetch full ext3 flags via ioctl;
 *   - FLMODEASIZE: report current max EA/cookie sizes;
 *   - FLACL (if client negotiated ACL support): attach the ACL.
 * NOTE(review): excerpt is elided — inode validity checks, some braces
 * and the final return are not visible here. */
671 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
672 struct ptlrpc_request *req,
673 struct mds_body *reqbody, int reply_off)
675 struct mds_body *body;
676 struct inode *inode = dentry->d_inode;
683 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
684 LASSERT(body != NULL); /* caller prepped reply */
686 mds_pack_inode2fid(&body->fid1, inode);
687 body->flags = reqbody->flags; /* copy MDS_BFLAG_EXT_FLAGS if present */
688 mds_pack_inode2body(body, inode);
691 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
692 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
693 rc = mds_pack_md(obd, req->rq_repmsg, reply_off, body,
696 /* If we have LOV EA data, the OST holds size, atime, mtime */
697 if (!(body->valid & OBD_MD_FLEASIZE) &&
698 !(body->valid & OBD_MD_FLDIREA))
699 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
700 OBD_MD_FLATIME | OBD_MD_FLMTIME);
702 lustre_shrink_reply(req, reply_off, body->eadatasize, 0);
703 if (body->eadatasize)
705 } else if (S_ISLNK(inode->i_mode) &&
706 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
707 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off, 0);
710 LASSERT (symname != NULL); /* caller prepped reply */
711 len = lustre_msg_buflen(req->rq_repmsg, reply_off);
713 rc = inode->i_op->readlink(dentry, symname, len);
715 CERROR("readlink failed: %d\n", rc);
716 } else if (rc != len - 1) {
717 CERROR ("Unexpected readlink rc %d: expecting %d\n",
721 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
722 body->valid |= OBD_MD_LINKNAME;
723 body->eadatasize = rc + 1;
724 symname[rc] = 0; /* NULL terminate */
728 } else if (reqbody->valid == OBD_MD_FLFLAGS &&
729 reqbody->flags & MDS_BFLAG_EXT_FLAGS) {
732 /* We only return the full set of flags on ioctl, otherwise we
733 * get enough flags from the inode in mds_pack_inode2body(). */
734 rc = fsfilt_iocontrol(obd, inode, NULL, EXT3_IOC_GETFLAGS,
737 body->flags = flags | MDS_BFLAG_EXT_FLAGS;
740 if (reqbody->valid & OBD_MD_FLMODEASIZE) {
741 struct mds_obd *mds = mds_req2mds(req);
742 body->max_cookiesize = mds->mds_max_cookiesize;
743 body->max_mdsize = mds->mds_max_mdsize;
744 body->valid |= OBD_MD_FLMODEASIZE;
750 #ifdef CONFIG_FS_POSIX_ACL
751 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
752 (reqbody->valid & OBD_MD_FLACL)) {
753 rc = mds_pack_acl(&req->rq_export->exp_mds_data,
754 inode, req->rq_repmsg,
757 lustre_shrink_reply(req, reply_off, body->aclsize, 0);
/* mds_getattr_pack_msg(): size and allocate the getattr reply buffers
 * before mds_getattr_internal() fills them.  Probes the inode to learn how
 * much extra space is needed: EA size for regular files/dirs (FLEASIZE/
 * FLDIREA), link-target length for symlinks (LINKNAME), and ACL size when
 * the client negotiated ACLs.  bufcount starts at 2 (ptlrpc_body + body)
 * and grows per extra buffer.
 * NOTE(review): excerpt is elided — size[] assignments for the EA/ACL
 * buffers, bufcount increments and the final return are not all visible
 * here. */
766 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
769 struct mds_obd *mds = mds_req2mds(req);
770 struct mds_body *body;
771 int rc, bufcount = 2;
772 int size[4] = { sizeof(struct ptlrpc_body), sizeof(*body) };
775 LASSERT(offset == REQ_REC_OFF); /* non-intent */
777 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
778 LASSERT(body != NULL); /* checked by caller */
779 LASSERT_REQSWABBED(req, offset); /* swabbed by caller */
/* Probe EA size with a NULL buffer; rc is the stored EA length. */
781 if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
782 (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
783 LOCK_INODE_MUTEX(inode);
784 rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0,
786 UNLOCK_INODE_MUTEX(inode);
787 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
790 if (rc != -ENODATA) {
791 CERROR("error getting inode %lu MD: rc = %d\n",
796 } else if (rc > mds->mds_max_mdsize) {
798 CERROR("MD size %d larger than maximum possible %u\n",
799 rc, mds->mds_max_mdsize);
804 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
805 if (inode->i_size + 1 != body->eadatasize)
806 CERROR("symlink size: %Lu, reply space: %d\n",
807 inode->i_size + 1, body->eadatasize);
808 size[bufcount] = min_t(int, inode->i_size+1, body->eadatasize);
810 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
811 inode->i_size + 1, body->eadatasize);
814 #ifdef CONFIG_FS_POSIX_ACL
815 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
816 (body->valid & OBD_MD_FLACL)) {
817 struct dentry de = { .d_inode = inode };
/* Size-probe the ACL the same way: NULL buffer getxattr. */
820 if (inode->i_op && inode->i_op->getxattr) {
822 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
827 if (rc != -ENODATA) {
828 CERROR("got acl size: %d\n", rc);
838 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
839 CERROR("failed MDS_GETATTR_PACK test\n");
840 req->rq_status = -ENOMEM;
844 rc = lustre_pack_reply(req, bufcount, size, NULL);
846 CERROR("lustre_pack_reply failed: rc %d\n", rc);
/* mds_getattr_lock(): getattr-by-name with DLM locking, used both as a
 * plain RPC handler and from the intent policy (offset tells which).
 * Flow: swab request body + name, set up user credentials, then either
 *   - normal path: lock parent+child via mds_get_parent_child_locked()
 *     (or, for revalidate-by-fid with no name, lock just the child with
 *     an UPDATE lock), or
 *   - resent path (client lock handle already in use): recover the child
 *     fid from the already-granted lock's resource instead of re-enqueuing.
 * Then packs the reply (sizing it first if needed) and returns with the
 * child lock left held for the client.  Cleanup is phase-driven.
 * NOTE(review): excerpt is heavily elided — variable declarations (name,
 * namesize, dir, inum, flags), many braces, GOTO targets and the raw-lookup
 * reply path are not fully visible here; treat line ordering as indicative
 * only. */
854 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
855 int child_part, struct lustre_handle *child_lockh)
857 struct obd_device *obd = req->rq_export->exp_obd;
858 struct mds_obd *mds = &obd->u.mds;
859 struct ldlm_reply *rep = NULL;
860 struct lvfs_run_ctxt saved;
861 struct mds_body *body;
862 struct dentry *dparent = NULL, *dchild = NULL;
863 struct lvfs_ucred uc = {NULL,};
864 struct lustre_handle parent_lockh;
866 int rc = 0, cleanup_phase = 0, resent_req = 0;
870 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
872 /* Swab now, before anyone looks inside the request */
873 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
874 lustre_swab_mds_body);
876 CERROR("Can't swab mds_body\n");
880 LASSERT_REQSWAB(req, offset + 1);
881 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
883 CERROR("Can't unpack name\n");
886 namesize = lustre_msg_buflen(req->rq_reqmsg, offset + 1);
887 /* namesize less than 2 means we have empty name, probably came from
888 revalidate by cfid, so no point in having name to be set */
892 rc = mds_init_ucred(&uc, req, offset);
896 LASSERT(offset == REQ_REC_OFF || offset == DLM_INTENT_REC_OFF);
897 /* if requests were at offset 2, the getattr reply goes back at 1 */
898 if (offset == DLM_INTENT_REC_OFF) {
899 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
901 offset = DLM_REPLY_REC_OFF;
904 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
905 cleanup_phase = 1; /* kernel context */
906 intent_set_disposition(rep, DISP_LOOKUP_EXECD);
908 /* FIXME: handle raw lookup */
910 if (body->valid == OBD_MD_FLID) {
911 struct mds_body *mds_reply;
912 int size = sizeof(*mds_reply);
914 // The user requested ONLY the inode number, so do a raw lookup
915 rc = lustre_pack_reply(req, 1, &size, NULL);
917 CERROR("out of memory\n");
921 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
923 mds_reply = lustre_msg_buf(req->rq_repmsg, offset,
925 mds_reply->fid1.id = inum;
926 mds_reply->valid = OBD_MD_FLID;
/* A used child_lockh means the client resent after already being
 * granted the lock — don't enqueue again. */
931 if (lustre_handle_is_used(child_lockh)) {
932 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
936 if (resent_req == 0) {
938 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
939 rc = mds_get_parent_child_locked(obd, &obd->u.mds,
943 MDS_INODELOCK_UPDATE,
945 child_lockh, &dchild,
948 /* For revalidate by fid we always take UPDATE lock */
949 dchild = mds_fid2locked_dentry(obd, &body->fid2, NULL,
951 NULL, 0, child_part);
954 rc = PTR_ERR(dchild);
959 struct ldlm_lock *granted_lock;
960 struct ll_fid child_fid;
961 struct ldlm_resource *res;
962 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
963 granted_lock = ldlm_handle2lock(child_lockh);
964 LASSERTF(granted_lock != NULL, LPU64"/%u lockh "LPX64"\n",
965 body->fid1.id, body->fid1.generation,
966 child_lockh->cookie);
/* Resource name encodes the child's ino/generation (see
 * mds_fid2locked_dentry), so the fid can be rebuilt from it. */
969 res = granted_lock->l_resource;
970 child_fid.id = res->lr_name.name[0];
971 child_fid.generation = res->lr_name.name[1];
972 dchild = mds_fid2dentry(&obd->u.mds, &child_fid, NULL);
973 LASSERT(!IS_ERR(dchild));
974 LDLM_LOCK_PUT(granted_lock);
977 cleanup_phase = 2; /* dchild, dparent, locks */
979 if (dchild->d_inode == NULL) {
980 intent_set_disposition(rep, DISP_LOOKUP_NEG);
981 /* in the intent case, the policy clears this error:
982 the disposition is enough */
983 GOTO(cleanup, rc = -ENOENT);
985 intent_set_disposition(rep, DISP_LOOKUP_POS);
988 if (req->rq_repmsg == NULL) {
989 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
991 CERROR ("mds_getattr_pack_msg: %d\n", rc);
996 rc = mds_getattr_internal(obd, dchild, req, body, offset);
997 GOTO(cleanup, rc); /* returns the lock to the client */
1000 switch (cleanup_phase) {
1002 if (resent_req == 0) {
1003 if (rc && dchild->d_inode)
1004 ldlm_lock_decref(child_lockh, LCK_CR);
1006 ldlm_lock_decref(&parent_lockh, LCK_CR);
1012 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1014 mds_exit_ucred(&uc, mds);
1015 if (req->rq_reply_state == NULL) {
1016 req->rq_status = rc;
1017 lustre_pack_reply(req, 1, NULL, NULL);
/* mds_getattr(): lock-free getattr-by-fid RPC handler.  Resolves fid1 to
 * a dentry, sizes + packs the reply, and fills it via
 * mds_getattr_internal().  No DLM lock is taken here (contrast with
 * mds_getattr_lock); credentials are pushed for the fs operations.
 * NOTE(review): excerpt is elided — declarations of `de`/`rc`, dput, GOTO
 * labels and the final return are not visible here. */
1023 static int mds_getattr(struct ptlrpc_request *req, int offset)
1025 struct mds_obd *mds = mds_req2mds(req);
1026 struct obd_device *obd = req->rq_export->exp_obd;
1027 struct lvfs_run_ctxt saved;
1029 struct mds_body *body;
1030 struct lvfs_ucred uc = { NULL, };
1034 OBD_COUNTER_INCREMENT(obd, getattr);
1036 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1037 lustre_swab_mds_body);
1041 rc = mds_init_ucred(&uc, req, offset);
1043 GOTO(out_ucred, rc);
1045 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1046 de = mds_fid2dentry(mds, &body->fid1, NULL);
1048 rc = req->rq_status = PTR_ERR(de);
1052 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1054 CERROR("mds_getattr_pack_msg: %d\n", rc);
1058 req->rq_status = mds_getattr_internal(obd, de, req, body,REPLY_REC_OFF);
1063 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/* If no reply was packed on an error path, pack a bare one so the
 * client still gets rq_status back. */
1065 if (req->rq_reply_state == NULL) {
1066 req->rq_status = rc;
1067 lustre_pack_reply(req, 1, NULL, NULL);
1069 mds_exit_ucred(&uc, mds);
/* mds_obd_statfs(): statfs the backing filesystem under obd_osfs_lock and
 * copy the cached obd_osfs result into @osfs.  @max_age lets fsfilt_statfs
 * serve a cached value instead of hitting the disk.
 * NOTE(review): the rc check between statfs and the memcpy is elided from
 * this excerpt. */
1073 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1078 spin_lock(&obd->obd_osfs_lock);
1079 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
1081 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1082 spin_unlock(&obd->obd_osfs_lock);
/* mds_statfs(): STATFS RPC handler.  Packs an obd_statfs reply and fills
 * it from mds_obd_statfs() with a one-jiffy cache window.  Includes a
 * fault-injection sleep long enough to trip the service watchdog.
 * NOTE(review): the GOTO/return plumbing between these lines is elided
 * from this excerpt. */
1087 static int mds_statfs(struct ptlrpc_request *req)
1089 struct obd_device *obd = req->rq_export->exp_obd;
1090 int rc, size[2] = { sizeof(struct ptlrpc_body),
1091 sizeof(struct obd_statfs) };
1094 /* This will trigger a watchdog timeout */
1095 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
1096 (MDS_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1);
1097 OBD_COUNTER_INCREMENT(obd, statfs);
1099 rc = lustre_pack_reply(req, 2, size, NULL);
1100 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
1101 CERROR("mds: statfs lustre_pack_reply failed: rc = %d\n", rc);
1105 /* We call this so that we can cache a bit - 1 jiffie worth */
1106 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1107 size[REPLY_REC_OFF]),
1108 cfs_time_current_64() - HZ);
1110 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1116 req->rq_status = rc;
/* mds_sync(): SYNC RPC handler.  fid1.id == 0 means "sync the whole
 * backing filesystem"; otherwise resolve the fid and call the inode's
 * fsync op, then echo the inode's attributes back in the reply body.
 * NOTE(review): excerpt is elided — dput, GOTO labels, braces and the
 * final return are not visible here. */
1120 static int mds_sync(struct ptlrpc_request *req, int offset)
1122 struct obd_device *obd = req->rq_export->exp_obd;
1123 struct mds_obd *mds = &obd->u.mds;
1124 struct mds_body *body;
1125 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
1128 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1129 lustre_swab_mds_body);
1131 GOTO(out, rc = -EFAULT);
1133 rc = lustre_pack_reply(req, 2, size, NULL);
1134 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) {
1135 CERROR("fsync lustre_pack_reply failed: rc = %d\n", rc);
1139 if (body->fid1.id == 0) {
1140 /* a fid of zero is taken to mean "sync whole filesystem" */
1141 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
1146 de = mds_fid2dentry(mds, &body->fid1, NULL);
1148 GOTO(out, rc = PTR_ERR(de));
1150 /* The file parameter isn't used for anything */
1151 if (de->d_inode->i_fop && de->d_inode->i_fop->fsync)
1152 rc = de->d_inode->i_fop->fsync(NULL, de, 1);
1154 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1156 mds_pack_inode2fid(&body->fid1, de->d_inode);
1157 mds_pack_inode2body(body, de->d_inode);
1164 req->rq_status = rc;
/* mds_readpage(): READPAGE RPC handler — stream directory pages to the
 * client.  Resolves fid1, opens the directory read-only, validates that
 * both the offset (body->size, repurposed) and byte count (body->nlink,
 * repurposed) are block-aligned, reports the directory size in the reply,
 * and hands off to mds_sendpage() for the bulk transfer.
 * NOTE(review): excerpt is elided — declarations (de, file), IS_ERR
 * checks, GOTO labels and the final return are not visible here. */
1168 /* mds_readpage does not take a DLM lock on the inode, because the client must
1169 * already have a PR lock.
1171 * If we were to take another one here, a deadlock will result, if another
1172 * thread is already waiting for a PW lock. */
1173 static int mds_readpage(struct ptlrpc_request *req, int offset)
1175 struct obd_device *obd = req->rq_export->exp_obd;
1176 struct mds_obd *mds = &obd->u.mds;
1177 struct vfsmount *mnt;
1180 struct mds_body *body, *repbody;
1181 struct lvfs_run_ctxt saved;
1182 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) };
1183 struct lvfs_ucred uc = {NULL,};
1186 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1189 rc = lustre_pack_reply(req, 2, size, NULL);
1191 CERROR("error packing readpage reply: rc %d\n", rc);
1195 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1196 lustre_swab_mds_body);
1198 GOTO (out, rc = -EFAULT);
1200 rc = mds_init_ucred(&uc, req, offset);
1204 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1205 de = mds_fid2dentry(&obd->u.mds, &body->fid1, &mnt);
1207 GOTO(out_pop, rc = PTR_ERR(de));
1209 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1211 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1212 /* note: in case of an error, dentry_open puts dentry */
1214 GOTO(out_pop, rc = PTR_ERR(file));
1216 /* body->size is actually the offset -eeb */
1217 if ((body->size & (de->d_inode->i_blksize - 1)) != 0) {
1218 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1219 body->size, de->d_inode->i_blksize);
1220 GOTO(out_file, rc = -EFAULT);
1223 /* body->nlink is actually the #bytes to read -eeb */
1224 if (body->nlink & (de->d_inode->i_blksize - 1)) {
1225 CERROR("size %u is not multiple of blocksize %lu\n",
1226 body->nlink, de->d_inode->i_blksize);
1227 GOTO(out_file, rc = -EFAULT);
1230 repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1232 repbody->size = file->f_dentry->d_inode->i_size;
1233 repbody->valid = OBD_MD_FLSIZE;
1235 /* to make this asynchronous make sure that the handling function
1236 doesn't send a reply when this function completes. Instead a
1237 callback function would send the reply */
1238 /* body->size is actually the offset -eeb */
1239 rc = mds_sendpage(req, file, body->size, body->nlink);
1242 filp_close(file, 0);
1244 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1246 mds_exit_ucred(&uc, mds);
1247 req->rq_status = rc;
/* mds_reint(): REINT (metadata-modifying) RPC entry point.  Heap-allocates
 * the update record (too big for the stack), unpacks the request into it,
 * and dispatches to mds_reint_rec().
 * NOTE(review): the allocation-failure check and final return are elided
 * from this excerpt. */
1251 int mds_reint(struct ptlrpc_request *req, int offset,
1252 struct lustre_handle *lockh)
1254 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1257 OBD_ALLOC(rec, sizeof(*rec));
1261 rc = mds_update_unpack(req, offset, rec);
1262 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1263 CERROR("invalid record\n");
1264 GOTO(out, req->rq_status = -EINVAL);
1267 /* rc will be used to interrupt a for loop over multiple records */
1268 rc = mds_reint_rec(rec, offset, req, lockh);
1270 OBD_FREE(rec, sizeof(*rec));
/* mds_filter_recovery_request(): decide whether an RPC may run while the
 * MDS is still in recovery.  Connect/disconnect (and, per the elided
 * cases, other recovery-safe opcodes) pass through; replayable requests
 * are queued via target_queue_recovery_request(); anything else is
 * rejected with -EAGAIN so the client retries after recovery.
 * NOTE(review): several case labels and *process assignments are elided
 * from this excerpt. */
1274 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1275 struct obd_device *obd, int *process)
1277 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1278 case MDS_CONNECT: /* This will never get here, but for completeness. */
1279 case OST_CONNECT: /* This will never get here, but for completeness. */
1280 case MDS_DISCONNECT:
1281 case OST_DISCONNECT:
1286 case MDS_SYNC: /* used in unmounting */
1290 *process = target_queue_recovery_request(req, obd);
1294 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1296 /* XXX what should we set rq_status to here? */
1297 req->rq_status = -EAGAIN;
1298 RETURN(ptlrpc_error(req));
/* Human-readable names for the REINT_* sub-opcodes, indexed by opcode.
 * Used only for debug logging in mds_handle(). */
1302 static char *reint_names[] = {
/* Use standard C99 designated initializers ("[idx] = val"); the previous
 * "[idx] val" form is an obsolete GNU extension rejected by modern GCC. */
1303 [REINT_SETATTR] = "setattr",
1304 [REINT_CREATE] = "create",
1305 [REINT_LINK] = "link",
1306 [REINT_UNLINK] = "unlink",
1307 [REINT_RENAME] = "rename",
1308 [REINT_OPEN] = "open",
/* Handle an MDS_SET_INFO RPC: currently only the "read-only" key is
 * understood; its value toggles OBD_CONNECT_RDONLY on the export.
 * Returns 0 on success with a packed reply; unknown keys fall through
 * past the key comparison below. */
1311 static int mds_set_info_rpc(struct obd_export *exp, struct ptlrpc_request *req)
1318 key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1);
1320 DEBUG_REQ(D_HA, req, "no set_info key");
1323 keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF);
1325 val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, sizeof(*val));
1327 DEBUG_REQ(D_HA, req, "no set_info val");
1331 rc = lustre_pack_reply(req, 1, NULL, NULL);
1334 lustre_msg_set_status(req->rq_repmsg, 0);
/* Compare only strlen("read-only") bytes: the old code passed keylen to
 * memcmp(), reading past the string literal whenever the client-supplied
 * keylen exceeded it (out-of-bounds read / undefined behavior). */
1336 if (keylen < strlen("read-only") ||
1337 memcmp(key, "read-only", strlen("read-only")) != 0)
1341 exp->exp_connect_flags |= OBD_CONNECT_RDONLY;
1343 exp->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
/* Handle an MDS_QUOTACHECK RPC: swab the obd_quotactl body, pack an empty
 * reply, then run the quota check and store its result in rq_status. */
1348 static int mds_handle_quotacheck(struct ptlrpc_request *req)
1350 struct obd_quotactl *oqctl;
1354 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1355 lustre_swab_obd_quotactl);
1359 rc = lustre_pack_reply(req, 1, NULL, NULL);
1361 CERROR("mds: out of memory while packing quotacheck reply\n");
/* Result goes to the client via rq_status, not the function return. */
1365 req->rq_status = obd_quotacheck(req->rq_export, oqctl);
/* Handle an MDS_QUOTACTL RPC: swab the request obd_quotactl, pack a reply
 * containing a reply obd_quotactl buffer, execute the quota operation and
 * report its result through rq_status. */
1369 static int mds_handle_quotactl(struct ptlrpc_request *req)
1371 struct obd_quotactl *oqctl, *repoqc;
1372 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repoqc) };
1375 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1376 lustre_swab_obd_quotactl);
1380 rc = lustre_pack_reply(req, 2, size, NULL);
1384 repoqc = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*repoqc));
1386 req->rq_status = obd_quotactl(req->rq_export, oqctl);
/* Verify that the protocol version stamped in @msg matches the version the
 * server expects for the message's opcode class: OBD (connect/disconnect),
 * MDS (metadata ops), DLM (lock callbacks) or LOG (llog ops).  Logs an error
 * and returns the check's failure code for mismatches or unknown opcodes. */
1391 static int mds_msg_check_version(struct lustre_msg *msg)
1395 switch (lustre_msg_get_opc(msg)) {
1397 case MDS_DISCONNECT:
1399 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
1401 CERROR("bad opc %u version %08x, expecting %08x\n",
1402 lustre_msg_get_opc(msg),
1403 lustre_msg_get_version(msg),
1404 LUSTRE_OBD_VERSION);
1408 case MDS_GETATTR_NAME:
1413 case MDS_DONE_WRITING:
1419 case MDS_QUOTACHECK:
1423 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
1425 CERROR("bad opc %u version %08x, expecting %08x\n",
1426 lustre_msg_get_opc(msg),
1427 lustre_msg_get_version(msg),
1428 LUSTRE_MDS_VERSION);
1432 case LDLM_BL_CALLBACK:
1433 case LDLM_CP_CALLBACK:
1434 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
1436 CERROR("bad opc %u version %08x, expecting %08x\n",
1437 lustre_msg_get_opc(msg),
1438 lustre_msg_get_version(msg),
1439 LUSTRE_DLM_VERSION);
1441 case OBD_LOG_CANCEL:
1442 case LLOG_ORIGIN_HANDLE_CREATE:
1443 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1444 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1445 case LLOG_ORIGIN_HANDLE_CLOSE:
1446 case LLOG_ORIGIN_HANDLE_DESTROY:
1447 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1449 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
1451 CERROR("bad opc %u version %08x, expecting %08x\n",
1452 lustre_msg_get_opc(msg),
1453 lustre_msg_get_version(msg),
1454 LUSTRE_LOG_VERSION);
1457 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
/* Top-level MDS request dispatcher, registered as the ptlrpc service handler.
 * Validates the message version, enforces connect/recovery policy, then
 * switches on the opcode to the per-operation handler.  On completion it
 * stamps last_xid / last_committed into the reply and sends it (or queues it
 * if this is the final replay during recovery). */
1463 int mds_handle(struct ptlrpc_request *req)
1465 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
1467 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1468 struct obd_device *obd = NULL;
1471 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
1473 LASSERT(current->journal_info == NULL);
1475 rc = mds_msg_check_version(req->rq_reqmsg);
1477 CERROR("MDS drop mal-formed request\n");
1481 /* XXX identical to OST */
1482 if (lustre_msg_get_opc(req->rq_reqmsg) != MDS_CONNECT) {
1483 struct mds_export_data *med;
1484 int recovering, abort_recovery;
1486 if (req->rq_export == NULL) {
1487 CERROR("operation %d on unconnected MDS from %s\n",
1488 lustre_msg_get_opc(req->rq_reqmsg),
1489 libcfs_id2str(req->rq_peer));
1490 req->rq_status = -ENOTCONN;
1491 GOTO(out, rc = -ENOTCONN);
1494 med = &req->rq_export->exp_mds_data;
1495 obd = req->rq_export->exp_obd;
1498 /* sanity check: if the xid matches, the request must
1499 * be marked as a resent or replayed */
1500 if (req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_xid) ||
1501 req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_close_xid))
1502 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
1503 (MSG_RESENT | MSG_REPLAY))) {
1504 CERROR("rq_xid "LPU64" matches last_xid, "
1505 "expected RESENT flag\n",
1507 req->rq_status = -ENOTCONN;
1508 GOTO(out, rc = -EFAULT);
1510 /* else: note the opposite is not always true; a
1511 * RESENT req after a failover will usually not match
1512 * the last_xid, since it was likely never
1513 * committed. A REPLAYed request will almost never
1514 * match the last xid, however it could for a
1515 * committed, but still retained, open. */
1517 /* Check for aborted recovery. */
1518 spin_lock_bh(&obd->obd_processing_task_lock);
1519 abort_recovery = obd->obd_abort_recovery;
1520 recovering = obd->obd_recovering;
1521 spin_unlock_bh(&obd->obd_processing_task_lock);
1522 if (abort_recovery) {
1523 target_abort_recovery(obd);
1524 } else if (recovering) {
1525 rc = mds_filter_recovery_request(req, obd,
1527 if (rc || !should_process)
1532 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1534 DEBUG_REQ(D_INODE, req, "connect");
1535 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1536 rc = target_handle_connect(req, mds_handle);
1538 /* Now that we have an export, set mds. */
1539 obd = req->rq_export->exp_obd;
1540 mds = mds_req2mds(req);
1544 case MDS_DISCONNECT:
1545 DEBUG_REQ(D_INODE, req, "disconnect");
1546 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1547 rc = target_handle_disconnect(req);
1548 req->rq_status = rc; /* superfluous? */
1552 DEBUG_REQ(D_INODE, req, "getstatus");
1553 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1554 rc = mds_getstatus(req);
1558 DEBUG_REQ(D_INODE, req, "getattr");
1559 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1560 rc = mds_getattr(req, REQ_REC_OFF);
1564 DEBUG_REQ(D_INODE, req, "setxattr");
1565 OBD_FAIL_RETURN(OBD_FAIL_MDS_SETXATTR_NET, 0);
1566 rc = mds_setxattr(req);
1570 DEBUG_REQ(D_INODE, req, "getxattr");
1571 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETXATTR_NET, 0);
1572 rc = mds_getxattr(req);
1575 case MDS_GETATTR_NAME: {
1576 struct lustre_handle lockh = { 0 };
1577 DEBUG_REQ(D_INODE, req, "getattr_name");
1578 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1580 /* If this request gets a reconstructed reply, we won't be
1581 * acquiring any new locks in mds_getattr_lock, so we don't
1584 rc = mds_getattr_lock(req, REQ_REC_OFF, MDS_INODELOCK_UPDATE,
1586 /* this non-intent call (from an ioctl) is special */
1587 req->rq_status = rc;
1588 if (rc == 0 && lustre_handle_is_used(&lockh))
1589 ldlm_lock_decref(&lockh, LCK_CR);
1593 DEBUG_REQ(D_INODE, req, "statfs");
1594 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1595 rc = mds_statfs(req);
1599 DEBUG_REQ(D_INODE, req, "readpage");
1600 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1601 rc = mds_readpage(req, REQ_REC_OFF);
1603 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
1610 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF,
1613 int size[4] = { sizeof(struct ptlrpc_body),
1614 sizeof(struct mds_body),
1615 mds->mds_max_mdsize,
1616 mds->mds_max_cookiesize };
1619 /* NB only peek inside req now; mds_reint() will swab it */
1621 CERROR ("Can't inspect opcode\n");
1626 if (lustre_msg_swabbed(req->rq_reqmsg))
/* Fixed inverted condition: only index reint_names[] when opc is in
 * range AND the entry is non-NULL; the previous "||"/"== NULL" form
 * dereferenced exactly the out-of-range/NULL cases. */
1629 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1630 (opc < sizeof(reint_names) / sizeof(reint_names[0]) &&
1631 reint_names[opc] != NULL) ? reint_names[opc] :
1634 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
1636 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1638 else if (opc == REINT_OPEN)
1643 rc = lustre_pack_reply(req, bufcount, size, NULL);
1647 rc = mds_reint(req, REQ_REC_OFF, NULL);
1648 fail = OBD_FAIL_MDS_REINT_NET_REP;
1653 DEBUG_REQ(D_INODE, req, "close");
1654 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1655 rc = mds_close(req, REQ_REC_OFF);
1658 case MDS_DONE_WRITING:
1659 DEBUG_REQ(D_INODE, req, "done_writing");
1660 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
1661 rc = mds_done_writing(req, REQ_REC_OFF);
1665 DEBUG_REQ(D_INODE, req, "pin");
1666 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1667 rc = mds_pin(req, REQ_REC_OFF);
1671 DEBUG_REQ(D_INODE, req, "sync");
1672 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
1673 rc = mds_sync(req, REQ_REC_OFF);
1677 DEBUG_REQ(D_INODE, req, "set_info");
1678 rc = mds_set_info_rpc(req->rq_export, req);
1681 case MDS_QUOTACHECK:
1682 DEBUG_REQ(D_INODE, req, "quotacheck");
1683 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACHECK_NET, 0);
1684 rc = mds_handle_quotacheck(req);
1688 DEBUG_REQ(D_INODE, req, "quotactl");
1689 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACTL_NET, 0);
1690 rc = mds_handle_quotactl(req);
1694 DEBUG_REQ(D_INODE, req, "ping");
1695 rc = target_handle_ping(req);
1698 case OBD_LOG_CANCEL:
1699 CDEBUG(D_INODE, "log cancel\n");
1700 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1701 rc = -ENOTSUPP; /* la la la */
1705 DEBUG_REQ(D_INODE, req, "enqueue");
1706 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1707 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1708 ldlm_server_blocking_ast, NULL);
1709 fail = OBD_FAIL_LDLM_REPLY;
1712 DEBUG_REQ(D_INODE, req, "convert");
1713 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1714 rc = ldlm_handle_convert(req);
1716 case LDLM_BL_CALLBACK:
1717 case LDLM_CP_CALLBACK:
1718 DEBUG_REQ(D_INODE, req, "callback");
1719 CERROR("callbacks should not happen on MDS\n");
1721 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1723 case LLOG_ORIGIN_HANDLE_CREATE:
1724 DEBUG_REQ(D_INODE, req, "llog_init");
1725 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1726 rc = llog_origin_handle_create(req);
1728 case LLOG_ORIGIN_HANDLE_DESTROY:
1729 DEBUG_REQ(D_INODE, req, "llog_init");
1730 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1731 rc = llog_origin_handle_destroy(req);
1733 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1734 DEBUG_REQ(D_INODE, req, "llog next block");
1735 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1736 rc = llog_origin_handle_next_block(req);
1738 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1739 DEBUG_REQ(D_INODE, req, "llog prev block");
1740 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1741 rc = llog_origin_handle_prev_block(req);
1743 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1744 DEBUG_REQ(D_INODE, req, "llog read header");
1745 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1746 rc = llog_origin_handle_read_header(req);
1748 case LLOG_ORIGIN_HANDLE_CLOSE:
1749 DEBUG_REQ(D_INODE, req, "llog close");
1750 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1751 rc = llog_origin_handle_close(req);
1754 DEBUG_REQ(D_INODE, req, "llog catinfo");
1755 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1756 rc = llog_catinfo(req);
1759 req->rq_status = -ENOTSUPP;
1760 rc = ptlrpc_error(req);
1764 LASSERT(current->journal_info == NULL);
1766 /* If we're DISCONNECTing, the mds_export_data is already freed */
1767 if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != MDS_DISCONNECT) {
1768 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1770 /* It is unclear whether last_xid is used anywhere, so it is
1771 also unclear whether last_close_xid needs handling here. */
1772 lustre_msg_set_last_xid(req->rq_repmsg,
1773 le64_to_cpu(med->med_mcd->mcd_last_xid));
1775 target_committed_to_req(req);
1781 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
1782 if (obd && obd->obd_recovering) {
1783 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
1784 return target_queue_final_reply(req, rc);
1786 /* Lost a race with recovery; let the error path DTRT. */
1787 rc = req->rq_status = -ENOTCONN;
1790 target_send_reply(req, rc, fail);
1794 /* Update the server data on disk. This stores the new mount_count and
1795 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1796 * then the server last_rcvd value may be less than that of the clients.
1797 * This will alert us that we may need to do client recovery.
1799 * Also assumes that mds_last_transno is not being modified concurrently (no locking).
/* Persist the in-memory server state (mount count, last_transno) to the
 * last_rcvd file.  @force_sync forces a synchronous write; otherwise the
 * write may be buffered by fsfilt_write_record(). */
1801 int mds_update_server_data(struct obd_device *obd, int force_sync)
1803 struct mds_obd *mds = &obd->u.mds;
1804 struct lr_server_data *lsd = mds->mds_server_data;
1805 struct file *filp = mds->mds_rcvd_filp;
1806 struct lvfs_run_ctxt saved;
1811 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1812 mds->mds_mount_count, mds->mds_last_transno);
/* Snapshot last_transno under the transno lock so the on-disk copy is
 * internally consistent. */
1814 spin_lock(&mds->mds_transno_lock);
1815 lsd->lsd_last_transno = cpu_to_le64(mds->mds_last_transno);
1816 spin_unlock(&mds->mds_transno_lock);
/* Switch to the MDS filesystem context for the local write. */
1818 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1819 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off,force_sync);
1820 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1822 CERROR("error writing MDS server data: rc = %d\n", rc);
/* Scan the comma-separated mount-option string @options and set the
 * corresponding mds_fl_* flags: user_xattr/nouser_xattr and acl/noacl.
 * ACL options are honored only when CONFIG_FS_POSIX_ACL is built in. */
1827 static void fsoptions_to_mds_flags(struct mds_obd *mds, char *options)
/* Find the end of the current option token (up to the next comma). */
1837 while (*p && *p != ',')
1841 if (len == sizeof("user_xattr") - 1 &&
1842 memcmp(options, "user_xattr", len) == 0) {
1843 mds->mds_fl_user_xattr = 1;
1844 LCONSOLE_INFO("Enabling user_xattr\n");
1845 } else if (len == sizeof("nouser_xattr") - 1 &&
1846 memcmp(options, "nouser_xattr", len) == 0) {
1847 mds->mds_fl_user_xattr = 0;
1848 LCONSOLE_INFO("Disabling user_xattr\n");
1849 } else if (len == sizeof("acl") - 1 &&
1850 memcmp(options, "acl", len) == 0) {
1851 #ifdef CONFIG_FS_POSIX_ACL
1852 mds->mds_fl_acl = 1;
1853 LCONSOLE_INFO("Enabling ACL\n");
1855 CWARN("ignoring unsupported acl mount option\n");
1857 } else if (len == sizeof("noacl") - 1 &&
1858 memcmp(options, "noacl", len) == 0) {
1859 #ifdef CONFIG_FS_POSIX_ACL
1860 mds->mds_fl_acl = 0;
1861 LCONSOLE_INFO("Disabling ACL\n");
1869 /* mount the file system (secretly). lustre_cfg parameters are:
/* Bring up an MDS obd device.  Two mount paths exist: the new one reuses the
 * mount done by lustre_fill_super (server_get_mount succeeds); the old lctl
 * path mounts the backing filesystem itself via do_kern_mount().  After the
 * mount is obtained, this initializes locks/limits, the LDLM namespace with
 * the intent policy, the on-disk state (mds_fs_setup), the commit thread,
 * quota, the group upcall cache, the LOV connection (mds_postsetup) and
 * procfs, then prints the serving/recovery banner.  Error labels unwind in
 * reverse order of setup. */
1875 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1877 struct lprocfs_static_vars lvars;
1878 struct lustre_cfg* lcfg = buf;
1879 struct mds_obd *mds = &obd->u.mds;
1880 struct lustre_mount_info *lmi;
1881 struct vfsmount *mnt;
1882 struct obd_uuid uuid;
1884 char *options, *str, *label;
1890 /* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv */
/* The obd_trans_info union overlay requires these to coincide. */
1892 CLASSERT(offsetof(struct obd_device, u.obt) ==
1893 offsetof(struct obd_device, u.mds.mds_obt));
1895 if (lcfg->lcfg_bufcount < 3)
1896 RETURN(rc = -EINVAL);
1898 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
1899 RETURN(rc = -EINVAL);
1901 lmi = server_get_mount(obd->obd_name);
1903 /* We already mounted in lustre_fill_super.
1904 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1905 struct lustre_sb_info *lsi = s2lsi(lmi->lmi_sb);
1906 fsoptions_to_mds_flags(mds, lsi->lsi_ldd->ldd_mount_opts);
1907 fsoptions_to_mds_flags(mds, lsi->lsi_lmd->lmd_opts);
1909 obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
1911 /* old path - used by lctl */
1912 CERROR("Using old MDS mount method\n");
1913 page = __get_free_page(GFP_KERNEL);
1917 options = (char *)page;
1918 memset(options, 0, CFS_PAGE_SIZE);
1920 /* here we use "iopen_nopriv" hardcoded, because it affects
1921 * MDS utility and the rest of options are passed by mount
1922 * options. Probably this should be moved to somewhere else
1923 * like startup scripts or lconf. */
1924 strcpy(options, "iopen_nopriv");
1926 if (LUSTRE_CFG_BUFLEN(lcfg, 4) > 0 && lustre_cfg_buf(lcfg, 4)) {
1927 sprintf(options + strlen(options), ",%s",
1928 lustre_cfg_string(lcfg, 4));
1929 fsoptions_to_mds_flags(mds, options);
1932 mnt = do_kern_mount(lustre_cfg_string(lcfg, 2), 0,
1933 lustre_cfg_string(lcfg, 1),
1938 LCONSOLE_ERROR("Can't mount disk %s (%d)\n",
1939 lustre_cfg_string(lcfg, 1), rc);
1943 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
1945 if (IS_ERR(obd->obd_fsops))
1946 GOTO(err_put, rc = PTR_ERR(obd->obd_fsops));
1948 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
1950 LASSERT(!lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb)));
1952 sema_init(&mds->mds_epoch_sem, 1);
1953 spin_lock_init(&mds->mds_transno_lock);
/* Initial EA/cookie reply-buffer sizes; grown later as the LOV reports. */
1954 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
1955 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
1956 mds->mds_atime_diff = MAX_ATIME_DIFF;
1958 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
1959 obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER);
1960 if (obd->obd_namespace == NULL) {
1962 GOTO(err_ops, rc = -ENOMEM);
/* All intent lock enqueues on this namespace go through mds_intent_policy. */
1964 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
1966 rc = mds_fs_setup(obd, mnt);
1968 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
1973 rc = llog_start_commit_thread();
1977 if (lcfg->lcfg_bufcount >= 4 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
1980 generate_random_uuid(uuid);
1981 class_uuid_unparse(uuid, &mds->mds_lov_uuid);
1983 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
1984 if (mds->mds_profile == NULL)
1985 GOTO(err_fs, rc = -ENOMEM);
1987 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
1988 LUSTRE_CFG_BUFLEN(lcfg, 3));
1991 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1992 "mds_ldlm_client", &obd->obd_ldlm_client);
1993 obd->obd_replayable = 1;
1995 rc = lquota_setup(mds_quota_interface_ref, obd);
1999 mds->mds_group_hash = upcall_cache_init(obd->obd_name);
2000 if (IS_ERR(mds->mds_group_hash)) {
2001 rc = PTR_ERR(mds->mds_group_hash);
2002 mds->mds_group_hash = NULL;
2003 GOTO(err_qctxt, rc);
2006 /* Don't wait for mds_postrecov trying to clear orphans */
2007 obd->obd_async_recov = 1;
2008 rc = mds_postsetup(obd);
2009 /* Bug 11557 - allow async abort_recov start
2010 FIXME can remove most of this obd_async_recov plumbing
2011 obd->obd_async_recov = 0;
2014 GOTO(err_qctxt, rc);
2016 lprocfs_init_vars(mds, &lvars);
2017 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
2018 lprocfs_alloc_obd_stats(obd, LPROC_MDS_LAST) == 0) {
2019 /* Init private stats here */
2020 mds_stats_counter_init(obd->obd_stats);
2021 obd->obd_proc_exports = proc_mkdir("exports",
2022 obd->obd_proc_entry);
2025 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2026 if (uuid_ptr != NULL) {
2027 class_uuid_unparse(uuid_ptr, &uuid);
2033 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2034 if (obd->obd_recovering) {
2035 LCONSOLE_WARN("MDT %s now serving %s (%s%s%s), but will be in "
2036 "recovery until %d %s reconnect, or if no clients"
2037 " reconnect for %d:%.02d; during that time new "
2038 "clients will not be allowed to connect. "
2039 "Recovery progress can be monitored by watching "
2040 "/proc/fs/lustre/mds/%s/recovery_status.\n",
2041 obd->obd_name, lustre_cfg_string(lcfg, 1),
2042 label ?: "", label ? "/" : "", str,
2043 obd->obd_recoverable_clients,
2044 (obd->obd_recoverable_clients == 1) ?
2045 "client" : "clients",
2046 (int)(OBD_RECOVERY_TIMEOUT) / 60,
2047 (int)(OBD_RECOVERY_TIMEOUT) % 60,
2050 LCONSOLE_INFO("MDT %s now serving %s (%s%s%s) with recovery "
2051 "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
2052 label ?: "", label ? "/" : "", str,
2053 obd->obd_replayable ? "enabled" : "disabled");
/* Error unwinding: each label below undoes one successful setup step. */
2061 lquota_cleanup(mds_quota_interface_ref, obd);
2063 /* No extra cleanup needed for llog_init_commit_thread() */
2064 mds_fs_cleanup(obd);
2065 upcall_cache_cleanup(mds->mds_group_hash);
2066 mds->mds_group_hash = NULL;
2068 ldlm_namespace_free(obd->obd_namespace, 0);
2069 obd->obd_namespace = NULL;
2071 fsfilt_put_ops(obd->obd_fsops);
2074 server_put_mount(obd->obd_name, mnt);
2081 obd->u.obt.obt_sb = NULL;
/* Tear down the MDS->LOV linkage: drop the config profile, detach as the
 * LOV's observer, propagate force/fail shutdown flags, then disconnect and
 * manually clean up the LOV device. */
2085 static int mds_lov_clean(struct obd_device *obd)
2087 struct mds_obd *mds = &obd->u.mds;
2088 struct obd_device *osc = mds->mds_osc_obd;
2091 if (mds->mds_profile) {
2092 class_del_profile(mds->mds_profile);
/* NOTE(review): freed with strlen()+1 but allocated in mds_setup with
 * LUSTRE_CFG_BUFLEN(lcfg, 3) — presumably equal (buflen includes the
 * NUL); confirm the two sizes always match. */
2093 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
2094 mds->mds_profile = NULL;
2097 /* There better be a lov */
2101 RETURN(PTR_ERR(osc));
2103 obd_register_observer(osc, NULL);
2105 /* Give lov our same shutdown flags */
2106 osc->obd_force = obd->obd_force;
2107 osc->obd_fail = obd->obd_fail;
2109 /* Cleanup the lov */
2110 obd_disconnect(mds->mds_osc_exp);
2111 class_manual_cleanup(osc);
2112 mds->mds_osc_exp = NULL;
/* Second-stage setup run from mds_setup(): create the config and LOV-EA
 * llog contexts, then, if a config profile was recorded, look it up and
 * connect to the LOV it names.  On failure the llog contexts are cleaned
 * up via err_cleanup. */
2117 static int mds_postsetup(struct obd_device *obd)
2119 struct mds_obd *mds = &obd->u.mds;
2123 rc = llog_setup(obd, LLOG_CONFIG_ORIG_CTXT, obd, 0, NULL,
2128 rc = llog_setup(obd, LLOG_LOVEA_ORIG_CTXT, obd, 0, NULL,
2133 if (mds->mds_profile) {
2134 struct lustre_profile *lprof;
2135 /* The profile defines which osc and mdc to connect to, for a
2136 client. We reuse that here to figure out the name of the
2137 lov to use (and ignore lprof->lp_mdc).
2138 The profile was set in the config log with
2139 LCFG_MOUNTOPT profilenm oscnm mdcnm */
2140 lprof = class_get_profile(mds->mds_profile);
2141 if (lprof == NULL) {
2142 CERROR("No profile found: %s\n", mds->mds_profile);
2143 GOTO(err_cleanup, rc = -ENOENT);
2145 rc = mds_lov_connect(obd, lprof->lp_osc);
2147 GOTO(err_cleanup, rc);
2154 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2155 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
/* Post-recovery processing: push the next object id to the LOV, remove
 * orphans from the PENDING directory, notify the LOV (synchronously or
 * non-blocking depending on obd_async_recov), and kick off quota recovery.
 * Must only run once recovery has finished (asserted below). */
2159 int mds_postrecov(struct obd_device *obd)
2167 LASSERT(!obd->obd_recovering);
2168 LASSERT(llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT) != NULL);
2170 /* FIXME why not put this in the synchronize? */
2171 /* set nextid first, so we are sure it happens */
2172 rc = mds_lov_set_nextid(obd);
2174 CERROR("%s: mds_lov_set_nextid failed %d\n",
2179 /* clean PENDING dir */
2180 rc = mds_cleanup_pending(obd);
2184 /* FIXME Does target_finish_recovery really need this to block? */
2185 /* Notify the LOV, which will in turn call mds_notify for each tgt */
2186 /* This means that we have to hack obd_notify to think we're obd_set_up
2187 during mds_lov_connect. */
2188 obd_notify(obd->u.mds.mds_osc_obd, NULL,
2189 obd->obd_async_recov ? OBD_NOTIFY_SYNC_NONBLOCK :
2190 OBD_NOTIFY_SYNC, NULL);
2192 /* quota recovery */
2193 lquota_recovery(mds_quota_interface_ref, obd);
2199 /* We need to be able to stop an mds_lov_synchronize */
/* Early-abort in-flight LOV work during a forced/failed shutdown: no-op
 * unless a LOV is attached and obd_force or obd_fail is set. */
2200 static int mds_lov_early_clean(struct obd_device *obd)
2202 struct mds_obd *mds = &obd->u.mds;
2203 struct obd_device *osc = mds->mds_osc_obd;
2205 if (!osc || (!obd->obd_force && !obd->obd_fail))
2208 CDEBUG(D_HA, "abort inflight\n");
2209 return (obd_precleanup(osc, OBD_CLEANUP_EARLY));
/* Staged pre-cleanup dispatched by the obd framework.  EXPORTS: stop
 * recovery and abort in-flight LOV work; SELF_EXP: disconnect the LOV and
 * tear down the llog contexts; EARLY and OBD: nothing to do here. */
2212 static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2218 case OBD_CLEANUP_EARLY:
2220 case OBD_CLEANUP_EXPORTS:
2221 target_cleanup_recovery(obd);
2222 mds_lov_early_clean(obd);
2224 case OBD_CLEANUP_SELF_EXP:
2225 mds_lov_disconnect(obd);
2227 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2228 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
2229 rc = obd_llog_finish(obd, 0);
2231 case OBD_CLEANUP_OBD:
/* Final teardown of the MDS obd: flush server data to last_rcvd, release the
 * LOV export reference, procfs entries, quota, object-id table, on-disk
 * state, the group upcall cache, the mount, the LDLM namespace and the
 * fsfilt ops.  Also cancels a still-running recovery timer. */
2237 static int mds_cleanup(struct obd_device *obd)
2239 struct mds_obd *mds = &obd->u.mds;
2240 lvfs_sbdev_type save_dev;
2242 int must_relock = 0;
/* Nothing to clean if setup never mounted the backing fs. */
2245 if (obd->u.obt.obt_sb == NULL)
2247 save_dev = lvfs_sbdev(obd->u.obt.obt_sb);
2249 if (mds->mds_osc_exp)
2250 /* lov export was disconnected by mds_lov_clean;
2251 we just need to drop our ref */
2252 class_export_put(mds->mds_osc_exp);
2254 lprocfs_obd_cleanup(obd);
2255 lprocfs_free_obd_stats(obd);
2257 lquota_cleanup(mds_quota_interface_ref, obd);
/* Force a synchronous last_rcvd update before the fs goes away. */
2259 mds_update_server_data(obd, 1);
2260 if (mds->mds_lov_objids != NULL)
2261 OBD_FREE(mds->mds_lov_objids, mds->mds_lov_objids_size);
2262 mds_fs_cleanup(obd);
2264 upcall_cache_cleanup(mds->mds_group_hash);
2265 mds->mds_group_hash = NULL;
2267 must_put = server_put_mount(obd->obd_name, mds->mds_vfsmnt);
2268 /* must_put is for old method (l_p_m returns non-0 on err) */
2270 /* We can only unlock kernel if we are in the context of sys_ioctl,
2271 otherwise we never called lock_kernel */
2272 if (ll_kernel_locked()) {
2278 /* In case we didn't mount with lustre_get_mount -- old method*/
2279 mntput(mds->mds_vfsmnt);
2280 lvfs_clear_rdonly(save_dev);
2282 obd->u.obt.obt_sb = NULL;
2284 ldlm_namespace_free(obd->obd_namespace, obd->obd_force);
2286 spin_lock_bh(&obd->obd_processing_task_lock);
2287 if (obd->obd_recovering) {
2288 target_cancel_recovery_timer(obd);
2289 obd->obd_recovering = 0;
2291 spin_unlock_bh(&obd->obd_processing_task_lock);
2296 fsfilt_put_ops(obd->obd_fsops);
2298 LCONSOLE_INFO("MDT %s has stopped.\n", obd->obd_name);
/* For a RESENT enqueue, try to find the lock granted by the first attempt
 * (matching the client's remote handle on the export's held-locks list) and
 * restore its handle into @lockh (and a referenced copy into *old_lock).
 * If no matching lock exists and the xid doesn't match last_xid or
 * last_close_xid, the original request was never processed, so MSG_RESENT
 * is cleared and the request proceeds as a fresh one. */
2303 static void fixup_handle_for_resent_req(struct ptlrpc_request *req, int offset,
2304 struct ldlm_lock *new_lock,
2305 struct ldlm_lock **old_lock,
2306 struct lustre_handle *lockh)
2308 struct obd_export *exp = req->rq_export;
2309 struct ldlm_request *dlmreq =
2310 lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*dlmreq));
2311 struct lustre_handle remote_hdl = dlmreq->lock_handle1;
2312 struct list_head *iter;
2314 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
2317 spin_lock(&exp->exp_ldlm_data.led_lock);
2318 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
2319 struct ldlm_lock *lock;
2320 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
/* Skip the lock created for this (resent) enqueue itself. */
2321 if (lock == new_lock)
2323 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
2324 lockh->cookie = lock->l_handle.h_cookie;
2325 LDLM_DEBUG(lock, "restoring lock cookie");
2326 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
/* Caller receives a reference it must eventually put. */
2329 *old_lock = LDLM_LOCK_GET(lock);
2330 spin_unlock(&exp->exp_ldlm_data.led_lock);
2334 spin_unlock(&exp->exp_ldlm_data.led_lock);
2336 /* If the xid matches, then we know this is a resent request,
2337 * and allow it. (It's probably an OPEN, for which we don't
2340 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_xid))
2344 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_close_xid))
2347 /* This remote handle isn't enqueued, so we never received or
2348 * processed this request. Clear MSG_RESENT, because it can
2349 * be handled like any normal request now. */
2351 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2353 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/* Test whether disposition bit(s) @flag are set in the intent reply. */
2357 int intent_disposition(struct ldlm_reply *rep, int flag)
2361 return (rep->lock_policy_res1 & flag);
/* Set disposition bit(s) @flag in the intent reply for the client. */
2364 void intent_set_disposition(struct ldlm_reply *rep, int flag)
2368 rep->lock_policy_res1 |= flag;
/* LDLM intent policy for the MDS namespace (registered in mds_setup).
 * Executes the intent (open/create/getattr/lookup/unlink) embedded in an
 * enqueue request, then either aborts the lock (ELDLM_LOCK_ABORTED) or
 * replaces the client's prospective lock with the one actually taken by the
 * intent handler (ELDLM_LOCK_REPLACED), fixing up ownership, ASTs and the
 * remote handle so the client can use it directly. */
2371 static int mds_intent_policy(struct ldlm_namespace *ns,
2372 struct ldlm_lock **lockp, void *req_cookie,
2373 ldlm_mode_t mode, int flags, void *data)
2375 struct ptlrpc_request *req = req_cookie;
2376 struct ldlm_lock *lock = *lockp;
2377 struct ldlm_intent *it;
2378 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
2379 struct ldlm_reply *rep;
2380 struct lustre_handle lockh = { 0 };
2381 struct ldlm_lock *new_lock = NULL;
2382 int getattr_part = MDS_INODELOCK_UPDATE;
2383 int repsize[5] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
2384 [DLM_LOCKREPLY_OFF] = sizeof(struct ldlm_reply),
2385 [DLM_REPLY_REC_OFF] = sizeof(struct mds_body),
2386 [DLM_REPLY_REC_OFF+1] = mds->mds_max_mdsize };
2387 int repbufcnt = 4, rc;
2390 LASSERT(req != NULL);
2392 if (lustre_msg_bufcount(req->rq_reqmsg) <= DLM_INTENT_IT_OFF) {
2393 /* No intent was provided */
2394 rc = lustre_pack_reply(req, 2, repsize, NULL);
2399 it = lustre_swab_reqbuf(req, DLM_INTENT_IT_OFF, sizeof(*it),
2400 lustre_swab_ldlm_intent);
2402 CERROR("Intent missing\n");
2403 RETURN(req->rq_status = -EFAULT);
2406 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
/* Size the extra reply buffer by intent type: ACL data for lookups on
 * ACL-enabled exports, unlink cookies otherwise. */
2408 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
2409 (it->opc & (IT_OPEN | IT_GETATTR | IT_LOOKUP)))
2410 /* we should never allow OBD_CONNECT_ACL if not configured */
2411 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
2412 else if (it->opc & IT_UNLINK)
2413 repsize[repbufcnt++] = mds->mds_max_cookiesize;
2415 rc = lustre_pack_reply(req, repbufcnt, repsize, NULL);
2417 RETURN(req->rq_status = rc);
2419 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
2420 intent_set_disposition(rep, DISP_IT_EXECD);
2423 /* execute policy */
2424 switch ((long)it->opc) {
2426 case IT_CREAT|IT_OPEN:
2427 mds_counter_incr(req->rq_export, LPROC_MDS_OPEN);
2428 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock, NULL,
2430 /* XXX swab here to assert that an mds_open reint
2431 * packet is following */
2432 rep->lock_policy_res2 = mds_reint(req, DLM_INTENT_REC_OFF,
2435 /* We abort the lock if the lookup was negative and
2436 * we did not make it to the OPEN portion */
2437 if (!intent_disposition(rep, DISP_LOOKUP_EXECD))
2438 RETURN(ELDLM_LOCK_ABORTED);
2439 if (intent_disposition(rep, DISP_LOOKUP_NEG) &&
2440 !intent_disposition(rep, DISP_OPEN_OPEN))
2443 /* If there was an error of some sort or if we are not
2444 * returning any locks */
2445 if (rep->lock_policy_res2 ||
2446 !intent_disposition(rep, DISP_OPEN_LOCK))
2447 RETURN(ELDLM_LOCK_ABORTED);
2450 getattr_part = MDS_INODELOCK_LOOKUP;
2452 getattr_part |= MDS_INODELOCK_LOOKUP;
2453 OBD_COUNTER_INCREMENT(req->rq_export->exp_obd, getattr);
2455 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock,
2458 /* INODEBITS_INTEROP: if this lock was converted from a
2459 * plain lock (client does not support inodebits), then
2460 * child lock must be taken with both lookup and update
2461 * bits set for all operations.
2463 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS))
2464 getattr_part = MDS_INODELOCK_LOOKUP |
2465 MDS_INODELOCK_UPDATE;
2467 rep->lock_policy_res2 = mds_getattr_lock(req,DLM_INTENT_REC_OFF,
2468 getattr_part, &lockh);
2469 /* FIXME: LDLM can set req->rq_status. MDS sets
2470 policy_res{1,2} with disposition and status.
2471 - replay: returns 0 & req->status is old status
2472 - otherwise: returns req->status */
2473 if (intent_disposition(rep, DISP_LOOKUP_NEG))
2474 rep->lock_policy_res2 = 0;
2475 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
2476 rep->lock_policy_res2)
2477 RETURN(ELDLM_LOCK_ABORTED);
2478 if (req->rq_status != 0) {
2480 rep->lock_policy_res2 = req->rq_status;
2481 RETURN(ELDLM_LOCK_ABORTED);
2485 CERROR("Unhandled intent "LPD64"\n", it->opc);
2489 /* By this point, whatever function we called above must have either
2490 * filled in 'lockh', been an intent replay, or returned an error. We
2491 * want to allow replayed RPCs to not get a lock, since we would just
2492 * drop it below anyways because lock replay is done separately by the
2493 * client afterwards. For regular RPCs we want to give the new lock to
2494 * the client instead of whatever lock it was about to get. */
2495 if (new_lock == NULL)
2496 new_lock = ldlm_handle2lock(&lockh);
2497 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
2500 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
2501 it->opc, lockh.cookie);
2503 /* If we've already given this lock to a client once, then we should
2504 * have no readers or writers. Otherwise, we should have one reader
2505 * _or_ writer ref (which will be zeroed below) before returning the
2506 * lock to a client. */
2507 if (new_lock->l_export == req->rq_export) {
2508 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2510 LASSERT(new_lock->l_export == NULL);
2511 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2516 if (new_lock->l_export == req->rq_export) {
2517 /* Already gave this to the client, which means that we
2518 * reconstructed a reply. */
2519 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2521 RETURN(ELDLM_LOCK_REPLACED);
2524 /* Fixup the lock to be given to the client */
2525 lock_res_and_lock(new_lock);
2526 new_lock->l_readers = 0;
2527 new_lock->l_writers = 0;
2529 new_lock->l_export = class_export_get(req->rq_export);
2530 spin_lock(&req->rq_export->exp_ldlm_data.led_lock);
2531 list_add(&new_lock->l_export_chain,
2532 &new_lock->l_export->exp_ldlm_data.led_held_locks);
2533 spin_unlock(&req->rq_export->exp_ldlm_data.led_lock);
2535 new_lock->l_blocking_ast = lock->l_blocking_ast;
2536 new_lock->l_completion_ast = lock->l_completion_ast;
2538 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
2539 sizeof(lock->l_remote_handle));
2541 new_lock->l_flags &= ~LDLM_FL_LOCAL;
2543 unlock_res_and_lock(new_lock);
2544 LDLM_LOCK_PUT(new_lock);
2546 RETURN(ELDLM_LOCK_REPLACED);
/*
 * mdt_setup(): obd setup method for the "mdt" device type.
 *
 * Creates and starts the three MDS ptlrpc request services — the main
 * request service ("ll_mdt"), the setattr service and the readpage
 * service — sizes their thread pools, and starts the ping evictor.
 * On failure, services started so far are unregistered in reverse
 * order via the err_* labels before procfs is cleaned up.
 *
 * NOTE(review): this extract of the file is missing several original
 * lines (braces, the 'rc' declaration, ENTRY/RETURN, error labels);
 * confirm control flow against the full source before editing.
 */
2549 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
2551 struct mds_obd *mds = &obd->u.mds;
2552 struct lprocfs_static_vars lvars;
/* defaults let the thread pool auto-size between these bounds */
2553 int mds_min_threads = MDS_THREADS_AUTO_MIN;
2554 int mds_max_threads = MDS_THREADS_AUTO_MAX;
2558 lprocfs_init_vars(mdt, &lvars);
2559 lprocfs_obd_setup(obd, lvars.obd_vars);
/* mds_health_sem serializes the service pointers against concurrent
 * health checks and cleanup (see mdt_cleanup/mdt_health_check) */
2561 sema_init(&mds->mds_health_sem, 1);
2563 if (mds_num_threads) {
2564 /* If mds_num_threads is set, it is the min and the max. */
/* clamp the module parameter into the supported range */
2565 if (mds_num_threads > MDS_THREADS_MAX)
2566 mds_num_threads = MDS_THREADS_MAX;
2567 if (mds_num_threads < MDS_THREADS_MIN)
2568 mds_num_threads = MDS_THREADS_MIN;
2569 mds_max_threads = mds_min_threads = mds_num_threads;
/* main metadata request service on MDS_REQUEST_PORTAL */
2573 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2574 MDS_MAXREPSIZE, MDS_REQUEST_PORTAL,
2575 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_TIMEOUT,
2576 mds_handle, LUSTRE_MDS_NAME,
2577 obd->obd_proc_entry, NULL,
2578 mds_min_threads, mds_max_threads, "ll_mdt");
2580 if (!mds->mds_service) {
2581 CERROR("failed to start service\n");
2582 GOTO(err_lprocfs, rc = -ENOMEM);
2585 rc = ptlrpc_start_threads(obd, mds->mds_service);
2587 GOTO(err_thread, rc);
/* dedicated setattr service so setattr RPCs cannot be starved by
 * (or deadlock behind) regular requests */
2589 mds->mds_setattr_service =
2590 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2591 MDS_MAXREPSIZE, MDS_SETATTR_PORTAL,
2592 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_TIMEOUT,
2593 mds_handle, "mds_setattr",
2594 obd->obd_proc_entry, NULL,
2595 mds_min_threads, mds_max_threads,
2597 if (!mds->mds_setattr_service) {
/* NOTE(review): message says "getattr" but this is the setattr
 * service — likely a copy/paste error in the error string */
2598 CERROR("failed to start getattr service\n");
2599 GOTO(err_thread, rc = -ENOMEM);
2602 rc = ptlrpc_start_threads(obd, mds->mds_setattr_service);
2604 GOTO(err_thread2, rc);
/* readpage (directory readdir) service on its own portal */
2606 mds->mds_readpage_service =
2607 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2608 MDS_MAXREPSIZE, MDS_READPAGE_PORTAL,
2609 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_TIMEOUT,
2610 mds_handle, "mds_readpage",
2611 obd->obd_proc_entry, NULL,
2612 MDS_THREADS_MIN_READPAGE, mds_max_threads,
2614 if (!mds->mds_readpage_service) {
2615 CERROR("failed to start readpage service\n");
2616 GOTO(err_thread2, rc = -ENOMEM);
2619 rc = ptlrpc_start_threads(obd, mds->mds_readpage_service);
2622 GOTO(err_thread3, rc);
2624 ping_evictor_start();
/* error unwind: tear down services in reverse order of creation,
 * NULLing each pointer so health checks see a consistent state */
2629 ptlrpc_unregister_service(mds->mds_readpage_service);
2630 mds->mds_readpage_service = NULL;
2632 ptlrpc_unregister_service(mds->mds_setattr_service);
2633 mds->mds_setattr_service = NULL;
2635 ptlrpc_unregister_service(mds->mds_service);
2636 mds->mds_service = NULL;
2638 lprocfs_obd_cleanup(obd);
/*
 * mdt_cleanup(): tear down the "mdt" obd type.
 *
 * Stops the ping evictor, then unregisters the three ptlrpc services
 * while holding mds_health_sem so a concurrent mdt_health_check()
 * never dereferences a half-torn-down service pointer; finally removes
 * the procfs entries.
 */
2642 static int mdt_cleanup(struct obd_device *obd)
2644 struct mds_obd *mds = &obd->u.mds;
2647 ping_evictor_stop();
/* hold the health semaphore across teardown — mirrors the locking in
 * mdt_health_check() */
2649 down(&mds->mds_health_sem);
2650 ptlrpc_unregister_service(mds->mds_readpage_service);
2651 ptlrpc_unregister_service(mds->mds_setattr_service);
2652 ptlrpc_unregister_service(mds->mds_service);
/* NULL the pointers so any later health check sees "no service" */
2653 mds->mds_readpage_service = NULL;
2654 mds->mds_setattr_service = NULL;
2655 mds->mds_service = NULL;
2656 up(&mds->mds_health_sem);
2658 lprocfs_obd_cleanup(obd);
/*
 * mdt_health_check(): obd health-check method for the "mdt" type.
 *
 * ORs together the health status of the three ptlrpc services under
 * mds_health_sem (which serializes against mdt_cleanup()).
 *
 * NOTE(review): the declaration/initialization of 'rc' and the final
 * RETURN are missing from this extract — verify in the full source.
 */
2663 static int mdt_health_check(struct obd_device *obd)
2665 struct mds_obd *mds = &obd->u.mds;
2668 down(&mds->mds_health_sem);
2669 rc |= ptlrpc_service_health_check(mds->mds_readpage_service);
2670 rc |= ptlrpc_service_health_check(mds->mds_setattr_service);
2671 rc |= ptlrpc_service_health_check(mds->mds_service);
2672 up(&mds->mds_health_sem);
2675 * health_check to return 0 on healthy
2676 * and 1 on unhealthy.
/*
 * mds_lvfs_fid2dentry(): lvfs callback translating an (id, generation)
 * pair into a dentry by delegating to mds_fid2dentry() on the mds obd
 * passed through the opaque 'data' cookie (installed via mds_lvfs_ops).
 *
 * NOTE(review): the local fid declaration and the 'fid.id = id'
 * assignment appear to be missing from this extract; the 'gr' (group)
 * parameter looks unused here — confirm against the full source.
 */
2684 static struct dentry *mds_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
2687 struct obd_device *obd = data;
2690 fid.generation = gen;
2691 return mds_fid2dentry(&obd->u.mds, &fid, NULL);
/*
 * mds_health_check(): obd health-check method for the "mds" type.
 *
 * Reports unhealthy if the backing filesystem went read-only (MS_RDONLY
 * on the superblock) and otherwise probes I/O health by writing through
 * the pre-opened health-check file.
 *
 * NOTE(review): the 'rc' initialization and the early-return for the
 * read-only case are missing from this extract — verify control flow
 * in the full source.
 */
2694 static int mds_health_check(struct obd_device *obd)
2696 struct obd_device_target *odt = &obd->u.obt;
2697 struct mds_obd *mds = &obd->u.mds;
/* a read-only superblock means the backend hit an error */
2700 if (odt->obt_sb->s_flags & MS_RDONLY)
2703 LASSERT(mds->mds_health_check_filp != NULL);
/* !! normalizes the helper's result to 0 (healthy) / 1 (unhealthy) */
2704 rc |= !!lvfs_check_io_health(obd, mds->mds_health_check_filp);
/*
 * mds_process_config(): obd config-log handler for the "mds" type.
 *
 * Interprets 'buf' as a lustre_cfg record and applies any PARAM_MDT
 * tunables to this obd's /proc variables via
 * class_process_proc_param().
 */
2709 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
2711 struct lustre_cfg *lcfg = buf;
2712 struct lprocfs_static_vars lvars;
2715 lprocfs_init_vars(mds, &lvars);
2717 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, lcfg, obd);
/* lvfs callbacks for the MDS: lets generic lvfs code resolve fids to
 * dentries without knowing about the mds layer.
 * NOTE(review): uses the obsolete GNU 'field:' initializer syntax; the
 * C99 equivalent is '.l_fid2dentry = ...'. */
2722 struct lvfs_callback_ops mds_lvfs_ops = {
2723 l_fid2dentry: mds_lvfs_fid2dentry,
2726 /* use obd ops to offer management infrastructure */
2727 static struct obd_ops mds_obd_ops = {
2728 .o_owner = THIS_MODULE,
/* export/connection lifecycle */
2729 .o_connect = mds_connect,
2730 .o_reconnect = mds_reconnect,
2731 .o_init_export = mds_init_export,
2732 .o_destroy_export = mds_destroy_export,
2733 .o_disconnect = mds_disconnect,
/* device lifecycle */
2734 .o_setup = mds_setup,
2735 .o_precleanup = mds_precleanup,
2736 .o_cleanup = mds_cleanup,
2737 .o_postrecov = mds_postrecov,
/* statfs/ioctl and object ops used for the OSC-facing side */
2738 .o_statfs = mds_obd_statfs,
2739 .o_iocontrol = mds_iocontrol,
2740 .o_create = mds_obd_create,
2741 .o_destroy = mds_obd_destroy,
/* llog, notification, health and config-log plumbing */
2742 .o_llog_init = mds_llog_init,
2743 .o_llog_finish = mds_llog_finish,
2744 .o_notify = mds_notify,
2745 .o_health_check = mds_health_check,
2746 .o_process_config = mds_process_config,
/* obd ops for the "mdt" service type: only setup/cleanup/health — the
 * mdt device just hosts the ptlrpc services, while request handling is
 * shared via mds_handle(). */
2749 static struct obd_ops mdt_obd_ops = {
2750 .o_owner = THIS_MODULE,
2751 .o_setup = mdt_setup,
2752 .o_cleanup = mdt_cleanup,
2753 .o_health_check = mdt_health_check,
/* Reference to the quota interface exported by the optional 'lquota'
 * module; NULL when that module is absent.  Acquired in mds_init() via
 * PORTAL_SYMBOL_GET and dropped in mds_exit(). */
2756 quota_interface_t *mds_quota_interface_ref;
2757 extern quota_interface_t mds_quota_interface;
/*
 * mds_init(): module entry point.
 *
 * Loads the optional 'lquota' module and pins its interface symbol,
 * initializes quota support, wires the quota methods into mds_obd_ops,
 * then registers both the "mds" and "mdt" obd types with the class
 * subsystem.
 *
 * NOTE(review): the 'rc' declaration and the error-path/return lines
 * are missing from this extract — verify in the full source.
 */
2759 static int __init mds_init(void)
2762 struct lprocfs_static_vars lvars;
/* best-effort: quota support is optional, so a missing module just
 * leaves mds_quota_interface_ref NULL */
2764 request_module("lquota");
2765 mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
2766 rc = lquota_init(mds_quota_interface_ref);
/* on lquota_init() failure, drop the pinned symbol again */
2768 if (mds_quota_interface_ref)
2769 PORTAL_SYMBOL_PUT(mds_quota_interface);
2772 init_obd_quota_ops(mds_quota_interface_ref, &mds_obd_ops);
/* register the two device types this module provides */
2774 lprocfs_init_vars(mds, &lvars);
2775 class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2776 lprocfs_init_vars(mdt, &lvars);
2777 class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
/*
 * mds_exit(): module exit point — undoes mds_init() in reverse order:
 * shuts down quota, drops the pinned lquota symbol, and unregisters
 * the "mds" and "mdt" obd types.
 */
2782 static void /*__exit*/ mds_exit(void)
2784 lquota_exit(mds_quota_interface_ref);
/* only PUT the symbol if the GET in mds_init() succeeded */
2785 if (mds_quota_interface_ref)
2786 PORTAL_SYMBOL_PUT(mds_quota_interface);
2788 class_unregister_type(LUSTRE_MDS_NAME);
2789 class_unregister_type(LUSTRE_MDT_NAME);
2792 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2793 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2794 MODULE_LICENSE("GPL");
2796 module_init(mds_init);
2797 module_exit(mds_exit);