1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mds/handler.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Phil Schwan <phil@clusterfs.com>
41 * Author: Mike Shaver <shaver@clusterfs.com>
45 # define EXPORT_SYMTAB
47 #define DEBUG_SUBSYSTEM S_MDS
49 #include <lustre_mds.h>
50 #include <linux/module.h>
51 #include <linux/init.h>
52 #include <linux/random.h>
54 #include <linux/jbd.h>
55 # include <linux/smp_lock.h>
56 # include <linux/buffer_head.h>
57 # include <linux/workqueue.h>
58 # include <linux/mount.h>
60 #include <obd_class.h>
61 #include <lustre_dlm.h>
63 #include <lustre_fsfilt.h>
64 #include <lprocfs_status.h>
65 #include <lustre_quota.h>
66 #include <lustre_disk.h>
67 #include <lustre_param.h>
69 #include "mds_internal.h"
72 CFS_MODULE_PARM(mds_num_threads, "i", int, 0444,
73 "number of MDS service threads to start");
75 static int mds_intent_policy(struct ldlm_namespace *ns,
76 struct ldlm_lock **lockp, void *req_cookie,
77 ldlm_mode_t mode, int flags, void *data);
78 static int mds_postsetup(struct obd_device *obd);
79 static int mds_cleanup(struct obd_device *obd);
81 /* Assumes caller has already pushed into the kernel filesystem context */
82 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
83 loff_t offset, int count)
85 struct ptlrpc_bulk_desc *desc;
86 struct obd_export *exp = req->rq_export;
87 struct l_wait_info lwi;
90 int rc = 0, npages, i, tmpcount, tmpsize = 0;
93 LASSERT((offset & ~CFS_PAGE_MASK) == 0); /* I'm dubious about this */
95 npages = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
96 OBD_ALLOC(pages, sizeof(*pages) * npages);
98 GOTO(out, rc = -ENOMEM);
100 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
103 GOTO(out_free, rc = -ENOMEM);
105 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
106 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
108 OBD_PAGE_ALLOC(pages[i], CFS_ALLOC_STD);
109 if (pages[i] == NULL)
110 GOTO(cleanup_buf, rc = -ENOMEM);
112 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
115 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
116 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
117 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
118 tmpsize, offset, file->f_dentry->d_inode->i_ino,
119 i_size_read(file->f_dentry->d_inode));
121 rc = fsfilt_readpage(exp->exp_obd, file,
122 kmap(pages[i]), tmpsize, &offset);
126 GOTO(cleanup_buf, rc = -EIO);
129 LASSERT(desc->bd_nob == count);
131 rc = ptlrpc_start_bulk_transfer(desc);
133 GOTO(cleanup_buf, rc);
135 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
136 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
137 OBD_FAIL_MDS_SENDPAGE, rc);
138 GOTO(abort_bulk, rc);
141 timeout = (int)req->rq_deadline - (int)cfs_time_current_sec();
143 CERROR("Req deadline already passed %lu (now: %lu)\n",
144 req->rq_deadline, cfs_time_current_sec());
146 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(max(timeout, 1)),
147 cfs_time_seconds(1), NULL, NULL);
148 rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc) ||
149 exp->exp_failed || exp->exp_abort_active_req, &lwi);
150 LASSERT (rc == 0 || rc == -ETIMEDOUT);
153 if (desc->bd_success &&
154 desc->bd_nob_transferred == count)
155 GOTO(cleanup_buf, rc);
157 if (exp->exp_abort_active_req || exp->exp_failed)
158 GOTO(abort_bulk, rc);
161 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
162 (rc == -ETIMEDOUT) ? "timeout" : "network error",
163 desc->bd_nob_transferred, count,
164 req->rq_export->exp_client_uuid.uuid,
165 req->rq_export->exp_connection->c_remote_uuid.uuid);
167 class_fail_export(req->rq_export);
171 ptlrpc_abort_bulk(desc);
173 for (i = 0; i < npages; i++)
175 OBD_PAGE_FREE(pages[i]);
177 ptlrpc_free_bulk(desc);
179 OBD_FREE(pages, sizeof(*pages) * npages);
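/*
 * Illustrative sketch, not part of the original code: mds_sendpage() relies
 * on the caller having pushed the kernel filesystem context, as mds_readpage()
 * below does.  The expected calling pattern is roughly:
 *
 *      push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
 *      file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
 *      rc = mds_sendpage(req, file, offset, count);
 *      filp_close(file, 0);
 *      pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
 *
 * where "offset" must be page aligned (see the LASSERT above) and "count" is
 * the number of bytes pushed to the client over the bulk descriptor.  The
 * variable names are the ones mds_readpage() uses; error handling is omitted.
 */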
184 /* only valid locked dentries or errors should be returned */
185 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
186 struct vfsmount **mnt, int lock_mode,
187 struct lustre_handle *lockh,
188 char *name, int namelen, __u64 lockpart)
190 struct mds_obd *mds = &obd->u.mds;
191 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
192 struct ldlm_res_id res_id = { .name = {0} };
193 int flags = LDLM_FL_ATOMIC_CB, rc;
194 ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
200 res_id.name[0] = de->d_inode->i_ino;
201 res_id.name[1] = de->d_inode->i_generation;
202 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id,
203 LDLM_IBITS, &policy, lock_mode, &flags,
204 ldlm_blocking_ast, ldlm_completion_ast,
205 NULL, NULL, 0, NULL, lockh);
206 if (rc != ELDLM_OK) {
208 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
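/*
 * Illustrative sketch, not part of the original code: a caller such as
 * mds_getattr_lock() below uses this helper roughly as follows, assuming
 * LCK_CR as the lock mode (the mode used in its cleanup path):
 *
 *      dchild = mds_fid2locked_dentry(obd, &body->fid2, NULL, LCK_CR,
 *                                     child_lockh, NULL, 0, child_part);
 *      if (IS_ERR(dchild))
 *              GOTO(cleanup, rc = PTR_ERR(dchild));
 *      ... use dchild->d_inode under the inodebits lock ...
 *      ldlm_lock_decref(child_lockh, LCK_CR);
 *
 * The caller owns both the dget'd dentry and the lock reference; it must
 * either decref the lock as above or hand it back to the client, and it is
 * assumed to dput the dentry in its own cleanup path.
 */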
214 /* Look up an entry by inode number. */
215 /* this function ONLY returns valid dget'd dentries with an initialized inode
217 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
218 struct vfsmount **mnt)
220 struct obd_device *obd = container_of(mds, struct obd_device, u.mds);
222 unsigned long ino = fid->id;
223 __u32 generation = fid->generation;
225 struct dentry *result;
228 RETURN(ERR_PTR(-ESTALE));
230 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
232 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino/gen %lu/%u, sb %p\n",
233 ino, generation, mds->mds_obt.obt_sb);
235 /* under ext3 this is neither supposed to return bad inodes
237 result = mds_lookup(obd, fid_name, mds->mds_fid_de, strlen(fid_name));
241 inode = result->d_inode;
243 RETURN(ERR_PTR(-ENOENT));
245 if (inode->i_nlink == 0) {
246 if (inode->i_mode == 0 &&
247 LTIME_S(inode->i_ctime) == 0 ) {
248 LCONSOLE_WARN("Found inode with zero nlink, mode and "
249 "ctime -- this may indicate disk"
250 "corruption (device %s, inode %lu, link:"
251 " %lu, count: %d)\n", obd->obd_name, inode->i_ino,
252 (unsigned long)inode->i_nlink,
253 atomic_read(&inode->i_count));
256 RETURN(ERR_PTR(-ENOENT));
259 if (generation && inode->i_generation != generation) {
260 /* we didn't find the right inode.. */
261 CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
262 "count: %d, generation %u/%u\n", inode->i_ino,
263 (unsigned long)inode->i_nlink,
264 atomic_read(&inode->i_count), inode->i_generation,
267 RETURN(ERR_PTR(-ENOENT));
271 *mnt = mds->mds_vfsmnt;
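/*
 * Illustrative sketch, not part of the original code: the typical caller
 * pattern, as used by mds_getattr(), mds_sync() and mds_readpage() in this
 * file, is:
 *
 *      de = mds_fid2dentry(mds, &body->fid1, NULL);
 *      if (IS_ERR(de))
 *              GOTO(out, rc = PTR_ERR(de));
 *      ... use de->d_inode ...
 *
 * Stale fids, unlinked inodes and generation mismatches come back as
 * ERR_PTR(-ESTALE) or ERR_PTR(-ENOENT) rather than as a bad dentry.  The
 * matching dput of the returned reference in the caller's cleanup path is
 * assumed here and not shown.
 */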
278 static int mds_connect_internal(struct obd_export *exp,
279 struct obd_connect_data *data)
281 struct obd_device *obd = exp->exp_obd;
283 data->ocd_connect_flags &= MDS_CONNECT_SUPPORTED;
284 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
286         /* If there are no known bits (which should not happen, as every client
287            should support at least the LOOKUP and UPDATE bits), revert to
288            compatibility mode with plain locks. */
289 if (!data->ocd_ibits_known &&
290 data->ocd_connect_flags & OBD_CONNECT_IBITS)
291 data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
293 if (!obd->u.mds.mds_fl_acl)
294 data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
296 if (!obd->u.mds.mds_fl_user_xattr)
297 data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
299 exp->exp_connect_flags = data->ocd_connect_flags;
300 data->ocd_version = LUSTRE_VERSION_CODE;
301 exp->exp_mds_data.med_ibits_known = data->ocd_ibits_known;
304 if (obd->u.mds.mds_fl_acl &&
305 ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
306 CWARN("%s: MDS requires ACL support but client does not\n",
313 static int mds_reconnect(struct obd_export *exp, struct obd_device *obd,
314 struct obd_uuid *cluuid,
315 struct obd_connect_data *data,
321 if (exp == NULL || obd == NULL || cluuid == NULL)
324 rc = mds_connect_internal(exp, data);
326 mds_export_stats_init(obd, exp, 1, localdata);
331 /* Establish a connection to the MDS.
333 * This will set up an export structure for the client to hold state data
334 * about that client, like open files, the last operation number it did
335 * on the server, etc.
337 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
338 struct obd_uuid *cluuid, struct obd_connect_data *data,
341 struct obd_export *exp;
342 struct mds_export_data *med;
343 struct lsd_client_data *lcd = NULL;
347 if (!conn || !obd || !cluuid)
350 /* Check for aborted recovery. */
351 target_recovery_check_and_stop(obd);
353 /* XXX There is a small race between checking the list and adding a
354 * new connection for the same UUID, but the real threat (list
355 * corruption when multiple different clients connect) is solved.
357 * There is a second race between adding the export to the list,
358 * and filling in the client data below. Hence skipping the case
359 * of NULL lcd above. We should already be controlling multiple
360 * connects at the client, and we can't hold the spinlock over
361 * memory allocations without risk of deadlocking.
363 rc = class_connect(conn, obd, cluuid);
366 exp = class_conn2export(conn);
368 med = &exp->exp_mds_data;
370 rc = mds_connect_internal(exp, data);
376 GOTO(out, rc = -ENOMEM);
378 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
381 rc = mds_client_add(obd, exp, -1, localdata);
390 class_disconnect(exp);
392         /* release nid stat reference */
392 lprocfs_exp_cleanup(exp);
394 class_export_put(exp);
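/*
 * Illustrative example, not part of the original code: the flag negotiation
 * in mds_connect_internal() is an intersection of what the client asked for
 * with what this MDS supports and has enabled.  For instance:
 *
 *      client sends    : OBD_CONNECT_IBITS | OBD_CONNECT_ACL | OBD_CONNECT_XATTR
 *      mds_fl_acl == 0 : OBD_CONNECT_ACL is masked out
 *      negotiated      : data->ocd_connect_flags and exp->exp_connect_flags
 *                        both end up as OBD_CONNECT_IBITS | OBD_CONNECT_XATTR
 *
 * The same obd_connect_data is returned to the client in the connect reply,
 * so the client can see which requested features were dropped.
 */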
400 int mds_init_export(struct obd_export *exp)
402 struct mds_export_data *med = &exp->exp_mds_data;
405 INIT_LIST_HEAD(&med->med_open_head);
406 spin_lock_init(&med->med_open_lock);
408 spin_lock(&exp->exp_lock);
409 exp->exp_connecting = 1;
410 spin_unlock(&exp->exp_lock);
412 RETURN(ldlm_init_export(exp));
415 static int mds_destroy_export(struct obd_export *exp)
419 target_destroy_export(exp);
420 ldlm_destroy_export(exp);
422 LASSERT(list_empty(&exp->exp_mds_data.med_open_head));
423 mds_client_free(exp);
428 static int mds_cleanup_mfd(struct obd_export *exp)
430 struct mds_export_data *med;
431 struct obd_device *obd = exp->exp_obd;
432 struct mds_obd *mds = &obd->u.mds;
433 struct lvfs_run_ctxt saved;
434 struct lov_mds_md *lmm;
435 __u32 lmm_sz, cookie_sz;
436 struct llog_cookie *logcookies;
437 struct list_head closing_list;
438 struct mds_file_data *mfd, *n;
442 med = &exp->exp_mds_data;
444 spin_lock(&med->med_open_lock);
445 if (list_empty(&med->med_open_head)) {
446 spin_unlock(&med->med_open_lock);
450 CFS_INIT_LIST_HEAD(&closing_list);
451 while (!list_empty(&med->med_open_head)) {
452 struct list_head *tmp = med->med_open_head.next;
453 struct mds_file_data *mfd =
454 list_entry(tmp, struct mds_file_data, mfd_list);
456 /* Remove mfd handle so it can't be found again.
457 * We are consuming the mfd_list reference here. */
458 mds_mfd_unlink(mfd, 0);
459 list_add_tail(&mfd->mfd_list, &closing_list);
461 spin_unlock(&med->med_open_lock);
463 lmm_sz = mds->mds_max_mdsize;
464 OBD_ALLOC(lmm, lmm_sz);
466 CWARN("%s: allocation failure during cleanup; can not force "
467 "close file handles on this service.\n", obd->obd_name);
468 GOTO(out, rc = -ENOMEM);
471 cookie_sz = mds->mds_max_cookiesize;
472 OBD_ALLOC(logcookies, cookie_sz);
473 if (logcookies == NULL) {
474 CWARN("%s: allocation failure during cleanup; can not force "
475 "close file handles on this service.\n", obd->obd_name);
476 OBD_FREE(lmm, lmm_sz);
477 GOTO(out, rc = -ENOMEM);
480 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
481 /* Close any open files (which may also cause orphan unlinking). */
482 list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
483 int lmm_size = lmm_sz;
484 umode_t mode = mfd->mfd_dentry->d_inode->i_mode;
487 /* If you change this message, be sure to update
488 * replay_single:test_46 */
489 CDEBUG(D_INODE|D_IOCTL, "%s: force closing file handle for "
490 "%.*s (ino %lu)\n", obd->obd_name,
491 mfd->mfd_dentry->d_name.len,mfd->mfd_dentry->d_name.name,
492 mfd->mfd_dentry->d_inode->i_ino);
494 rc = mds_get_md(obd, mfd->mfd_dentry->d_inode, lmm,
497 CWARN("mds_get_md failure, rc=%d\n", rc);
499 valid |= OBD_MD_FLEASIZE;
501 /* child orphan sem protects orphan_dec_test and
502 * is_orphan race, mds_mfd_close drops it */
503 MDS_DOWN_WRITE_ORPHAN_SEM(mfd->mfd_dentry->d_inode);
505 list_del_init(&mfd->mfd_list);
506 rc = mds_mfd_close(NULL, REQ_REC_OFF, obd, mfd,
507 !(exp->exp_flags & OBD_OPT_FAILOVER),
508 lmm, lmm_size, logcookies,
509 mds->mds_max_cookiesize,
513 CDEBUG(D_INODE|D_IOCTL, "Error closing file: %d\n", rc);
515 if (valid & OBD_MD_FLCOOKIE) {
516 rc = mds_osc_destroy_orphan(obd, mode, lmm,
517 lmm_size, logcookies, 1);
519 CDEBUG(D_INODE, "%s: destroy of orphan failed,"
520 " rc = %d\n", obd->obd_name, rc);
523 valid &= ~OBD_MD_FLCOOKIE;
527 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
528 OBD_FREE(logcookies, cookie_sz);
529 OBD_FREE(lmm, lmm_sz);
534 static int mds_disconnect(struct obd_export *exp)
540 class_export_get(exp);
542 /* Disconnect early so that clients can't keep using export */
543 rc = class_disconnect(exp);
544 if (exp->exp_obd->obd_namespace != NULL)
545 ldlm_cancel_locks_for_export(exp);
548         /* release nid stat reference */
548 lprocfs_exp_cleanup(exp);
550 rc = mds_cleanup_mfd(exp);
552 class_export_put(exp);
556 static int mds_getstatus(struct ptlrpc_request *req)
558 struct mds_obd *mds = mds_req2mds(req);
559 struct mds_body *body;
560 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
563 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_PACK, req->rq_status = -ENOMEM);
564 rc = lustre_pack_reply(req, 2, size, NULL);
566 RETURN(req->rq_status = rc);
568 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
569 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
571 /* the last_committed and last_xid fields are filled in for all
572 * replies already - no need to do so here also.
577 /* get the LOV EA from @inode and store it into @md. It can be at most
578 * @size bytes, and @size is updated with the actual EA size.
579 * The EA size is also returned on success, and -ve errno on failure.
580 * If there is no EA then 0 is returned. */
581 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
582 int *size, int lock, int flags,
589 LOCK_INODE_MUTEX(inode);
590 rc = fsfilt_get_md(obd, inode, md, *size, "lov");
592 if (rc == 0 && flags == MDS_GETATTR)
593 rc = mds_get_default_md(obd, md, &lmm_size);
596 CERROR("Error %d reading eadata for ino %lu\n",
600 rc = mds_convert_lov_ea(obd, inode, md, lmm_size,
613 UNLOCK_INODE_MUTEX(inode);
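/*
 * Illustrative sketch, not part of the original code: the @size argument is
 * both input and output, as mds_pack_md() below uses it (trailing arguments
 * abbreviated with "..."):
 *
 *      lmm_size = lustre_msg_buflen(msg, offset);     room reserved in reply
 *      rc = mds_get_md(obd, inode, lmm, &lmm_size, lock, flags, ...);
 *      if (rc > 0)
 *              body->eadatasize = lmm_size;           actual EA size written back
 *      else if (rc == 0)
 *              the inode has no EA and nothing is packed
 *      else
 *              rc is a negative errno from the fsfilt layer
 */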
619 /* Call with lock=1 if you want mds_pack_md to take the i_mutex.
620 * Call with lock=0 if the caller has already taken the i_mutex. */
621 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
622 struct mds_body *body, struct inode *inode, int lock, int flags,
625 struct mds_obd *mds = &obd->u.mds;
631 lmm = lustre_msg_buf(msg, offset, 0);
633 /* Some problem with getting eadata when I sized the reply
635 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
639         /* if this is a replay request, silently exit without filling in the md */
640 lmm_size = lustre_msg_buflen(msg, offset);
644 /* I don't really like this, but it is a sanity check on the client
645 * MD request. However, if the client doesn't know how much space
646 * to reserve for the MD, it shouldn't be bad to have too much space.
648 if (lmm_size > mds->mds_max_mdsize) {
649 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
650 inode->i_ino, lmm_size, mds->mds_max_mdsize);
654 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock, flags,
657 if (S_ISDIR(inode->i_mode))
658 body->valid |= OBD_MD_FLDIREA;
660 body->valid |= OBD_MD_FLEASIZE;
661 body->eadatasize = lmm_size;
668 #ifdef CONFIG_FS_POSIX_ACL
670 int mds_pack_posix_acl(struct inode *inode, struct lustre_msg *repmsg,
671 struct mds_body *repbody, int repoff)
673 struct dentry de = { .d_inode = inode };
677 LASSERT(repbody->aclsize == 0);
678 LASSERT(lustre_msg_bufcount(repmsg) > repoff);
680 buflen = lustre_msg_buflen(repmsg, repoff);
684 if (!inode->i_op || !inode->i_op->getxattr)
687 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
688 lustre_msg_buf(repmsg, repoff, buflen),
691 repbody->aclsize = rc;
692 } else if (rc != -ENODATA) {
693 CERROR("buflen %d, get acl: %d\n", buflen, rc);
698 repbody->valid |= OBD_MD_FLACL;
702 #define mds_pack_posix_acl(inode, repmsg, repbody, repoff) 0
705 int mds_pack_acl(struct mds_export_data *med, struct inode *inode,
706 struct lustre_msg *repmsg, struct mds_body *repbody,
709 return mds_pack_posix_acl(inode, repmsg, repbody, repoff);
712 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
713 struct ptlrpc_request *req,
714 struct mds_body *reqbody, int reply_off)
716 struct mds_body *body;
717 struct inode *inode = dentry->d_inode;
725 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
726 LASSERT(body != NULL); /* caller prepped reply */
728 body->flags = reqbody->flags; /* copy MDS_BFLAG_EXT_FLAGS if present */
729 mds_pack_inode2body(body, inode);
732 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
733 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
734 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR &&
735 ((S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))))
738 rc = mds_pack_md(obd, req->rq_repmsg, reply_off, body,
740 req->rq_export->exp_connect_flags);
742 /* If we have LOV EA data, the OST holds size, atime, mtime */
743 if (!(body->valid & OBD_MD_FLEASIZE) &&
744 !(body->valid & OBD_MD_FLDIREA))
745 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
746 OBD_MD_FLATIME | OBD_MD_FLMTIME);
749 } else if (S_ISLNK(inode->i_mode) &&
750 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
751 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off, 0);
754 LASSERT (symname != NULL); /* caller prepped reply */
755 len = lustre_msg_buflen(req->rq_repmsg, reply_off);
757 rc = inode->i_op->readlink(dentry, symname, len);
759 CERROR("readlink failed: %d\n", rc);
760 } else if (rc != len - 1) {
761 CERROR ("Unexpected readlink rc %d: expecting %d\n",
765 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
768 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
769 body->valid |= OBD_MD_LINKNAME;
770 body->eadatasize = rc + 1;
771 symname[rc] = 0; /* NULL terminate */
775 } else if (reqbody->valid == OBD_MD_FLFLAGS &&
776 reqbody->flags & MDS_BFLAG_EXT_FLAGS) {
779 /* We only return the full set of flags on ioctl, otherwise we
780 * get enough flags from the inode in mds_pack_inode2body(). */
781 rc = fsfilt_iocontrol(obd, dentry, FSFILT_IOC_GETFLAGS,
784 body->flags = flags | MDS_BFLAG_EXT_FLAGS;
787 if (reqbody->valid & OBD_MD_FLMODEASIZE) {
788 struct mds_obd *mds = mds_req2mds(req);
789 body->max_cookiesize = mds->mds_max_cookiesize;
790 body->max_mdsize = mds->mds_max_mdsize;
791 body->valid |= OBD_MD_FLMODEASIZE;
797 #ifdef CONFIG_FS_POSIX_ACL
798 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
799 (reqbody->valid & OBD_MD_FLACL)) {
800 rc = mds_pack_acl(&req->rq_export->exp_mds_data,
801 inode, req->rq_repmsg,
812 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
815 struct mds_obd *mds = mds_req2mds(req);
816 struct mds_body *body;
817 int rc, bufcount = REPLY_REC_OFF + 1;
818 int size[4] = { sizeof(struct ptlrpc_body),
822 LASSERT(offset == REQ_REC_OFF); /* non-intent */
824 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
825 LASSERT(body != NULL); /* checked by caller */
826 LASSERT(lustre_req_swabbed(req, offset)); /* swabbed by caller */
828 if (body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) {
829                 /* this will be shrunk to the actual size before the reply is sent */
830 if (S_ISREG(inode->i_mode) || (S_ISDIR(inode->i_mode)))
831 size[bufcount ++] = mds->mds_max_mdsize;
833                 /* we do not want an LSM for special files */
834 body->valid &= ~(OBD_MD_FLEASIZE | OBD_MD_FLDIREA);
835 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
836 if (i_size_read(inode) > body->eadatasize)
837 CERROR("symlink size: %Lu, reply space: %d\n",
838 i_size_read(inode) + 1, body->eadatasize);
839 size[bufcount ++] = min_t(int, i_size_read(inode) + 1,
841 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
842 i_size_read(inode) + 1, body->eadatasize);
844 #ifdef CONFIG_FS_POSIX_ACL
845 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
846 (body->valid & OBD_MD_FLACL)) {
847 size[bufcount ++] = LUSTRE_POSIX_ACL_MAX_SIZE;
851 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
852 CERROR("failed MDS_GETATTR_PACK test\n");
853 req->rq_status = -ENOMEM;
857 rc = lustre_pack_reply(req, bufcount, size, NULL);
866 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
867 int child_part, struct lustre_handle *child_lockh)
869 struct obd_device *obd = req->rq_export->exp_obd;
870 struct mds_obd *mds = &obd->u.mds;
871 struct ldlm_reply *rep = NULL;
872 struct lvfs_run_ctxt saved;
873 struct mds_body *body;
874 struct dentry *dparent = NULL, *dchild = NULL;
875 struct lvfs_ucred uc = {NULL,};
876 struct lustre_handle parent_lockh;
878 int rc = 0, cleanup_phase = 0, resent_req = 0;
879 int rq_offset = offset;
883 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
884 LASSERT(offset == REQ_REC_OFF || offset == DLM_INTENT_REC_OFF);
885 /* if requests were at offset 2, the getattr reply goes back at 1 */
886 if (offset == DLM_INTENT_REC_OFF) {
887 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
889 offset = DLM_REPLY_REC_OFF;
892 /* Swab now, before anyone looks inside the request */
893 body = lustre_swab_reqbuf(req, rq_offset, sizeof(*body),
894 lustre_swab_mds_body);
896 CERROR("Can't swab mds_body\n");
897 GOTO(cleanup_exit, rc = -EFAULT);
900 lustre_set_req_swabbed(req, rq_offset + 1);
901 name = lustre_msg_string(req->rq_reqmsg, rq_offset + 1, 0);
903 CERROR("Can't unpack name\n");
904 GOTO(cleanup_exit, rc = -EFAULT);
906 namesize = lustre_msg_buflen(req->rq_reqmsg, rq_offset + 1);
907         /* a namesize of less than 2 means an empty name; this probably came from a
908            revalidate by cfid, so there is no point in setting the name */
912 rc = mds_init_ucred(&uc, req, rq_offset);
917 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
918 cleanup_phase = 1; /* kernel context */
919 ldlm_reply_set_disposition(rep, DISP_LOOKUP_EXECD);
921 /* FIXME: handle raw lookup */
923 if (body->valid == OBD_MD_FLID) {
924 struct mds_body *mds_reply;
925 int size = sizeof(*mds_reply);
927                 /* The user requested ONLY the inode number, so do a raw lookup */
928 rc = lustre_pack_reply(req, 1, &size, NULL);
930 CERROR("out of memory\n");
934 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
936 mds_reply = lustre_msg_buf(req->rq_repmsg, offset,
938 mds_reply->fid1.id = inum;
939 mds_reply->valid = OBD_MD_FLID;
944         /* child_lockh is only set in fixup_handle_for_resent_req()
945 * if MSG_RESENT is set */
946 if (lustre_handle_is_used(child_lockh)) {
947 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
951 if (resent_req == 0) {
953 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
954 rc = mds_get_parent_child_locked(obd, &obd->u.mds,
958 MDS_INODELOCK_UPDATE,
960 child_lockh, &dchild,
963 /* For revalidate by fid we always take UPDATE lock */
964 dchild = mds_fid2locked_dentry(obd, &body->fid2, NULL,
966 NULL, 0, child_part);
969 rc = PTR_ERR(dchild);
974 struct ldlm_lock *granted_lock;
975 struct ll_fid child_fid;
976 struct ldlm_resource *res;
977 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
978 granted_lock = ldlm_handle2lock(child_lockh);
979 /* lock was granted in fixup_handle_for_resent_req() if
980 * MSG_RESENT is set */
981 LASSERTF(granted_lock != NULL, LPU64"/%u lockh "LPX64"\n",
982 body->fid1.id, body->fid1.generation,
983 child_lockh->cookie);
986 res = granted_lock->l_resource;
987 child_fid.id = res->lr_name.name[0];
988 child_fid.generation = res->lr_name.name[1];
989 dchild = mds_fid2dentry(&obd->u.mds, &child_fid, NULL);
990 if (IS_ERR(dchild)) {
991 rc = PTR_ERR(dchild);
992 LCONSOLE_WARN("Child "LPU64"/%u lookup error %d.",
993 child_fid.id, child_fid.generation, rc);
996 LDLM_LOCK_PUT(granted_lock);
999 cleanup_phase = 2; /* dchild, dparent, locks */
1001 if (dchild->d_inode == NULL) {
1002 ldlm_reply_set_disposition(rep, DISP_LOOKUP_NEG);
1003 /* in the intent case, the policy clears this error:
1004 the disposition is enough */
1005 GOTO(cleanup, rc = -ENOENT);
1007 ldlm_reply_set_disposition(rep, DISP_LOOKUP_POS);
1010 if (req->rq_repmsg == NULL) {
1011 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
1013 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1018 rc = mds_getattr_internal(obd, dchild, req, body, offset);
1019 GOTO(cleanup, rc); /* returns the lock to the client */
1022 switch (cleanup_phase) {
1024 if (resent_req == 0) {
1025 if (rc && dchild->d_inode)
1026 ldlm_lock_decref(child_lockh, LCK_CR);
1028 ldlm_lock_decref(&parent_lockh, LCK_CR);
1034 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1036 mds_exit_ucred(&uc, mds);
1037 if (!req->rq_packed_final) {
1038 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1041 req->rq_status = rc;
1048 static int mds_getattr(struct ptlrpc_request *req, int offset)
1050 struct mds_obd *mds = mds_req2mds(req);
1051 struct obd_device *obd = req->rq_export->exp_obd;
1052 struct lvfs_run_ctxt saved;
1054 struct mds_body *body;
1055 struct lvfs_ucred uc = { NULL, };
1059 OBD_COUNTER_INCREMENT(obd, getattr);
1061 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1062 lustre_swab_mds_body);
1064 GOTO(cleanup_exit, rc = -EFAULT);
1066 rc = mds_init_ucred(&uc, req, offset);
1068 GOTO(out_ucred, rc);
1070 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1071 de = mds_fid2dentry(mds, &body->fid1, NULL);
1073 req->rq_status = PTR_ERR(de);
1077 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1079 CERROR("mds_getattr_pack_msg: %d\n", rc);
1083 req->rq_status = mds_getattr_internal(obd, de, req, body,REPLY_REC_OFF);
1088 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1090 if (!req->rq_packed_final) {
1091 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1095 req->rq_status = rc;
1097 mds_shrink_body_reply(req, offset, REPLY_REC_OFF);
1099 mds_exit_ucred(&uc, mds);
1105 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1106 __u64 max_age, __u32 flags)
1110 spin_lock(&obd->obd_osfs_lock);
1111 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
1113 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1114 spin_unlock(&obd->obd_osfs_lock);
1119 static int mds_statfs(struct ptlrpc_request *req)
1121 struct obd_device *obd = req->rq_export->exp_obd;
1122 struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
1123 int rc, size[2] = { sizeof(struct ptlrpc_body),
1124 sizeof(struct obd_statfs) };
1127 /* This will trigger a watchdog timeout */
1128 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
1129 (MDS_SERVICE_WATCHDOG_FACTOR *
1130 at_get(&svc->srv_at_estimate)) + 1);
1131 OBD_COUNTER_INCREMENT(obd, statfs);
1133 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
1134 GOTO(out, rc = -ENOMEM);
1135 rc = lustre_pack_reply(req, 2, size, NULL);
1139 /* We call this so that we can cache a bit - 1 jiffie worth */
1140 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1141 size[REPLY_REC_OFF]),
1142 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), 0);
1144 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1150 req->rq_status = rc;
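/*
 * Illustrative note, not part of the original code: the max_age passed to
 * mds_obd_statfs() above is an absolute timestamp rather than an interval;
 * cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS) is assumed to mean "cached
 * obd_osfs data sampled within the last OBD_STATFS_CACHE_SECONDS is still
 * acceptable", so fsfilt_statfs() only re-reads the filesystem when the
 * cached copy is older than that.  This is what the "cache a bit" comment
 * above refers to.
 */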
1154 static int mds_sync(struct ptlrpc_request *req, int offset)
1156 struct obd_device *obd = req->rq_export->exp_obd;
1157 struct mds_obd *mds = &obd->u.mds;
1158 struct mds_body *body;
1159 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
1162 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1163 lustre_swab_mds_body);
1165 GOTO(out, rc = -EFAULT);
1167 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1168 GOTO(out, rc = -ENOMEM);
1169 rc = lustre_pack_reply(req, 2, size, NULL);
1173 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
1174 if (rc == 0 && body->fid1.id != 0) {
1177 de = mds_fid2dentry(mds, &body->fid1, NULL);
1179 GOTO(out, rc = PTR_ERR(de));
1181 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1183 mds_pack_inode2body(body, de->d_inode);
1189 req->rq_status = rc;
1193 /* mds_readpage does not take a DLM lock on the inode, because the client must
1194 * already have a PR lock.
1196  * If we were to take another one here, a deadlock would result if another
1197  * thread were already waiting for a PW lock. */
1198 static int mds_readpage(struct ptlrpc_request *req, int offset)
1200 struct obd_device *obd = req->rq_export->exp_obd;
1201 struct mds_obd *mds = &obd->u.mds;
1202 struct vfsmount *mnt;
1205 struct mds_body *body, *repbody;
1206 struct lvfs_run_ctxt saved;
1207 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) };
1208 struct lvfs_ucred uc = {NULL,};
1211 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_PACK, -ENOMEM);
1212 rc = lustre_pack_reply(req, 2, size, NULL);
1216 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1217 lustre_swab_mds_body);
1219 GOTO (out, rc = -EFAULT);
1221 rc = mds_init_ucred(&uc, req, offset);
1225 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1226 de = mds_fid2dentry(&obd->u.mds, &body->fid1, &mnt);
1228 GOTO(out_pop, rc = PTR_ERR(de));
1230 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1232 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1233 /* note: in case of an error, dentry_open puts dentry */
1235 GOTO(out_pop, rc = PTR_ERR(file));
1237 /* body->size is actually the offset -eeb */
1238 if ((body->size & (de->d_inode->i_sb->s_blocksize - 1)) != 0) {
1239 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1240 body->size, de->d_inode->i_sb->s_blocksize);
1241 GOTO(out_file, rc = -EFAULT);
1244 /* body->nlink is actually the #bytes to read -eeb */
1245 if (body->nlink & (de->d_inode->i_sb->s_blocksize - 1)) {
1246 CERROR("size %u is not multiple of blocksize %lu\n",
1247 body->nlink, de->d_inode->i_sb->s_blocksize);
1248 GOTO(out_file, rc = -EFAULT);
1251 repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1253 repbody->size = i_size_read(file->f_dentry->d_inode);
1254 repbody->valid = OBD_MD_FLSIZE;
1256         /* to make this asynchronous, make sure that the handling function
1257            does not send a reply when this function completes; instead, a
1258            callback function would send the reply */
1259 /* body->size is actually the offset -eeb */
1260 rc = mds_sendpage(req, file, body->size, body->nlink);
1263 filp_close(file, 0);
1265 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1267 mds_exit_ucred(&uc, mds);
1268 req->rq_status = rc;
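/*
 * Illustrative sketch, not part of the original code: as the "-eeb" comments
 * above note, the readpage request overloads mds_body fields, so a
 * hypothetical sender fills them as:
 *
 *      body->fid1  = fid of the directory to read
 *      body->size  = byte offset into the directory (block-size aligned)
 *      body->nlink = number of bytes to read (a multiple of the block size)
 *
 * and the reply's body->size carries the current directory size back to the
 * client (repbody->size = i_size_read(...) above).
 */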
1272 int mds_reint(struct ptlrpc_request *req, int offset,
1273 struct lustre_handle *lockh)
1275 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1278 OBD_ALLOC(rec, sizeof(*rec));
1282 rc = mds_update_unpack(req, offset, rec);
1283 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1284 CERROR("invalid record\n");
1285 GOTO(out, req->rq_status = -EINVAL);
1288 mds_root_squash(&req->rq_export->exp_obd->u.mds, &req->rq_peer.nid,
1289 &rec->ur_uc.luc_fsuid, &rec->ur_uc.luc_fsgid,
1290 &rec->ur_uc.luc_cap, &rec->ur_uc.luc_suppgid1,
1291 &rec->ur_uc.luc_suppgid2);
1293 /* rc will be used to interrupt a for loop over multiple records */
1294 rc = mds_reint_rec(rec, offset, req, lockh);
1296 OBD_FREE(rec, sizeof(*rec));
1300 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1301 struct obd_device *obd, int *process)
1303 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1304 case MDS_CONNECT: /* This will never get here, but for completeness. */
1305 case OST_CONNECT: /* This will never get here, but for completeness. */
1306 case MDS_DISCONNECT:
1307 case OST_DISCONNECT:
1312 case MDS_SYNC: /* used in unmounting */
1317 *process = target_queue_recovery_request(req, obd);
1321 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1323 /* XXX what should we set rq_status to here? */
1324 req->rq_status = -EAGAIN;
1325 RETURN(ptlrpc_error(req));
1329 static char *reint_names[] = {
1330         [REINT_SETATTR] = "setattr",
1331         [REINT_CREATE]  = "create",
1332         [REINT_LINK]    = "link",
1333         [REINT_UNLINK]  = "unlink",
1334         [REINT_RENAME]  = "rename",
1335         [REINT_OPEN]    = "open",
1338 static int mds_set_info_rpc(struct obd_export *exp, struct ptlrpc_request *req)
1341 int keylen, vallen, rc = 0;
1344 key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1);
1346 DEBUG_REQ(D_HA, req, "no set_info key");
1349 keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF);
1351 val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, 0);
1352 vallen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1);
1354 rc = lustre_pack_reply(req, 1, NULL, NULL);
1358 lustre_msg_set_status(req->rq_repmsg, 0);
1360 /* Accept the broken "read-only" key from 1.6.6 clients. b=17493 */
1361 if (KEY_IS(KEY_READONLY) || KEY_IS(KEY_READONLY_166COMPAT)) {
1362 if (val == NULL || vallen < sizeof(__u32)) {
1363 DEBUG_REQ(D_HA, req, "no set_info val");
1368 exp->exp_connect_flags |= OBD_CONNECT_RDONLY;
1370 exp->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
1378 #ifdef HAVE_QUOTA_SUPPORT
1379 static int mds_handle_quotacheck(struct ptlrpc_request *req)
1381 struct obd_quotactl *oqctl;
1385 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1386 lustre_swab_obd_quotactl);
1390 rc = lustre_pack_reply(req, 1, NULL, NULL);
1394 req->rq_status = obd_quotacheck(req->rq_export, oqctl);
1398 static int mds_handle_quotactl(struct ptlrpc_request *req)
1400 struct obd_quotactl *oqctl, *repoqc;
1401 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repoqc) };
1404 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1405 lustre_swab_obd_quotactl);
1409 rc = lustre_pack_reply(req, 2, size, NULL);
1413 repoqc = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*repoqc));
1415 req->rq_status = obd_quotactl(req->rq_export, oqctl);
1421 static int mds_msg_check_version(struct lustre_msg *msg)
1425 switch (lustre_msg_get_opc(msg)) {
1427 case MDS_DISCONNECT:
1429 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
1431 CERROR("bad opc %u version %08x, expecting %08x\n",
1432 lustre_msg_get_opc(msg),
1433 lustre_msg_get_version(msg),
1434 LUSTRE_OBD_VERSION);
1438 case MDS_GETATTR_NAME:
1443 case MDS_DONE_WRITING:
1449 case MDS_QUOTACHECK:
1453 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
1455 CERROR("bad opc %u version %08x, expecting %08x\n",
1456 lustre_msg_get_opc(msg),
1457 lustre_msg_get_version(msg),
1458 LUSTRE_MDS_VERSION);
1462 case LDLM_BL_CALLBACK:
1463 case LDLM_CP_CALLBACK:
1464 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
1466 CERROR("bad opc %u version %08x, expecting %08x\n",
1467 lustre_msg_get_opc(msg),
1468 lustre_msg_get_version(msg),
1469 LUSTRE_DLM_VERSION);
1471 case OBD_LOG_CANCEL:
1472 case LLOG_ORIGIN_HANDLE_CREATE:
1473 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1474 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1475 case LLOG_ORIGIN_HANDLE_CLOSE:
1476 case LLOG_ORIGIN_HANDLE_DESTROY:
1477 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1479 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
1481 CERROR("bad opc %u version %08x, expecting %08x\n",
1482 lustre_msg_get_opc(msg),
1483 lustre_msg_get_version(msg),
1484 LUSTRE_LOG_VERSION);
1487 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
1493 int mds_handle(struct ptlrpc_request *req)
1495 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
1497 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1498 struct obd_device *obd = NULL;
1501 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
1503 LASSERT(current->journal_info == NULL);
1505 rc = mds_msg_check_version(req->rq_reqmsg);
1507 CERROR("MDS drop mal-formed request\n");
1511 if (lustre_msg_get_opc(req->rq_reqmsg) != MDS_CONNECT) {
1512 struct mds_export_data *med;
1515 if (!class_connected_export(req->rq_export)) {
1516 CERROR("operation %d on unconnected MDS from %s\n",
1517 lustre_msg_get_opc(req->rq_reqmsg),
1518 libcfs_id2str(req->rq_peer));
1519 req->rq_status = -ENOTCONN;
1520 GOTO(out, rc = -ENOTCONN);
1523 med = &req->rq_export->exp_mds_data;
1524 obd = req->rq_export->exp_obd;
1527 /* sanity check: if the xid matches, the request must
1528 * be marked as a resent or replayed */
1529 if (req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_xid) ||
1530 req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_close_xid))
1531 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
1532 (MSG_RESENT | MSG_REPLAY))) {
1533 CERROR("rq_xid "LPU64" matches last_xid, "
1534 "expected RESENT flag\n",
1536 req->rq_status = -ENOTCONN;
1537 GOTO(out, rc = -EFAULT);
1539 /* else: note the opposite is not always true; a
1540 * RESENT req after a failover will usually not match
1541 * the last_xid, since it was likely never
1542 * committed. A REPLAYed request will almost never
1543 * match the last xid, however it could for a
1544 * committed, but still retained, open. */
1546 /* Check for aborted recovery. */
1547 spin_lock_bh(&obd->obd_processing_task_lock);
1548 recovering = obd->obd_recovering;
1549 spin_unlock_bh(&obd->obd_processing_task_lock);
1551 target_recovery_check_and_stop(obd) == 0) {
1552 rc = mds_filter_recovery_request(req, obd,
1554 if (rc || !should_process)
1559 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1561 DEBUG_REQ(D_INODE, req, "connect");
1562 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1563 rc = target_handle_connect(req, mds_handle);
1565 /* Now that we have an export, set obd. */
1566 obd = req->rq_export->exp_obd;
1570 case MDS_DISCONNECT:
1571 DEBUG_REQ(D_INODE, req, "disconnect");
1572 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1573 rc = target_handle_disconnect(req);
1574 req->rq_status = rc; /* superfluous? */
1578 DEBUG_REQ(D_INODE, req, "getstatus");
1579 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1580 rc = mds_getstatus(req);
1584 DEBUG_REQ(D_INODE, req, "getattr");
1585 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1586 rc = mds_getattr(req, REQ_REC_OFF);
1590 DEBUG_REQ(D_INODE, req, "setxattr");
1591 OBD_FAIL_RETURN(OBD_FAIL_MDS_SETXATTR_NET, 0);
1592 rc = mds_setxattr(req);
1596 DEBUG_REQ(D_INODE, req, "getxattr");
1597 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETXATTR_NET, 0);
1598 rc = mds_getxattr(req);
1601 case MDS_GETATTR_NAME: {
1602 struct lustre_handle lockh = { 0 };
1603 DEBUG_REQ(D_INODE, req, "getattr_name");
1604 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1606 /* If this request gets a reconstructed reply, we won't be
1607 * acquiring any new locks in mds_getattr_lock, so we don't
1610 rc = mds_getattr_lock(req, REQ_REC_OFF, MDS_INODELOCK_UPDATE,
1612 mds_shrink_body_reply(req, REQ_REC_OFF, REPLY_REC_OFF);
1613 /* this non-intent call (from an ioctl) is special */
1614 req->rq_status = rc;
1615 if (rc == 0 && lustre_handle_is_used(&lockh))
1616 ldlm_lock_decref(&lockh, LCK_CR);
1620 DEBUG_REQ(D_INODE, req, "statfs");
1621 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1622 rc = mds_statfs(req);
1626 DEBUG_REQ(D_INODE, req, "readpage");
1627 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1628 rc = mds_readpage(req, REQ_REC_OFF);
1630 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
1637 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF,
1641 int size[4] = { sizeof(struct ptlrpc_body),
1642 sizeof(struct mds_body),
1643 mds->mds_max_mdsize,
1644 mds->mds_max_cookiesize };
1647 /* NB only peek inside req now; mds_reint() will swab it */
1649 CERROR ("Can't inspect opcode\n");
1654 if (lustre_req_need_swab(req))
1657 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1658 (opc < REINT_MAX) ? reint_names[opc] :
1663 op = PTLRPC_LAST_CNTR + MDS_REINT_CREATE;
1666 op = PTLRPC_LAST_CNTR + MDS_REINT_LINK;
1669 op = PTLRPC_LAST_CNTR + MDS_REINT_OPEN;
1672 op = PTLRPC_LAST_CNTR + MDS_REINT_SETATTR;
1675 op = PTLRPC_LAST_CNTR + MDS_REINT_RENAME;
1678 op = PTLRPC_LAST_CNTR + MDS_REINT_UNLINK;
1685 if (op && req->rq_rqbd->rqbd_service->srv_stats)
1686 lprocfs_counter_incr(
1687 req->rq_rqbd->rqbd_service->srv_stats, op);
1689 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
1691 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1693 else if (opc == REINT_OPEN)
1698                 /* for a replayed request we do not send reply data; the mds state is restored from the replay */
1699 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
1700 size[DLM_REPLY_REC_OFF] = 0;
1701 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1702 size[DLM_REPLY_REC_OFF + 1] = 0;
1705 rc = lustre_pack_reply(req, bufcount, size, NULL);
1709 rc = mds_reint(req, REQ_REC_OFF, NULL);
1710 mds_shrink_intent_reply(req, opc, REPLY_REC_OFF);
1711 fail = OBD_FAIL_MDS_REINT_NET_REP;
1716 DEBUG_REQ(D_INODE, req, "close");
1717 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1718 rc = mds_close(req, REQ_REC_OFF);
1719 mds_shrink_body_reply(req, REQ_REC_OFF, REPLY_REC_OFF);
1720 fail = OBD_FAIL_MDS_CLOSE_NET_REP;
1723 case MDS_DONE_WRITING:
1724 DEBUG_REQ(D_INODE, req, "done_writing");
1725 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
1726 rc = mds_done_writing(req, REQ_REC_OFF);
1730 DEBUG_REQ(D_INODE, req, "pin");
1731 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1732 rc = mds_pin(req, REQ_REC_OFF);
1736 DEBUG_REQ(D_INODE, req, "sync");
1737 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
1738 rc = mds_sync(req, REQ_REC_OFF);
1742 DEBUG_REQ(D_INODE, req, "set_info");
1743 rc = mds_set_info_rpc(req->rq_export, req);
1745 #ifdef HAVE_QUOTA_SUPPORT
1746 case MDS_QUOTACHECK:
1747 DEBUG_REQ(D_INODE, req, "quotacheck");
1748 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACHECK_NET, 0);
1749 rc = mds_handle_quotacheck(req);
1753 DEBUG_REQ(D_INODE, req, "quotactl");
1754 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACTL_NET, 0);
1755 rc = mds_handle_quotactl(req);
1759 DEBUG_REQ(D_INODE, req, "ping");
1760 rc = target_handle_ping(req);
1761 if (req->rq_export->exp_delayed)
1762 mds_update_client_epoch(req->rq_export);
1765 case OBD_LOG_CANCEL:
1766 CDEBUG(D_INODE, "log cancel\n");
1767 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1768 rc = -ENOTSUPP; /* la la la */
1772 DEBUG_REQ(D_INODE, req, "enqueue");
1773 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1774 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1775 ldlm_server_blocking_ast, NULL);
1776 fail = OBD_FAIL_LDLM_REPLY;
1779 DEBUG_REQ(D_INODE, req, "convert");
1780 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1781 rc = ldlm_handle_convert(req);
1783 case LDLM_BL_CALLBACK:
1784 case LDLM_CP_CALLBACK:
1785 DEBUG_REQ(D_INODE, req, "callback");
1786 CERROR("callbacks should not happen on MDS\n");
1788 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1790 case LLOG_ORIGIN_HANDLE_CREATE:
1791 DEBUG_REQ(D_INODE, req, "llog_init");
1792 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1793 rc = llog_origin_handle_create(req);
1795 case LLOG_ORIGIN_HANDLE_DESTROY:
1796                 DEBUG_REQ(D_INODE, req, "llog destroy");
1797 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1798 rc = llog_origin_handle_destroy(req);
1800 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1801 DEBUG_REQ(D_INODE, req, "llog next block");
1802 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1803 rc = llog_origin_handle_next_block(req);
1805 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1806 DEBUG_REQ(D_INODE, req, "llog prev block");
1807 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1808 rc = llog_origin_handle_prev_block(req);
1810 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1811 DEBUG_REQ(D_INODE, req, "llog read header");
1812 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1813 rc = llog_origin_handle_read_header(req);
1815 case LLOG_ORIGIN_HANDLE_CLOSE:
1816 DEBUG_REQ(D_INODE, req, "llog close");
1817 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1818 rc = llog_origin_handle_close(req);
1821 DEBUG_REQ(D_INODE, req, "llog catinfo");
1822 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1823 rc = llog_catinfo(req);
1826 req->rq_status = -ENOTSUPP;
1827 rc = ptlrpc_error(req);
1831 LASSERT(current->journal_info == NULL);
1833 /* If we're DISCONNECTing, the mds_export_data is already freed */
1834 if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != MDS_DISCONNECT) {
1835 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1837                 /* I don't think last_xid is used for anything anyway, so I'm not sure
1838                    if we need to care about last_close_xid here. */
1839 lustre_msg_set_last_xid(req->rq_repmsg,
1840 le64_to_cpu(med->med_lcd->lcd_last_xid));
1841 target_committed_to_req(req);
1846 return target_handle_reply(req, rc, fail);
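/*
 * Illustrative sketch, not part of the original code: each opcode case in
 * mds_handle() above follows the same pattern (MDS_FOO / mds_foo() are
 * placeholders, not real opcodes):
 *
 *      case MDS_FOO:
 *              DEBUG_REQ(D_INODE, req, "foo");
 *              OBD_FAIL_RETURN(OBD_FAIL_MDS_FOO_NET, 0);    drop request on fail_loc
 *              rc = mds_foo(req, REQ_REC_OFF);              do the work
 *              fail = OBD_FAIL_MDS_FOO_NET_REP;             optional reply fail_loc
 *              break;
 *
 * with target_handle_reply(req, rc, fail) at the end deciding whether to send
 * the reply or to simulate a dropped reply under the "fail" fault-injection
 * value (which defaults to OBD_FAIL_MDS_ALL_REPLY_NET).
 */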
1849 /* Update the server data on disk. This stores the new mount_count and
1850 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1851 * then the server last_rcvd value may be less than that of the clients.
1852 * This will alert us that we may need to do client recovery.
1854 * Also assumes for mds_last_transno that we are not modifying it (no locking).
1856 int mds_update_server_data(struct obd_device *obd, int force_sync)
1858 struct mds_obd *mds = &obd->u.mds;
1859 struct lr_server_data *lsd = mds->mds_server_data;
1860 struct file *filp = mds->mds_rcvd_filp;
1861 struct lvfs_run_ctxt saved;
1866 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1867 mds->mds_mount_count, mds->mds_last_transno);
1869 spin_lock(&mds->mds_transno_lock);
1870 lsd->lsd_last_transno = cpu_to_le64(mds->mds_last_transno);
1871 spin_unlock(&mds->mds_transno_lock);
1873 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1874 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off,force_sync);
1875 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1877 CERROR("error writing MDS server data: rc = %d\n", rc);
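/*
 * Illustrative sketch, not part of the original code: the force_sync flag is
 * handed straight to fsfilt_write_record() above, so it controls whether the
 * last_rcvd update must reach the disk before this function returns.
 * mds_cleanup() below uses the synchronous form so that the server data is
 * durable before the filesystem is unmounted:
 *
 *      mds_update_server_data(obd, 1);
 *
 * while a caller that can tolerate the write going out with a later journal
 * commit would pass 0 instead.
 */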
1882 static void fsoptions_to_mds_flags(struct mds_obd *mds, char *options)
1892 while (*p && *p != ',')
1896 if (len == sizeof("user_xattr") - 1 &&
1897 memcmp(options, "user_xattr", len) == 0) {
1898 mds->mds_fl_user_xattr = 1;
1899 LCONSOLE_INFO("Enabling user_xattr\n");
1900 } else if (len == sizeof("nouser_xattr") - 1 &&
1901 memcmp(options, "nouser_xattr", len) == 0) {
1902 mds->mds_fl_user_xattr = 0;
1903 LCONSOLE_INFO("Disabling user_xattr\n");
1904 } else if (len == sizeof("acl") - 1 &&
1905 memcmp(options, "acl", len) == 0) {
1906 #ifdef CONFIG_FS_POSIX_ACL
1907 mds->mds_fl_acl = 1;
1908 LCONSOLE_INFO("Enabling ACL\n");
1910 CWARN("ignoring unsupported acl mount option\n");
1912 } else if (len == sizeof("noacl") - 1 &&
1913 memcmp(options, "noacl", len) == 0) {
1914 #ifdef CONFIG_FS_POSIX_ACL
1915 mds->mds_fl_acl = 0;
1916 LCONSOLE_INFO("Disabling ACL\n");
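/*
 * Illustrative example, not part of the original code: given a mount option
 * string such as
 *
 *      "errors=remount-ro,user_xattr,acl"
 *
 * the loop above walks the comma-separated tokens and acts only on the ones
 * it recognizes; this string would leave mds_fl_user_xattr = 1 and, with
 * CONFIG_FS_POSIX_ACL configured, mds_fl_acl = 1, while "errors=remount-ro"
 * is ignored by this helper.
 */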
1924 /* mount the file system (secretly). lustre_cfg parameters are:
1930 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1932 struct lprocfs_static_vars lvars;
1933 struct lustre_cfg* lcfg = buf;
1934 struct mds_obd *mds = &obd->u.mds;
1935 struct lustre_sb_info *lsi;
1936 struct lustre_mount_info *lmi;
1937 struct vfsmount *mnt;
1938 struct obd_uuid uuid;
1945 /* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv */
1947 CLASSERT(offsetof(struct obd_device, u.obt) ==
1948 offsetof(struct obd_device, u.mds.mds_obt));
1950 if (lcfg->lcfg_bufcount < 3)
1953 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
1956 lmi = server_get_mount(obd->obd_name);
1958 CERROR("Not mounted in lustre_fill_super?\n");
1962 /* We mounted in lustre_fill_super.
1963 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1964 lsi = s2lsi(lmi->lmi_sb);
1965 fsoptions_to_mds_flags(mds, lsi->lsi_ldd->ldd_mount_opts);
1966 fsoptions_to_mds_flags(mds, lsi->lsi_lmd->lmd_opts);
1968 obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
1969 if (IS_ERR(obd->obd_fsops))
1970 GOTO(err_put, rc = PTR_ERR(obd->obd_fsops));
1972 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
1974 if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
1975 CERROR("%s: Underlying device is marked as read-only. "
1976 "Setup failed\n", obd->obd_name);
1977 GOTO(err_ops, rc = -EROFS);
1980 sema_init(&mds->mds_epoch_sem, 1);
1981 spin_lock_init(&mds->mds_transno_lock);
1982 mds->mds_max_mdsize = sizeof(struct lov_mds_md_v3);
1983 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
1984 mds->mds_atime_diff = MAX_ATIME_DIFF;
1985 mds->mds_evict_ost_nids = 1;
1986 /* sync permission changes */
1987 mds->mds_sync_permission = 0;
1989 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
1990 obd->obd_namespace = ldlm_namespace_new(obd, ns_name, LDLM_NAMESPACE_SERVER,
1991 LDLM_NAMESPACE_GREEDY);
1992 if (obd->obd_namespace == NULL) {
1994 GOTO(err_ops, rc = -ENOMEM);
1996 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
1998 lprocfs_mds_init_vars(&lvars);
1999 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
2000 lprocfs_alloc_obd_stats(obd, LPROC_MDS_LAST) == 0) {
2001 /* Init private stats here */
2002 mds_stats_counter_init(obd->obd_stats);
2003 #ifdef HAVE_DELAYED_RECOVERY
2004 lprocfs_obd_attach_stale_exports(obd);
2006 obd->obd_proc_exports_entry = proc_mkdir("exports",
2007 obd->obd_proc_entry);
2010 rc = mds_fs_setup(obd, mnt);
2012 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
2017 if (obd->obd_proc_exports_entry)
2018 lprocfs_add_simple(obd->obd_proc_exports_entry,
2019 "clear", lprocfs_nid_stats_clear_read,
2020 lprocfs_nid_stats_clear_write, obd, NULL);
2022 if (lcfg->lcfg_bufcount >= 4 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
2025 ll_generate_random_uuid(uuid);
2026 class_uuid_unparse(uuid, &mds->mds_lov_uuid);
2028 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
2029 if (mds->mds_profile == NULL)
2030 GOTO(err_fs, rc = -ENOMEM);
2032 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
2033 LUSTRE_CFG_BUFLEN(lcfg, 3));
2036 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2037 "mds_ldlm_client", &obd->obd_ldlm_client);
2038 obd->obd_replayable = 1;
2040 rc = lquota_setup(mds_quota_interface_ref, obd);
2044 mds->mds_group_hash = upcall_cache_init(obd->obd_name);
2045 if (IS_ERR(mds->mds_group_hash)) {
2046 rc = PTR_ERR(mds->mds_group_hash);
2047 mds->mds_group_hash = NULL;
2048 GOTO(err_qctxt, rc);
2051 /* Don't wait for mds_postrecov trying to clear orphans */
2052 obd->obd_async_recov = 1;
2053 rc = mds_postsetup(obd);
2054 /* Bug 11557 - allow async abort_recov start
2055 FIXME can remove most of this obd_async_recov plumbing
2056 obd->obd_async_recov = 0;
2059 GOTO(err_qctxt, rc);
2061 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2062 if (uuid_ptr != NULL) {
2063 class_uuid_unparse(uuid_ptr, &uuid);
2069 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2070 if (obd->obd_recovering) {
2071 LCONSOLE_WARN("MDT %s now serving %s (%s%s%s), but will be in "
2072 "recovery for at least %d:%.02d, or until %d "
2073 "client%s reconnect%s. \n",
2074 obd->obd_name, lustre_cfg_string(lcfg, 1),
2075 label ?: "", label ? "/" : "", str,
2076 obd->obd_recovery_timeout / 60,
2077 obd->obd_recovery_timeout % 60,
2078 obd->obd_recoverable_clients,
2079 (obd->obd_recoverable_clients == 1) ? "":"s",
2080 (obd->obd_recoverable_clients == 1) ? "s":"");
2082 LCONSOLE_INFO("MDT %s now serving %s (%s%s%s) with recovery "
2083 "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
2084 label ?: "", label ? "/" : "", str,
2085 obd->obd_replayable ? "enabled" : "disabled");
2088 /* Reduce the initial timeout on an MDS because it doesn't need such
2089 * a long timeout as an OST does. Adaptive timeouts will adjust this
2090 * value appropriately. */
2091 if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
2092 ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT;
2097 lquota_cleanup(mds_quota_interface_ref, obd);
2099 /* No extra cleanup needed for llog_init_commit_thread() */
2100 mds_fs_cleanup(obd);
2101 upcall_cache_cleanup(mds->mds_group_hash);
2102 mds->mds_group_hash = NULL;
2103 remove_proc_entry("clear", obd->obd_proc_exports_entry);
2105 lprocfs_free_per_client_stats(obd);
2106 lprocfs_free_obd_stats(obd);
2107 lprocfs_obd_cleanup(obd);
2108 ldlm_namespace_free(obd->obd_namespace, NULL, 0);
2109 obd->obd_namespace = NULL;
2111 fsfilt_put_ops(obd->obd_fsops);
2113 server_put_mount(obd->obd_name, mnt);
2114 obd->u.obt.obt_sb = NULL;
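/*
 * Illustrative example, not part of the original code: the lustre_cfg buffers
 * checked at the top of mds_setup() follow the layout in the "setup 1:... 2:...
 * 3:... 4:..." comment above, roughly:
 *
 *      lustre_cfg_string(lcfg, 1)   "/dev/loop/0"    device, ignored here
 *      lustre_cfg_string(lcfg, 2)   "ext3"           fstype, ignored here
 *      lustre_cfg_string(lcfg, 3)   "mdsA"           profile name, copied into
 *                                                    mds->mds_profile
 *      lustre_cfg_string(lcfg, 4)   mount options    ignored here
 *
 * Buffers 1, 2 and 4 are ignored because the filesystem was already mounted
 * in lustre_fill_super(), as the comment above notes.
 */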
2118 static int mds_lov_clean(struct obd_device *obd)
2120 struct mds_obd *mds = &obd->u.mds;
2121 struct obd_device *lov = mds->mds_lov_obd;
2124 if (mds->mds_profile) {
2125 class_del_profile(mds->mds_profile);
2126 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
2127 mds->mds_profile = NULL;
2130 /* There better be a lov */
2134 RETURN(PTR_ERR(lov));
2136 obd_register_observer(lov, NULL);
2138 /* Give lov our same shutdown flags */
2139 lov->obd_force = obd->obd_force;
2140 lov->obd_fail = obd->obd_fail;
2142 /* Cleanup the lov */
2143 obd_disconnect(mds->mds_lov_exp);
2144 class_manual_cleanup(lov);
2145 mds->mds_lov_exp = NULL;
2150 static int mds_postsetup(struct obd_device *obd)
2152 struct mds_obd *mds = &obd->u.mds;
2153 struct llog_ctxt *ctxt;
2157 rc = llog_setup(obd, LLOG_CONFIG_ORIG_CTXT, obd, 0, NULL,
2162 rc = llog_setup(obd, LLOG_LOVEA_ORIG_CTXT, obd, 0, NULL,
2167 if (mds->mds_profile) {
2168 struct lustre_profile *lprof;
2169 /* The profile defines which osc and mdc to connect to, for a
2170 client. We reuse that here to figure out the name of the
2171 lov to use (and ignore lprof->lp_mdc).
2172 The profile was set in the config log with
2173 LCFG_MOUNTOPT profilenm oscnm mdcnm */
2174 lprof = class_get_profile(mds->mds_profile);
2175 if (lprof == NULL) {
2176 CERROR("No profile found: %s\n", mds->mds_profile);
2177 GOTO(err_cleanup, rc = -ENOENT);
2179 rc = mds_lov_connect(obd, lprof->lp_osc);
2181 GOTO(err_cleanup, rc);
2188 ctxt = llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT);
2192 ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
2198 int mds_postrecov(struct obd_device *obd)
2206 LASSERT(!obd->obd_recovering);
2208 /* VBR: update boot epoch after recovery */
2209 mds_update_last_epoch(obd);
2211 /* clean PENDING dir */
2212 rc = mds_cleanup_pending(obd);
2215 /* FIXME Does target_finish_recovery really need this to block? */
2216 /* Notify the LOV, which will in turn call mds_notify for each tgt */
2217 /* This means that we have to hack obd_notify to think we're obd_set_up
2218 during mds_lov_connect. */
2219 obd_notify(obd->u.mds.mds_lov_obd, NULL,
2220 obd->obd_async_recov ? OBD_NOTIFY_SYNC_NONBLOCK :
2221 OBD_NOTIFY_SYNC, NULL);
2223 /* quota recovery */
2224 if (likely(obd->obd_stopping == 0))
2225 lquota_recovery(mds_quota_interface_ref, obd);
2231 /* We need to be able to stop an mds_lov_synchronize */
2232 static int mds_lov_early_clean(struct obd_device *obd)
2234 struct mds_obd *mds = &obd->u.mds;
2235 struct obd_device *lov = mds->mds_lov_obd;
2237 if (!lov || (!obd->obd_force && !obd->obd_fail))
2240 CDEBUG(D_HA, "abort inflight\n");
2241 return (obd_precleanup(lov, OBD_CLEANUP_EARLY));
2244 static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2250 case OBD_CLEANUP_EARLY:
2252 case OBD_CLEANUP_EXPORTS:
2253 target_cleanup_recovery(obd);
2254 mds_lov_early_clean(obd);
2256 case OBD_CLEANUP_SELF_EXP:
2257 mds_lov_disconnect(obd);
2259 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2260 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
2261 rc = obd_llog_finish(obd, 0);
2263 case OBD_CLEANUP_OBD:
2269 static int mds_cleanup(struct obd_device *obd)
2271 struct mds_obd *mds = &obd->u.mds;
2272 lvfs_sbdev_type save_dev;
2275 if (obd->u.obt.obt_sb == NULL)
2277 save_dev = lvfs_sbdev(obd->u.obt.obt_sb);
2279 if (mds->mds_lov_exp)
2280 /* lov export was disconnected by mds_lov_clean;
2281 we just need to drop our ref */
2282 class_export_put(mds->mds_lov_exp);
2284 remove_proc_entry("clear", obd->obd_proc_exports_entry);
2285 lprocfs_free_per_client_stats(obd);
2286 lprocfs_free_obd_stats(obd);
2287 lprocfs_obd_cleanup(obd);
2289 lquota_cleanup(mds_quota_interface_ref, obd);
2291 mds_update_server_data(obd, 1);
2292 mds_fs_cleanup(obd);
2294 upcall_cache_cleanup(mds->mds_group_hash);
2295 mds->mds_group_hash = NULL;
2297 server_put_mount(obd->obd_name, mds->mds_vfsmnt);
2298 obd->u.obt.obt_sb = NULL;
2300 ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2301 obd->obd_namespace = NULL;
2303 spin_lock_bh(&obd->obd_processing_task_lock);
2304 if (obd->obd_recovering) {
2305 target_cancel_recovery_timer(obd);
2306 obd->obd_recovering = 0;
2308 spin_unlock_bh(&obd->obd_processing_task_lock);
2310 fsfilt_put_ops(obd->obd_fsops);
2312 LCONSOLE_INFO("MDT %s has stopped.\n", obd->obd_name);
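/* For a RESENT lock request, look up the lock the client already holds by
 * its remote handle and restore its cookie, so the client gets back the
 * original lock.  If no such lock exists but the xid matches the last
 * (close) xid we processed, the resend is still treated as such; otherwise
 * MSG_RESENT is cleared and the request is handled as a new one. */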
2317 static void fixup_handle_for_resent_req(struct ptlrpc_request *req, int offset,
2318 struct ldlm_lock *new_lock,
2319 struct ldlm_lock **old_lock,
2320 struct lustre_handle *lockh)
2322 struct obd_export *exp = req->rq_export;
2323 struct ldlm_request *dlmreq =
2324 lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*dlmreq));
2325 struct lustre_handle remote_hdl = dlmreq->lock_handle[0];
2326 struct ldlm_lock *lock;
2328 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
2331 lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
2333 if (lock != new_lock) {
2334 lockh->cookie = lock->l_handle.h_cookie;
2335 LDLM_DEBUG(lock, "restoring lock cookie");
2336 DEBUG_REQ(D_DLMTRACE, req, "restoring lock cookie "
2337 LPX64, lockh->cookie);
2339 *old_lock = LDLM_LOCK_GET(lock);
2341 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2344 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2347 /* If the xid matches, then we know this is a resent request,
2348 * and allow it. (It's probably an OPEN, for which we don't send a lock.) */
2351 le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_xid))
2355 le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_close_xid))
2358 /* This remote handle isn't enqueued, so we never received or
2359 * processed this request. Clear MSG_RESENT, because it can
2360 * be handled like any normal request now. */
2362 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2364 DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
2368 #define IS_CLIENT_DISCONNECT_ERROR(error) \
2369 (error == -ENOTCONN || error == -ENODEV)
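/* Intent policy for the MDS namespace: unpack the ldlm_intent, size and
 * pack the reply, execute the intent (open/create, getattr, lookup), and
 * then either abort the lock or hand the lock taken by the intent handler
 * back to the client in place of the one it asked for. */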
2371 static int mds_intent_policy(struct ldlm_namespace *ns,
2372 struct ldlm_lock **lockp, void *req_cookie,
2373 ldlm_mode_t mode, int flags, void *data)
2375 struct ptlrpc_request *req = req_cookie;
2376 struct ldlm_lock *lock = *lockp;
2377 struct ldlm_intent *it;
2378 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
2379 struct ldlm_reply *rep;
2380 struct lustre_handle lockh = { 0 };
2381 struct ldlm_lock *new_lock = NULL;
2382 int getattr_part = MDS_INODELOCK_UPDATE;
2383 int repsize[5] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
2384 [DLM_LOCKREPLY_OFF] = sizeof(struct ldlm_reply),
2385 [DLM_REPLY_REC_OFF] = sizeof(struct mds_body),
2386 [DLM_REPLY_REC_OFF+1] = mds->mds_max_mdsize };
2387 int repbufcnt = 4, rc;
2390 LASSERT(req != NULL);
2392 if (lustre_msg_bufcount(req->rq_reqmsg) <= DLM_INTENT_IT_OFF) {
2393 /* No intent was provided */
2394 rc = lustre_pack_reply(req, 2, repsize, NULL);
2400 it = lustre_swab_reqbuf(req, DLM_INTENT_IT_OFF, sizeof(*it),
2401 lustre_swab_ldlm_intent);
2403 CERROR("Intent missing\n");
2404 RETURN(req->rq_status = -EFAULT);
2407 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
2409 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
2410 (it->opc & (IT_OPEN | IT_GETATTR | IT_LOOKUP | IT_READDIR)))
2411 /* we should never allow OBD_CONNECT_ACL if not configured */
2412 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
2413 else if (it->opc & IT_UNLINK)
2414 repsize[repbufcnt++] = mds->mds_max_cookiesize;
2416 /* On replay the MDS state is restored, so no reply data is sent back */
2417 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
2418 repsize[DLM_REPLY_REC_OFF+1] = 0;
2419 if (it->opc & IT_UNLINK)
2420 repsize[DLM_REPLY_REC_OFF+2] = 0;
2423 rc = lustre_pack_reply(req, repbufcnt, repsize, NULL);
2425 RETURN(req->rq_status = rc);
2427 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
2428 ldlm_reply_set_disposition(rep, DISP_IT_EXECD);
2430 /* execute policy */
2431 switch ((long)it->opc) {
2433 case IT_CREAT|IT_OPEN:
2434 mds_counter_incr(req->rq_export, LPROC_MDS_OPEN);
2435 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock, NULL,
2437 /* XXX swab here to assert that an mds_open reint
2438 * packet is following */
2439 rep->lock_policy_res2 = mds_reint(req, DLM_INTENT_REC_OFF,
2441 mds_shrink_intent_reply(req, REINT_OPEN, DLM_REPLY_REC_OFF);
2443 /* We abort the lock if the lookup was negative and
2444 * we did not make it to the OPEN portion */
2445 if (!ldlm_reply_disposition(rep, DISP_LOOKUP_EXECD))
2446 RETURN(ELDLM_LOCK_ABORTED);
2447 if (ldlm_reply_disposition(rep, DISP_LOOKUP_NEG) &&
2448 !ldlm_reply_disposition(rep, DISP_OPEN_OPEN))
2451 /* If there was an error of some sort or if we are not
2452 * returning any locks */
2453 if (rep->lock_policy_res2 ||
2454 !ldlm_reply_disposition(rep, DISP_OPEN_LOCK)) {
2455 /* If it is a client-disconnect error (-ENODEV or -ENOTCONN), the
2456 * ptlrpc layer should learn of it immediately, so it is returned in
2457 * rq_status; otherwise the error is returned through the intent. */
2460 /* a VBR failure (-EOVERFLOW) is returned in rq_status as well */
2461 if (IS_CLIENT_DISCONNECT_ERROR(rep->lock_policy_res2) ||
2462 rep->lock_policy_res2 == -EOVERFLOW)
2463 RETURN(rep->lock_policy_res2);
2465 RETURN(ELDLM_LOCK_ABORTED);
2469 getattr_part = MDS_INODELOCK_LOOKUP;
2471 getattr_part |= MDS_INODELOCK_LOOKUP;
2472 OBD_COUNTER_INCREMENT(req->rq_export->exp_obd, getattr);
2474 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock,
2477 /* INODEBITS_INTEROP: if this lock was converted from a
2478 * plain lock (client does not support inodebits), then
2479 * child lock must be taken with both lookup and update
2480 * bits set for all operations.
2482 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS))
2483 getattr_part = MDS_INODELOCK_LOOKUP |
2484 MDS_INODELOCK_UPDATE;
2486 rep->lock_policy_res2 = mds_getattr_lock(req,DLM_INTENT_REC_OFF,
2487 getattr_part, &lockh);
2488 mds_shrink_body_reply(req,DLM_INTENT_REC_OFF, DLM_REPLY_REC_OFF);
2489 /* FIXME: LDLM can set req->rq_status. MDS sets
2490 policy_res{1,2} with disposition and status.
2491 - replay: returns 0 & req->rq_status holds the old status
2492 - otherwise: returns req->rq_status */
2493 if (ldlm_reply_disposition(rep, DISP_LOOKUP_NEG))
2494 rep->lock_policy_res2 = 0;
2495 if (!ldlm_reply_disposition(rep, DISP_LOOKUP_POS) ||
2496 rep->lock_policy_res2)
2497 RETURN(ELDLM_LOCK_ABORTED);
2498 if (req->rq_status != 0) {
2500 rep->lock_policy_res2 = req->rq_status;
2501 RETURN(ELDLM_LOCK_ABORTED);
2505 CERROR("Unhandled intent "LPD64"\n", it->opc);
2509 /* By this point, whatever function we called above must have either
2510 * filled in 'lockh', been an intent replay, or returned an error. We
2511 * want to allow replayed RPCs to not get a lock, since we would just
2512 * drop it below anyway because lock replay is done separately by the
2513 * client afterwards. For regular RPCs we want to give the new lock to
2514 * the client instead of whatever lock it was about to get. */
2515 if (new_lock == NULL)
2516 new_lock = ldlm_handle2lock(&lockh);
2517 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
2520 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
2521 it->opc, lockh.cookie);
2523 /* If we've already given this lock to a client once, then we should
2524 * have no readers or writers. Otherwise, we should have one reader
2525 * _or_ writer ref (which will be zeroed below) before returning the
2526 * lock to a client. */
2527 if (new_lock->l_export == req->rq_export) {
2528 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2530 LASSERT(new_lock->l_export == NULL);
2531 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2536 if (new_lock->l_export == req->rq_export) {
2537 /* Already gave this to the client, which means that we
2538 * reconstructed a reply. */
2539 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2541 RETURN(ELDLM_LOCK_REPLACED);
2544 /* Fixup the lock to be given to the client */
2545 lock_res_and_lock(new_lock);
2546 new_lock->l_readers = 0;
2547 new_lock->l_writers = 0;
2549 new_lock->l_export = class_export_get(req->rq_export);
2550 new_lock->l_blocking_ast = lock->l_blocking_ast;
2551 new_lock->l_completion_ast = lock->l_completion_ast;
2552 new_lock->l_flags &= ~LDLM_FL_LOCAL;
2554 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
2555 sizeof(lock->l_remote_handle));
2557 unlock_res_and_lock(new_lock);
2559 lustre_hash_add(new_lock->l_export->exp_lock_hash,
2560 &new_lock->l_remote_handle,
2561 &new_lock->l_exp_hash);
2562 LDLM_LOCK_PUT(new_lock);
2564 RETURN(ELDLM_LOCK_REPLACED);
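/* Set up the MDT request handling: register proc entries, pick thread
 * counts (from the mds_num_threads module parameter, or scaled by memory
 * and CPU count), and start the main, setattr and readpage ptlrpc
 * services, unwinding everything on failure. */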
2567 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
2569 struct mds_obd *mds = &obd->u.mds;
2570 struct lprocfs_static_vars lvars;
2571 int mds_min_threads;
2572 int mds_max_threads;
2576 lprocfs_mdt_init_vars(&lvars);
2577 lprocfs_obd_setup(obd, lvars.obd_vars);
2579 sema_init(&mds->mds_health_sem, 1);
2581 if (mds_num_threads) {
2582 /* If mds_num_threads is set, it is the min and the max. */
2583 if (mds_num_threads > MDS_THREADS_MAX)
2584 mds_num_threads = MDS_THREADS_MAX;
2585 if (mds_num_threads < MDS_THREADS_MIN)
2586 mds_num_threads = MDS_THREADS_MIN;
2587 mds_max_threads = mds_min_threads = mds_num_threads;
2589 /* Base min threads on memory and cpus */
2590 mds_min_threads = num_possible_cpus() * CFS_NUM_CACHEPAGES >>
2591 (27 - CFS_PAGE_SHIFT);
2592 if (mds_min_threads < MDS_THREADS_MIN)
2593 mds_min_threads = MDS_THREADS_MIN;
2594 /* Largest auto threads start value */
2595 if (mds_min_threads > 32)
2596 mds_min_threads = 32;
2597 mds_max_threads = min(MDS_THREADS_MAX, mds_min_threads * 4);
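/* Worked example (hypothetical numbers): with 4K pages the shift is
 * 27 - 12 = 15, so a node with 8 CPUs and 2^20 cache pages would get
 * 8 * 2^20 >> 15 = 256, capped to 32 min threads and 4 * 32 = 128 max
 * (assuming MDS_THREADS_MAX is at least 128). */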
2601 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2602 MDS_MAXREPSIZE, MDS_REQUEST_PORTAL,
2603 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2604 mds_handle, LUSTRE_MDS_NAME,
2605 obd->obd_proc_entry, target_print_req,
2606 mds_min_threads, mds_max_threads, "ll_mdt",
2609 if (!mds->mds_service) {
2610 CERROR("failed to start service\n");
2611 GOTO(err_lprocfs, rc = -ENOMEM);
2614 rc = ptlrpc_start_threads(obd, mds->mds_service);
2616 GOTO(err_thread, rc);
2618 mds->mds_setattr_service =
2619 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2620 MDS_MAXREPSIZE, MDS_SETATTR_PORTAL,
2621 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2622 mds_handle, "mds_setattr",
2623 obd->obd_proc_entry, target_print_req,
2624 mds_min_threads, mds_max_threads,
2625 "ll_mdt_attr", NULL);
2626 if (!mds->mds_setattr_service) {
2627 CERROR("failed to start getattr service\n");
2628 GOTO(err_thread, rc = -ENOMEM);
2631 rc = ptlrpc_start_threads(obd, mds->mds_setattr_service);
2633 GOTO(err_thread2, rc);
2635 mds->mds_readpage_service =
2636 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2637 MDS_MAXREPSIZE, MDS_READPAGE_PORTAL,
2638 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2639 mds_handle, "mds_readpage",
2640 obd->obd_proc_entry, target_print_req,
2641 MDS_THREADS_MIN_READPAGE, mds_max_threads,
2642 "ll_mdt_rdpg", NULL);
2643 if (!mds->mds_readpage_service) {
2644 CERROR("failed to start readpage service\n");
2645 GOTO(err_thread2, rc = -ENOMEM);
2648 rc = ptlrpc_start_threads(obd, mds->mds_readpage_service);
2651 GOTO(err_thread3, rc);
2653 ping_evictor_start();
2658 ptlrpc_unregister_service(mds->mds_readpage_service);
2659 mds->mds_readpage_service = NULL;
2661 ptlrpc_unregister_service(mds->mds_setattr_service);
2662 mds->mds_setattr_service = NULL;
2664 ptlrpc_unregister_service(mds->mds_service);
2665 mds->mds_service = NULL;
2667 lprocfs_obd_cleanup(obd);
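/* Stop the ping evictor and unregister the three ptlrpc services started
 * in mdt_setup, under mds_health_sem so a concurrent health check never
 * sees a half-torn-down service. */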
2671 static int mdt_cleanup(struct obd_device *obd)
2673 struct mds_obd *mds = &obd->u.mds;
2676 ping_evictor_stop();
2678 down(&mds->mds_health_sem);
2679 ptlrpc_unregister_service(mds->mds_readpage_service);
2680 ptlrpc_unregister_service(mds->mds_setattr_service);
2681 ptlrpc_unregister_service(mds->mds_service);
2682 mds->mds_readpage_service = NULL;
2683 mds->mds_setattr_service = NULL;
2684 mds->mds_service = NULL;
2685 up(&mds->mds_health_sem);
2687 lprocfs_obd_cleanup(obd);
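/* Health check for the MDT: OR together the per-service health checks
 * under mds_health_sem. */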
2692 static int mdt_health_check(struct obd_device *obd)
2694 struct mds_obd *mds = &obd->u.mds;
2697 down(&mds->mds_health_sem);
2698 rc |= ptlrpc_service_health_check(mds->mds_readpage_service);
2699 rc |= ptlrpc_service_health_check(mds->mds_setattr_service);
2700 rc |= ptlrpc_service_health_check(mds->mds_service);
2701 up(&mds->mds_health_sem);
2704 /* health_check should return 0 when healthy
2705 * and 1 when unhealthy. */
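/* lvfs fid2dentry callback: build a fid from the inode number and
 * generation and resolve it with mds_fid2dentry(). */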
2713 static struct dentry *mds_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
2716 struct obd_device *obd = data;
2719 fid.generation = gen;
2720 return mds_fid2dentry(&obd->u.mds, &fid, NULL);
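/* Health check for the MDS itself: unhealthy if the backing filesystem
 * has gone read-only and, with USE_HEALTH_CHECK_WRITE, if a test write
 * to the health-check file fails. */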
2723 static int mds_health_check(struct obd_device *obd)
2725 struct obd_device_target *odt = &obd->u.obt;
2726 #ifdef USE_HEALTH_CHECK_WRITE
2727 struct mds_obd *mds = &obd->u.mds;
2731 if (odt->obt_sb->s_flags & MS_RDONLY)
2734 #ifdef USE_HEALTH_CHECK_WRITE
2735 LASSERT(mds->mds_obt.obt_health_check_filp != NULL);
2736 rc |= !!lvfs_check_io_health(obd, mds->mds_obt.obt_health_check_filp);
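/* Handle on-line configuration records: parameter records are applied
 * through class_process_proc_param() against the PARAM_MDT table. */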
2742 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
2744 struct lustre_cfg *lcfg = buf;
2747 switch(lcfg->lcfg_command) {
2749 struct lprocfs_static_vars lvars;
2750 lprocfs_mds_init_vars(&lvars);
2752 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, lcfg, obd);
2762 struct lvfs_callback_ops mds_lvfs_ops = {
2763 l_fid2dentry: mds_lvfs_fid2dentry,
2766 /* use obd ops to offer management infrastructure */
2767 static struct obd_ops mds_obd_ops = {
2768 .o_owner = THIS_MODULE,
2769 .o_connect = mds_connect,
2770 .o_reconnect = mds_reconnect,
2771 .o_init_export = mds_init_export,
2772 .o_destroy_export = mds_destroy_export,
2773 .o_disconnect = mds_disconnect,
2774 .o_setup = mds_setup,
2775 .o_precleanup = mds_precleanup,
2776 .o_cleanup = mds_cleanup,
2777 .o_postrecov = mds_postrecov,
2778 .o_statfs = mds_obd_statfs,
2779 .o_iocontrol = mds_iocontrol,
2780 .o_create = mds_obd_create,
2781 .o_destroy = mds_obd_destroy,
2782 .o_llog_init = mds_llog_init,
2783 .o_llog_finish = mds_llog_finish,
2784 .o_notify = mds_notify,
2785 .o_health_check = mds_health_check,
2786 .o_process_config = mds_process_config,
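/* The MDT obd type only provides service setup, teardown and health
 * checking; all metadata handling is done through the MDS type above. */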
2789 static struct obd_ops mdt_obd_ops = {
2790 .o_owner = THIS_MODULE,
2791 .o_setup = mdt_setup,
2792 .o_cleanup = mdt_cleanup,
2793 .o_health_check = mdt_health_check,
2796 quota_interface_t *mds_quota_interface_ref;
2797 extern quota_interface_t mds_quota_interface;
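/* Module init: load and wire up the lquota interface, then register the
 * LUSTRE_MDS_NAME and LUSTRE_MDT_NAME obd types with their proc vars. */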
2799 static int __init mds_init(void)
2802 struct lprocfs_static_vars lvars;
2804 request_module("lquota");
2805 mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
2806 rc = lquota_init(mds_quota_interface_ref);
2808 if (mds_quota_interface_ref)
2809 PORTAL_SYMBOL_PUT(mds_quota_interface);
2812 init_obd_quota_ops(mds_quota_interface_ref, &mds_obd_ops);
2814 lprocfs_mds_init_vars(&lvars);
2815 class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2816 lprocfs_mdt_init_vars(&lvars);
2817 class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
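/* Module exit: shut down quota, drop the lquota symbol reference and
 * unregister both obd types. */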
2822 static void /*__exit*/ mds_exit(void)
2824 lquota_exit(mds_quota_interface_ref);
2825 if (mds_quota_interface_ref)
2826 PORTAL_SYMBOL_PUT(mds_quota_interface);
2828 class_unregister_type(LUSTRE_MDS_NAME);
2829 class_unregister_type(LUSTRE_MDT_NAME);
2832 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2833 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2834 MODULE_LICENSE("GPL");
2836 module_init(mds_init);
2837 module_exit(mds_exit);