1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mds/handler.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Phil Schwan <phil@clusterfs.com>
41 * Author: Mike Shaver <shaver@clusterfs.com>
45 # define EXPORT_SYMTAB
47 #define DEBUG_SUBSYSTEM S_MDS
49 #include <lustre_mds.h>
50 #include <linux/module.h>
51 #include <linux/init.h>
52 #include <linux/random.h>
54 #include <linux/jbd.h>
55 # include <linux/smp_lock.h>
56 # include <linux/buffer_head.h>
57 # include <linux/workqueue.h>
58 # include <linux/mount.h>
60 #include <obd_class.h>
61 #include <lustre_dlm.h>
63 #include <lustre_fsfilt.h>
64 #include <lprocfs_status.h>
65 #include <lustre_quota.h>
66 #include <lustre_disk.h>
67 #include <lustre_param.h>
69 #include "mds_internal.h"
72 CFS_MODULE_PARM(mds_num_threads, "i", int, 0444,
73 "number of MDS service threads to start");
75 static int mds_intent_policy(struct ldlm_namespace *ns,
76 struct ldlm_lock **lockp, void *req_cookie,
77 ldlm_mode_t mode, int flags, void *data);
78 static int mds_postsetup(struct obd_device *obd);
79 static int mds_cleanup(struct obd_device *obd);
81 /* Assumes caller has already pushed into the kernel filesystem context */
/* Bulk-send up to @count bytes of directory data from @file to the client:
 * allocate pages, fill them via fsfilt_readpage(), start a PUT-source bulk
 * transfer, and wait (bounded by the request deadline) for completion.
 * On bulk failure the client export is evicted via class_fail_export().
 * NOTE(review): this excerpt is elided (original numbering is non-contiguous);
 * several declarations/conditions are not visible here. */
82 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
83 loff_t offset, int count)
85 struct ptlrpc_bulk_desc *desc;
86 struct l_wait_info lwi;
89 int rc = 0, npages, i, tmpcount, tmpsize = 0;
92 LASSERT((offset & ~CFS_PAGE_MASK) == 0); /* I'm dubious about this */
/* Round the byte count up to whole pages. */
94 npages = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
95 OBD_ALLOC(pages, sizeof(*pages) * npages);
97 GOTO(out, rc = -ENOMEM);
99 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
102 GOTO(out_free, rc = -ENOMEM);
/* First pass: allocate one page per chunk and attach it to the bulk desc. */
104 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
105 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
107 OBD_PAGE_ALLOC(pages[i], CFS_ALLOC_STD);
108 if (pages[i] == NULL)
109 GOTO(cleanup_buf, rc = -ENOMEM);
111 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* Second pass: read the directory contents into the mapped pages;
 * fsfilt_readpage() advances @offset as it reads. */
114 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
115 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
116 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
117 tmpsize, offset, file->f_dentry->d_inode->i_ino,
118 i_size_read(file->f_dentry->d_inode));
120 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
121 kmap(pages[i]), tmpsize, &offset);
125 GOTO(cleanup_buf, rc = -EIO);
128 LASSERT(desc->bd_nob == count);
130 rc = ptlrpc_start_bulk_transfer(desc);
132 GOTO(cleanup_buf, rc);
/* Fault-injection hook used by recovery tests. */
134 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
135 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
136 OBD_FAIL_MDS_SENDPAGE, rc);
137 GOTO(abort_bulk, rc);
/* Wait no longer than the remaining request deadline (at least 1s). */
140 timeout = (int)req->rq_deadline - (int)cfs_time_current_sec();
142 CERROR("Req deadline already passed %lu (now: %lu)\n",
143 req->rq_deadline, cfs_time_current_sec());
145 lwi = LWI_TIMEOUT(cfs_time_seconds(max(timeout, 1)), NULL, NULL);
146 rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc), &lwi);
147 LASSERT (rc == 0 || rc == -ETIMEDOUT);
150 if (desc->bd_success &&
151 desc->bd_nob_transferred == count)
152 GOTO(cleanup_buf, rc);
154 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
/* Bulk did not complete cleanly: log and evict the client. */
157 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
158 (rc == -ETIMEDOUT) ? "timeout" : "network error",
159 desc->bd_nob_transferred, count,
160 req->rq_export->exp_client_uuid.uuid,
161 req->rq_export->exp_connection->c_remote_uuid.uuid);
163 class_fail_export(req->rq_export);
/* Cleanup labels: abort bulk, free pages, free descriptor, free page array. */
167 ptlrpc_abort_bulk(desc);
169 for (i = 0; i < npages; i++)
171 OBD_PAGE_FREE(pages[i]);
173 ptlrpc_free_bulk(desc);
175 OBD_FREE(pages, sizeof(*pages) * npages);
180 /* only valid locked dentries or errors should be returned */
/* Resolve @fid to a dentry and take a local inodebits DLM lock (@lockpart
 * bits, @lock_mode) on its resource {ino, generation}; the lock handle is
 * returned in @lockh.  On enqueue failure returns ERR_PTR(-EIO).
 * NOTE(review): elided excerpt — error paths for mds_fid2dentry() failure
 * and the final return are not visible here. */
181 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
182 struct vfsmount **mnt, int lock_mode,
183 struct lustre_handle *lockh,
184 char *name, int namelen, __u64 lockpart)
186 struct mds_obd *mds = &obd->u.mds;
187 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
188 struct ldlm_res_id res_id = { .name = {0} };
189 int flags = LDLM_FL_ATOMIC_CB, rc;
190 ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
/* Resource name is the inode number + generation of the resolved dentry. */
196 res_id.name[0] = de->d_inode->i_ino;
197 res_id.name[1] = de->d_inode->i_generation;
198 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id,
199 LDLM_IBITS, &policy, lock_mode, &flags,
200 ldlm_blocking_ast, ldlm_completion_ast,
201 NULL, NULL, 0, NULL, lockh);
202 if (rc != ELDLM_OK) {
204 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
210 /* Look up an entry by inode number. */
211 /* this function ONLY returns valid dget'd dentries with an initialized inode
/* Translate an {ino, generation} fid into a dget'd dentry via mds_lookup()
 * on the "0x<ino>" name under mds_fid_de.  Returns ERR_PTR(-ENOENT) for
 * missing/stale inodes and (per the check below) ERR_PTR(-ESTALE) for an
 * invalid fid.  If @mnt is non-NULL it is filled with mds_vfsmnt.
 * NOTE(review): elided excerpt — some declarations and branch conditions
 * are missing from this view. */
213 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
214 struct vfsmount **mnt)
216 struct obd_device *obd = container_of(mds, struct obd_device, u.mds);
218 unsigned long ino = fid->id;
219 __u32 generation = fid->generation;
221 struct dentry *result;
224 RETURN(ERR_PTR(-ESTALE));
226 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
228 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino/gen %lu/%u, sb %p\n",
229 ino, generation, mds->mds_obt.obt_sb);
231 /* under ext3 this is neither supposed to return bad inodes
233 result = mds_lookup(obd, fid_name, mds->mds_fid_de, strlen(fid_name));
237 inode = result->d_inode;
239 RETURN(ERR_PTR(-ENOENT));
/* nlink == 0: inode is unlinked; a fully-zeroed inode likely indicates
 * on-disk corruption, so warn loudly before failing the lookup. */
241 if (inode->i_nlink == 0) {
242 if (inode->i_mode == 0 &&
243 LTIME_S(inode->i_ctime) == 0 ) {
244 LCONSOLE_WARN("Found inode with zero nlink, mode and "
245 "ctime -- this may indicate disk"
246 "corruption (device %s, inode %lu, link:"
247 " %lu, count: %d)\n", obd->obd_name, inode->i_ino,
248 (unsigned long)inode->i_nlink,
249 atomic_read(&inode->i_count));
252 RETURN(ERR_PTR(-ENOENT));
/* Generation mismatch: the inode number was reused since the fid was
 * issued; treat as not found. */
255 if (generation && inode->i_generation != generation) {
256 /* we didn't find the right inode.. */
257 CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
258 "count: %d, generation %u/%u\n", inode->i_ino,
259 (unsigned long)inode->i_nlink,
260 atomic_read(&inode->i_count), inode->i_generation,
263 RETURN(ERR_PTR(-ENOENT));
267 *mnt = mds->mds_vfsmnt;
/* Negotiate the connect flags in @data against what this MDS supports,
 * store the agreed flags on the export, and warn when the server requires
 * ACL support but the client did not request it.
 * NOTE(review): elided excerpt — the NULL-@data guard and return are not
 * visible here. */
274 static int mds_connect_internal(struct obd_export *exp,
275 struct obd_connect_data *data)
277 struct obd_device *obd = exp->exp_obd;
/* Mask client-requested flags down to what the MDS supports. */
279 data->ocd_connect_flags &= MDS_CONNECT_SUPPORTED;
280 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
282 /* If no known bits (which should not happen, probably,
283 as everybody should support LOOKUP and UPDATE bits at least)
284 revert to compat mode with plain locks. */
285 if (!data->ocd_ibits_known &&
286 data->ocd_connect_flags & OBD_CONNECT_IBITS)
287 data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
/* Drop ACL / user-xattr support if disabled on this MDS. */
289 if (!obd->u.mds.mds_fl_acl)
290 data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
292 if (!obd->u.mds.mds_fl_user_xattr)
293 data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
295 exp->exp_connect_flags = data->ocd_connect_flags;
296 data->ocd_version = LUSTRE_VERSION_CODE;
297 exp->exp_mds_data.med_ibits_known = data->ocd_ibits_known;
300 if (obd->u.mds.mds_fl_acl &&
301 ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
302 CWARN("%s: MDS requires ACL support but client does not\n",
/* Re-negotiate connect data for an existing export on client reconnect
 * and (re)initialize its per-export stats.
 * NOTE(review): elided excerpt — the error return value for NULL args and
 * the final RETURN are not visible here. */
309 static int mds_reconnect(struct obd_export *exp, struct obd_device *obd,
310 struct obd_uuid *cluuid,
311 struct obd_connect_data *data,
317 if (exp == NULL || obd == NULL || cluuid == NULL)
320 rc = mds_connect_internal(exp, data);
322 mds_export_stats_init(obd, exp, localdata);
327 /* Establish a connection to the MDS.
329 * This will set up an export structure for the client to hold state data
330 * about that client, like open files, the last operation number it did
331 * on the server, etc.
/* NOTE(review): elided excerpt — allocation of @lcd, several error checks
 * and the cleanup label structure are missing from this view. */
333 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
334 struct obd_uuid *cluuid, struct obd_connect_data *data,
337 struct obd_export *exp;
338 struct mds_export_data *med;
339 struct lsd_client_data *lcd = NULL;
343 if (!conn || !obd || !cluuid)
346 /* Check for aborted recovery. */
347 target_recovery_check_and_stop(obd);
349 /* XXX There is a small race between checking the list and adding a
350 * new connection for the same UUID, but the real threat (list
351 * corruption when multiple different clients connect) is solved.
353 * There is a second race between adding the export to the list,
354 * and filling in the client data below. Hence skipping the case
355 * of NULL lcd above. We should already be controlling multiple
356 * connects at the client, and we can't hold the spinlock over
357 * memory allocations without risk of deadlocking.
/* Create the export for this client UUID, then negotiate flags and
 * register the client in the last_rcvd file via mds_client_add(). */
359 rc = class_connect(conn, obd, cluuid);
362 exp = class_conn2export(conn);
364 med = &exp->exp_mds_data;
366 rc = mds_connect_internal(exp, data);
372 GOTO(out, rc = -ENOMEM);
374 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
377 rc = mds_client_add(obd, exp, -1, localdata);
/* Error path: undo the class_connect() above. */
386 class_disconnect(exp);
388 class_export_put(exp);
/* Initialize the MDS-specific part of a new export: empty open-file list,
 * its spinlock, the "connecting" flag, and the LDLM export state. */
394 int mds_init_export(struct obd_export *exp)
396 struct mds_export_data *med = &exp->exp_mds_data;
399 INIT_LIST_HEAD(&med->med_open_head);
400 spin_lock_init(&med->med_open_lock);
402 spin_lock(&exp->exp_lock);
403 exp->exp_connecting = 1;
404 spin_unlock(&exp->exp_lock);
406 RETURN(ldlm_init_export(exp));
/* Tear down an export on final disconnect: force-close every file handle
 * the client left open (possibly destroying resulting orphans on the OSTs)
 * and free the per-client state.  Self-exports (UUID == obd UUID) skip the
 * file-close work.
 * NOTE(review): elided excerpt — some error checks, the `out:` label and
 * the final return are not visible here. */
409 static int mds_destroy_export(struct obd_export *export)
411 struct mds_export_data *med;
412 struct obd_device *obd = export->exp_obd;
413 struct mds_obd *mds = &obd->u.mds;
414 struct lvfs_run_ctxt saved;
415 struct lov_mds_md *lmm;
416 __u32 lmm_sz, cookie_sz;
417 struct llog_cookie *logcookies;
418 struct list_head closing_list;
419 struct mds_file_data *mfd, *n;
423 med = &export->exp_mds_data;
424 target_destroy_export(export);
425 ldlm_destroy_export(export);
/* Self-export: nothing client-side to clean up. */
427 if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
/* Scratch buffers for striping metadata (lmm) and unlink log cookies,
 * reused across all files being force-closed below. */
430 lmm_sz = mds->mds_max_mdsize;
431 OBD_ALLOC(lmm, lmm_sz);
433 CWARN("%s: allocation failure during cleanup; can not force "
434 "close file handles on this service.\n", obd->obd_name);
435 GOTO(out, rc = -ENOMEM);
438 cookie_sz = mds->mds_max_cookiesize;
439 OBD_ALLOC(logcookies, cookie_sz);
440 if (logcookies == NULL) {
441 CWARN("%s: allocation failure during cleanup; can not force "
442 "close file handles on this service.\n", obd->obd_name);
443 OBD_FREE(lmm, lmm_sz);
444 GOTO(out, rc = -ENOMEM);
447 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
448 /* Close any open files (which may also cause orphan unlinking). */
/* Move all open mfds to a private list under med_open_lock so they can
 * be closed without holding the spinlock. */
449 CFS_INIT_LIST_HEAD(&closing_list);
450 spin_lock(&med->med_open_lock);
451 while (!list_empty(&med->med_open_head)) {
452 struct list_head *tmp = med->med_open_head.next;
453 struct mds_file_data *mfd =
454 list_entry(tmp, struct mds_file_data, mfd_list);
456 /* Remove mfd handle so it can't be found again.
457 * We are consuming the mfd_list reference here. */
458 mds_mfd_unlink(mfd, 0);
459 list_add_tail(&mfd->mfd_list, &closing_list);
461 spin_unlock(&med->med_open_lock);
463 list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
464 int lmm_size = lmm_sz;
465 umode_t mode = mfd->mfd_dentry->d_inode->i_mode;
468 /* If you change this message, be sure to update
469 * replay_single:test_46 */
470 CDEBUG(D_INODE|D_IOCTL, "%s: force closing file handle for "
471 "%.*s (ino %lu)\n", obd->obd_name,
472 mfd->mfd_dentry->d_name.len,mfd->mfd_dentry->d_name.name,
473 mfd->mfd_dentry->d_inode->i_ino);
/* Fetch the LOV EA so a later orphan destroy knows the striping. */
475 rc = mds_get_md(obd, mfd->mfd_dentry->d_inode, lmm,
478 CWARN("mds_get_md failure, rc=%d\n", rc);
480 valid |= OBD_MD_FLEASIZE;
482 /* child orphan sem protects orphan_dec_test and
483 * is_orphan race, mds_mfd_close drops it */
484 MDS_DOWN_WRITE_ORPHAN_SEM(mfd->mfd_dentry->d_inode);
486 list_del_init(&mfd->mfd_list);
487 rc = mds_mfd_close(NULL, REQ_REC_OFF, obd, mfd,
488 !(export->exp_flags & OBD_OPT_FAILOVER),
489 lmm, lmm_size, logcookies,
490 mds->mds_max_cookiesize,
494 CDEBUG(D_INODE|D_IOCTL, "Error closing file: %d\n", rc);
/* If close produced unlink cookies, destroy the orphan objects. */
496 if (valid & OBD_MD_FLCOOKIE) {
497 rc = mds_osc_destroy_orphan(obd, mode, lmm,
498 lmm_size, logcookies, 1);
500 CDEBUG(D_INODE, "%s: destroy of orphan failed,"
501 " rc = %d\n", obd->obd_name, rc);
504 valid &= ~OBD_MD_FLCOOKIE;
509 OBD_FREE(logcookies, cookie_sz);
510 OBD_FREE(lmm, lmm_sz);
512 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
513 mds_client_free(export);
/* Disconnect a client: invalidate the export first so new requests fail,
 * cancel its DLM locks, drop nid-stats, and flush any replies still
 * outstanding for it.
 * NOTE(review): elided excerpt — the final RETURN is not visible here. */
519 static int mds_disconnect(struct obd_export *exp)
/* Hold an extra reference so the export survives until cleanup is done. */
525 class_export_get(exp);
527 /* Disconnect early so that clients can't keep using export */
528 rc = class_disconnect(exp);
529 if (exp->exp_obd->obd_namespace != NULL)
530 ldlm_cancel_locks_for_export(exp);
532 /* release nid stat refererence */
533 lprocfs_exp_cleanup(exp);
535 /* complete all outstanding replies */
536 spin_lock(&exp->exp_lock);
537 while (!list_empty(&exp->exp_outstanding_replies)) {
538 struct ptlrpc_reply_state *rs =
539 list_entry(exp->exp_outstanding_replies.next,
540 struct ptlrpc_reply_state, rs_exp_list);
541 struct ptlrpc_service *svc = rs->rs_service;
/* srv_lock nests inside exp_lock here; reschedule the reply so the
 * service threads can finish it without this export. */
543 spin_lock(&svc->srv_lock);
544 list_del_init(&rs->rs_exp_list);
545 ptlrpc_schedule_difficult_reply(rs);
546 spin_unlock(&svc->srv_lock);
548 spin_unlock(&exp->exp_lock);
550 class_export_put(exp);
/* MDS_GETSTATUS handler: reply with the root fid of this MDS. */
554 static int mds_getstatus(struct ptlrpc_request *req)
556 struct mds_obd *mds = mds_req2mds(req);
557 struct mds_body *body;
558 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
/* Fault injection: pretend reply packing failed. */
561 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_PACK, req->rq_status = -ENOMEM);
562 rc = lustre_pack_reply(req, 2, size, NULL);
564 RETURN(req->rq_status = rc);
566 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
567 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
569 /* the last_committed and last_xid fields are filled in for all
570 * replies already - no need to do so here also.
575 /* get the LOV EA from @inode and store it into @md. It can be at most
576 * @size bytes, and @size is updated with the actual EA size.
577 * The EA size is also returned on success, and -ve errno on failure.
578 * If there is no EA then 0 is returned. */
/* NOTE(review): elided excerpt — the @lock handling implied by the
 * parameter, and parts of the error/convert paths, are not visible here. */
579 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
580 int *size, int lock, int flags,
587 LOCK_INODE_MUTEX(inode);
588 rc = fsfilt_get_md(obd, inode, md, *size, "lov");
/* For a plain MDS_GETATTR with no EA on the inode, fall back to the
 * filesystem default striping. */
590 if (rc == 0 && flags == MDS_GETATTR)
591 rc = mds_get_default_md(obd, md, &lmm_size);
594 CERROR("Error %d reading eadata for ino %lu\n",
/* Convert on-disk LOV EA to the wire format expected by the client. */
598 rc = mds_convert_lov_ea(obd, inode, md, lmm_size,
611 UNLOCK_INODE_MUTEX(inode);
617 /* Call with lock=1 if you want mds_pack_md to take the i_mutex.
618 * Call with lock=0 if the caller has already taken the i_mutex. */
/* Pack the inode's LOV EA into reply buffer @offset of @msg and set the
 * corresponding valid bits / eadatasize in @body.
 * NOTE(review): elided excerpt — replay-request handling and the return
 * path are only partially visible here. */
619 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
620 struct mds_body *body, struct inode *inode, int lock, int flags,
623 struct mds_obd *mds = &obd->u.mds;
629 lmm = lustre_msg_buf(msg, offset, 0);
631 /* Some problem with getting eadata when I sized the reply
633 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
637 /* if this replay request we should be silencely exist without fill md*/
638 lmm_size = lustre_msg_buflen(msg, offset);
642 /* I don't really like this, but it is a sanity check on the client
643 * MD request. However, if the client doesn't know how much space
644 * to reserve for the MD, it shouldn't be bad to have too much space.
646 if (lmm_size > mds->mds_max_mdsize) {
647 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
648 inode->i_ino, lmm_size, mds->mds_max_mdsize);
652 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock, flags,
/* Directories advertise default striping via FLDIREA, files via FLEASIZE. */
655 if (S_ISDIR(inode->i_mode))
656 body->valid |= OBD_MD_FLDIREA;
658 body->valid |= OBD_MD_FLEASIZE;
659 body->eadatasize = lmm_size;
666 #ifdef CONFIG_FS_POSIX_ACL
/* Read the POSIX access ACL xattr of @inode into reply buffer @repoff and
 * record its size / OBD_MD_FLACL in @repbody.  -ENODATA (no ACL) is not an
 * error.  Compiled out (stub macro below) when the kernel lacks
 * CONFIG_FS_POSIX_ACL. */
668 int mds_pack_posix_acl(struct inode *inode, struct lustre_msg *repmsg,
669 struct mds_body *repbody, int repoff)
/* Temporary dentry on the stack: getxattr() wants a dentry, we have only
 * the inode. */
671 struct dentry de = { .d_inode = inode };
675 LASSERT(repbody->aclsize == 0);
676 LASSERT(lustre_msg_bufcount(repmsg) > repoff);
678 buflen = lustre_msg_buflen(repmsg, repoff);
682 if (!inode->i_op || !inode->i_op->getxattr)
685 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
686 lustre_msg_buf(repmsg, repoff, buflen),
689 repbody->aclsize = rc;
690 else if (rc != -ENODATA) {
691 CERROR("buflen %d, get acl: %d\n", buflen, rc);
696 repbody->valid |= OBD_MD_FLACL;
700 #define mds_pack_posix_acl(inode, repmsg, repbody, repoff) 0
/* Thin wrapper kept for symmetry with other mds_pack_* helpers. */
703 int mds_pack_acl(struct mds_export_data *med, struct inode *inode,
704 struct lustre_msg *repmsg, struct mds_body *repbody,
707 return mds_pack_posix_acl(inode, repmsg, repbody, repoff);
/* Fill the (already packed) getattr reply for @dentry: basic attributes
 * always; then, depending on reqbody->valid, the LOV EA, symlink target,
 * extended flags, max EA/cookie sizes, and ACL.
 * NOTE(review): elided excerpt — several guards (NULL inode, error returns)
 * and the final RETURN are not visible here. */
710 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
711 struct ptlrpc_request *req,
712 struct mds_body *reqbody, int reply_off)
714 struct mds_body *body;
715 struct inode *inode = dentry->d_inode;
723 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
724 LASSERT(body != NULL); /* caller prepped reply */
726 body->flags = reqbody->flags; /* copy MDS_BFLAG_EXT_FLAGS if present */
727 mds_pack_inode2body(body, inode);
/* Case 1: client asked for striping EA (regular file) or default
 * striping (directory). */
730 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
731 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
732 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR &&
733 ((S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))))
736 rc = mds_pack_md(obd, req->rq_repmsg, reply_off, body,
738 req->rq_export->exp_connect_flags);
740 /* If we have LOV EA data, the OST holds size, atime, mtime */
741 if (!(body->valid & OBD_MD_FLEASIZE) &&
742 !(body->valid & OBD_MD_FLDIREA))
743 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
744 OBD_MD_FLATIME | OBD_MD_FLMTIME);
/* Trim the reply buffer down to the actual EA size. */
746 lustre_shrink_reply(req, reply_off, body->eadatasize, 0);
747 if (body->eadatasize)
/* Case 2: symlink and the client wants the link target. */
749 } else if (S_ISLNK(inode->i_mode) &&
750 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
751 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off, 0);
754 LASSERT (symname != NULL); /* caller prepped reply */
755 len = lustre_msg_buflen(req->rq_repmsg, reply_off);
757 rc = inode->i_op->readlink(dentry, symname, len);
759 CERROR("readlink failed: %d\n", rc);
760 } else if (rc != len - 1) {
761 CERROR ("Unexpected readlink rc %d: expecting %d\n",
765 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
766 body->valid |= OBD_MD_LINKNAME;
767 body->eadatasize = rc + 1;
768 symname[rc] = 0; /* NULL terminate */
/* Case 3: ioctl-style query of the full ext3 flag set only. */
772 } else if (reqbody->valid == OBD_MD_FLFLAGS &&
773 reqbody->flags & MDS_BFLAG_EXT_FLAGS) {
776 /* We only return the full set of flags on ioctl, otherwise we
777 * get enough flags from the inode in mds_pack_inode2body(). */
778 rc = fsfilt_iocontrol(obd, inode, NULL, EXT3_IOC_GETFLAGS,
781 body->flags = flags | MDS_BFLAG_EXT_FLAGS;
/* Optionally report the server's max EA / cookie sizes. */
784 if (reqbody->valid & OBD_MD_FLMODEASIZE) {
785 struct mds_obd *mds = mds_req2mds(req);
786 body->max_cookiesize = mds->mds_max_cookiesize;
787 body->max_mdsize = mds->mds_max_mdsize;
788 body->valid |= OBD_MD_FLMODEASIZE;
794 #ifdef CONFIG_FS_POSIX_ACL
795 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
796 (reqbody->valid & OBD_MD_FLACL)) {
797 rc = mds_pack_acl(&req->rq_export->exp_mds_data,
798 inode, req->rq_repmsg,
801 lustre_shrink_reply(req, reply_off, body->aclsize, 0);
/* Size and pack the getattr reply message: probe how much space the LOV EA,
 * symlink name, or ACL will need (per the request's valid bits) and call
 * lustre_pack_reply() with the computed buffer sizes.
 * NOTE(review): elided excerpt — several bufcount increments, error paths
 * and the final return are not visible here. */
810 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
813 struct mds_obd *mds = mds_req2mds(req);
814 struct mds_body *body;
815 int rc, bufcount = 2;
816 int size[4] = { sizeof(struct ptlrpc_body), sizeof(*body) };
819 LASSERT(offset == REQ_REC_OFF); /* non-intent */
821 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
822 LASSERT(body != NULL); /* checked by caller */
823 LASSERT(lustre_req_swabbed(req, offset)); /* swabbed by caller */
/* EA requested: probe its size with a NULL buffer. */
825 if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
826 (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
827 LOCK_INODE_MUTEX(inode);
828 rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0,
830 UNLOCK_INODE_MUTEX(inode);
831 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
/* Directory default-striping reply always reserves a v3 lmm. */
833 if ((rc == 0) && (lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) &&
834 ((S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))))
835 rc = sizeof(struct lov_mds_md_v3);
837 if (rc != -ENODATA) {
838 CERROR("error getting inode %lu MD: rc = %d\n",
843 } else if (rc > mds->mds_max_mdsize) {
845 CERROR("MD size %d larger than maximum possible %u\n",
846 rc, mds->mds_max_mdsize);
/* Symlink: reserve space for the target plus NUL terminator. */
851 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
852 if (i_size_read(inode) + 1 != body->eadatasize)
853 CERROR("symlink size: %Lu, reply space: %d\n",
854 i_size_read(inode) + 1, body->eadatasize);
855 size[bufcount] = min_t(int, i_size_read(inode) + 1,
858 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
859 i_size_read(inode) + 1, body->eadatasize);
862 #ifdef CONFIG_FS_POSIX_ACL
/* ACL: probe the access-ACL xattr size. */
863 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
864 (body->valid & OBD_MD_FLACL)) {
865 struct dentry de = { .d_inode = inode };
868 if (inode->i_op && inode->i_op->getxattr) {
869 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
872 if (rc != -ENODATA) {
873 CERROR("got acl size: %d\n", rc);
883 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
884 CERROR("failed MDS_GETATTR_PACK test\n");
885 req->rq_status = -ENOMEM;
889 rc = lustre_pack_reply(req, bufcount, size, NULL);
/* Getattr-by-name (and intent getattr): look up @name under the parent fid,
 * take parent+child DLM locks (or reuse the lock from a resent request),
 * pack the reply, and fill it via mds_getattr_internal().  Also handles the
 * legacy raw-lookup path that returns only an inode number.
 * NOTE(review): elided excerpt — variable declarations, many error branches,
 * the cleanup switch's other cases and the final return are not visible. */
898 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
899 int child_part, struct lustre_handle *child_lockh)
901 struct obd_device *obd = req->rq_export->exp_obd;
902 struct mds_obd *mds = &obd->u.mds;
903 struct ldlm_reply *rep = NULL;
904 struct lvfs_run_ctxt saved;
905 struct mds_body *body;
906 struct dentry *dparent = NULL, *dchild = NULL;
907 struct lvfs_ucred uc = {NULL,};
908 struct lustre_handle parent_lockh;
910 int rc = 0, cleanup_phase = 0, resent_req = 0;
914 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
916 /* Swab now, before anyone looks inside the request */
917 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
918 lustre_swab_mds_body);
920 CERROR("Can't swab mds_body\n");
924 lustre_set_req_swabbed(req, offset + 1);
925 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
927 CERROR("Can't unpack name\n");
930 namesize = lustre_msg_buflen(req->rq_reqmsg, offset + 1);
931 /* namesize less than 2 means we have empty name, probably came from
932 revalidate by cfid, so no point in having name to be set */
/* Set up the caller's credentials for the VFS operations below. */
936 rc = mds_init_ucred(&uc, req, offset);
940 LASSERT(offset == REQ_REC_OFF || offset == DLM_INTENT_REC_OFF);
941 /* if requests were at offset 2, the getattr reply goes back at 1 */
942 if (offset == DLM_INTENT_REC_OFF) {
943 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
945 offset = DLM_REPLY_REC_OFF;
948 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
949 cleanup_phase = 1; /* kernel context */
950 intent_set_disposition(rep, DISP_LOOKUP_EXECD);
952 /* FIXME: handle raw lookup */
954 if (body->valid == OBD_MD_FLID) {
955 struct mds_body *mds_reply;
956 int size = sizeof(*mds_reply);
958 // The user requested ONLY the inode number, so do a raw lookup
959 rc = lustre_pack_reply(req, 1, &size, NULL);
961 CERROR("out of memory\n");
965 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
967 mds_reply = lustre_msg_buf(req->rq_repmsg, offset,
969 mds_reply->fid1.id = inum;
970 mds_reply->valid = OBD_MD_FLID;
975 /* child_lockh() is only set in fixup_handle_for_resent_req()
976 * if MSG_RESENT is set */
977 if (lustre_handle_is_used(child_lockh)) {
978 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
/* Fresh request: take parent+child locks (or a single UPDATE lock for
 * revalidate-by-fid when no name was sent). */
982 if (resent_req == 0) {
984 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
985 rc = mds_get_parent_child_locked(obd, &obd->u.mds,
989 MDS_INODELOCK_UPDATE,
991 child_lockh, &dchild,
994 /* For revalidate by fid we always take UPDATE lock */
995 dchild = mds_fid2locked_dentry(obd, &body->fid2, NULL,
997 NULL, 0, child_part);
1000 rc = PTR_ERR(dchild);
/* Resent request: the lock was already granted in
 * fixup_handle_for_resent_req(); recover the child fid from the
 * lock's resource instead of re-enqueuing. */
1005 struct ldlm_lock *granted_lock;
1006 struct ll_fid child_fid;
1007 struct ldlm_resource *res;
1008 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
1009 granted_lock = ldlm_handle2lock(child_lockh);
1010 /* lock was granted in fixup_handle_for_resent_req() if
1011 * MSG_RESENT is set */
1012 LASSERTF(granted_lock != NULL, LPU64"/%u lockh "LPX64"\n",
1013 body->fid1.id, body->fid1.generation,
1014 child_lockh->cookie);
1017 res = granted_lock->l_resource;
1018 child_fid.id = res->lr_name.name[0];
1019 child_fid.generation = res->lr_name.name[1];
1020 dchild = mds_fid2dentry(&obd->u.mds, &child_fid, NULL);
1021 if (IS_ERR(dchild)) {
1022 rc = PTR_ERR(dchild);
1023 LCONSOLE_WARN("Child "LPU64"/%u lookup error %d.",
1024 child_fid.id, child_fid.generation, rc);
1027 LDLM_LOCK_PUT(granted_lock);
1030 cleanup_phase = 2; /* dchild, dparent, locks */
1032 if (dchild->d_inode == NULL) {
1033 intent_set_disposition(rep, DISP_LOOKUP_NEG);
1034 /* in the intent case, the policy clears this error:
1035 the disposition is enough */
1036 GOTO(cleanup, rc = -ENOENT);
1038 intent_set_disposition(rep, DISP_LOOKUP_POS);
/* Pack the reply lazily: intent requests arrive with a pre-packed
 * reply, plain getattr does not. */
1041 if (req->rq_repmsg == NULL) {
1042 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
1044 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1049 rc = mds_getattr_internal(obd, dchild, req, body, offset);
1050 GOTO(cleanup, rc); /* returns the lock to the client */
1053 switch (cleanup_phase) {
1055 if (resent_req == 0) {
1056 if (rc && dchild->d_inode)
1057 ldlm_lock_decref(child_lockh, LCK_CR);
1059 ldlm_lock_decref(&parent_lockh, LCK_CR);
1065 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1067 mds_exit_ucred(&uc, mds);
1068 if (!req->rq_packed_final) {
1069 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1072 req->rq_status = rc;
/* Plain getattr-by-fid handler: resolve fid1 to a dentry, size and pack the
 * reply, and fill it via mds_getattr_internal().  No DLM locking here.
 * NOTE(review): elided excerpt — some labels/returns are not visible. */
1078 static int mds_getattr(struct ptlrpc_request *req, int offset)
1080 struct mds_obd *mds = mds_req2mds(req);
1081 struct obd_device *obd = req->rq_export->exp_obd;
1082 struct lvfs_run_ctxt saved;
1084 struct mds_body *body;
1085 struct lvfs_ucred uc = { NULL, };
1089 OBD_COUNTER_INCREMENT(obd, getattr);
1091 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1092 lustre_swab_mds_body);
1096 rc = mds_init_ucred(&uc, req, offset);
1098 GOTO(out_ucred, rc);
1100 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1101 de = mds_fid2dentry(mds, &body->fid1, NULL);
1103 rc = req->rq_status = PTR_ERR(de);
1107 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1109 CERROR("mds_getattr_pack_msg: %d\n", rc);
1113 req->rq_status = mds_getattr_internal(obd, de, req, body,REPLY_REC_OFF);
1118 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/* Make sure an (empty) reply exists even on the error paths. */
1120 if (!req->rq_packed_final) {
1121 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1124 req->rq_status = rc;
1126 mds_exit_ucred(&uc, mds);
/* Refresh (if older than @max_age) and copy the cached filesystem stats
 * under obd_osfs_lock. */
1130 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1131 __u64 max_age, __u32 flags)
1135 spin_lock(&obd->obd_osfs_lock);
1136 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
1138 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1139 spin_unlock(&obd->obd_osfs_lock);
/* MDS_STATFS handler: pack an obd_statfs reply from the (briefly cached)
 * backend filesystem statistics.
 * NOTE(review): elided excerpt — the out label/RETURN are not visible. */
1144 static int mds_statfs(struct ptlrpc_request *req)
1146 struct obd_device *obd = req->rq_export->exp_obd;
1147 struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
1148 int rc, size[2] = { sizeof(struct ptlrpc_body),
1149 sizeof(struct obd_statfs) };
1152 /* This will trigger a watchdog timeout */
1153 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
1154 (MDS_SERVICE_WATCHDOG_FACTOR *
1155 at_get(&svc->srv_at_estimate) / 1000) + 1);
1156 OBD_COUNTER_INCREMENT(obd, statfs);
1158 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
1159 GOTO(out, rc = -ENOMEM);
1160 rc = lustre_pack_reply(req, 2, size, NULL);
1164 /* We call this so that we can cache a bit - 1 jiffie worth */
1165 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1166 size[REPLY_REC_OFF]),
1167 cfs_time_current_64() - HZ, 0);
1169 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1175 req->rq_status = rc;
/* MDS_SYNC handler: fsync the whole backend filesystem; if fid1 names an
 * inode, also return that inode's attributes in the reply.
 * NOTE(review): elided excerpt — a per-inode fsync step implied by the fid
 * lookup is not visible in this view; confirm against full source. */
1179 static int mds_sync(struct ptlrpc_request *req, int offset)
1181 struct obd_device *obd = req->rq_export->exp_obd;
1182 struct mds_obd *mds = &obd->u.mds;
1183 struct mds_body *body;
1184 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
1187 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1188 lustre_swab_mds_body);
1190 GOTO(out, rc = -EFAULT);
1192 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1193 GOTO(out, rc = -ENOMEM);
1194 rc = lustre_pack_reply(req, 2, size, NULL);
1198 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
1199 if (rc == 0 && body->fid1.id != 0) {
1202 de = mds_fid2dentry(mds, &body->fid1, NULL);
1204 GOTO(out, rc = PTR_ERR(de));
1206 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1208 mds_pack_inode2body(body, de->d_inode);
1214 req->rq_status = rc;
1218 /* mds_readpage does not take a DLM lock on the inode, because the client must
1219 * already have a PR lock.
1221 * If we were to take another one here, a deadlock will result, if another
1222 * thread is already waiting for a PW lock. */
/* MDS_READPAGE handler: open the directory named by fid1 and bulk-send the
 * requested byte range to the client via mds_sendpage().  Note the field
 * reuse on the wire: body->size is the offset and body->nlink is the byte
 * count (see the -eeb comments below).
 * NOTE(review): elided excerpt — some declarations and labels are missing
 * from this view. */
1223 static int mds_readpage(struct ptlrpc_request *req, int offset)
1225 struct obd_device *obd = req->rq_export->exp_obd;
1226 struct mds_obd *mds = &obd->u.mds;
1227 struct vfsmount *mnt;
1230 struct mds_body *body, *repbody;
1231 struct lvfs_run_ctxt saved;
1232 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) };
1233 struct lvfs_ucred uc = {NULL,};
1236 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_PACK, -ENOMEM);
1237 rc = lustre_pack_reply(req, 2, size, NULL);
1241 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1242 lustre_swab_mds_body);
1244 GOTO (out, rc = -EFAULT);
1246 rc = mds_init_ucred(&uc, req, offset);
1250 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1251 de = mds_fid2dentry(&obd->u.mds, &body->fid1, &mnt);
1253 GOTO(out_pop, rc = PTR_ERR(de));
1255 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1257 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1258 /* note: in case of an error, dentry_open puts dentry */
1260 GOTO(out_pop, rc = PTR_ERR(file));
/* Both the offset and the length must be block-aligned. */
1262 /* body->size is actually the offset -eeb */
1263 if ((body->size & (de->d_inode->i_sb->s_blocksize - 1)) != 0) {
1264 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1265 body->size, de->d_inode->i_sb->s_blocksize);
1266 GOTO(out_file, rc = -EFAULT);
1269 /* body->nlink is actually the #bytes to read -eeb */
1270 if (body->nlink & (de->d_inode->i_sb->s_blocksize - 1)) {
1271 CERROR("size %u is not multiple of blocksize %lu\n",
1272 body->nlink, de->d_inode->i_sb->s_blocksize);
1273 GOTO(out_file, rc = -EFAULT);
1276 repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1278 repbody->size = i_size_read(file->f_dentry->d_inode);
1279 repbody->valid = OBD_MD_FLSIZE;
1281 /* to make this asynchronous make sure that the handling function
1282 doesn't send a reply when this function completes. Instead a
1283 callback function would send the reply */
1284 /* body->size is actually the offset -eeb */
1285 rc = mds_sendpage(req, file, body->size, body->nlink);
1288 filp_close(file, 0);
1290 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1292 mds_exit_ucred(&uc, mds);
1293 req->rq_status = rc;
/*
 * mds_reint(): unpack an update (reintegration) record from the request and
 * dispatch it to mds_reint_rec().  The record is heap-allocated rather than
 * stack-allocated (see comment below).  `lockh`, when non-NULL, lets the
 * caller (e.g. the intent policy) receive a lock taken during the reint.
 *
 * NOTE(review): elided listing; gaps in the embedded numbering mark missing
 * original lines (e.g. allocation-failure check, out: label, RETURN).
 */
1297 int mds_reint(struct ptlrpc_request *req, int offset,
1298 struct lustre_handle *lockh)
1300 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1303 OBD_ALLOC(rec, sizeof(*rec));
1307 rc = mds_update_unpack(req, offset, rec);
1308 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1309 CERROR("invalid record\n");
1310 GOTO(out, req->rq_status = -EINVAL);
/* Apply root-squash policy to the unpacked credentials before execution. */
1313 mds_root_squash(&req->rq_export->exp_obd->u.mds, &req->rq_peer.nid,
1314 &rec->ur_uc.luc_fsuid, &rec->ur_uc.luc_fsgid,
1315 &rec->ur_uc.luc_cap, &rec->ur_uc.luc_suppgid1,
1316 &rec->ur_uc.luc_suppgid2);
1318 /* rc will be used to interrupt a for loop over multiple records */
1319 rc = mds_reint_rec(rec, offset, req, lockh);
1321 OBD_FREE(rec, sizeof(*rec));
/*
 * mds_filter_recovery_request(): while the MDS is in recovery, decide whether
 * an incoming request may be processed (*process set by the opcode class) or
 * must be rejected with -EAGAIN so the client retries after recovery.
 *
 * NOTE(review): elided listing; the case labels for the queued/allowed opcode
 * groups and the RETURN paths are among the missing lines.
 */
1325 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1326 struct obd_device *obd, int *process)
1328 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1329 case MDS_CONNECT: /* This will never get here, but for completeness. */
1330 case OST_CONNECT: /* This will never get here, but for completeness. */
1331 case MDS_DISCONNECT:
1332 case OST_DISCONNECT:
1337 case MDS_SYNC: /* used in unmounting */
/* Recovery-relevant requests are queued for replay ordering. */
1342 *process = target_queue_recovery_request(req, obd);
1346 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1348 /* XXX what should we set rq_status to here? */
1349 req->rq_status = -EAGAIN;
1350 RETURN(ptlrpc_error(req));
/* Human-readable names for REINT_* opcodes, indexed by opcode; used only for
 * debug logging in mds_handle().  Unlisted opcodes leave NULL entries. */
1354 static char *reint_names[] = {
1355 [REINT_SETATTR] "setattr",
1356 [REINT_CREATE] "create",
1357 [REINT_LINK] "link",
1358 [REINT_UNLINK] "unlink",
1359 [REINT_RENAME] "rename",
1360 [REINT_OPEN] "open",
/*
 * mds_set_info_rpc(): handle MDS_SET_INFO.  Extracts a key/value pair from
 * the request; currently only the "read-only" key (and its broken 1.6.6
 * spelling) is visibly handled, toggling OBD_CONNECT_RDONLY on the export.
 *
 * NOTE(review): elided listing; NULL-key check body, error returns, and the
 * closing of the KEY_IS branch are among the missing lines.
 */
1363 static int mds_set_info_rpc(struct obd_export *exp, struct ptlrpc_request *req)
1366 int keylen, vallen, rc = 0;
1369 key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1);
1371 DEBUG_REQ(D_HA, req, "no set_info key");
1374 keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF);
1376 val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, 0);
1377 vallen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1);
1379 rc = lustre_pack_reply(req, 1, NULL, NULL);
1383 lustre_msg_set_status(req->rq_repmsg, 0);
1385 /* Accept the broken "read-only" key from 1.6.6 clients. b=17493 */
1386 if (KEY_IS(KEY_READONLY) || KEY_IS(KEY_READONLY_166COMPAT)) {
1387 if (val == NULL || vallen < sizeof(__u32)) {
1388 DEBUG_REQ(D_HA, req, "no set_info val");
/* Value presumably selects set vs. clear of the RDONLY flag — the test
 * on *val is elided here; TODO confirm against full source. */
1393 exp->exp_connect_flags |= OBD_CONNECT_RDONLY;
1395 exp->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
1403 #ifdef HAVE_QUOTA_SUPPORT
/*
 * mds_handle_quotacheck(): handle MDS_QUOTACHECK — unpack the quotactl
 * control block, pack an (empty) reply, and run obd_quotacheck(), storing
 * its result in rq_status.  Elided listing; error checks are missing.
 */
1404 static int mds_handle_quotacheck(struct ptlrpc_request *req)
1406 struct obd_quotactl *oqctl;
1410 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1411 lustre_swab_obd_quotactl);
1415 rc = lustre_pack_reply(req, 1, NULL, NULL);
1419 req->rq_status = obd_quotacheck(req->rq_export, oqctl);
/*
 * mds_handle_quotactl(): handle MDS_QUOTACTL — like quotacheck, but the
 * reply carries an obd_quotactl block back to the client.  Elided listing;
 * error checks and the copy into `repoqc` are among the missing lines.
 */
1423 static int mds_handle_quotactl(struct ptlrpc_request *req)
1425 struct obd_quotactl *oqctl, *repoqc;
1426 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repoqc) };
1429 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1430 lustre_swab_obd_quotactl);
1434 rc = lustre_pack_reply(req, 2, size, NULL);
1438 repoqc = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*repoqc));
1440 req->rq_status = obd_quotactl(req->rq_export, oqctl);
/*
 * mds_msg_check_version(): verify that the wire protocol version stamped in
 * an incoming message matches the version expected for its opcode class
 * (OBD / MDS / DLM / LOG).  Unknown opcodes fall through to an error.
 *
 * NOTE(review): elided listing; many case labels per group and the rc
 * returns are among the missing lines.
 */
1446 static int mds_msg_check_version(struct lustre_msg *msg)
1450 switch (lustre_msg_get_opc(msg)) {
1452 case MDS_DISCONNECT:
1454 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
1456 CERROR("bad opc %u version %08x, expecting %08x\n",
1457 lustre_msg_get_opc(msg),
1458 lustre_msg_get_version(msg),
1459 LUSTRE_OBD_VERSION);
1463 case MDS_GETATTR_NAME:
1468 case MDS_DONE_WRITING:
1474 case MDS_QUOTACHECK:
1478 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
1480 CERROR("bad opc %u version %08x, expecting %08x\n",
1481 lustre_msg_get_opc(msg),
1482 lustre_msg_get_version(msg),
1483 LUSTRE_MDS_VERSION);
1487 case LDLM_BL_CALLBACK:
1488 case LDLM_CP_CALLBACK:
1489 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
1491 CERROR("bad opc %u version %08x, expecting %08x\n",
1492 lustre_msg_get_opc(msg),
1493 lustre_msg_get_version(msg),
1494 LUSTRE_DLM_VERSION);
1496 case OBD_LOG_CANCEL:
1497 case LLOG_ORIGIN_HANDLE_CREATE:
1498 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1499 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1500 case LLOG_ORIGIN_HANDLE_CLOSE:
1501 case LLOG_ORIGIN_HANDLE_DESTROY:
1502 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1504 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
1506 CERROR("bad opc %u version %08x, expecting %08x\n",
1507 lustre_msg_get_opc(msg),
1508 lustre_msg_get_version(msg),
1509 LUSTRE_LOG_VERSION);
1512 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
/*
 * mds_handle(): top-level dispatcher for every RPC arriving at the MDS
 * service.  Flow: verify message version -> for non-CONNECT requests verify
 * an export exists and sanity-check resent xids -> gate requests during
 * recovery -> switch on opcode to the specific handler -> stamp last_xid /
 * last_committed into the reply and send it via target_handle_reply().
 *
 * NOTE(review): elided listing; gaps in the embedded numbering mark missing
 * original lines (declarations of rc/opc/op/bufcount, many case labels,
 * break statements, the `out:` label, closing braces).
 */
1518 int mds_handle(struct ptlrpc_request *req)
1520 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
1522 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1523 struct obd_device *obd = NULL;
1526 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
/* Handlers must not enter/leave with a journal transaction open. */
1528 LASSERT(current->journal_info == NULL);
1530 rc = mds_msg_check_version(req->rq_reqmsg);
1532 CERROR("MDS drop mal-formed request\n");
1536 /* XXX identical to OST */
1537 if (lustre_msg_get_opc(req->rq_reqmsg) != MDS_CONNECT) {
1538 struct mds_export_data *med;
1541 if (req->rq_export == NULL) {
1542 CERROR("operation %d on unconnected MDS from %s\n",
1543 lustre_msg_get_opc(req->rq_reqmsg),
1544 libcfs_id2str(req->rq_peer));
1545 req->rq_status = -ENOTCONN;
1546 GOTO(out, rc = -ENOTCONN);
1549 med = &req->rq_export->exp_mds_data;
1550 obd = req->rq_export->exp_obd;
1553 /* sanity check: if the xid matches, the request must
1554 * be marked as a resent or replayed */
1555 if (req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_xid) ||
1556 req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_close_xid))
1557 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
1558 (MSG_RESENT | MSG_REPLAY))) {
1559 CERROR("rq_xid "LPU64" matches last_xid, "
1560 "expected RESENT flag\n",
1562 req->rq_status = -ENOTCONN;
1563 GOTO(out, rc = -EFAULT);
1565 /* else: note the opposite is not always true; a
1566 * RESENT req after a failover will usually not match
1567 * the last_xid, since it was likely never
1568 * committed. A REPLAYed request will almost never
1569 * match the last xid, however it could for a
1570 * committed, but still retained, open. */
1572 /* Check for aborted recovery. */
1573 spin_lock_bh(&obd->obd_processing_task_lock)
1574 recovering = obd->obd_recovering;
1575 spin_unlock_bh(&obd->obd_processing_task_lock);
/* While recovering, filter which requests may run now. */
1577 target_recovery_check_and_stop(obd) == 0) {
1578 rc = mds_filter_recovery_request(req, obd,
1580 if (rc || !should_process)
1585 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1587 DEBUG_REQ(D_INODE, req, "connect");
1588 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1589 rc = target_handle_connect(req, mds_handle);
1591 /* Now that we have an export, set mds. */
1592 obd = req->rq_export->exp_obd;
1593 mds = mds_req2mds(req);
1597 case MDS_DISCONNECT:
1598 DEBUG_REQ(D_INODE, req, "disconnect");
1599 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1600 rc = target_handle_disconnect(req);
1601 req->rq_status = rc; /* superfluous? */
1605 DEBUG_REQ(D_INODE, req, "getstatus");
1606 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1607 rc = mds_getstatus(req);
1611 DEBUG_REQ(D_INODE, req, "getattr");
1612 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1613 rc = mds_getattr(req, REQ_REC_OFF);
1617 DEBUG_REQ(D_INODE, req, "setxattr");
1618 OBD_FAIL_RETURN(OBD_FAIL_MDS_SETXATTR_NET, 0);
1619 rc = mds_setxattr(req);
1623 DEBUG_REQ(D_INODE, req, "getxattr");
1624 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETXATTR_NET, 0);
1625 rc = mds_getxattr(req);
1628 case MDS_GETATTR_NAME: {
1629 struct lustre_handle lockh = { 0 };
1630 DEBUG_REQ(D_INODE, req, "getattr_name");
1631 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1633 /* If this request gets a reconstructed reply, we won't be
1634 * acquiring any new locks in mds_getattr_lock, so we don't
1637 rc = mds_getattr_lock(req, REQ_REC_OFF, MDS_INODELOCK_UPDATE,
1639 /* this non-intent call (from an ioctl) is special */
1640 req->rq_status = rc;
1641 if (rc == 0 && lustre_handle_is_used(&lockh))
1642 ldlm_lock_decref(&lockh, LCK_CR);
1646 DEBUG_REQ(D_INODE, req, "statfs");
1647 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1648 rc = mds_statfs(req);
1652 DEBUG_REQ(D_INODE, req, "readpage");
1653 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1654 rc = mds_readpage(req, REQ_REC_OFF);
1656 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
/* MDS_REINT: peek at the reint opcode to size the reply buffers and
 * bump the matching lprocfs counter before dispatching to mds_reint(). */
1663 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF,
1667 int size[4] = { sizeof(struct ptlrpc_body),
1668 sizeof(struct mds_body),
1669 mds->mds_max_mdsize,
1670 mds->mds_max_cookiesize };
1673 /* NB only peek inside req now; mds_reint() will swab it */
1675 CERROR ("Can't inspect opcode\n");
1680 if (lustre_req_need_swab(req))
/* NOTE(review): the ternary condition below looks inverted — as written
 * it indexes reint_names[opc] when opc is in range OR the entry is NULL,
 * i.e. exactly when the lookup is unsafe/NULL; upstream uses
 * (opc < N && reint_names[opc] != NULL).  The else-arm is elided here,
 * so confirm against the full source before changing. */
1683 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1684 (opc < sizeof(reint_names) / sizeof(reint_names[0]) ||
1685 reint_names[opc] == NULL) ? reint_names[opc] :
1690 op = PTLRPC_LAST_CNTR + MDS_REINT_CREATE;
1693 op = PTLRPC_LAST_CNTR + MDS_REINT_LINK;
1696 op = PTLRPC_LAST_CNTR + MDS_REINT_OPEN;
1699 op = PTLRPC_LAST_CNTR + MDS_REINT_SETATTR;
1702 op = PTLRPC_LAST_CNTR + MDS_REINT_RENAME;
1705 op = PTLRPC_LAST_CNTR + MDS_REINT_UNLINK;
1712 if (op && req->rq_rqbd->rqbd_service->srv_stats)
1713 lprocfs_counter_incr(
1714 req->rq_rqbd->rqbd_service->srv_stats, op);
1716 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
/* unlink/rename and open need differently-sized replies (cookies / EA). */
1718 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1720 else if (opc == REINT_OPEN)
1725 rc = lustre_pack_reply(req, bufcount, size, NULL);
1729 rc = mds_reint(req, REQ_REC_OFF, NULL);
1730 fail = OBD_FAIL_MDS_REINT_NET_REP;
1735 DEBUG_REQ(D_INODE, req, "close");
1736 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1737 rc = mds_close(req, REQ_REC_OFF);
1738 fail = OBD_FAIL_MDS_CLOSE_NET_REP;
1741 case MDS_DONE_WRITING:
1742 DEBUG_REQ(D_INODE, req, "done_writing");
1743 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
1744 rc = mds_done_writing(req, REQ_REC_OFF);
1748 DEBUG_REQ(D_INODE, req, "pin");
1749 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1750 rc = mds_pin(req, REQ_REC_OFF);
1754 DEBUG_REQ(D_INODE, req, "sync");
1755 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
1756 rc = mds_sync(req, REQ_REC_OFF);
1760 DEBUG_REQ(D_INODE, req, "set_info");
1761 rc = mds_set_info_rpc(req->rq_export, req);
1763 #ifdef HAVE_QUOTA_SUPPORT
1764 case MDS_QUOTACHECK:
1765 DEBUG_REQ(D_INODE, req, "quotacheck");
1766 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACHECK_NET, 0);
1767 rc = mds_handle_quotacheck(req);
1771 DEBUG_REQ(D_INODE, req, "quotactl");
1772 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACTL_NET, 0);
1773 rc = mds_handle_quotactl(req);
1777 DEBUG_REQ(D_INODE, req, "ping");
1778 rc = target_handle_ping(req);
1779 if (req->rq_export->exp_delayed)
1780 mds_update_client_epoch(req->rq_export);
1783 case OBD_LOG_CANCEL:
1784 CDEBUG(D_INODE, "log cancel\n");
1785 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1786 rc = -ENOTSUPP; /* la la la */
1790 DEBUG_REQ(D_INODE, req, "enqueue");
1791 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1792 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1793 ldlm_server_blocking_ast, NULL);
1794 fail = OBD_FAIL_LDLM_REPLY;
1797 DEBUG_REQ(D_INODE, req, "convert");
1798 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1799 rc = ldlm_handle_convert(req);
1801 case LDLM_BL_CALLBACK:
1802 case LDLM_CP_CALLBACK:
1803 DEBUG_REQ(D_INODE, req, "callback");
1804 CERROR("callbacks should not happen on MDS\n");
1806 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1808 case LLOG_ORIGIN_HANDLE_CREATE:
1809 DEBUG_REQ(D_INODE, req, "llog_init");
1810 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1811 rc = llog_origin_handle_create(req);
1813 case LLOG_ORIGIN_HANDLE_DESTROY:
1814 DEBUG_REQ(D_INODE, req, "llog_init");
1815 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1816 rc = llog_origin_handle_destroy(req);
1818 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1819 DEBUG_REQ(D_INODE, req, "llog next block");
1820 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1821 rc = llog_origin_handle_next_block(req);
1823 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1824 DEBUG_REQ(D_INODE, req, "llog prev block");
1825 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1826 rc = llog_origin_handle_prev_block(req);
1828 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1829 DEBUG_REQ(D_INODE, req, "llog read header");
1830 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1831 rc = llog_origin_handle_read_header(req);
1833 case LLOG_ORIGIN_HANDLE_CLOSE:
1834 DEBUG_REQ(D_INODE, req, "llog close");
1835 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1836 rc = llog_origin_handle_close(req);
1839 DEBUG_REQ(D_INODE, req, "llog catinfo");
1840 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1841 rc = llog_catinfo(req);
1844 req->rq_status = -ENOTSUPP;
1845 rc = ptlrpc_error(req);
1849 LASSERT(current->journal_info == NULL);
1851 /* If we're DISCONNECTing, the mds_export_data is already freed */
1852 if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != MDS_DISCONNECT) {
1853 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1855 /* I don't think last_xid is used for anyway, so I'm not sure
1856 if we need to care about last_close_xid here.*/
1857 lustre_msg_set_last_xid(req->rq_repmsg,
1858 le64_to_cpu(med->med_lcd->lcd_last_xid));
1859 target_committed_to_req(req);
1864 return target_handle_reply(req, rc, fail);
1867 /* Update the server data on disk. This stores the new mount_count and
1868 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1869 * then the server last_rcvd value may be less than that of the clients.
1870 * This will alert us that we may need to do client recovery.
1872 * Also assumes for mds_last_transno that we are not modifying it (no locking).
1874 int mds_update_server_data(struct obd_device *obd, int force_sync)
1876 struct mds_obd *mds = &obd->u.mds;
1877 struct lr_server_data *lsd = mds->mds_server_data;
1878 struct file *filp = mds->mds_rcvd_filp;
1879 struct lvfs_run_ctxt saved;
1884 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1885 mds->mds_mount_count, mds->mds_last_transno);
/* Snapshot last_transno under the transno lock before writing it out. */
1887 spin_lock(&mds->mds_transno_lock);
1888 lsd->lsd_last_transno = cpu_to_le64(mds->mds_last_transno);
1889 spin_unlock(&mds->mds_transno_lock);
/* Write the lr_server_data record to the last_rcvd file in the MDS's own
 * filesystem context; force_sync is passed through to fsfilt. */
1891 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1892 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off,force_sync);
1893 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1895 CERROR("error writing MDS server data: rc = %d\n", rc);
/*
 * fsoptions_to_mds_flags(): scan a comma-separated mount-option string and
 * set/clear the corresponding mds_obd flags: (no)user_xattr and (no)acl.
 * ACL options are only honored when CONFIG_FS_POSIX_ACL is built in.
 *
 * NOTE(review): elided listing; the outer loop over tokens, `p`/`len`
 * setup, and several closing braces/#endif lines are missing here.
 */
1900 static void fsoptions_to_mds_flags(struct mds_obd *mds, char *options)
1910 while (*p && *p != ',')
1914 if (len == sizeof("user_xattr") - 1 &&
1915 memcmp(options, "user_xattr", len) == 0) {
1916 mds->mds_fl_user_xattr = 1;
1917 LCONSOLE_INFO("Enabling user_xattr\n");
1918 } else if (len == sizeof("nouser_xattr") - 1 &&
1919 memcmp(options, "nouser_xattr", len) == 0) {
1920 mds->mds_fl_user_xattr = 0;
1921 LCONSOLE_INFO("Disabling user_xattr\n");
1922 } else if (len == sizeof("acl") - 1 &&
1923 memcmp(options, "acl", len) == 0) {
1924 #ifdef CONFIG_FS_POSIX_ACL
1925 mds->mds_fl_acl = 1;
1926 LCONSOLE_INFO("Enabling ACL\n");
1928 CWARN("ignoring unsupported acl mount option\n");
1930 } else if (len == sizeof("noacl") - 1 &&
1931 memcmp(options, "noacl", len) == 0) {
1932 #ifdef CONFIG_FS_POSIX_ACL
1933 mds->mds_fl_acl = 0;
1934 LCONSOLE_INFO("Disabling ACL\n");
1942 /* mount the file system (secretly). lustre_cfg parameters are:
/*
 * mds_setup(): obd setup method for the MDS.  Attaches to the mount done in
 * lustre_fill_super, initializes fsfilt ops, locks/namespace, procfs stats,
 * the last_rcvd state (mds_fs_setup), the LOV profile, quota, the group
 * upcall cache, and finally mds_postsetup(); on failure it unwinds through
 * the err_* labels in reverse order.
 *
 * NOTE(review): elided listing; gaps in the embedded numbering mark missing
 * lines (declarations of rc/ns_name/uuid_ptr/label/str, several error
 * checks, RETURNs, and closing braces).
 */
1948 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1950 struct lprocfs_static_vars lvars;
1951 struct lustre_cfg* lcfg = buf;
1952 struct mds_obd *mds = &obd->u.mds;
1953 struct lustre_sb_info *lsi;
1954 struct lustre_mount_info *lmi;
1955 struct vfsmount *mnt;
1956 struct obd_uuid uuid;
1963 /* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv */
/* mds_obt must alias u.obt so common obd_target code works on this device. */
1965 CLASSERT(offsetof(struct obd_device, u.obt) ==
1966 offsetof(struct obd_device, u.mds.mds_obt));
1968 if (lcfg->lcfg_bufcount < 3)
1971 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
1974 lmi = server_get_mount(obd->obd_name);
1976 CERROR("Not mounted in lustre_fill_super?\n");
1980 /* We mounted in lustre_fill_super.
1981 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1982 lsi = s2lsi(lmi->lmi_sb);
1983 fsoptions_to_mds_flags(mds, lsi->lsi_ldd->ldd_mount_opts);
1984 fsoptions_to_mds_flags(mds, lsi->lsi_lmd->lmd_opts);
1986 obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
1987 if (IS_ERR(obd->obd_fsops))
1988 GOTO(err_put, rc = PTR_ERR(obd->obd_fsops));
1990 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
1992 LASSERT(!lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb)));
1994 sema_init(&mds->mds_epoch_sem, 1);
1995 spin_lock_init(&mds->mds_transno_lock);
1996 mds->mds_max_mdsize = sizeof(struct lov_mds_md_v3);
1997 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
1998 mds->mds_atime_diff = MAX_ATIME_DIFF;
1999 mds->mds_evict_ost_nids = 1;
2000 /* sync permission changes */
2001 mds->mds_sync_permission = 0;
/* Per-device DLM namespace with intent handling (mds_intent_policy). */
2003 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
2004 obd->obd_namespace = ldlm_namespace_new(obd, ns_name, LDLM_NAMESPACE_SERVER,
2005 LDLM_NAMESPACE_GREEDY);
2006 if (obd->obd_namespace == NULL) {
2008 GOTO(err_ops, rc = -ENOMEM);
2010 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
2012 lprocfs_mds_init_vars(&lvars);
2013 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
2014 lprocfs_alloc_obd_stats(obd, LPROC_MDS_LAST) == 0) {
2015 /* Init private stats here */
2016 mds_stats_counter_init(obd->obd_stats);
2017 #ifdef HAVE_DELAYED_RECOVERY
2018 lprocfs_obd_attach_stale_exports(obd);
2020 obd->obd_proc_exports_entry = proc_mkdir("exports",
2021 obd->obd_proc_entry);
2024 rc = mds_fs_setup(obd, mnt);
2026 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
2031 if (obd->obd_proc_exports_entry)
2032 lprocfs_add_simple(obd->obd_proc_exports_entry,
2033 "clear", lprocfs_nid_stats_clear_read,
2034 lprocfs_nid_stats_clear_write, obd, NULL);
/* Optional 4th config buffer names the LOV profile to connect to later. */
2036 if (lcfg->lcfg_bufcount >= 4 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
2039 ll_generate_random_uuid(uuid);
2040 class_uuid_unparse(uuid, &mds->mds_lov_uuid);
2042 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
2043 if (mds->mds_profile == NULL)
2044 GOTO(err_fs, rc = -ENOMEM);
2046 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
2047 LUSTRE_CFG_BUFLEN(lcfg, 3));
2050 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2051 "mds_ldlm_client", &obd->obd_ldlm_client);
2052 obd->obd_replayable = 1;
2054 rc = lquota_setup(mds_quota_interface_ref, obd);
2058 mds->mds_group_hash = upcall_cache_init(obd->obd_name);
2059 if (IS_ERR(mds->mds_group_hash)) {
2060 rc = PTR_ERR(mds->mds_group_hash);
2061 mds->mds_group_hash = NULL;
2062 GOTO(err_qctxt, rc);
2065 /* Don't wait for mds_postrecov trying to clear orphans */
2066 obd->obd_async_recov = 1;
2067 rc = mds_postsetup(obd);
2068 /* Bug 11557 - allow async abort_recov start
2069 FIXME can remove most of this obd_async_recov plumbing
2070 obd->obd_async_recov = 0;
2073 GOTO(err_qctxt, rc);
2075 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2076 if (uuid_ptr != NULL) {
2077 class_uuid_unparse(uuid_ptr, &uuid);
2083 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2084 if (obd->obd_recovering) {
2085 LCONSOLE_WARN("MDT %s now serving %s (%s%s%s), but will be in "
2086 "recovery for at least %d:%.02d, or until %d "
2087 "client%s reconnect. During this time new clients"
2088 " will not be allowed to connect. "
2089 "Recovery progress can be monitored by watching "
2090 "/proc/fs/lustre/mds/%s/recovery_status.\n",
2091 obd->obd_name, lustre_cfg_string(lcfg, 1),
2092 label ?: "", label ? "/" : "", str,
2093 obd->obd_recovery_timeout / 60,
2094 obd->obd_recovery_timeout % 60,
2095 obd->obd_recoverable_clients,
2096 (obd->obd_recoverable_clients == 1) ? "":"s",
2099 LCONSOLE_INFO("MDT %s now serving %s (%s%s%s) with recovery "
2100 "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
2101 label ?: "", label ? "/" : "", str,
2102 obd->obd_replayable ? "enabled" : "disabled");
2105 /* Reduce the initial timeout on an MDS because it doesn't need such
2106 * a long timeout as an OST does. Adaptive timeouts will adjust this
2107 * value appropriately. */
2108 if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
2109 ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT;
/* Error unwind: each label releases what was acquired after the previous. */
2114 lquota_cleanup(mds_quota_interface_ref, obd);
2116 /* No extra cleanup needed for llog_init_commit_thread() */
2117 mds_fs_cleanup(obd);
2118 upcall_cache_cleanup(mds->mds_group_hash);
2119 mds->mds_group_hash = NULL;
2121 lprocfs_free_obd_stats(obd);
2122 lprocfs_obd_cleanup(obd);
2123 ldlm_namespace_free(obd->obd_namespace, NULL, 0);
2124 obd->obd_namespace = NULL;
2126 fsfilt_put_ops(obd->obd_fsops);
2128 server_put_mount(obd->obd_name, mnt);
2129 obd->u.obt.obt_sb = NULL;
/*
 * mds_lov_clean(): release the LOV attachment — free the stored profile
 * name, detach as observer, propagate force/fail shutdown flags to the LOV
 * device, disconnect the export, and manually clean the LOV obd.
 * Elided listing; the osc NULL check and RETURN lines are missing.
 */
2133 static int mds_lov_clean(struct obd_device *obd)
2135 struct mds_obd *mds = &obd->u.mds;
2136 struct obd_device *osc = mds->mds_osc_obd;
2139 if (mds->mds_profile) {
2140 class_del_profile(mds->mds_profile);
2141 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
2142 mds->mds_profile = NULL;
2145 /* There better be a lov */
2149 RETURN(PTR_ERR(osc));
2151 obd_register_observer(osc, NULL);
2153 /* Give lov our same shutdown flags */
2154 osc->obd_force = obd->obd_force;
2155 osc->obd_fail = obd->obd_fail;
2157 /* Cleanup the lov */
2158 obd_disconnect(mds->mds_osc_exp);
2159 class_manual_cleanup(osc);
2160 mds->mds_osc_exp = NULL;
/*
 * mds_postsetup(): second-stage setup — create the config and LOV-EA llog
 * contexts, then (if a profile was configured) look it up and connect to
 * the named LOV.  The err_cleanup path tears the llog contexts back down.
 * Elided listing; rc checks, llog operations arguments, and RETURNs are
 * among the missing lines.
 */
2165 static int mds_postsetup(struct obd_device *obd)
2167 struct mds_obd *mds = &obd->u.mds;
2168 struct llog_ctxt *ctxt;
2172 rc = llog_setup(obd, LLOG_CONFIG_ORIG_CTXT, obd, 0, NULL,
2177 rc = llog_setup(obd, LLOG_LOVEA_ORIG_CTXT, obd, 0, NULL,
2182 if (mds->mds_profile) {
2183 struct lustre_profile *lprof;
2184 /* The profile defines which osc and mdc to connect to, for a
2185 client. We reuse that here to figure out the name of the
2186 lov to use (and ignore lprof->lp_mdc).
2187 The profile was set in the config log with
2188 LCFG_MOUNTOPT profilenm oscnm mdcnm */
2189 lprof = class_get_profile(mds->mds_profile);
2190 if (lprof == NULL) {
2191 CERROR("No profile found: %s\n", mds->mds_profile);
2192 GOTO(err_cleanup, rc = -ENOENT);
2194 rc = mds_lov_connect(obd, lprof->lp_osc);
2196 GOTO(err_cleanup, rc);
/* err_cleanup: undo llog contexts set up above. */
2203 ctxt = llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT);
2207 ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
/*
 * mds_postrecov(): run after client recovery completes — update the boot
 * epoch (VBR), clean the PENDING (orphan) directory, notify the LOV so it
 * fans the notification out to each target, and kick off quota recovery.
 * Elided listing; rc declaration and return are among the missing lines.
 */
2213 int mds_postrecov(struct obd_device *obd)
2221 LASSERT(!obd->obd_recovering);
2223 /* VBR: update boot epoch after recovery */
2224 mds_update_last_epoch(obd);
2226 /* clean PENDING dir */
2227 rc = mds_cleanup_pending(obd);
2230 /* FIXME Does target_finish_recovery really need this to block? */
2231 /* Notify the LOV, which will in turn call mds_notify for each tgt */
2232 /* This means that we have to hack obd_notify to think we're obd_set_up
2233 during mds_lov_connect. */
2234 obd_notify(obd->u.mds.mds_osc_obd, NULL,
2235 obd->obd_async_recov ? OBD_NOTIFY_SYNC_NONBLOCK :
2236 OBD_NOTIFY_SYNC, NULL);
2238 /* quota recovery */
2239 lquota_recovery(mds_quota_interface_ref, obd);
2245 /* We need to be able to stop an mds_lov_synchronize */
/* Early LOV pre-cleanup: no-op unless there is an attached LOV and the
 * shutdown is forced or failing; otherwise aborts in-flight LOV work. */
2246 static int mds_lov_early_clean(struct obd_device *obd)
2248 struct mds_obd *mds = &obd->u.mds;
2249 struct obd_device *osc = mds->mds_osc_obd;
2251 if (!osc || (!obd->obd_force && !obd->obd_fail))
2254 CDEBUG(D_HA, "abort inflight\n");
2255 return (obd_precleanup(osc, OBD_CLEANUP_EARLY));
/*
 * mds_precleanup(): staged pre-cleanup dispatcher.  EXPORTS: stop recovery
 * and abort LOV work; SELF_EXP: disconnect the LOV and tear down the llog
 * contexts.  Elided listing; rc declaration, break statements, and the
 * closing of the switch/RETURN are missing here.
 */
2258 static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2264 case OBD_CLEANUP_EARLY:
2266 case OBD_CLEANUP_EXPORTS:
2267 target_cleanup_recovery(obd);
2268 mds_lov_early_clean(obd);
2270 case OBD_CLEANUP_SELF_EXP:
2271 mds_lov_disconnect(obd);
2273 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2274 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
2275 rc = obd_llog_finish(obd, 0);
2277 case OBD_CLEANUP_OBD:
/*
 * mds_cleanup(): final teardown of the MDS device — drop the LOV export
 * ref, remove proc entries and stats, quota cleanup, flush server data to
 * last_rcvd, release the filesystem state and mount, free the DLM
 * namespace, cancel any still-armed recovery timer, and drop fsfilt ops.
 * Elided listing; some RETURN lines and braces are missing.
 */
2283 static int mds_cleanup(struct obd_device *obd)
2285 struct mds_obd *mds = &obd->u.mds;
2286 lvfs_sbdev_type save_dev;
2289 if (obd->u.obt.obt_sb == NULL)
2291 save_dev = lvfs_sbdev(obd->u.obt.obt_sb);
2293 if (mds->mds_osc_exp)
2294 /* lov export was disconnected by mds_lov_clean;
2295 we just need to drop our ref */
2296 class_export_put(mds->mds_osc_exp);
2298 remove_proc_entry("clear", obd->obd_proc_exports_entry);
2299 lprocfs_free_per_client_stats(obd);
2300 lprocfs_free_obd_stats(obd);
2301 lprocfs_obd_cleanup(obd);
2303 lquota_cleanup(mds_quota_interface_ref, obd);
/* Persist mount_count/last_transno synchronously before unmounting. */
2305 mds_update_server_data(obd, 1);
2306 mds_fs_cleanup(obd);
2308 upcall_cache_cleanup(mds->mds_group_hash);
2309 mds->mds_group_hash = NULL;
2311 server_put_mount(obd->obd_name, mds->mds_vfsmnt);
2312 obd->u.obt.obt_sb = NULL;
2314 ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2315 obd->obd_namespace = NULL;
2317 spin_lock_bh(&obd->obd_processing_task_lock);
2318 if (obd->obd_recovering) {
2319 target_cancel_recovery_timer(obd);
2320 obd->obd_recovering = 0;
2322 spin_unlock_bh(&obd->obd_processing_task_lock);
2324 fsfilt_put_ops(obd->obd_fsops);
2326 LCONSOLE_INFO("MDT %s has stopped.\n", obd->obd_name);
/*
 * fixup_handle_for_resent_req(): for a MSG_RESENT request, look up the
 * client's remote lock handle in the export's lock hash; if the original
 * lock still exists, restore its cookie into `lockh` (and optionally return
 * it via `old_lock`) so the resent enqueue reuses it.  If no lock is found
 * but the xid matches a last_(close_)xid, the resend is accepted as-is;
 * otherwise MSG_RESENT is cleared and the request is handled as new.
 *
 * NOTE(review): elided listing; the lock!=NULL branch structure, returns,
 * and closing braces are among the missing lines.
 */
2331 static void fixup_handle_for_resent_req(struct ptlrpc_request *req, int offset,
2332 struct ldlm_lock *new_lock,
2333 struct ldlm_lock **old_lock,
2334 struct lustre_handle *lockh)
2336 struct obd_export *exp = req->rq_export;
2337 struct ldlm_request *dlmreq =
2338 lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*dlmreq));
2339 struct lustre_handle remote_hdl = dlmreq->lock_handle[0];
2340 struct ldlm_lock *lock;
2342 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
2345 lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
2347 if (lock != new_lock) {
2348 lockh->cookie = lock->l_handle.h_cookie;
2349 LDLM_DEBUG(lock, "restoring lock cookie");
2350 DEBUG_REQ(D_DLMTRACE, req, "restoring lock cookie "
2351 LPX64, lockh->cookie);
2353 *old_lock = LDLM_LOCK_GET(lock);
2355 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2358 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2361 /* If the xid matches, then we know this is a resent request,
2362 * and allow it. (It's probably an OPEN, for which we don't
2365 le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_xid))
2369 le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_close_xid))
2372 /* This remote handle isn't enqueued, so we never received or
2373 * processed this request. Clear MSG_RESENT, because it can
2374 * be handled like any normal request now. */
2376 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2378 DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
/* Test whether `flag` disposition bit(s) are set in an intent lock reply
 * (lock_policy_res1).  Elided: the NULL-rep guard line is missing here. */
2382 int intent_disposition(struct ldlm_reply *rep, int flag)
2386 return (rep->lock_policy_res1 & flag);
/* Set `flag` disposition bit(s) in an intent lock reply (lock_policy_res1).
 * Elided: the NULL-rep guard line is missing here. */
2389 void intent_set_disposition(struct ldlm_reply *rep, int flag)
2393 rep->lock_policy_res1 |= flag;
/* True for errors that mean the client is disconnected (ENOTCONN/ENODEV);
 * used by mds_intent_policy() to decide how to report an open failure. */
2396 #define IS_CLIENT_DISCONNECT_ERROR(error) \
2397 (error == -ENOTCONN || error == -ENODEV)
/*
 * mds_intent_policy(): DLM intent callback for the MDS namespace.  Unpacks
 * the ldlm_intent, sizes/packs the reply (extra ACL or unlink-cookie buffer
 * as needed), executes the intent (open/create via mds_reint, getattr/
 * lookup via mds_getattr_lock), and then either aborts the lock or replaces
 * *lockp with the lock the handler actually took, transplanting the
 * client-visible handle and ASTs onto it (ELDLM_LOCK_REPLACED).
 *
 * NOTE(review): elided listing; gaps in the embedded numbering mark missing
 * lines (several case labels such as IT_OPEN/IT_GETATTR/IT_LOOKUP, RETURNs,
 * braces, and the retry/ELDLM_OK paths).
 */
2399 static int mds_intent_policy(struct ldlm_namespace *ns,
2400 struct ldlm_lock **lockp, void *req_cookie,
2401 ldlm_mode_t mode, int flags, void *data)
2403 struct ptlrpc_request *req = req_cookie;
2404 struct ldlm_lock *lock = *lockp;
2405 struct ldlm_intent *it;
2406 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
2407 struct ldlm_reply *rep;
2408 struct lustre_handle lockh = { 0 };
2409 struct ldlm_lock *new_lock = NULL;
2410 int getattr_part = MDS_INODELOCK_UPDATE;
2411 int repsize[5] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
2412 [DLM_LOCKREPLY_OFF] = sizeof(struct ldlm_reply),
2413 [DLM_REPLY_REC_OFF] = sizeof(struct mds_body),
2414 [DLM_REPLY_REC_OFF+1] = mds->mds_max_mdsize };
2415 int repbufcnt = 4, rc;
2418 LASSERT(req != NULL);
2420 if (lustre_msg_bufcount(req->rq_reqmsg) <= DLM_INTENT_IT_OFF) {
2421 /* No intent was provided */
2422 rc = lustre_pack_reply(req, 2, repsize, NULL);
2428 it = lustre_swab_reqbuf(req, DLM_INTENT_IT_OFF, sizeof(*it),
2429 lustre_swab_ldlm_intent);
2431 CERROR("Intent missing\n");
2432 RETURN(req->rq_status = -EFAULT);
2435 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
/* Size the optional trailing reply buffer: ACL data for lookup-type
 * intents on ACL-enabled exports, unlink cookies for IT_UNLINK. */
2437 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
2438 (it->opc & (IT_OPEN | IT_GETATTR | IT_LOOKUP)))
2439 /* we should never allow OBD_CONNECT_ACL if not configured */
2440 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
2441 else if (it->opc & IT_UNLINK)
2442 repsize[repbufcnt++] = mds->mds_max_cookiesize;
2444 /* if we do recovery we isn't send reply mds state is restored */
2445 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
2446 repsize[DLM_REPLY_REC_OFF+1] = 0;
2447 if (it->opc & IT_UNLINK)
2448 repsize[DLM_REPLY_REC_OFF+2] = 0;
2451 rc = lustre_pack_reply(req, repbufcnt, repsize, NULL);
2453 RETURN(req->rq_status = rc);
2455 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
2456 intent_set_disposition(rep, DISP_IT_EXECD);
2458 /* execute policy */
2459 switch ((long)it->opc) {
2461 case IT_CREAT|IT_OPEN:
2462 mds_counter_incr(req->rq_export, LPROC_MDS_OPEN);
2463 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock, NULL,
2465 /* XXX swab here to assert that an mds_open reint
2466 * packet is following */
2467 rep->lock_policy_res2 = mds_reint(req, DLM_INTENT_REC_OFF,
2470 /* We abort the lock if the lookup was negative and
2471 * we did not make it to the OPEN portion */
2472 if (!intent_disposition(rep, DISP_LOOKUP_EXECD))
2473 RETURN(ELDLM_LOCK_ABORTED);
2474 if (intent_disposition(rep, DISP_LOOKUP_NEG) &&
2475 !intent_disposition(rep, DISP_OPEN_OPEN))
2478 /* If there was an error of some sort or if we are not
2479 * returning any locks */
2480 if (rep->lock_policy_res2 ||
2481 !intent_disposition(rep, DISP_OPEN_LOCK)) {
2482 /* If it is the disconnect error (ENODEV & ENOCONN)
2483 * ptlrpc layer should know this imediately, it should
2484 * be replied by rq_stats, otherwise, return it by
2487 /* if VBR failure then return error in rq_stats too */
2488 if (IS_CLIENT_DISCONNECT_ERROR(rep->lock_policy_res2) ||
2489 rep->lock_policy_res2 == -EOVERFLOW)
2490 RETURN(rep->lock_policy_res2);
2492 RETURN(ELDLM_LOCK_ABORTED);
/* getattr/lookup intents: choose which inode bits to lock. */
2496 getattr_part = MDS_INODELOCK_LOOKUP;
2498 getattr_part |= MDS_INODELOCK_LOOKUP;
2499 OBD_COUNTER_INCREMENT(req->rq_export->exp_obd, getattr);
2501 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock,
2504 /* INODEBITS_INTEROP: if this lock was converted from a
2505 * plain lock (client does not support inodebits), then
2506 * child lock must be taken with both lookup and update
2507 * bits set for all operations.
2509 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS))
2510 getattr_part = MDS_INODELOCK_LOOKUP |
2511 MDS_INODELOCK_UPDATE;
2513 rep->lock_policy_res2 = mds_getattr_lock(req,DLM_INTENT_REC_OFF,
2514 getattr_part, &lockh);
2515 /* FIXME: LDLM can set req->rq_status. MDS sets
2516 policy_res{1,2} with disposition and status.
2517 - replay: returns 0 & req->status is old status
2518 - otherwise: returns req->status */
2519 if (intent_disposition(rep, DISP_LOOKUP_NEG))
2520 rep->lock_policy_res2 = 0;
2521 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
2522 rep->lock_policy_res2)
2523 RETURN(ELDLM_LOCK_ABORTED);
2524 if (req->rq_status != 0) {
2526 rep->lock_policy_res2 = req->rq_status;
2527 RETURN(ELDLM_LOCK_ABORTED);
2531 CERROR("Unhandled intent "LPD64"\n", it->opc);
2535 /* By this point, whatever function we called above must have either
2536 * filled in 'lockh', been an intent replay, or returned an error. We
2537 * want to allow replayed RPCs to not get a lock, since we would just
2538 * drop it below anyways because lock replay is done separately by the
2539 * client afterwards. For regular RPCs we want to give the new lock to
2540 * the client instead of whatever lock it was about to get. */
2541 if (new_lock == NULL)
2542 new_lock = ldlm_handle2lock(&lockh);
2543 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
2546 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
2547 it->opc, lockh.cookie);
2549 /* If we've already given this lock to a client once, then we should
2550 * have no readers or writers. Otherwise, we should have one reader
2551 * _or_ writer ref (which will be zeroed below) before returning the
2552 * lock to a client. */
2553 if (new_lock->l_export == req->rq_export) {
2554 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2556 LASSERT(new_lock->l_export == NULL);
2557 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2562 if (new_lock->l_export == req->rq_export) {
2563 /* Already gave this to the client, which means that we
2564 * reconstructed a reply. */
2565 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2567 RETURN(ELDLM_LOCK_REPLACED);
2570 /* Fixup the lock to be given to the client */
2571 lock_res_and_lock(new_lock);
2572 new_lock->l_readers = 0;
2573 new_lock->l_writers = 0;
2575 new_lock->l_export = class_export_get(req->rq_export);
2576 new_lock->l_blocking_ast = lock->l_blocking_ast;
2577 new_lock->l_completion_ast = lock->l_completion_ast;
2578 new_lock->l_flags &= ~LDLM_FL_LOCAL;
2580 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
2581 sizeof(lock->l_remote_handle));
2583 unlock_res_and_lock(new_lock);
2585 lustre_hash_add(new_lock->l_export->exp_lock_hash,
2586 &new_lock->l_remote_handle,
2587 &new_lock->l_exp_hash);
2588 LDLM_LOCK_PUT(new_lock);
2590 RETURN(ELDLM_LOCK_REPLACED);
/**
 * mdt_setup(): obd setup method for the "mdt" request-handling device.
 *
 * Registers lprocfs entries, then creates and starts the three MDS ptlrpc
 * services: the main metadata service, the setattr service, and the
 * readpage service; finally starts the ping evictor.  Thread counts come
 * either from the mds_num_threads module parameter (clamped to
 * [MDS_THREADS_MIN, MDS_THREADS_MAX]) or are auto-sized from CPU count
 * and memory.  On failure, already-started services are unregistered in
 * reverse order via the err_thread* labels.
 *
 * \param obd  the obd device being set up
 * \param len  length of \a buf (unused here)
 * \param buf  setup data (unused here)
 * \retval 0 on success, negative errno on failure
 */
2593 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
2595         struct mds_obd *mds = &obd->u.mds;
2596         struct lprocfs_static_vars lvars;
2597         int mds_min_threads;
2598         int mds_max_threads;
2602         lprocfs_mdt_init_vars(&lvars);
2603         lprocfs_obd_setup(obd, lvars.obd_vars);
             /* serializes service teardown against mdt_health_check() */
2605         sema_init(&mds->mds_health_sem, 1);
2607         if (mds_num_threads) {
2608                 /* If mds_num_threads is set, it is the min and the max. */
2609                 if (mds_num_threads > MDS_THREADS_MAX)
2610                         mds_num_threads = MDS_THREADS_MAX;
2611                 if (mds_num_threads < MDS_THREADS_MIN)
2612                         mds_num_threads = MDS_THREADS_MIN;
2613                 mds_max_threads = mds_min_threads = mds_num_threads;
2615                 /* Base min threads on memory and cpus */
2616                 mds_min_threads = num_possible_cpus() * num_physpages >>
2617                         (27 - CFS_PAGE_SHIFT);
2618                 if (mds_min_threads < MDS_THREADS_MIN)
2619                         mds_min_threads = MDS_THREADS_MIN;
2620                 /* Largest auto threads start value */
2621                 if (mds_min_threads > 32)
2622                         mds_min_threads = 32;
2623                 mds_max_threads = min(MDS_THREADS_MAX, mds_min_threads * 4);
             /* main metadata service: handles the bulk of MDS RPCs */
2627                 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2628                                 MDS_MAXREPSIZE, MDS_REQUEST_PORTAL,
2629                                 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2630                                 mds_handle, LUSTRE_MDS_NAME,
2631                                 obd->obd_proc_entry, target_print_req,
2632                                 mds_min_threads, mds_max_threads, "ll_mdt",
2635         if (!mds->mds_service) {
2636                 CERROR("failed to start service\n");
2637                 GOTO(err_lprocfs, rc = -ENOMEM);
2640         rc = ptlrpc_start_threads(obd, mds->mds_service);
2642                 GOTO(err_thread, rc);
             /* dedicated setattr service on its own portal, so setattr RPCs
              * cannot be starved by long-running requests on the main portal */
2644         mds->mds_setattr_service =
2645                 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2646                                 MDS_MAXREPSIZE, MDS_SETATTR_PORTAL,
2647                                 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2648                                 mds_handle, "mds_setattr",
2649                                 obd->obd_proc_entry, target_print_req,
2650                                 mds_min_threads, mds_max_threads,
2651                                 "ll_mdt_attr", NULL);
2652         if (!mds->mds_setattr_service) {
                     /* fixed: this is the setattr service, not getattr */
2653                 CERROR("failed to start setattr service\n");
2654                 GOTO(err_thread, rc = -ENOMEM);
2657         rc = ptlrpc_start_threads(obd, mds->mds_setattr_service);
2659                 GOTO(err_thread2, rc);
             /* readpage (directory readdir) service on its own portal */
2661         mds->mds_readpage_service =
2662                 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2663                                 MDS_MAXREPSIZE, MDS_READPAGE_PORTAL,
2664                                 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2665                                 mds_handle, "mds_readpage",
2666                                 obd->obd_proc_entry, target_print_req,
2667                                 MDS_THREADS_MIN_READPAGE, mds_max_threads,
2668                                 "ll_mdt_rdpg", NULL);
2669         if (!mds->mds_readpage_service) {
2670                 CERROR("failed to start readpage service\n");
2671                 GOTO(err_thread2, rc = -ENOMEM);
2674         rc = ptlrpc_start_threads(obd, mds->mds_readpage_service);
2677                 GOTO(err_thread3, rc);
2679         ping_evictor_start();
             /* error unwinding: tear down in reverse order of creation */
2684         ptlrpc_unregister_service(mds->mds_readpage_service);
2685         mds->mds_readpage_service = NULL;
2687         ptlrpc_unregister_service(mds->mds_setattr_service);
2688         mds->mds_setattr_service = NULL;
2690         ptlrpc_unregister_service(mds->mds_service);
2691         mds->mds_service = NULL;
2693         lprocfs_obd_cleanup(obd);
/**
 * mdt_cleanup(): obd cleanup method for the "mdt" device.
 *
 * Stops the ping evictor, then unregisters the three ptlrpc services
 * and clears their pointers while holding mds_health_sem so that a
 * concurrent mdt_health_check() never sees a half-torn-down service.
 * Finally removes the lprocfs entries created in mdt_setup().
 */
2697 static int mdt_cleanup(struct obd_device *obd)
2699         struct mds_obd *mds = &obd->u.mds;
2702         ping_evictor_stop();
             /* hold the health semaphore across teardown; see mdt_health_check() */
2704         down(&mds->mds_health_sem);
2705         ptlrpc_unregister_service(mds->mds_readpage_service);
2706         ptlrpc_unregister_service(mds->mds_setattr_service);
2707         ptlrpc_unregister_service(mds->mds_service);
2708         mds->mds_readpage_service = NULL;
2709         mds->mds_setattr_service = NULL;
2710         mds->mds_service = NULL;
2711         up(&mds->mds_health_sem);
2713         lprocfs_obd_cleanup(obd);
/**
 * mdt_health_check(): aggregate health status of the three MDT services.
 *
 * ORs together the per-service health results under mds_health_sem
 * (which mdt_cleanup() holds while tearing the services down).
 */
2718 static int mdt_health_check(struct obd_device *obd)
2720         struct mds_obd *mds = &obd->u.mds;
2723         down(&mds->mds_health_sem);
2724         rc |= ptlrpc_service_health_check(mds->mds_readpage_service);
2725         rc |= ptlrpc_service_health_check(mds->mds_setattr_service);
2726         rc |= ptlrpc_service_health_check(mds->mds_service);
2727         up(&mds->mds_health_sem);
2730          * health_check to return 0 on healthy
2731          * and 1 on unhealthy.
/**
 * mds_lvfs_fid2dentry(): lvfs callback translating an (id, generation)
 * pair into a dentry on the MDS backing filesystem, via mds_fid2dentry().
 *
 * \param data  actually the struct obd_device * for this MDS
 * NOTE(review): the visible excerpt only assigns fid.generation; confirm
 * that the elided lines also set fid.id from \a id.
 */
2739 static struct dentry *mds_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
2742         struct obd_device *obd = data;
2745         fid.generation = gen;
2746         return mds_fid2dentry(&obd->u.mds, &fid, NULL);
/**
 * mds_health_check(): health check for the MDS obd device proper.
 *
 * Reports unhealthy if the backing superblock has gone read-only
 * (typically after a journal abort).  When USE_HEALTH_CHECK_WRITE is
 * defined, additionally performs a test I/O through the pre-opened
 * health-check file to verify the filesystem still accepts writes.
 */
2749 static int mds_health_check(struct obd_device *obd)
2751         struct obd_device_target *odt = &obd->u.obt;
2752 #ifdef USE_HEALTH_CHECK_WRITE
2753         struct mds_obd *mds = &obd->u.mds;
             /* a read-only superblock means the target can no longer serve writes */
2757         if (odt->obt_sb->s_flags & MS_RDONLY)
2760 #ifdef USE_HEALTH_CHECK_WRITE
2761         LASSERT(mds->mds_health_check_filp != NULL);
2762         rc |= !!lvfs_check_io_health(obd, mds->mds_health_check_filp);
/**
 * mds_process_config(): handle a configuration log record for the MDS.
 *
 * The visible case applies "mdt.*" tunable parameters from the config
 * log to the matching /proc entries via class_process_proc_param().
 */
2768 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
2770         struct lustre_cfg *lcfg = buf;
2773         switch(lcfg->lcfg_command) {
2775                 struct lprocfs_static_vars lvars;
2776                 lprocfs_mds_init_vars(&lvars);
2778                 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, lcfg, obd);
/* lvfs callback table: lets the generic lvfs layer resolve fids to
 * dentries on the MDS.  Uses the obsolete GCC "label:" initializer
 * syntax (equivalent to C99 ".l_fid2dentry ="). */
2788 struct lvfs_callback_ops mds_lvfs_ops = {
2789         l_fid2dentry:     mds_lvfs_fid2dentry,
2792 /* use obd ops to offer management infrastructure */
/* Method table for the "mds" obd type: connection lifecycle, setup and
 * cleanup, statfs/ioctl, object create/destroy, llog hooks, notification,
 * health checking and config-log processing.  Quota methods are patched
 * in at module load time by init_obd_quota_ops() in mds_init(). */
2793 static struct obd_ops mds_obd_ops = {
2794         .o_owner           = THIS_MODULE,
2795         .o_connect         = mds_connect,
2796         .o_reconnect       = mds_reconnect,
2797         .o_init_export     = mds_init_export,
2798         .o_destroy_export  = mds_destroy_export,
2799         .o_disconnect      = mds_disconnect,
2800         .o_setup           = mds_setup,
2801         .o_precleanup      = mds_precleanup,
2802         .o_cleanup         = mds_cleanup,
2803         .o_postrecov       = mds_postrecov,
2804         .o_statfs          = mds_obd_statfs,
2805         .o_iocontrol       = mds_iocontrol,
2806         .o_create          = mds_obd_create,
2807         .o_destroy         = mds_obd_destroy,
2808         .o_llog_init       = mds_llog_init,
2809         .o_llog_finish     = mds_llog_finish,
2810         .o_notify          = mds_notify,
2811         .o_health_check    = mds_health_check,
2812         .o_process_config  = mds_process_config,
/* Method table for the "mdt" obd type: only the request-service device,
 * so it needs just setup, cleanup and health-check methods. */
2815 static struct obd_ops mdt_obd_ops = {
2816         .o_owner           = THIS_MODULE,
2817         .o_setup           = mdt_setup,
2818         .o_cleanup         = mdt_cleanup,
2819         .o_health_check    = mdt_health_check,
/* Reference to the quota interface exported by the (optional) lquota
 * module; resolved at module init via PORTAL_SYMBOL_GET and released
 * again in mds_exit(). */
2822 quota_interface_t *mds_quota_interface_ref;
2823 extern quota_interface_t mds_quota_interface;
/**
 * mds_init(): module entry point.
 *
 * Loads the lquota module, resolves and initializes its interface,
 * splices quota methods into mds_obd_ops, then registers the "mds"
 * and "mdt" obd types with the class layer.  On quota-init failure
 * the symbol reference is dropped again (elided error path).
 */
2825 static int __init mds_init(void)
2828         struct lprocfs_static_vars lvars;
2830         request_module("lquota");
2831         mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
2832         rc = lquota_init(mds_quota_interface_ref);
2834                 if (mds_quota_interface_ref)
2835                         PORTAL_SYMBOL_PUT(mds_quota_interface);
2838         init_obd_quota_ops(mds_quota_interface_ref, &mds_obd_ops);
2840         lprocfs_mds_init_vars(&lvars);
2841         class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2842         lprocfs_mdt_init_vars(&lvars);
2843         class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
/**
 * mds_exit(): module exit point — undoes mds_init() in reverse order:
 * shuts down quota, drops the lquota symbol reference, and unregisters
 * both obd types.
 */
2848 static void /*__exit*/ mds_exit(void)
2850         lquota_exit(mds_quota_interface_ref);
2851         if (mds_quota_interface_ref)
2852                 PORTAL_SYMBOL_PUT(mds_quota_interface);
2854         class_unregister_type(LUSTRE_MDS_NAME);
2855         class_unregister_type(LUSTRE_MDT_NAME);
/* Kernel module metadata and entry/exit registration. */
2858 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2859 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2860 MODULE_LICENSE("GPL");
2862 module_init(mds_init);
2863 module_exit(mds_exit);