1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mds/handler.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Phil Schwan <phil@clusterfs.com>
41 * Author: Mike Shaver <shaver@clusterfs.com>
45 # define EXPORT_SYMTAB
47 #define DEBUG_SUBSYSTEM S_MDS
49 #include <lustre_mds.h>
50 #include <linux/module.h>
51 #include <linux/init.h>
52 #include <linux/random.h>
54 #include <linux/jbd.h>
55 # include <linux/smp_lock.h>
56 # include <linux/buffer_head.h>
57 # include <linux/workqueue.h>
58 # include <linux/mount.h>
60 #include <obd_class.h>
61 #include <lustre_dlm.h>
63 #include <lustre_fsfilt.h>
64 #include <lprocfs_status.h>
65 #include <lustre_quota.h>
66 #include <lustre_disk.h>
67 #include <lustre_param.h>
69 #include "mds_internal.h"
/* Module parameter: number of MDS service threads to start (mode 0444,
 * i.e. visible read-only at runtime). */
72 CFS_MODULE_PARM(mds_num_threads, "i", int, 0444,
73 "number of MDS service threads to start");
/* Forward declarations for handlers defined later in this file. */
75 static int mds_intent_policy(struct ldlm_namespace *ns,
76 struct ldlm_lock **lockp, void *req_cookie,
77 ldlm_mode_t mode, int flags, void *data);
78 static int mds_postsetup(struct obd_device *obd);
79 static int mds_cleanup(struct obd_device *obd);
81 /* Assumes caller has already pushed into the kernel filesystem context */
/*
 * Read @count bytes from directory @file starting at @offset and PUT them
 * to the client with a bulk transfer attached to @req.
 *
 * Returns 0 on success or a negative errno.  On bulk timeout or network
 * error the client export is evicted via class_fail_export().
 *
 * NOTE(review): this excerpt has lines elided (braces, labels, some
 * declarations); code is left byte-identical to the original.
 */
82 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
83 loff_t offset, int count)
85 struct ptlrpc_bulk_desc *desc;
86 struct l_wait_info lwi;
89 int rc = 0, npages, i, tmpcount, tmpsize = 0;
92 LASSERT((offset & ~CFS_PAGE_MASK) == 0); /* I'm dubious about this */
/* Round the byte count up to whole pages. */
94 npages = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
95 OBD_ALLOC(pages, sizeof(*pages) * npages);
97 GOTO(out, rc = -ENOMEM);
99 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
102 GOTO(out_free, rc = -ENOMEM);
/* First pass: allocate a page per chunk and attach it to the bulk
 * descriptor; the last page may be partially used (tmpsize). */
104 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
105 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
107 OBD_PAGE_ALLOC(pages[i], CFS_ALLOC_STD);
108 if (pages[i] == NULL)
109 GOTO(cleanup_buf, rc = -ENOMEM);
111 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* Second pass: fill each page with directory data via fsfilt. */
114 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
115 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
116 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
117 tmpsize, offset, file->f_dentry->d_inode->i_ino,
118 i_size_read(file->f_dentry->d_inode));
120 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
121 kmap(pages[i]), tmpsize, &offset);
125 GOTO(cleanup_buf, rc = -EIO);
128 LASSERT(desc->bd_nob == count);
130 rc = ptlrpc_start_bulk_transfer(desc);
132 GOTO(cleanup_buf, rc);
/* Fault-injection point used by recovery/abort tests. */
134 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
135 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
136 OBD_FAIL_MDS_SENDPAGE, rc);
137 GOTO(abort_bulk, rc);
/* Wait for bulk completion, bounded by the request deadline (at least
 * one second even if the deadline has already passed). */
140 timeout = (int)req->rq_deadline - (int)cfs_time_current_sec();
142 CERROR("Req deadline already passed %lu (now: %lu)\n",
143 req->rq_deadline, cfs_time_current_sec());
145 lwi = LWI_TIMEOUT(cfs_time_seconds(max(timeout, 1)), NULL, NULL);
146 rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc), &lwi);
147 LASSERT (rc == 0 || rc == -ETIMEDOUT);
150 if (desc->bd_success &&
151 desc->bd_nob_transferred == count)
152 GOTO(cleanup_buf, rc);
154 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
/* Bulk failed or timed out: log and evict the client export. */
157 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
158 (rc == -ETIMEDOUT) ? "timeout" : "network error",
159 desc->bd_nob_transferred, count,
160 req->rq_export->exp_client_uuid.uuid,
161 req->rq_export->exp_connection->c_remote_uuid.uuid);
163 class_fail_export(req->rq_export);
167 ptlrpc_abort_bulk(desc);
/* Common cleanup: free pages, the bulk descriptor and the page array. */
169 for (i = 0; i < npages; i++)
171 OBD_PAGE_FREE(pages[i]);
173 ptlrpc_free_bulk(desc);
175 OBD_FREE(pages, sizeof(*pages) * npages);
180 /* only valid locked dentries or errors should be returned */
/*
 * Look up the dentry for @fid and take a local inodebits DLM lock on it
 * (@lock_mode, bits selected by @lockpart), returning the lock handle in
 * @lockh.  Returns the locked dentry or an ERR_PTR on failure.
 */
181 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
182 struct vfsmount **mnt, int lock_mode,
183 struct lustre_handle *lockh,
184 char *name, int namelen, __u64 lockpart)
186 struct mds_obd *mds = &obd->u.mds;
187 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
188 struct ldlm_res_id res_id = { .name = {0} };
189 int flags = LDLM_FL_ATOMIC_CB, rc;
190 ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
/* DLM resource name is the inode number plus its generation. */
196 res_id.name[0] = de->d_inode->i_ino;
197 res_id.name[1] = de->d_inode->i_generation;
198 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id,
199 LDLM_IBITS, &policy, lock_mode, &flags,
200 ldlm_blocking_ast, ldlm_completion_ast,
201 NULL, NULL, 0, NULL, lockh);
202 if (rc != ELDLM_OK) {
204 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
210 /* Look up an entry by inode number. */
211 /* this function ONLY returns valid dget'd dentries with an initialized inode
/*
 * Translate an ll_fid (inode number + generation) into a dget'd dentry.
 * Returns ERR_PTR(-ESTALE) for a zero id and ERR_PTR(-ENOENT) when the
 * inode is absent, unlinked, or of a stale generation.  If @mnt is
 * non-NULL it is set to the MDS vfsmount.
 */
213 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
214 struct vfsmount **mnt)
216 struct obd_device *obd = container_of(mds, struct obd_device, u.mds);
218 unsigned long ino = fid->id;
219 __u32 generation = fid->generation;
221 struct dentry *result;
224 RETURN(ERR_PTR(-ESTALE));
/* Lookup name under the special fid directory is "0x<ino in hex>". */
226 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
228 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino/gen %lu/%u, sb %p\n",
229 ino, generation, mds->mds_obt.obt_sb);
231 /* under ext3 this is neither supposed to return bad inodes
233 result = mds_lookup(obd, fid_name, mds->mds_fid_de, strlen(fid_name));
237 inode = result->d_inode;
239 RETURN(ERR_PTR(-ENOENT));
/* An unlinked inode (nlink == 0) is not a valid lookup target; warn if
 * it also looks wiped (mode and ctime zero), which may mean corruption. */
241 if (inode->i_nlink == 0) {
242 if (inode->i_mode == 0 &&
243 LTIME_S(inode->i_ctime) == 0 ) {
244 LCONSOLE_WARN("Found inode with zero nlink, mode and "
245 "ctime -- this may indicate disk"
246 "corruption (device %s, inode %lu, link:"
247 " %lu, count: %d)\n", obd->obd_name, inode->i_ino,
248 (unsigned long)inode->i_nlink,
249 atomic_read(&inode->i_count));
252 RETURN(ERR_PTR(-ENOENT));
/* Generation mismatch: the inode number has been reused since the fid
 * was issued, so this is not the object the caller asked for. */
255 if (generation && inode->i_generation != generation) {
256 /* we didn't find the right inode.. */
257 CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
258 "count: %d, generation %u/%u\n", inode->i_ino,
259 (unsigned long)inode->i_nlink,
260 atomic_read(&inode->i_count), inode->i_generation,
263 RETURN(ERR_PTR(-ENOENT));
267 *mnt = mds->mds_vfsmnt;
/*
 * Negotiate connect flags with a (re)connecting client: mask the client's
 * requested flags down to what this MDS supports, record the result on the
 * export, and report the server version back in @data.  Warns when the MDS
 * requires ACL support that the client did not negotiate.
 */
274 static int mds_connect_internal(struct obd_export *exp,
275 struct obd_connect_data *data)
277 struct obd_device *obd = exp->exp_obd;
279 data->ocd_connect_flags &= MDS_CONNECT_SUPPORTED;
280 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
282 /* If no known bits (which should not happen, probably,
283 as everybody should support LOOKUP and UPDATE bits at least)
284 revert to compat mode with plain locks. */
285 if (!data->ocd_ibits_known &&
286 data->ocd_connect_flags & OBD_CONNECT_IBITS)
287 data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
/* Strip optional features this target was not configured with. */
289 if (!obd->u.mds.mds_fl_acl)
290 data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
292 if (!obd->u.mds.mds_fl_user_xattr)
293 data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
295 exp->exp_connect_flags = data->ocd_connect_flags;
296 data->ocd_version = LUSTRE_VERSION_CODE;
297 exp->exp_mds_data.med_ibits_known = data->ocd_ibits_known;
300 if (obd->u.mds.mds_fl_acl &&
301 ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
302 CWARN("%s: MDS requires ACL support but client does not\n",
/* Handle a client reconnect: validate arguments, renegotiate connect
 * flags via mds_connect_internal(), and (re)initialize per-export stats. */
309 static int mds_reconnect(struct obd_export *exp, struct obd_device *obd,
310 struct obd_uuid *cluuid,
311 struct obd_connect_data *data,
317 if (exp == NULL || obd == NULL || cluuid == NULL)
320 rc = mds_connect_internal(exp, data);
322 mds_export_stats_init(obd, exp, localdata)
327 /* Establish a connection to the MDS.
329 * This will set up an export structure for the client to hold state data
330 * about that client, like open files, the last operation number it did
331 * on the server, etc.
333 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
334 struct obd_uuid *cluuid, struct obd_connect_data *data,
337 struct obd_export *exp;
338 struct mds_export_data *med;
339 struct lsd_client_data *lcd = NULL;
343 if (!conn || !obd || !cluuid)
346 /* Check for aborted recovery. */
347 target_recovery_check_and_stop(obd);
349 /* XXX There is a small race between checking the list and adding a
350 * new connection for the same UUID, but the real threat (list
351 * corruption when multiple different clients connect) is solved.
353 * There is a second race between adding the export to the list,
354 * and filling in the client data below. Hence skipping the case
355 * of NULL lcd above. We should already be controlling multiple
356 * connects at the client, and we can't hold the spinlock over
357 * memory allocations without risk of deadlocking.
359 rc = class_connect(conn, obd, cluuid);
362 exp = class_conn2export(conn);
364 med = &exp->exp_mds_data;
/* Negotiate connect flags, then record the client in the last_rcvd file
 * so its state survives server restart/recovery. */
366 rc = mds_connect_internal(exp, data);
372 GOTO(out, rc = -ENOMEM);
374 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
377 rc = mds_client_add(obd, exp, -1, localdata);
/* Error path: undo the class_connect() above and drop our export ref. */
386 class_disconnect(exp);
388 class_export_put(exp);
/* Initialize the MDS-private part of a new export: the open-files list and
 * its lock, the "connecting" flag, and the per-export LDLM state. */
394 int mds_init_export(struct obd_export *exp)
396 struct mds_export_data *med = &exp->exp_mds_data;
399 INIT_LIST_HEAD(&med->med_open_head);
400 spin_lock_init(&med->med_open_lock);
402 spin_lock(&exp->exp_lock);
403 exp->exp_connecting = 1;
404 spin_unlock(&exp->exp_lock);
406 RETURN(ldlm_init_export(exp));
/* Tear down an export: generic target state, LDLM state, then the MDS
 * client record.  All open file handles must already have been closed. */
409 static int mds_destroy_export(struct obd_export *exp)
413 target_destroy_export(exp);
414 ldlm_destroy_export(exp);
416 LASSERT(list_empty(&exp->exp_mds_data.med_open_head));
417 mds_client_free(exp);
/*
 * Force-close every file handle (mfd) still open on @exp, typically during
 * disconnect.  Open mfds are moved off the export under med_open_lock onto
 * a private closing list, then each is closed (possibly unlinking orphans
 * and destroying their OST objects) inside the server's lvfs context.
 * Returns 0, or -ENOMEM if scratch buffers could not be allocated.
 */
422 static int mds_cleanup_mfd(struct obd_export *exp)
424 struct mds_export_data *med;
425 struct obd_device *obd = exp->exp_obd;
426 struct mds_obd *mds = &obd->u.mds;
427 struct lvfs_run_ctxt saved;
428 struct lov_mds_md *lmm;
429 __u32 lmm_sz, cookie_sz;
430 struct llog_cookie *logcookies;
431 struct list_head closing_list;
432 struct mds_file_data *mfd, *n;
436 med = &exp->exp_mds_data;
/* Fast path: nothing open on this export. */
438 spin_lock(&med->med_open_lock);
439 if (list_empty(&med->med_open_head)) {
440 spin_unlock(&med->med_open_lock);
/* Move all open mfds to a private list while holding med_open_lock, so
 * the actual closes below can run without the spinlock. */
444 CFS_INIT_LIST_HEAD(&closing_list);
445 while (!list_empty(&med->med_open_head)) {
446 struct list_head *tmp = med->med_open_head.next;
447 struct mds_file_data *mfd =
448 list_entry(tmp, struct mds_file_data, mfd_list);
450 /* Remove mfd handle so it can't be found again.
451 * We are consuming the mfd_list reference here. */
452 mds_mfd_unlink(mfd, 0);
453 list_add_tail(&mfd->mfd_list, &closing_list);
455 spin_unlock(&med->med_open_lock);
/* Scratch buffers for striping (LOV EA) data and unlink llog cookies. */
457 lmm_sz = mds->mds_max_mdsize;
458 OBD_ALLOC(lmm, lmm_sz);
460 CWARN("%s: allocation failure during cleanup; can not force "
461 "close file handles on this service.\n", obd->obd_name);
462 GOTO(out, rc = -ENOMEM);
465 cookie_sz = mds->mds_max_cookiesize;
466 OBD_ALLOC(logcookies, cookie_sz);
467 if (logcookies == NULL) {
468 CWARN("%s: allocation failure during cleanup; can not force "
469 "close file handles on this service.\n", obd->obd_name);
470 OBD_FREE(lmm, lmm_sz);
471 GOTO(out, rc = -ENOMEM);
474 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
475 /* Close any open files (which may also cause orphan unlinking). */
476 list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
477 int lmm_size = lmm_sz;
478 umode_t mode = mfd->mfd_dentry->d_inode->i_mode;
481 /* If you change this message, be sure to update
482 * replay_single:test_46 */
483 CDEBUG(D_INODE|D_IOCTL, "%s: force closing file handle for "
484 "%.*s (ino %lu)\n", obd->obd_name,
485 mfd->mfd_dentry->d_name.len,mfd->mfd_dentry->d_name.name,
486 mfd->mfd_dentry->d_inode->i_ino);
/* Fetch the file's striping EA so the close can clean up OST objects. */
488 rc = mds_get_md(obd, mfd->mfd_dentry->d_inode, lmm,
491 CWARN("mds_get_md failure, rc=%d\n", rc);
493 valid |= OBD_MD_FLEASIZE;
495 /* child orphan sem protects orphan_dec_test and
496 * is_orphan race, mds_mfd_close drops it */
497 MDS_DOWN_WRITE_ORPHAN_SEM(mfd->mfd_dentry->d_inode);
499 list_del_init(&mfd->mfd_list);
500 rc = mds_mfd_close(NULL, REQ_REC_OFF, obd, mfd,
501 !(exp->exp_flags & OBD_OPT_FAILOVER),
502 lmm, lmm_size, logcookies,
503 mds->mds_max_cookiesize,
507 CDEBUG(D_INODE|D_IOCTL, "Error closing file: %d\n", rc);
/* If the close produced llog cookies the file was an orphan whose OST
 * objects must now be destroyed. */
509 if (valid & OBD_MD_FLCOOKIE) {
510 rc = mds_osc_destroy_orphan(obd, mode, lmm,
511 lmm_size, logcookies, 1);
513 CDEBUG(D_INODE, "%s: destroy of orphan failed,"
514 " rc = %d\n", obd->obd_name, rc);
517 valid &= ~OBD_MD_FLCOOKIE;
521 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
522 OBD_FREE(logcookies, cookie_sz);
523 OBD_FREE(lmm, lmm_sz);
/*
 * Disconnect a client export: mark it disconnected first (so no new
 * requests can use it), cancel its DLM locks, clean up /proc stats,
 * schedule any outstanding difficult replies, and force-close its open
 * file handles.  Holds a temporary export reference around the whole
 * sequence.
 */
528 static int mds_disconnect(struct obd_export *exp)
534 class_export_get(exp);
536 /* Disconnect early so that clients can't keep using export */
537 rc = class_disconnect(exp);
538 if (exp->exp_obd->obd_namespace != NULL)
539 ldlm_cancel_locks_for_export(exp);
541 /* release nid stat refererence */
542 lprocfs_exp_cleanup(exp);
544 /* complete all outstanding replies */
545 spin_lock(&exp->exp_lock);
546 while (!list_empty(&exp->exp_outstanding_replies)) {
547 struct ptlrpc_reply_state *rs =
548 list_entry(exp->exp_outstanding_replies.next,
549 struct ptlrpc_reply_state, rs_exp_list);
550 struct ptlrpc_service *svc = rs->rs_service;
/* Each reply must be rescheduled under its service's lock. */
552 spin_lock(&svc->srv_lock);
553 list_del_init(&rs->rs_exp_list);
554 ptlrpc_schedule_difficult_reply(rs);
555 spin_unlock(&svc->srv_lock);
557 spin_unlock(&exp->exp_lock);
558 rc = mds_cleanup_mfd(exp);
560 class_export_put(exp);
/* MDS_GETSTATUS handler: reply with the fid of the filesystem root. */
564 static int mds_getstatus(struct ptlrpc_request *req)
566 struct mds_obd *mds = mds_req2mds(req);
567 struct mds_body *body;
568 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
571 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_PACK, req->rq_status = -ENOMEM);
572 rc = lustre_pack_reply(req, 2, size, NULL);
574 RETURN(req->rq_status = rc);
576 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
577 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
579 /* the last_committed and last_xid fields are filled in for all
580 * replies already - no need to do so here also.
585 /* get the LOV EA from @inode and store it into @md. It can be at most
586 * @size bytes, and @size is updated with the actual EA size.
587 * The EA size is also returned on success, and -ve errno on failure.
588 * If there is no EA then 0 is returned. */
589 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
590 int *size, int lock, int flags,
/* NOTE(review): @lock presumably gates the LOCK/UNLOCK_INODE_MUTEX pair
 * below -- the conditional is elided in this excerpt; confirm in full
 * source. */
597 LOCK_INODE_MUTEX(inode);
598 rc = fsfilt_get_md(obd, inode, md, *size, "lov");
/* No EA found on a plain getattr: fall back to the filesystem default
 * striping. */
600 if (rc == 0 && flags == MDS_GETATTR)
601 rc = mds_get_default_md(obd, md, &lmm_size);
604 CERROR("Error %d reading eadata for ino %lu\n",
/* Convert on-disk LOV EA to the wire format expected by the client. */
608 rc = mds_convert_lov_ea(obd, inode, md, lmm_size,
621 UNLOCK_INODE_MUTEX(inode);
627 /* Call with lock=1 if you want mds_pack_md to take the i_mutex.
628 * Call with lock=0 if the caller has already taken the i_mutex. */
/*
 * Pack the inode's LOV EA into reply buffer @offset of @msg and set the
 * corresponding valid bits (FLDIREA for directories, FLEASIZE for files)
 * and eadatasize in @body.
 */
629 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
630 struct mds_body *body, struct inode *inode, int lock, int flags,
633 struct mds_obd *mds = &obd->u.mds;
639 lmm = lustre_msg_buf(msg, offset, 0);
641 /* Some problem with getting eadata when I sized the reply
643 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
647 /* if this is a replay request we should silently exit without
 * filling in the md */
648 lmm_size = lustre_msg_buflen(msg, offset);
652 /* I don't really like this, but it is a sanity check on the client
653 * MD request. However, if the client doesn't know how much space
654 * to reserve for the MD, it shouldn't be bad to have too much space.
656 if (lmm_size > mds->mds_max_mdsize) {
657 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
658 inode->i_ino, lmm_size, mds->mds_max_mdsize);
662 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock, flags,
665 if (S_ISDIR(inode->i_mode))
666 body->valid |= OBD_MD_FLDIREA;
668 body->valid |= OBD_MD_FLEASIZE;
669 body->eadatasize = lmm_size;
676 #ifdef CONFIG_FS_POSIX_ACL
/*
 * Read the inode's POSIX access ACL xattr into reply buffer @repoff and
 * record its size in @repbody->aclsize; sets OBD_MD_FLACL on success.
 * -ENODATA (no ACL) is treated as success with aclsize 0.
 */
678 int mds_pack_posix_acl(struct inode *inode, struct lustre_msg *repmsg,
679 struct mds_body *repbody, int repoff)
681 struct dentry de = { .d_inode = inode };
685 LASSERT(repbody->aclsize == 0);
686 LASSERT(lustre_msg_bufcount(repmsg) > repoff);
688 buflen = lustre_msg_buflen(repmsg, repoff);
/* Filesystem without xattr support: nothing to pack. */
692 if (!inode->i_op || !inode->i_op->getxattr)
695 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
696 lustre_msg_buf(repmsg, repoff, buflen),
699 repbody->aclsize = rc;
700 else if (rc != -ENODATA) {
701 CERROR("buflen %d, get acl: %d\n", buflen, rc);
706 repbody->valid |= OBD_MD_FLACL;
/* Without POSIX ACL support in the kernel this is a no-op. */
710 #define mds_pack_posix_acl(inode, repmsg, repbody, repoff) 0
/* Thin wrapper over mds_pack_posix_acl(); @med is currently unused. */
713 int mds_pack_acl(struct mds_export_data *med, struct inode *inode,
714 struct lustre_msg *repmsg, struct mds_body *repbody,
717 return mds_pack_posix_acl(inode, repmsg, repbody, repoff);
/*
 * Fill the getattr reply for @dentry: inode attributes, and depending on
 * the bits requested in @reqbody->valid, LOV EA (regular files/dirs),
 * symlink target, extended flags, max EA/cookie sizes, and ACL data.
 * The reply buffers must already be packed by the caller.
 */
720 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
721 struct ptlrpc_request *req,
722 struct mds_body *reqbody, int reply_off)
724 struct mds_body *body;
725 struct inode *inode = dentry->d_inode;
733 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
734 LASSERT(body != NULL); /* caller prepped reply */
736 body->flags = reqbody->flags; /* copy MDS_BFLAG_EXT_FLAGS if present */
737 mds_pack_inode2body(body, inode);
/* Striping EA requested for a regular file or directory. */
740 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
741 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
742 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR &&
743 ((S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))))
746 rc = mds_pack_md(obd, req->rq_repmsg, reply_off, body,
748 req->rq_export->exp_connect_flags);
750 /* If we have LOV EA data, the OST holds size, atime, mtime */
751 if (!(body->valid & OBD_MD_FLEASIZE) &&
752 !(body->valid & OBD_MD_FLDIREA))
753 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
754 OBD_MD_FLATIME | OBD_MD_FLMTIME);
/* Trim the reply buffer to the actual EA size. */
756 lustre_shrink_reply(req, reply_off, body->eadatasize, 0);
757 if (body->eadatasize)
759 } else if (S_ISLNK(inode->i_mode) &&
760 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
/* Symlink: read the target into the reply and NUL-terminate it. */
761 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off, 0);
764 LASSERT (symname != NULL); /* caller prepped reply */
765 len = lustre_msg_buflen(req->rq_repmsg, reply_off);
767 rc = inode->i_op->readlink(dentry, symname, len);
769 CERROR("readlink failed: %d\n", rc);
770 } else if (rc != len - 1) {
771 CERROR ("Unexpected readlink rc %d: expecting %d\n",
775 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
776 body->valid |= OBD_MD_LINKNAME;
777 body->eadatasize = rc + 1;
778 symname[rc] = 0; /* NULL terminate */
782 } else if (reqbody->valid == OBD_MD_FLFLAGS &&
783 reqbody->flags & MDS_BFLAG_EXT_FLAGS) {
786 /* We only return the full set of flags on ioctl, otherwise we
787 * get enough flags from the inode in mds_pack_inode2body(). */
788 rc = fsfilt_iocontrol(obd, dentry, FSFILT_IOC_GETFLAGS,
791 body->flags = flags | MDS_BFLAG_EXT_FLAGS;
/* Tell the client the server-side EA/cookie size limits. */
794 if (reqbody->valid & OBD_MD_FLMODEASIZE) {
795 struct mds_obd *mds = mds_req2mds(req);
796 body->max_cookiesize = mds->mds_max_cookiesize;
797 body->max_mdsize = mds->mds_max_mdsize;
798 body->valid |= OBD_MD_FLMODEASIZE;
804 #ifdef CONFIG_FS_POSIX_ACL
805 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
806 (reqbody->valid & OBD_MD_FLACL)) {
807 rc = mds_pack_acl(&req->rq_export->exp_mds_data,
808 inode, req->rq_repmsg,
811 lustre_shrink_reply(req, reply_off, body->aclsize, 0);
/*
 * Size and pack the reply buffers for a getattr of @inode: body, plus an
 * optional buffer for the LOV EA, symlink target, or ACL, sized by probing
 * the inode before the reply is packed.
 */
820 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
823 struct mds_obd *mds = mds_req2mds(req);
824 struct mds_body *body;
825 int rc, bufcount = 2;
826 int size[4] = { sizeof(struct ptlrpc_body), sizeof(*body) };
829 LASSERT(offset == REQ_REC_OFF); /* non-intent */
831 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
832 LASSERT(body != NULL); /* checked by caller */
833 LASSERT(lustre_req_swabbed(req, offset)); /* swabbed by caller */
/* Probe the EA size so the reply buffer can be sized exactly. */
835 if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
836 (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
837 LOCK_INODE_MUTEX(inode);
838 rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0,
840 UNLOCK_INODE_MUTEX(inode);
841 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
/* Directory with no EA on a plain getattr: reserve room for a
 * default striping descriptor instead. */
843 if ((rc == 0) && (lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) &&
844 ((S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))))
845 rc = sizeof(struct lov_mds_md_v3);
847 if (rc != -ENODATA) {
848 CERROR("error getting inode %lu MD: rc = %d\n",
853 } else if (rc > mds->mds_max_mdsize) {
855 CERROR("MD size %d larger than maximum possible %u\n",
856 rc, mds->mds_max_mdsize);
/* Symlink: reserve room for the target string (plus NUL). */
861 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
862 if (i_size_read(inode) + 1 != body->eadatasize)
863 CERROR("symlink size: %Lu, reply space: %d\n",
864 i_size_read(inode) + 1, body->eadatasize);
865 size[bufcount] = min_t(int, i_size_read(inode) + 1,
868 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
869 i_size_read(inode) + 1, body->eadatasize);
872 #ifdef CONFIG_FS_POSIX_ACL
/* ACL requested: probe the ACL xattr size as well. */
873 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
874 (body->valid & OBD_MD_FLACL)) {
875 struct dentry de = { .d_inode = inode };
878 if (inode->i_op && inode->i_op->getxattr) {
879 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
882 if (rc != -ENODATA) {
883 CERROR("got acl size: %d\n", rc);
893 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
894 CERROR("failed MDS_GETATTR_PACK test\n");
895 req->rq_status = -ENOMEM;
899 rc = lustre_pack_reply(req, bufcount, size, NULL);
/*
 * Getattr-by-name (and revalidate-by-fid) with DLM locking: look up the
 * child under its parent, take locks on parent (UPDATE) and child
 * (@child_part bits, CR mode), and pack the child's attributes into the
 * reply.  On success the child lock is returned to the client via
 * @child_lockh.  Handles resent requests whose lock was already granted
 * in fixup_handle_for_resent_req().
 */
908 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
909 int child_part, struct lustre_handle *child_lockh)
911 struct obd_device *obd = req->rq_export->exp_obd;
912 struct mds_obd *mds = &obd->u.mds;
913 struct ldlm_reply *rep = NULL;
914 struct lvfs_run_ctxt saved;
915 struct mds_body *body;
916 struct dentry *dparent = NULL, *dchild = NULL;
917 struct lvfs_ucred uc = {NULL,};
918 struct lustre_handle parent_lockh;
920 int rc = 0, cleanup_phase = 0, resent_req = 0;
924 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
926 /* Swab now, before anyone looks inside the request */
927 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
928 lustre_swab_mds_body);
930 CERROR("Can't swab mds_body\n");
934 lustre_set_req_swabbed(req, offset + 1);
935 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
937 CERROR("Can't unpack name\n");
940 namesize = lustre_msg_buflen(req->rq_reqmsg, offset + 1);
941 /* namesize less than 2 means we have empty name, probably came from
942 revalidate by cfid, so no point in having name to be set */
/* Build the user credentials this operation will run under. */
946 rc = mds_init_ucred(&uc, req, offset);
950 LASSERT(offset == REQ_REC_OFF || offset == DLM_INTENT_REC_OFF);
951 /* if requests were at offset 2, the getattr reply goes back at 1 */
952 if (offset == DLM_INTENT_REC_OFF) {
953 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
955 offset = DLM_REPLY_REC_OFF;
958 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
959 cleanup_phase = 1; /* kernel context */
960 ldlm_reply_set_disposition(rep, DISP_LOOKUP_EXECD);
962 /* FIXME: handle raw lookup */
/* Raw lookup: the client wants only the inode number, no locks. */
964 if (body->valid == OBD_MD_FLID) {
965 struct mds_body *mds_reply;
966 int size = sizeof(*mds_reply);
968 // The user requested ONLY the inode number, so do a raw lookup
969 rc = lustre_pack_reply(req, 1, &size, NULL);
971 CERROR("out of memory\n");
975 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
977 mds_reply = lustre_msg_buf(req->rq_repmsg, offset,
979 mds_reply->fid1.id = inum;
980 mds_reply->valid = OBD_MD_FLID;
985 /* child_lockh() is only set in fixup_handle_for_resent_req()
986 * if MSG_RESENT is set */
987 if (lustre_handle_is_used(child_lockh)) {
988 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
/* Normal (non-resent) path: enqueue the parent/child locks now. */
992 if (resent_req == 0) {
994 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
995 rc = mds_get_parent_child_locked(obd, &obd->u.mds,
999 MDS_INODELOCK_UPDATE,
1001 child_lockh, &dchild,
1002 LCK_CR, child_part);
1004 /* For revalidate by fid we always take UPDATE lock */
1005 dchild = mds_fid2locked_dentry(obd, &body->fid2, NULL,
1006 LCK_CR, child_lockh,
1007 NULL, 0, child_part);
1010 rc = PTR_ERR(dchild);
/* Resent path: the child lock was already granted; recover the child
 * fid from the granted lock's resource name instead of re-enqueuing. */
1015 struct ldlm_lock *granted_lock;
1016 struct ll_fid child_fid;
1017 struct ldlm_resource *res;
1018 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
1019 granted_lock = ldlm_handle2lock(child_lockh);
1020 /* lock was granted in fixup_handle_for_resent_req() if
1021 * MSG_RESENT is set */
1022 LASSERTF(granted_lock != NULL, LPU64"/%u lockh "LPX64"\n",
1023 body->fid1.id, body->fid1.generation,
1024 child_lockh->cookie);
1027 res = granted_lock->l_resource;
1028 child_fid.id = res->lr_name.name[0];
1029 child_fid.generation = res->lr_name.name[1];
1030 dchild = mds_fid2dentry(&obd->u.mds, &child_fid, NULL);
1031 if (IS_ERR(dchild)) {
1032 rc = PTR_ERR(dchild);
1033 LCONSOLE_WARN("Child "LPU64"/%u lookup error %d.",
1034 child_fid.id, child_fid.generation, rc);
1037 LDLM_LOCK_PUT(granted_lock);
1040 cleanup_phase = 2; /* dchild, dparent, locks */
1042 if (dchild->d_inode == NULL) {
1043 ldlm_reply_set_disposition(rep, DISP_LOOKUP_NEG);
1044 /* in the intent case, the policy clears this error:
1045 the disposition is enough */
1046 GOTO(cleanup, rc = -ENOENT);
1048 ldlm_reply_set_disposition(rep, DISP_LOOKUP_POS);
/* Intent replies are pre-packed; only pack here for plain getattr. */
1051 if (req->rq_repmsg == NULL) {
1052 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
1054 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1059 rc = mds_getattr_internal(obd, dchild, req, body, offset);
1060 GOTO(cleanup, rc); /* returns the lock to the client */
/* Staged cleanup: phase 2 drops dentries/locks, phase 1 pops the
 * filesystem context and user credentials. */
1063 switch (cleanup_phase) {
1065 if (resent_req == 0) {
1066 if (rc && dchild->d_inode)
1067 ldlm_lock_decref(child_lockh, LCK_CR);
1069 ldlm_lock_decref(&parent_lockh, LCK_CR);
1075 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1077 mds_exit_ucred(&uc, mds);
1078 if (!req->rq_packed_final) {
1079 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1082 req->rq_status = rc;
/*
 * Plain MDS_GETATTR by fid (no name lookup, no DLM locks): resolve the
 * dentry from fid1, pack the reply, and fill it via mds_getattr_internal().
 * Runs under the requesting user's credentials and the server filesystem
 * context.
 */
1088 static int mds_getattr(struct ptlrpc_request *req, int offset)
1090 struct mds_obd *mds = mds_req2mds(req);
1091 struct obd_device *obd = req->rq_export->exp_obd;
1092 struct lvfs_run_ctxt saved;
1094 struct mds_body *body;
1095 struct lvfs_ucred uc = { NULL, };
1099 OBD_COUNTER_INCREMENT(obd, getattr);
1101 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1102 lustre_swab_mds_body);
1106 rc = mds_init_ucred(&uc, req, offset);
1108 GOTO(out_ucred, rc);
1110 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1111 de = mds_fid2dentry(mds, &body->fid1, NULL);
1113 rc = req->rq_status = PTR_ERR(de);
1117 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1119 CERROR("mds_getattr_pack_msg: %d\n", rc);
1123 req->rq_status = mds_getattr_internal(obd, de, req, body,REPLY_REC_OFF);
1128 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/* Ensure some reply is packed even on early failure. */
1130 if (!req->rq_packed_final) {
1131 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1134 req->rq_status = rc;
1136 mds_exit_ucred(&uc, mds);
/* Query filesystem statistics (cached up to @max_age) under obd_osfs_lock
 * and copy the cached obd_osfs into @osfs. */
1140 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1141 __u64 max_age, __u32 flags)
1145 spin_lock(&obd->obd_osfs_lock);
1146 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
1148 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1149 spin_unlock(&obd->obd_osfs_lock);
/* MDS_STATFS handler: pack an obd_statfs reply filled from the (slightly
 * cached) filesystem statistics. */
1154 static int mds_statfs(struct ptlrpc_request *req)
1156 struct obd_device *obd = req->rq_export->exp_obd;
1157 struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
1158 int rc, size[2] = { sizeof(struct ptlrpc_body),
1159 sizeof(struct obd_statfs) };
1162 /* This will trigger a watchdog timeout */
1163 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
1164 (MDS_SERVICE_WATCHDOG_FACTOR *
1165 at_get(&svc->srv_at_estimate)) + 1);
1166 OBD_COUNTER_INCREMENT(obd, statfs);
1168 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
1169 GOTO(out, rc = -ENOMEM);
1170 rc = lustre_pack_reply(req, 2, size, NULL);
1174 /* We call this so that we can cache a bit - 1 jiffie worth */
1175 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1176 size[REPLY_REC_OFF]),
1177 cfs_time_current_64() - HZ, 0);
1179 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1185 req->rq_status = rc;
/* MDS_SYNC handler: sync the whole backing filesystem; if fid1 names an
 * inode, also return that inode's attributes in the reply body. */
1189 static int mds_sync(struct ptlrpc_request *req, int offset)
1191 struct obd_device *obd = req->rq_export->exp_obd;
1192 struct mds_obd *mds = &obd->u.mds;
1193 struct mds_body *body;
1194 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
1197 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1198 lustre_swab_mds_body);
1200 GOTO(out, rc = -EFAULT);
1202 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1203 GOTO(out, rc = -ENOMEM);
1204 rc = lustre_pack_reply(req, 2, size, NULL);
1208 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
/* fid1.id != 0 means "sync this file and report its attributes". */
1209 if (rc == 0 && body->fid1.id != 0) {
1212 de = mds_fid2dentry(mds, &body->fid1, NULL);
1214 GOTO(out, rc = PTR_ERR(de));
1216 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1218 mds_pack_inode2body(body, de->d_inode);
1224 req->rq_status = rc;
1228 /* mds_readpage does not take a DLM lock on the inode, because the client must
1229 * already have a PR lock.
1231 * If we were to take another one here, a deadlock will result, if another
1232 * thread is already waiting for a PW lock. */
/*
 * MDS_READPAGE handler: open the directory named by fid1, validate that
 * the requested offset (body->size) and length (body->nlink) are
 * block-aligned, and bulk-send the directory pages via mds_sendpage().
 */
1233 static int mds_readpage(struct ptlrpc_request *req, int offset)
1235 struct obd_device *obd = req->rq_export->exp_obd;
1236 struct mds_obd *mds = &obd->u.mds;
1237 struct vfsmount *mnt;
1240 struct mds_body *body, *repbody;
1241 struct lvfs_run_ctxt saved;
1242 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) };
1243 struct lvfs_ucred uc = {NULL,};
1246 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_PACK, -ENOMEM);
1247 rc = lustre_pack_reply(req, 2, size, NULL);
1251 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1252 lustre_swab_mds_body);
1254 GOTO (out, rc = -EFAULT);
1256 rc = mds_init_ucred(&uc, req, offset);
1260 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1261 de = mds_fid2dentry(&obd->u.mds, &body->fid1, &mnt);
1263 GOTO(out_pop, rc = PTR_ERR(de));
1265 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1267 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1268 /* note: in case of an error, dentry_open puts dentry */
1270 GOTO(out_pop, rc = PTR_ERR(file));
1272 /* body->size is actually the offset -eeb */
1273 if ((body->size & (de->d_inode->i_sb->s_blocksize - 1)) != 0) {
1274 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1275 body->size, de->d_inode->i_sb->s_blocksize);
1276 GOTO(out_file, rc = -EFAULT);
1279 /* body->nlink is actually the #bytes to read -eeb */
1280 if (body->nlink & (de->d_inode->i_sb->s_blocksize - 1)) {
1281 CERROR("size %u is not multiple of blocksize %lu\n",
1282 body->nlink, de->d_inode->i_sb->s_blocksize);
1283 GOTO(out_file, rc = -EFAULT);
/* Report the current directory size so the client can detect EOF. */
1286 repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1288 repbody->size = i_size_read(file->f_dentry->d_inode);
1289 repbody->valid = OBD_MD_FLSIZE;
1291 /* to make this asynchronous make sure that the handling function
1292 doesn't send a reply when this function completes. Instead a
1293 callback function would send the reply */
1294 /* body->size is actually the offset -eeb */
1295 rc = mds_sendpage(req, file, body->size, body->nlink);
1298 filp_close(file, 0);
1300 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1302 mds_exit_ucred(&uc, mds);
1303 req->rq_status = rc;
/*
 * Unpack a reintegration (metadata update) request, apply root squash to
 * the caller's credentials, and dispatch to mds_reint_rec().
 * NOTE(review): lines are elided in this extract (allocation-failure check,
 * 'out' label, RETURN); only the visible flow is annotated.
 */
1307 int mds_reint(struct ptlrpc_request *req, int offset,
1308 struct lustre_handle *lockh)
/* Heap-allocate the update record to keep it off the stack. */
1310 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1313 OBD_ALLOC(rec, sizeof(*rec));
1317 rc = mds_update_unpack(req, offset, rec);
1318 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1319 CERROR("invalid record\n");
1320 GOTO(out, req->rq_status = -EINVAL);
/* Apply root-squash policy based on the peer NID before executing. */
1323 mds_root_squash(&req->rq_export->exp_obd->u.mds, &req->rq_peer.nid,
1324 &rec->ur_uc.luc_fsuid, &rec->ur_uc.luc_fsgid,
1325 &rec->ur_uc.luc_cap, &rec->ur_uc.luc_suppgid1,
1326 &rec->ur_uc.luc_suppgid2);
1328 /* rc will be used to interrupt a for loop over multiple records */
1329 rc = mds_reint_rec(rec, offset, req, lockh);
1331 OBD_FREE(rec, sizeof(*rec));
/*
 * Decide what to do with a request that arrives while the MDS is still in
 * recovery: connect/disconnect/sync-type opcodes are allowed through or
 * queued; anything else is rejected with -EAGAIN so the client retries.
 * NOTE(review): the case bodies (*process assignments, RETURNs) between the
 * labels are elided in this extract.
 */
1335 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1336 struct obd_device *obd, int *process)
1338 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1339 case MDS_CONNECT: /* This will never get here, but for completeness. */
1340 case OST_CONNECT: /* This will never get here, but for completeness. */
1341 case MDS_DISCONNECT:
1342 case OST_DISCONNECT:
1347 case MDS_SYNC: /* used in unmounting */
/* Replayed/queued opcodes go through the recovery queue. */
1352 *process = target_queue_recovery_request(req, obd);
/* Everything else is refused until recovery completes. */
1356 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1358 /* XXX what should we set rq_status to here? */
1359 req->rq_status = -EAGAIN;
1360 RETURN(ptlrpc_error(req));
/* Human-readable names for REINT_* opcodes, indexed by opcode; used only
 * for debug logging in mds_handle(). (Closing brace elided in this extract.) */
1364 static char *reint_names[] = {
1365 [REINT_SETATTR] "setattr",
1366 [REINT_CREATE] "create",
1367 [REINT_LINK] "link",
1368 [REINT_UNLINK] "unlink",
1369 [REINT_RENAME] "rename",
1370 [REINT_OPEN] "open",
/*
 * Handle an MDS_SET_INFO RPC: extract the key/value buffers from the
 * request and act on known keys. The only key handled in the visible code
 * is "read-only", which toggles OBD_CONNECT_RDONLY on the export.
 * NOTE(review): several lines (key NULL check, pack-reply error check, the
 * value test selecting set vs. clear, and the RETURN) are elided here.
 */
1373 static int mds_set_info_rpc(struct obd_export *exp, struct ptlrpc_request *req)
1376 int keylen, vallen, rc = 0;
1379 key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1);
1381 DEBUG_REQ(D_HA, req, "no set_info key");
1384 keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF);
1386 val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, 0);
1387 vallen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1);
1389 rc = lustre_pack_reply(req, 1, NULL, NULL);
1393 lustre_msg_set_status(req->rq_repmsg, 0);
1395 /* Accept the broken "read-only" key from 1.6.6 clients. b=17493 */
1396 if (KEY_IS(KEY_READONLY) || KEY_IS(KEY_READONLY_166COMPAT)) {
1397 if (val == NULL || vallen < sizeof(__u32)) {
1398 DEBUG_REQ(D_HA, req, "no set_info val");
/* Set or clear the read-only connect flag on this export. */
1403 exp->exp_connect_flags |= OBD_CONNECT_RDONLY;
1405 exp->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
#ifdef HAVE_QUOTA_SUPPORT
/*
 * Handle MDS_QUOTACHECK: unpack the quotactl body and run the quota check
 * via the obd layer, storing the result in rq_status.
 * NOTE(review): error-path lines (NULL/rc checks, RETURN) are elided here.
 */
1414 static int mds_handle_quotacheck(struct ptlrpc_request *req)
1416 struct obd_quotactl *oqctl;
1420 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1421 lustre_swab_obd_quotactl);
1425 rc = lustre_pack_reply(req, 1, NULL, NULL);
1429 req->rq_status = obd_quotacheck(req->rq_export, oqctl);
/*
 * Handle MDS_QUOTACTL: unpack the quotactl request, pack a reply with a
 * quotactl body, execute the operation, and (in elided lines, presumably)
 * copy the result into the reply buffer — TODO confirm against full source.
 */
1433 static int mds_handle_quotactl(struct ptlrpc_request *req)
1435 struct obd_quotactl *oqctl, *repoqc;
1436 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repoqc) };
1439 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1440 lustre_swab_obd_quotactl);
1444 rc = lustre_pack_reply(req, 2, size, NULL);
1448 repoqc = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*repoqc));
1450 req->rq_status = obd_quotactl(req->rq_export, oqctl);
/*
 * Verify that the incoming message's protocol version matches what this
 * server expects for the given opcode family: OBD version for connect/
 * disconnect, MDS version for metadata ops, DLM version for lock
 * callbacks, LOG version for llog ops. Unknown opcodes are rejected.
 * NOTE(review): many case labels and the rc-check/break lines between the
 * visible groups are elided in this extract.
 */
1456 static int mds_msg_check_version(struct lustre_msg *msg)
1460 switch (lustre_msg_get_opc(msg)) {
/* Connection management speaks the generic OBD protocol version. */
1462 case MDS_DISCONNECT:
1464 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
1466 CERROR("bad opc %u version %08x, expecting %08x\n",
1467 lustre_msg_get_opc(msg),
1468 lustre_msg_get_version(msg),
1469 LUSTRE_OBD_VERSION);
/* Metadata operations speak the MDS protocol version. */
1473 case MDS_GETATTR_NAME:
1478 case MDS_DONE_WRITING:
1484 case MDS_QUOTACHECK:
1488 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
1490 CERROR("bad opc %u version %08x, expecting %08x\n",
1491 lustre_msg_get_opc(msg),
1492 lustre_msg_get_version(msg),
1493 LUSTRE_MDS_VERSION);
/* Lock callbacks speak the DLM protocol version. */
1497 case LDLM_BL_CALLBACK:
1498 case LDLM_CP_CALLBACK:
1499 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
1501 CERROR("bad opc %u version %08x, expecting %08x\n",
1502 lustre_msg_get_opc(msg),
1503 lustre_msg_get_version(msg),
1504 LUSTRE_DLM_VERSION);
/* Log-cancel and llog handle operations speak the LOG protocol version. */
1506 case OBD_LOG_CANCEL:
1507 case LLOG_ORIGIN_HANDLE_CREATE:
1508 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1509 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1510 case LLOG_ORIGIN_HANDLE_CLOSE:
1511 case LLOG_ORIGIN_HANDLE_DESTROY:
1512 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1514 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
1516 CERROR("bad opc %u version %08x, expecting %08x\n",
1517 lustre_msg_get_opc(msg),
1518 lustre_msg_get_version(msg),
1519 LUSTRE_LOG_VERSION);
1522 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
/*
 * Top-level MDS request dispatcher, registered as the ptlrpc service
 * handler. Flow (as visible in this elided extract):
 *   1. version-check the message;
 *   2. for non-CONNECT requests, require an export, sanity-check resent/
 *      replayed xids, and filter requests arriving during recovery;
 *   3. dispatch on opcode to the specific handler;
 *   4. on the common exit path, stamp last_xid / committed-transno info
 *      into the reply and hand off to target_handle_reply().
 * NOTE(review): many lines (case labels, breaks, 'out:' label, some RETURNs)
 * are elided here; comments annotate only the visible code.
 */
1528 int mds_handle(struct ptlrpc_request *req)
1530 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
1532 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1533 struct obd_device *obd = NULL;
1536 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
/* We must not be inside a journal transaction when entering the handler. */
1538 LASSERT(current->journal_info == NULL);
1540 rc = mds_msg_check_version(req->rq_reqmsg);
1542 CERROR("MDS drop mal-formed request\n");
1546 /* XXX identical to OST */
1547 if (lustre_msg_get_opc(req->rq_reqmsg) != MDS_CONNECT) {
1548 struct mds_export_data *med;
/* Everything except CONNECT requires an established export. */
1551 if (req->rq_export == NULL) {
1552 CERROR("operation %d on unconnected MDS from %s\n",
1553 lustre_msg_get_opc(req->rq_reqmsg),
1554 libcfs_id2str(req->rq_peer));
1555 req->rq_status = -ENOTCONN;
1556 GOTO(out, rc = -ENOTCONN);
1559 med = &req->rq_export->exp_mds_data;
1560 obd = req->rq_export->exp_obd;
1563 /* sanity check: if the xid matches, the request must
1564 * be marked as a resent or replayed */
1565 if (req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_xid) ||
1566 req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_close_xid))
1567 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
1568 (MSG_RESENT | MSG_REPLAY))) {
1569 CERROR("rq_xid "LPU64" matches last_xid, "
1570 "expected RESENT flag\n",
1572 req->rq_status = -ENOTCONN;
1573 GOTO(out, rc = -EFAULT);
1575 /* else: note the opposite is not always true; a
1576 * RESENT req after a failover will usually not match
1577 * the last_xid, since it was likely never
1578 * committed. A REPLAYed request will almost never
1579 * match the last xid, however it could for a
1580 * committed, but still retained, open. */
1582 /* Check for aborted recovery. */
1583 spin_lock_bh(&obd->obd_processing_task_lock);
1584 recovering = obd->obd_recovering;
1585 spin_unlock_bh(&obd->obd_processing_task_lock);
/* While recovering, only a restricted set of requests may proceed. */
1587 target_recovery_check_and_stop(obd) == 0) {
1588 rc = mds_filter_recovery_request(req, obd,
1590 if (rc || !should_process)
/* ---- opcode dispatch ---- */
1595 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1597 DEBUG_REQ(D_INODE, req, "connect");
1598 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1599 rc = target_handle_connect(req, mds_handle);
1601 /* Now that we have an export, set obd. */
1602 obd = req->rq_export->exp_obd;
1606 case MDS_DISCONNECT:
1607 DEBUG_REQ(D_INODE, req, "disconnect");
1608 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1609 rc = target_handle_disconnect(req);
1610 req->rq_status = rc; /* superfluous? */
1614 DEBUG_REQ(D_INODE, req, "getstatus");
1615 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1616 rc = mds_getstatus(req);
1620 DEBUG_REQ(D_INODE, req, "getattr");
1621 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1622 rc = mds_getattr(req, REQ_REC_OFF);
1626 DEBUG_REQ(D_INODE, req, "setxattr");
1627 OBD_FAIL_RETURN(OBD_FAIL_MDS_SETXATTR_NET, 0);
1628 rc = mds_setxattr(req);
1632 DEBUG_REQ(D_INODE, req, "getxattr");
1633 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETXATTR_NET, 0);
1634 rc = mds_getxattr(req);
1637 case MDS_GETATTR_NAME: {
1638 struct lustre_handle lockh = { 0 };
1639 DEBUG_REQ(D_INODE, req, "getattr_name");
1640 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1642 /* If this request gets a reconstructed reply, we won't be
1643 * acquiring any new locks in mds_getattr_lock, so we don't
1646 rc = mds_getattr_lock(req, REQ_REC_OFF, MDS_INODELOCK_UPDATE,
1648 /* this non-intent call (from an ioctl) is special */
1649 req->rq_status = rc;
/* Drop the lock immediately; this path does not hand it to the client. */
1650 if (rc == 0 && lustre_handle_is_used(&lockh))
1651 ldlm_lock_decref(&lockh, LCK_CR);
1655 DEBUG_REQ(D_INODE, req, "statfs");
1656 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1657 rc = mds_statfs(req);
1661 DEBUG_REQ(D_INODE, req, "readpage");
1662 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1663 rc = mds_readpage(req, REQ_REC_OFF);
1665 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
/* MDS_REINT: peek at the reint opcode to size the reply and pick a
 * procfs counter before handing off to mds_reint(). */
1672 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF,
1676 int size[4] = { sizeof(struct ptlrpc_body),
1677 sizeof(struct mds_body),
1678 mds->mds_max_mdsize,
1679 mds->mds_max_cookiesize };
1682 /* NB only peek inside req now; mds_reint() will swab it */
1684 CERROR ("Can't inspect opcode\n");
1689 if (lustre_req_need_swab(req))
1692 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1693 (opc < REINT_MAX) ? reint_names[opc] :
/* Map each reint opcode onto its lprocfs counter slot. */
1698 op = PTLRPC_LAST_CNTR + MDS_REINT_CREATE;
1701 op = PTLRPC_LAST_CNTR + MDS_REINT_LINK;
1704 op = PTLRPC_LAST_CNTR + MDS_REINT_OPEN;
1707 op = PTLRPC_LAST_CNTR + MDS_REINT_SETATTR;
1710 op = PTLRPC_LAST_CNTR + MDS_REINT_RENAME;
1713 op = PTLRPC_LAST_CNTR + MDS_REINT_UNLINK;
1720 if (op && req->rq_rqbd->rqbd_service->srv_stats)
1721 lprocfs_counter_incr(
1722 req->rq_rqbd->rqbd_service->srv_stats, op);
1724 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
/* unlink/rename need room for unlink cookies; open for EA data —
 * the bufcount adjustments themselves are in elided lines. */
1726 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1728 else if (opc == REINT_OPEN)
1733 /* if we do recovery we isn't send reply mds state is restored */
1734 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
1735 size[DLM_REPLY_REC_OFF] = 0;
1736 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1737 size[DLM_REPLY_REC_OFF + 1] = 0;
1740 rc = lustre_pack_reply(req, bufcount, size, NULL);
1744 rc = mds_reint(req, REQ_REC_OFF, NULL);
1745 fail = OBD_FAIL_MDS_REINT_NET_REP;
1750 DEBUG_REQ(D_INODE, req, "close");
1751 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1752 rc = mds_close(req, REQ_REC_OFF);
1753 fail = OBD_FAIL_MDS_CLOSE_NET_REP;
1756 case MDS_DONE_WRITING:
1757 DEBUG_REQ(D_INODE, req, "done_writing");
1758 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
1759 rc = mds_done_writing(req, REQ_REC_OFF);
1763 DEBUG_REQ(D_INODE, req, "pin");
1764 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1765 rc = mds_pin(req, REQ_REC_OFF);
1769 DEBUG_REQ(D_INODE, req, "sync");
1770 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
1771 rc = mds_sync(req, REQ_REC_OFF);
1775 DEBUG_REQ(D_INODE, req, "set_info");
1776 rc = mds_set_info_rpc(req->rq_export, req);
1778 #ifdef HAVE_QUOTA_SUPPORT
1779 case MDS_QUOTACHECK:
1780 DEBUG_REQ(D_INODE, req, "quotacheck");
1781 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACHECK_NET, 0);
1782 rc = mds_handle_quotacheck(req);
1786 DEBUG_REQ(D_INODE, req, "quotactl");
1787 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACTL_NET, 0);
1788 rc = mds_handle_quotactl(req);
1792 DEBUG_REQ(D_INODE, req, "ping");
1793 rc = target_handle_ping(req);
/* Delayed (VBR) exports refresh their epoch on each ping. */
1794 if (req->rq_export->exp_delayed)
1795 mds_update_client_epoch(req->rq_export);
1798 case OBD_LOG_CANCEL:
1799 CDEBUG(D_INODE, "log cancel\n");
1800 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1801 rc = -ENOTSUPP; /* la la la */
/* LDLM lock service entry points hosted on the MDS. */
1805 DEBUG_REQ(D_INODE, req, "enqueue");
1806 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1807 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1808 ldlm_server_blocking_ast, NULL);
1809 fail = OBD_FAIL_LDLM_REPLY;
1812 DEBUG_REQ(D_INODE, req, "convert");
1813 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1814 rc = ldlm_handle_convert(req);
1816 case LDLM_BL_CALLBACK:
1817 case LDLM_CP_CALLBACK:
1818 DEBUG_REQ(D_INODE, req, "callback");
1819 CERROR("callbacks should not happen on MDS\n");
1821 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
/* Remote llog handle operations (config/log records served to clients). */
1823 case LLOG_ORIGIN_HANDLE_CREATE:
1824 DEBUG_REQ(D_INODE, req, "llog_init");
1825 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1826 rc = llog_origin_handle_create(req);
1828 case LLOG_ORIGIN_HANDLE_DESTROY:
1829 DEBUG_REQ(D_INODE, req, "llog_init");
1830 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1831 rc = llog_origin_handle_destroy(req);
1833 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1834 DEBUG_REQ(D_INODE, req, "llog next block");
1835 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1836 rc = llog_origin_handle_next_block(req);
1838 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1839 DEBUG_REQ(D_INODE, req, "llog prev block");
1840 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1841 rc = llog_origin_handle_prev_block(req);
1843 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1844 DEBUG_REQ(D_INODE, req, "llog read header");
1845 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1846 rc = llog_origin_handle_read_header(req);
1848 case LLOG_ORIGIN_HANDLE_CLOSE:
1849 DEBUG_REQ(D_INODE, req, "llog close");
1850 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1851 rc = llog_origin_handle_close(req);
1854 DEBUG_REQ(D_INODE, req, "llog catinfo");
1855 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1856 rc = llog_catinfo(req);
/* Unknown opcode: report -ENOTSUPP to the client. */
1859 req->rq_status = -ENOTSUPP;
1860 rc = ptlrpc_error(req);
/* ---- common exit path ---- */
1864 LASSERT(current->journal_info == NULL);
1866 /* If we're DISCONNECTing, the mds_export_data is already freed */
1867 if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != MDS_DISCONNECT) {
1868 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1870 /* I don't think last_xid is used for anyway, so I'm not sure
1871 if we need to care about last_close_xid here.*/
1872 lustre_msg_set_last_xid(req->rq_repmsg,
1873 le64_to_cpu(med->med_lcd->lcd_last_xid));
1874 target_committed_to_req(req);
1879 return target_handle_reply(req, rc, fail);
1882 /* Update the server data on disk. This stores the new mount_count and
1883 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1884 * then the server last_rcvd value may be less than that of the clients.
1885 * This will alert us that we may need to do client recovery.
1887 * Also assumes for mds_last_transno that we are not modifying it (no locking).
1889 int mds_update_server_data(struct obd_device *obd, int force_sync)
1891 struct mds_obd *mds = &obd->u.mds;
1892 struct lr_server_data *lsd = mds->mds_server_data;
1893 struct file *filp = mds->mds_rcvd_filp;
1894 struct lvfs_run_ctxt saved;
1899 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1900 mds->mds_mount_count, mds->mds_last_transno);
/* Snapshot the in-memory last_transno into the on-disk record under lock. */
1902 spin_lock(&mds->mds_transno_lock);
1903 lsd->lsd_last_transno = cpu_to_le64(mds->mds_last_transno);
1904 spin_unlock(&mds->mds_transno_lock);
/* Write the record within the server's filesystem context; force_sync
 * requests a synchronous write (used at cleanup). */
1906 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1907 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off,force_sync);
1908 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1910 CERROR("error writing MDS server data: rc = %d\n", rc);
/*
 * Parse a comma-separated mount-options string and set the corresponding
 * mds flags: user_xattr/nouser_xattr toggle mds_fl_user_xattr, acl/noacl
 * toggle mds_fl_acl (ACL only when CONFIG_FS_POSIX_ACL is built in).
 * NOTE(review): the outer loop, the 'len' computation, and the advance to
 * the next option are in elided lines of this extract.
 */
1915 static void fsoptions_to_mds_flags(struct mds_obd *mds, char *options)
/* Scan to the end of the current comma-separated token. */
1925 while (*p && *p != ',')
1929 if (len == sizeof("user_xattr") - 1 &&
1930 memcmp(options, "user_xattr", len) == 0) {
1931 mds->mds_fl_user_xattr = 1;
1932 LCONSOLE_INFO("Enabling user_xattr\n");
1933 } else if (len == sizeof("nouser_xattr") - 1 &&
1934 memcmp(options, "nouser_xattr", len) == 0) {
1935 mds->mds_fl_user_xattr = 0;
1936 LCONSOLE_INFO("Disabling user_xattr\n");
1937 } else if (len == sizeof("acl") - 1 &&
1938 memcmp(options, "acl", len) == 0) {
1939 #ifdef CONFIG_FS_POSIX_ACL
1940 mds->mds_fl_acl = 1;
1941 LCONSOLE_INFO("Enabling ACL\n");
1943 CWARN("ignoring unsupported acl mount option\n");
1945 } else if (len == sizeof("noacl") - 1 &&
1946 memcmp(options, "noacl", len) == 0) {
1947 #ifdef CONFIG_FS_POSIX_ACL
1948 mds->mds_fl_acl = 0;
1949 LCONSOLE_INFO("Disabling ACL\n");
1957 /* mount the file system (secretly). lustre_cfg parameters are:
/*
 * Device setup for the MDS obd: attaches to the mount created by
 * lustre_fill_super, initializes fsfilt ops, locking namespace, procfs
 * stats, the backing filesystem state, quota, the group upcall cache, and
 * finally logs a "now serving" banner. Error labels unwind in reverse.
 * NOTE(review): this extract elides many lines (declarations of ns_name,
 * uuid_ptr, label, str; several error labels; RETURNs); comments cover
 * only the visible code.
 */
1963 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1965 struct lprocfs_static_vars lvars;
1966 struct lustre_cfg* lcfg = buf;
1967 struct mds_obd *mds = &obd->u.mds;
1968 struct lustre_sb_info *lsi;
1969 struct lustre_mount_info *lmi;
1970 struct vfsmount *mnt;
1971 struct obd_uuid uuid;
1978 /* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv */
/* The obt member must overlay mds_obt for the shared obd-target code. */
1980 CLASSERT(offsetof(struct obd_device, u.obt) ==
1981 offsetof(struct obd_device, u.mds.mds_obt));
1983 if (lcfg->lcfg_bufcount < 3)
1986 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
/* The filesystem was already mounted by lustre_fill_super; grab it. */
1989 lmi = server_get_mount(obd->obd_name);
1991 CERROR("Not mounted in lustre_fill_super?\n");
1995 /* We mounted in lustre_fill_super.
1996 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1997 lsi = s2lsi(lmi->lmi_sb);
1998 fsoptions_to_mds_flags(mds, lsi->lsi_ldd->ldd_mount_opts);
1999 fsoptions_to_mds_flags(mds, lsi->lsi_lmd->lmd_opts);
2001 obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
2002 if (IS_ERR(obd->obd_fsops))
2003 GOTO(err_put, rc = PTR_ERR(obd->obd_fsops));
2005 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
/* Refuse to start on a device administratively marked read-only. */
2007 if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
2008 CERROR("%s: Underlying device is marked as read-only. "
2009 "Setup failed\n", obd->obd_name);
2010 GOTO(err_ops, rc = -EROFS);
/* Initialize per-MDS state and tunables. */
2013 sema_init(&mds->mds_epoch_sem, 1);
2014 spin_lock_init(&mds->mds_transno_lock);
2015 mds->mds_max_mdsize = sizeof(struct lov_mds_md_v3);
2016 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
2017 mds->mds_atime_diff = MAX_ATIME_DIFF;
2018 mds->mds_evict_ost_nids = 1;
2019 /* sync permission changes */
2020 mds->mds_sync_permission = 0;
/* Create the server-side DLM namespace and hook the intent policy. */
2022 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
2023 obd->obd_namespace = ldlm_namespace_new(obd, ns_name, LDLM_NAMESPACE_SERVER,
2024 LDLM_NAMESPACE_GREEDY);
2025 if (obd->obd_namespace == NULL) {
2027 GOTO(err_ops, rc = -ENOMEM);
2029 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
/* procfs: obd vars, per-op stats, and the exports directory. */
2031 lprocfs_mds_init_vars(&lvars);
2032 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
2033 lprocfs_alloc_obd_stats(obd, LPROC_MDS_LAST) == 0) {
2034 /* Init private stats here */
2035 mds_stats_counter_init(obd->obd_stats);
2036 #ifdef HAVE_DELAYED_RECOVERY
2037 lprocfs_obd_attach_stale_exports(obd);
2039 obd->obd_proc_exports_entry = proc_mkdir("exports",
2040 obd->obd_proc_entry);
/* Set up last_rcvd, PENDING dir, and other on-disk server state. */
2043 rc = mds_fs_setup(obd, mnt);
2045 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
2050 if (obd->obd_proc_exports_entry)
2051 lprocfs_add_simple(obd->obd_proc_exports_entry,
2052 "clear", lprocfs_nid_stats_clear_read,
2053 lprocfs_nid_stats_clear_write, obd, NULL);
/* Remember the LOV profile name (config buffer 3) for mds_postsetup. */
2055 if (lcfg->lcfg_bufcount >= 4 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
2058 ll_generate_random_uuid(uuid);
2059 class_uuid_unparse(uuid, &mds->mds_lov_uuid);
2061 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
2062 if (mds->mds_profile == NULL)
2063 GOTO(err_fs, rc = -ENOMEM);
2065 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
2066 LUSTRE_CFG_BUFLEN(lcfg, 3));
2069 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2070 "mds_ldlm_client", &obd->obd_ldlm_client);
2071 obd->obd_replayable = 1;
2073 rc = lquota_setup(mds_quota_interface_ref, obd);
/* Cache for supplementary-group upcall results. */
2077 mds->mds_group_hash = upcall_cache_init(obd->obd_name);
2078 if (IS_ERR(mds->mds_group_hash)) {
2079 rc = PTR_ERR(mds->mds_group_hash);
2080 mds->mds_group_hash = NULL;
2081 GOTO(err_qctxt, rc);
2084 /* Don't wait for mds_postrecov trying to clear orphans */
2085 obd->obd_async_recov = 1;
2086 rc = mds_postsetup(obd);
2087 /* Bug 11557 - allow async abort_recov start
2088 FIXME can remove most of this obd_async_recov plumbing
2089 obd->obd_async_recov = 0;
2092 GOTO(err_qctxt, rc);
2094 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2095 if (uuid_ptr != NULL) {
2096 class_uuid_unparse(uuid_ptr, &uuid);
/* Console banner: include recovery window info if clients must replay. */
2102 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2103 if (obd->obd_recovering) {
2104 LCONSOLE_WARN("MDT %s now serving %s (%s%s%s), but will be in "
2105 "recovery for at least %d:%.02d, or until %d "
2106 "client%s reconnect%s. \n",
2107 obd->obd_name, lustre_cfg_string(lcfg, 1),
2108 label ?: "", label ? "/" : "", str,
2109 obd->obd_recovery_timeout / 60,
2110 obd->obd_recovery_timeout % 60,
2111 obd->obd_recoverable_clients,
2112 (obd->obd_recoverable_clients == 1) ? "":"s",
2113 (obd->obd_recoverable_clients == 1) ? "s":"");
2115 LCONSOLE_INFO("MDT %s now serving %s (%s%s%s) with recovery "
2116 "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
2117 label ?: "", label ? "/" : "", str,
2118 obd->obd_replayable ? "enabled" : "disabled");
2121 /* Reduce the initial timeout on an MDS because it doesn't need such
2122 * a long timeout as an OST does. Adaptive timeouts will adjust this
2123 * value appropriately. */
2124 if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
2125 ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT;
/* Error unwinding: each label undoes the corresponding setup step. */
2130 lquota_cleanup(mds_quota_interface_ref, obd);
2132 /* No extra cleanup needed for llog_init_commit_thread() */
2133 mds_fs_cleanup(obd);
2134 upcall_cache_cleanup(mds->mds_group_hash);
2135 mds->mds_group_hash = NULL;
2136 remove_proc_entry("clear", obd->obd_proc_exports_entry);
2138 lprocfs_free_per_client_stats(obd);
2139 lprocfs_free_obd_stats(obd);
2140 lprocfs_obd_cleanup(obd);
2141 ldlm_namespace_free(obd->obd_namespace, NULL, 0);
2142 obd->obd_namespace = NULL;
2144 fsfilt_put_ops(obd->obd_fsops);
2146 server_put_mount(obd->obd_name, mnt);
2147 obd->u.obt.obt_sb = NULL;
/*
 * Tear down the MDS's connection to its LOV: free the stored profile,
 * detach as the LOV's observer, propagate force/fail shutdown flags, then
 * disconnect and manually clean up the LOV device.
 * NOTE(review): the lookup that (re)validates 'osc' before the
 * PTR_ERR return at 2167 is in elided lines.
 */
2151 static int mds_lov_clean(struct obd_device *obd)
2153 struct mds_obd *mds = &obd->u.mds;
2154 struct obd_device *osc = mds->mds_osc_obd;
2157 if (mds->mds_profile) {
2158 class_del_profile(mds->mds_profile);
2159 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
2160 mds->mds_profile = NULL;
2163 /* There better be a lov */
2167 RETURN(PTR_ERR(osc));
2169 obd_register_observer(osc, NULL);
2171 /* Give lov our same shutdown flags */
2172 osc->obd_force = obd->obd_force;
2173 osc->obd_fail = obd->obd_fail;
2175 /* Cleanup the lov */
2176 obd_disconnect(mds->mds_osc_exp);
2177 class_manual_cleanup(osc);
2178 mds->mds_osc_exp = NULL;
/*
 * Second-stage setup, run after mds_setup: create the config and LOV-EA
 * llog contexts, then (if a profile was configured) resolve it and connect
 * to the named LOV.
 * NOTE(review): the err_cleanup unwind path between 2214 and 2221 is
 * partially elided in this extract.
 */
2183 static int mds_postsetup(struct obd_device *obd)
2185 struct mds_obd *mds = &obd->u.mds;
2186 struct llog_ctxt *ctxt;
2190 rc = llog_setup(obd, LLOG_CONFIG_ORIG_CTXT, obd, 0, NULL,
2195 rc = llog_setup(obd, LLOG_LOVEA_ORIG_CTXT, obd, 0, NULL,
2200 if (mds->mds_profile) {
2201 struct lustre_profile *lprof;
2202 /* The profile defines which osc and mdc to connect to, for a
2203 client. We reuse that here to figure out the name of the
2204 lov to use (and ignore lprof->lp_mdc).
2205 The profile was set in the config log with
2206 LCFG_MOUNTOPT profilenm oscnm mdcnm */
2207 lprof = class_get_profile(mds->mds_profile);
2208 if (lprof == NULL) {
2209 CERROR("No profile found: %s\n", mds->mds_profile);
2210 GOTO(err_cleanup, rc = -ENOENT);
2212 rc = mds_lov_connect(obd, lprof->lp_osc);
2214 GOTO(err_cleanup, rc);
/* Error path: release the llog contexts created above. */
2221 ctxt = llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT);
2225 ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
/*
 * Post-recovery hook: bump the boot epoch, remove orphans left in the
 * PENDING directory, notify the LOV that recovery finished, and kick off
 * quota recovery if the device is not stopping.
 */
2231 int mds_postrecov(struct obd_device *obd)
2239 LASSERT(!obd->obd_recovering);
2241 /* VBR: update boot epoch after recovery */
2242 mds_update_last_epoch(obd);
2244 /* clean PENDING dir */
2245 rc = mds_cleanup_pending(obd);
2248 /* FIXME Does target_finish_recovery really need this to block? */
2249 /* Notify the LOV, which will in turn call mds_notify for each tgt */
2250 /* This means that we have to hack obd_notify to think we're obd_set_up
2251 during mds_lov_connect. */
2252 obd_notify(obd->u.mds.mds_osc_obd, NULL,
2253 obd->obd_async_recov ? OBD_NOTIFY_SYNC_NONBLOCK :
2254 OBD_NOTIFY_SYNC, NULL);
2256 /* quota recovery */
2257 if (likely(obd->obd_stopping == 0))
2258 lquota_recovery(mds_quota_interface_ref, obd);
2264 /* We need to be able to stop an mds_lov_synchronize */
/* Abort in-flight LOV synchronization early, but only during a forced or
 * failed shutdown; a normal clean shutdown skips this. */
2265 static int mds_lov_early_clean(struct obd_device *obd)
2267 struct mds_obd *mds = &obd->u.mds;
2268 struct obd_device *osc = mds->mds_osc_obd;
2270 if (!osc || (!obd->obd_force && !obd->obd_fail))
2273 CDEBUG(D_HA, "abort inflight\n");
2274 return (obd_precleanup(osc, OBD_CLEANUP_EARLY));
/*
 * Staged pre-cleanup dispatcher: at EXPORTS stage abort recovery and LOV
 * sync, at SELF_EXP stage disconnect the LOV and tear down llog contexts.
 * NOTE(review): the break statements and the final RETURN are elided; the
 * visible cases may fall through in the full source — confirm before
 * relying on stage ordering.
 */
2277 static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2283 case OBD_CLEANUP_EARLY:
2285 case OBD_CLEANUP_EXPORTS:
2286 target_cleanup_recovery(obd);
2287 mds_lov_early_clean(obd);
2289 case OBD_CLEANUP_SELF_EXP:
2290 mds_lov_disconnect(obd);
2292 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2293 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
2294 rc = obd_llog_finish(obd, 0);
2296 case OBD_CLEANUP_OBD:
/*
 * Final device teardown, the inverse of mds_setup: drop the LOV export
 * ref, remove procfs entries and stats, stop quota, sync server data to
 * disk, clean up the backing fs, release the mount, free the DLM
 * namespace, cancel any pending recovery timer, and release fsfilt ops.
 */
2302 static int mds_cleanup(struct obd_device *obd)
2304 struct mds_obd *mds = &obd->u.mds;
2305 lvfs_sbdev_type save_dev;
/* Nothing to do if setup never attached a superblock. */
2308 if (obd->u.obt.obt_sb == NULL)
2310 save_dev = lvfs_sbdev(obd->u.obt.obt_sb);
2312 if (mds->mds_osc_exp)
2313 /* lov export was disconnected by mds_lov_clean;
2314 we just need to drop our ref */
2315 class_export_put(mds->mds_osc_exp);
2317 remove_proc_entry("clear", obd->obd_proc_exports_entry);
2318 lprocfs_free_per_client_stats(obd);
2319 lprocfs_free_obd_stats(obd);
2320 lprocfs_obd_cleanup(obd);
2322 lquota_cleanup(mds_quota_interface_ref, obd);
/* Force a synchronous last_rcvd write before unmounting. */
2324 mds_update_server_data(obd, 1);
2325 mds_fs_cleanup(obd);
2327 upcall_cache_cleanup(mds->mds_group_hash);
2328 mds->mds_group_hash = NULL;
2330 server_put_mount(obd->obd_name, mds->mds_vfsmnt);
2331 obd->u.obt.obt_sb = NULL;
2333 ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2334 obd->obd_namespace = NULL;
/* Stop the recovery timer if we are torn down mid-recovery. */
2336 spin_lock_bh(&obd->obd_processing_task_lock);
2337 if (obd->obd_recovering) {
2338 target_cancel_recovery_timer(obd);
2339 obd->obd_recovering = 0;
2341 spin_unlock_bh(&obd->obd_processing_task_lock);
2343 fsfilt_put_ops(obd->obd_fsops);
2345 LCONSOLE_INFO("MDT %s has stopped.\n", obd->obd_name);
/*
 * For a RESENT enqueue, try to find the lock already granted for the
 * client's remote handle and restore its cookie into 'lockh' so reply
 * reconstruction reuses it. If no such lock exists but the xid matches a
 * last-committed xid, the original reply was lost after commit; otherwise
 * clear MSG_RESENT and process the request as new.
 * NOTE(review): several branch/return lines are elided in this extract.
 */
2350 static void fixup_handle_for_resent_req(struct ptlrpc_request *req, int offset,
2351 struct ldlm_lock *new_lock,
2352 struct ldlm_lock **old_lock,
2353 struct lustre_handle *lockh)
2355 struct obd_export *exp = req->rq_export;
2356 struct ldlm_request *dlmreq =
2357 lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*dlmreq));
2358 struct lustre_handle remote_hdl = dlmreq->lock_handle[0];
2359 struct ldlm_lock *lock;
/* Only RESENT requests need handle fixup. */
2361 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
/* Look up an existing granted lock by the client-side handle. */
2364 lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
2366 if (lock != new_lock) {
2367 lockh->cookie = lock->l_handle.h_cookie;
2368 LDLM_DEBUG(lock, "restoring lock cookie");
2369 DEBUG_REQ(D_DLMTRACE, req, "restoring lock cookie "
2370 LPX64, lockh->cookie);
2372 *old_lock = LDLM_LOCK_GET(lock);
2374 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2377 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2380 /* If the xid matches, then we know this is a resent request,
2381 * and allow it. (It's probably an OPEN, for which we don't
2384 le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_xid))
2388 le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_close_xid))
2391 /* This remote handle isn't enqueued, so we never received or
2392 * processed this request. Clear MSG_RESENT, because it can
2393 * be handled like any normal request now. */
2395 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2397 DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
/* True for errors meaning the client connection itself is gone; such
 * errors are reported via rq_status rather than the lock reply. */
2401 #define IS_CLIENT_DISCONNECT_ERROR(error) \
2402 (error == -ENOTCONN || error == -ENODEV)
/*
 * DLM intent policy for the MDS namespace: executes the intent carried in
 * an enqueue (open/create, getattr/lookup, unlink, ...) and, when a lock
 * was obtained by the intent handler, replaces the client's prospective
 * lock (*lockp) with the real granted one — ELDLM_LOCK_REPLACED — or
 * aborts the lock portion with ELDLM_LOCK_ABORTED.
 * NOTE(review): case labels (IT_OPEN, IT_GETATTR/IT_LOOKUP, default),
 * several RETURNs, and some closing braces are elided in this extract.
 */
2404 static int mds_intent_policy(struct ldlm_namespace *ns,
2405 struct ldlm_lock **lockp, void *req_cookie,
2406 ldlm_mode_t mode, int flags, void *data)
2408 struct ptlrpc_request *req = req_cookie;
2409 struct ldlm_lock *lock = *lockp;
2410 struct ldlm_intent *it;
2411 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
2412 struct ldlm_reply *rep;
2413 struct lustre_handle lockh = { 0 };
2414 struct ldlm_lock *new_lock = NULL;
2415 int getattr_part = MDS_INODELOCK_UPDATE;
/* Reply layout: ptlrpc body, lock reply, mds_body, then EA buffer. */
2416 int repsize[5] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
2417 [DLM_LOCKREPLY_OFF] = sizeof(struct ldlm_reply),
2418 [DLM_REPLY_REC_OFF] = sizeof(struct mds_body),
2419 [DLM_REPLY_REC_OFF+1] = mds->mds_max_mdsize };
2420 int repbufcnt = 4, rc;
2423 LASSERT(req != NULL);
/* A plain enqueue with no intent buffer: just pack a minimal reply. */
2425 if (lustre_msg_bufcount(req->rq_reqmsg) <= DLM_INTENT_IT_OFF) {
2426 /* No intent was provided */
2427 rc = lustre_pack_reply(req, 2, repsize, NULL);
2433 it = lustre_swab_reqbuf(req, DLM_INTENT_IT_OFF, sizeof(*it),
2434 lustre_swab_ldlm_intent);
2436 CERROR("Intent missing\n");
2437 RETURN(req->rq_status = -EFAULT);
2440 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
/* Size the optional 5th buffer: ACL data for lookup-type intents, or
 * unlink cookies for IT_UNLINK. */
2442 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
2443 (it->opc & (IT_OPEN | IT_GETATTR | IT_LOOKUP)))
2444 /* we should never allow OBD_CONNECT_ACL if not configured */
2445 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
2446 else if (it->opc & IT_UNLINK)
2447 repsize[repbufcnt++] = mds->mds_max_cookiesize;
2449 /* if we do recovery we isn't send reply mds state is restored */
2450 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
2451 repsize[DLM_REPLY_REC_OFF+1] = 0;
2452 if (it->opc & IT_UNLINK)
2453 repsize[DLM_REPLY_REC_OFF+2] = 0;
2456 rc = lustre_pack_reply(req, repbufcnt, repsize, NULL);
2458 RETURN(req->rq_status = rc);
2460 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
2461 ldlm_reply_set_disposition(rep, DISP_IT_EXECD);
2463 /* execute policy */
2464 switch ((long)it->opc) {
2466 case IT_CREAT|IT_OPEN:
2467 mds_counter_incr(req->rq_export, LPROC_MDS_OPEN);
2468 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock, NULL,
2470 /* XXX swab here to assert that an mds_open reint
2471 * packet is following */
/* Open is implemented as a reint; its status lands in lock_policy_res2. */
2472 rep->lock_policy_res2 = mds_reint(req, DLM_INTENT_REC_OFF,
2475 /* We abort the lock if the lookup was negative and
2476 * we did not make it to the OPEN portion */
2477 if (!ldlm_reply_disposition(rep, DISP_LOOKUP_EXECD))
2478 RETURN(ELDLM_LOCK_ABORTED);
2479 if (ldlm_reply_disposition(rep, DISP_LOOKUP_NEG) &&
2480 !ldlm_reply_disposition(rep, DISP_OPEN_OPEN))
2483 /* If there was an error of some sort or if we are not
2484 * returning any locks */
2485 if (rep->lock_policy_res2 ||
2486 !ldlm_reply_disposition(rep, DISP_OPEN_LOCK)) {
2487 /* If it is the disconnect error (ENODEV & ENOCONN)
2488 * ptlrpc layer should know this imediately, it should
2489 * be replied by rq_stats, otherwise, return it by
2492 /* if VBR failure then return error in rq_stats too */
2493 if (IS_CLIENT_DISCONNECT_ERROR(rep->lock_policy_res2) ||
2494 rep->lock_policy_res2 == -EOVERFLOW)
2495 RETURN(rep->lock_policy_res2);
2497 RETURN(ELDLM_LOCK_ABORTED);
/* getattr/lookup intents: pick which inode bits to lock. */
2501 getattr_part = MDS_INODELOCK_LOOKUP;
2503 getattr_part |= MDS_INODELOCK_LOOKUP;
2504 OBD_COUNTER_INCREMENT(req->rq_export->exp_obd, getattr);
2506 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock,
2509 /* INODEBITS_INTEROP: if this lock was converted from a
2510 * plain lock (client does not support inodebits), then
2511 * child lock must be taken with both lookup and update
2512 * bits set for all operations.
2514 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS))
2515 getattr_part = MDS_INODELOCK_LOOKUP |
2516 MDS_INODELOCK_UPDATE;
2518 rep->lock_policy_res2 = mds_getattr_lock(req,DLM_INTENT_REC_OFF,
2519 getattr_part, &lockh);
2520 /* FIXME: LDLM can set req->rq_status. MDS sets
2521 policy_res{1,2} with disposition and status.
2522 - replay: returns 0 & req->status is old status
2523 - otherwise: returns req->status */
2524 if (ldlm_reply_disposition(rep, DISP_LOOKUP_NEG))
2525 rep->lock_policy_res2 = 0;
2526 if (!ldlm_reply_disposition(rep, DISP_LOOKUP_POS) ||
2527 rep->lock_policy_res2)
2528 RETURN(ELDLM_LOCK_ABORTED);
2529 if (req->rq_status != 0) {
2531 rep->lock_policy_res2 = req->rq_status;
2532 RETURN(ELDLM_LOCK_ABORTED);
2536 CERROR("Unhandled intent "LPD64"\n", it->opc);
2540 /* By this point, whatever function we called above must have either
2541 * filled in 'lockh', been an intent replay, or returned an error. We
2542 * want to allow replayed RPCs to not get a lock, since we would just
2543 * drop it below anyways because lock replay is done separately by the
2544 * client afterwards. For regular RPCs we want to give the new lock to
2545 * the client instead of whatever lock it was about to get. */
2546 if (new_lock == NULL)
2547 new_lock = ldlm_handle2lock(&lockh);
2548 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
2551 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
2552 it->opc, lockh.cookie);
2554 /* If we've already given this lock to a client once, then we should
2555 * have no readers or writers. Otherwise, we should have one reader
2556 * _or_ writer ref (which will be zeroed below) before returning the
2557 * lock to a client. */
2558 if (new_lock->l_export == req->rq_export) {
2559 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2561 LASSERT(new_lock->l_export == NULL);
2562 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2567 if (new_lock->l_export == req->rq_export) {
2568 /* Already gave this to the client, which means that we
2569 * reconstructed a reply. */
2570 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2572 RETURN(ELDLM_LOCK_REPLACED);
2575 /* Fixup the lock to be given to the client */
2576 lock_res_and_lock(new_lock);
2577 new_lock->l_readers = 0;
2578 new_lock->l_writers = 0;
2580 new_lock->l_export = class_export_get(req->rq_export);
2581 new_lock->l_blocking_ast = lock->l_blocking_ast;
2582 new_lock->l_completion_ast = lock->l_completion_ast;
/* The lock now belongs to a remote client, not a local user. */
2583 new_lock->l_flags &= ~LDLM_FL_LOCAL;
2585 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
2586 sizeof(lock->l_remote_handle));
2588 unlock_res_and_lock(new_lock);
/* Index the granted lock by its remote handle for resent-req lookup. */
2590 lustre_hash_add(new_lock->l_export->exp_lock_hash,
2591 &new_lock->l_remote_handle,
2592 &new_lock->l_exp_hash);
2593 LDLM_LOCK_PUT(new_lock);
2595 RETURN(ELDLM_LOCK_REPLACED);
/*
 * mdt_setup(): obd setup method for the "mdt" device type — the request
 * service half of the MDS.  Registers lprocfs entries, initializes the
 * health-check semaphore, picks service thread counts, then starts the
 * three ptlrpc services (main, setattr, readpage) and the ping evictor.
 * On failure the visible error path unregisters already-started services
 * and tears down lprocfs.
 *
 * NOTE(review): this listing has elided lines (ENTRY/RETURN, the "{"
 * after the signature, the err_thread*: / err_lprocfs: labels, and the
 * "mds->mds_service =" assignment before the first ptlrpc_init_svc call);
 * comments below describe only what is visible.
 */
2598 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
2600 struct mds_obd *mds = &obd->u.mds;
2601 struct lprocfs_static_vars lvars;
2602 int mds_min_threads;
2603 int mds_max_threads;
/* register /proc entries for this obd device */
2607 lprocfs_mdt_init_vars(&lvars);
2608 lprocfs_obd_setup(obd, lvars.obd_vars);
/* serializes service start/stop against mdt_health_check() */
2610 sema_init(&mds->mds_health_sem, 1);
/* module parameter override: use it (clamped) as both min and max */
2612 if (mds_num_threads) {
2613 /* If mds_num_threads is set, it is the min and the max. */
2614 if (mds_num_threads > MDS_THREADS_MAX)
2615 mds_num_threads = MDS_THREADS_MAX;
2616 if (mds_num_threads < MDS_THREADS_MIN)
2617 mds_num_threads = MDS_THREADS_MIN;
2618 mds_max_threads = mds_min_threads = mds_num_threads;
2620 /* Base min threads on memory and cpus */
/* heuristic: scale with #cpus * RAM pages, normalized by (27 - PAGE_SHIFT) */
2621 mds_min_threads = num_possible_cpus() * num_physpages >>
2622 (27 - CFS_PAGE_SHIFT);
2623 if (mds_min_threads < MDS_THREADS_MIN)
2624 mds_min_threads = MDS_THREADS_MIN;
2625 /* Largest auto threads start value */
2626 if (mds_min_threads > 32)
2627 mds_min_threads = 32;
2628 mds_max_threads = min(MDS_THREADS_MAX, mds_min_threads * 4);
/* main MDS request service on MDS_REQUEST_PORTAL, handled by mds_handle()
 * (presumably assigned to mds->mds_service on an elided line) */
2632 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2633 MDS_MAXREPSIZE, MDS_REQUEST_PORTAL,
2634 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2635 mds_handle, LUSTRE_MDS_NAME,
2636 obd->obd_proc_entry, target_print_req,
2637 mds_min_threads, mds_max_threads, "ll_mdt",
2640 if (!mds->mds_service) {
2641 CERROR("failed to start service\n");
2642 GOTO(err_lprocfs, rc = -ENOMEM);
2645 rc = ptlrpc_start_threads(obd, mds->mds_service);
2647 GOTO(err_thread, rc);
/* separate setattr service on its own portal so setattrs cannot be
 * starved by the main request stream */
2649 mds->mds_setattr_service =
2650 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2651 MDS_MAXREPSIZE, MDS_SETATTR_PORTAL,
2652 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2653 mds_handle, "mds_setattr",
2654 obd->obd_proc_entry, target_print_req,
2655 mds_min_threads, mds_max_threads,
2656 "ll_mdt_attr", NULL);
2657 if (!mds->mds_setattr_service) {
/* NOTE(review): message says "getattr" but this is the setattr service */
2658 CERROR("failed to start getattr service\n");
2659 GOTO(err_thread, rc = -ENOMEM);
2662 rc = ptlrpc_start_threads(obd, mds->mds_setattr_service);
2664 GOTO(err_thread2, rc);
/* readpage (readdir) service; note it uses its own minimum thread count */
2666 mds->mds_readpage_service =
2667 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2668 MDS_MAXREPSIZE, MDS_READPAGE_PORTAL,
2669 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2670 mds_handle, "mds_readpage",
2671 obd->obd_proc_entry, target_print_req,
2672 MDS_THREADS_MIN_READPAGE, mds_max_threads,
2673 "ll_mdt_rdpg", NULL);
2674 if (!mds->mds_readpage_service) {
2675 CERROR("failed to start readpage service\n");
2676 GOTO(err_thread2, rc = -ENOMEM);
2679 rc = ptlrpc_start_threads(obd, mds->mds_readpage_service);
2682 GOTO(err_thread3, rc);
/* start pinging/evicting dead clients once all services are up */
2684 ping_evictor_start();
/* error unwind (labels elided from this view): undo in reverse order */
2689 ptlrpc_unregister_service(mds->mds_readpage_service);
2690 mds->mds_readpage_service = NULL;
2692 ptlrpc_unregister_service(mds->mds_setattr_service);
2693 mds->mds_setattr_service = NULL;
2695 ptlrpc_unregister_service(mds->mds_service);
2696 mds->mds_service = NULL;
2698 lprocfs_obd_cleanup(obd);
/*
 * mdt_cleanup(): obd cleanup method for the "mdt" device type.  Reverses
 * mdt_setup(): stops the ping evictor, unregisters the three ptlrpc
 * services under mds_health_sem (so mdt_health_check() never sees a
 * half-torn-down state), and removes the lprocfs entries.
 * NOTE(review): trailing RETURN/closing brace elided from this view.
 */
2702 static int mdt_cleanup(struct obd_device *obd)
2704 struct mds_obd *mds = &obd->u.mds;
2707 ping_evictor_stop();
/* hold the health semaphore while the service pointers are invalidated */
2709 down(&mds->mds_health_sem);
2710 ptlrpc_unregister_service(mds->mds_readpage_service);
2711 ptlrpc_unregister_service(mds->mds_setattr_service);
2712 ptlrpc_unregister_service(mds->mds_service);
2713 mds->mds_readpage_service = NULL;
2714 mds->mds_setattr_service = NULL;
2715 mds->mds_service = NULL;
2716 up(&mds->mds_health_sem);
2718 lprocfs_obd_cleanup(obd);
/*
 * mdt_health_check(): o_health_check method for the "mdt" device type.
 * ORs together the health of the three ptlrpc services while holding
 * mds_health_sem (which mdt_cleanup() also takes before tearing the
 * services down).  Per the visible comment fragment, health_check
 * returns 0 when healthy and 1 when unhealthy.
 * NOTE(review): rc initialization and the final return are elided here.
 */
2723 static int mdt_health_check(struct obd_device *obd)
2725 struct mds_obd *mds = &obd->u.mds;
2728 down(&mds->mds_health_sem);
2729 rc |= ptlrpc_service_health_check(mds->mds_readpage_service);
2730 rc |= ptlrpc_service_health_check(mds->mds_setattr_service);
2731 rc |= ptlrpc_service_health_check(mds->mds_service);
2732 up(&mds->mds_health_sem);
2735 * health_check to return 0 on healthy
2736 * and 1 on unhealthy.
/*
 * mds_lvfs_fid2dentry(): lvfs callback that resolves an (id, generation)
 * pair to a dentry via mds_fid2dentry().  'data' is the obd_device, per
 * the visible cast.  NOTE(review): the fid declaration and the fid.id
 * assignment are elided from this view; presumably id is stored before
 * mds_fid2dentry() is called — confirm against the full source.
 */
2744 static struct dentry *mds_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
2747 struct obd_device *obd = data;
2750 fid.generation = gen;
2751 return mds_fid2dentry(&obd->u.mds, &fid, NULL);
/*
 * mds_health_check(): o_health_check method for the "mds" (disk) device.
 * Reports unhealthy if the backing filesystem went read-only; when built
 * with USE_HEALTH_CHECK_WRITE it additionally performs a test I/O through
 * the health-check file.  NOTE(review): rc declaration and the return
 * statements are elided from this view.
 */
2754 static int mds_health_check(struct obd_device *obd)
2756 struct obd_device_target *odt = &obd->u.obt;
2757 #ifdef USE_HEALTH_CHECK_WRITE
2758 struct mds_obd *mds = &obd->u.mds;
/* a read-only superblock means the device can no longer serve writes */
2762 if (odt->obt_sb->s_flags & MS_RDONLY)
2765 #ifdef USE_HEALTH_CHECK_WRITE
2766 LASSERT(mds->mds_obt.obt_health_check_filp != NULL);
/* !! normalizes the check result to 0/1 before OR-ing into rc */
2767 rc |= !!lvfs_check_io_health(obd, mds->mds_obt.obt_health_check_filp);
/*
 * mds_process_config(): o_process_config method — dispatches on the
 * lustre_cfg command in 'buf'.  The visible case applies PARAM_MDT
 * tunables through class_process_proc_param().  NOTE(review): other
 * switch cases / the default branch are elided from this view.
 */
2773 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
2775 struct lustre_cfg *lcfg = buf;
2778 switch(lcfg->lcfg_command) {
2780 struct lprocfs_static_vars lvars;
2781 lprocfs_mds_init_vars(&lvars);
2783 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, lcfg, obd);
/* lvfs callback table: lets the lvfs layer map fids back to dentries
 * (GNU-style "label:" designated initializer, pre-C99 idiom). */
2793 struct lvfs_callback_ops mds_lvfs_ops = {
2794 l_fid2dentry: mds_lvfs_fid2dentry,
2797 /* use obd ops to offer management infrastructure */
/* obd method table for the "mds" device (disk/export-facing half);
 * quota methods are added at runtime by init_obd_quota_ops() in
 * mds_init().  NOTE(review): closing "};" elided from this view. */
2798 static struct obd_ops mds_obd_ops = {
2799 .o_owner = THIS_MODULE,
2800 .o_connect = mds_connect,
2801 .o_reconnect = mds_reconnect,
2802 .o_init_export = mds_init_export,
2803 .o_destroy_export = mds_destroy_export,
2804 .o_disconnect = mds_disconnect,
2805 .o_setup = mds_setup,
2806 .o_precleanup = mds_precleanup,
2807 .o_cleanup = mds_cleanup,
2808 .o_postrecov = mds_postrecov,
2809 .o_statfs = mds_obd_statfs,
2810 .o_iocontrol = mds_iocontrol,
2811 .o_create = mds_obd_create,
2812 .o_destroy = mds_obd_destroy,
2813 .o_llog_init = mds_llog_init,
2814 .o_llog_finish = mds_llog_finish,
2815 .o_notify = mds_notify,
2816 .o_health_check = mds_health_check,
2817 .o_process_config = mds_process_config,
/* obd method table for the "mdt" device (ptlrpc service half); only
 * setup/cleanup/health are needed here.  NOTE(review): closing "};"
 * elided from this view. */
2820 static struct obd_ops mdt_obd_ops = {
2821 .o_owner = THIS_MODULE,
2822 .o_setup = mdt_setup,
2823 .o_cleanup = mdt_cleanup,
2824 .o_health_check = mdt_health_check,
/* reference to the quota interface exported by the (optional) lquota
 * module; resolved at mds_init() time via PORTAL_SYMBOL_GET() */
2827 quota_interface_t *mds_quota_interface_ref;
2828 extern quota_interface_t mds_quota_interface;
/*
 * mds_init(): module entry point.  Loads and binds the optional lquota
 * module, wires its ops into mds_obd_ops, then registers the "mds" and
 * "mdt" obd types with the class layer.  NOTE(review): error-handling
 * lines between lquota_init() and the symbol-put appear elided here.
 */
2830 static int __init mds_init(void)
2833 struct lprocfs_static_vars lvars;
/* best-effort: quota support is optional, so failure to resolve the
 * symbol leaves mds_quota_interface_ref NULL */
2835 request_module("lquota");
2836 mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
2837 rc = lquota_init(mds_quota_interface_ref);
/* on the visible (error) path, drop the module reference again */
2839 if (mds_quota_interface_ref)
2840 PORTAL_SYMBOL_PUT(mds_quota_interface);
/* splice quota methods into the mds obd method table */
2843 init_obd_quota_ops(mds_quota_interface_ref, &mds_obd_ops);
/* register both device types served by this module */
2845 lprocfs_mds_init_vars(&lvars);
2846 class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2847 lprocfs_mdt_init_vars(&lvars);
2848 class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
/*
 * mds_exit(): module exit point — reverses mds_init(): shuts down quota,
 * drops the lquota module reference, and unregisters both obd types.
 * (__exit is commented out in the original, presumably so the symbol
 * stays available outside the exit section.)
 */
2853 static void /*__exit*/ mds_exit(void)
2855 lquota_exit(mds_quota_interface_ref);
2856 if (mds_quota_interface_ref)
2857 PORTAL_SYMBOL_PUT(mds_quota_interface);
2859 class_unregister_type(LUSTRE_MDS_NAME);
2860 class_unregister_type(LUSTRE_MDT_NAME);
/* standard kernel module metadata and entry/exit registration */
2863 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2864 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2865 MODULE_LICENSE("GPL");
2867 module_init(mds_init);
2868 module_exit(mds_exit);