1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/mds/mds_reint.c
5 * Lustre Metadata Server (mds) reintegration routines
7 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
12 * This file is part of Lustre, http://www.lustre.org.
14 * Lustre is free software; you can redistribute it and/or
15 * modify it under the terms of version 2 of the GNU General Public
16 * License as published by the Free Software Foundation.
18 * Lustre is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with Lustre; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 # define EXPORT_SYMTAB
31 #define DEBUG_SUBSYSTEM S_MDS
34 #include <linux/obd_support.h>
35 #include <linux/obd_class.h>
36 #include <linux/obd.h>
37 #include <linux/lustre_lib.h>
38 #include <linux/lustre_idl.h>
39 #include <linux/lustre_mds.h>
40 #include <linux/lustre_dlm.h>
41 #include <linux/lustre_fsfilt.h>
43 #include "mds_internal.h"
/* Journal-commit callback: forwards the committed transaction number to the
 * generic obd layer via obd_transno_commit_cb().  The 'data' cookie is unused
 * here.  NOTE(review): this extraction is missing source lines (the trailing
 * 'int error' parameter line and the function braces) -- consult the complete
 * file before modifying. */
45 void mds_commit_cb(struct obd_device *obd, __u64 transno, void *data,
48 obd_transno_commit_cb(obd, transno, error);
/* Payload handed to mds_cancel_cookies_cb() so llog cookies can be cancelled
 * after the transaction that generated them commits.  mlcd_cookies[0] is a
 * pre-C99 "struct hack" flexible array of llog cookies allocated inline at
 * the end of the structure.  NOTE(review): the members mlcd_size,
 * mlcd_cookielen and mlcd_eadatalen referenced by the callback are on lines
 * missing from this extraction. */
51 struct mds_logcancel_data {
52 struct lov_mds_md *mlcd_lmm;
56 struct llog_cookie mlcd_cookies[0];
/* Commit callback that cancels the llog cookies stashed in a
 * mds_logcancel_data (see mds_reint_setattr, which registers this via
 * fsfilt_add_journal_cb).  Unpacks the stored LOV EA into an lsm, cancels
 * mlcd_cookielen/sizeof(cookie) cookies through the llog context, then frees
 * the mlcd allocation.  NOTE(review): several lines (braces, 'int rc'
 * declaration, error-branch structure) are missing from this extraction. */
60 static void mds_cancel_cookies_cb(struct obd_device *obd, __u64 transno,
61 void *cb_data, int error)
63 struct mds_logcancel_data *mlcd = cb_data;
64 struct lov_stripe_md *lsm = NULL;
65 struct llog_ctxt *ctxt;
68 obd_transno_commit_cb(obd, transno, error);
70 CDEBUG(D_HA, "cancelling %d cookies\n",
71 (int)(mlcd->mlcd_cookielen / sizeof(*mlcd->mlcd_cookies)));
/* Rebuild the in-memory stripe MD from the packed LOV EA saved in mlcd. */
73 rc = obd_unpackmd(obd->u.mds.mds_osc_exp, &lsm, mlcd->mlcd_lmm,
74 mlcd->mlcd_eadatalen);
76 CERROR("bad LSM cancelling %d log cookies: rc %d\n",
77 (int)(mlcd->mlcd_cookielen/sizeof(*mlcd->mlcd_cookies)),
80 ///* XXX 0 normally, SENDNOW for debug */);
/* Context is selected from the subsystem recorded in the first cookie. */
81 ctxt = llog_get_context(obd,mlcd->mlcd_cookies[0].lgc_subsys+1);
82 rc = llog_cancel(ctxt, lsm, mlcd->mlcd_cookielen /
83 sizeof(*mlcd->mlcd_cookies),
84 mlcd->mlcd_cookies, OBD_LLOG_FL_SENDNOW);
86 CERROR("error cancelling %d log cookies: rc %d\n",
87 (int)(mlcd->mlcd_cookielen /
88 sizeof(*mlcd->mlcd_cookies)), rc);
/* mlcd was sized/allocated by the caller; mlcd_size records that size. */
91 OBD_FREE(mlcd, mlcd->mlcd_size);
94 /* Assumes caller has already pushed us into the kernel context. */
/* Finish a reintegration transaction: assign (or record) the transno, update
 * the per-client last_rcvd slot (mcd), register the commit callback, write
 * the LOV objid file, and commit the fsfilt handle.  If 'handle' is NULL a
 * private SETATTR transaction against the last_rcvd inode is started here.
 * Returns err from the record write/commit path via the (not visible here)
 * tail of the function.  NOTE(review): many lines (braces, declarations of
 * transno/err/log_pri, several error branches) are missing from this
 * extraction -- do not modify without the full source. */
95 int mds_finish_transno(struct mds_obd *mds, struct inode *inode, void *handle,
96 struct ptlrpc_request *req, int rc, __u32 op_data)
98 struct mds_export_data *med = &req->rq_export->exp_mds_data;
99 struct mds_client_data *mcd = med->med_mcd;
100 struct obd_device *obd = req->rq_export->exp_obd;
107 /* if the export has already been failed, we have no last_rcvd slot */
108 if (req->rq_export->exp_failed) {
109 CWARN("committing transaction for disconnected client %s\n",
110 req->rq_export->exp_client_uuid.uuid);
119 if (handle == NULL) {
120 /* if we're starting our own xaction, use our own inode */
121 inode = mds->mds_rcvd_filp->f_dentry->d_inode;
122 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
123 if (IS_ERR(handle)) {
124 CERROR("fsfilt_start: %ld\n", PTR_ERR(handle));
125 RETURN(PTR_ERR(handle));
/* Replay: a resent/replayed request carries its original transno. */
131 transno = req->rq_reqmsg->transno;
133 LASSERT(transno == 0);
134 } else if (transno == 0) {
/* New operation: allocate the next transno under the transno lock. */
135 spin_lock(&mds->mds_transno_lock);
136 transno = ++mds->mds_last_transno;
137 spin_unlock(&mds->mds_transno_lock);
/* Replayed transno: only ratchet mds_last_transno forward, never back. */
139 spin_lock(&mds->mds_transno_lock);
140 if (transno > mds->mds_last_transno)
141 mds->mds_last_transno = transno;
142 spin_unlock(&mds->mds_transno_lock);
144 req->rq_repmsg->transno = req->rq_transno = transno;
/* Record last transno/xid/result/op-data in the client's last_rcvd slot
 * (little-endian on disk) so the client can be recovered after a crash. */
145 mcd->mcd_last_transno = cpu_to_le64(transno);
146 mcd->mcd_last_xid = cpu_to_le64(req->rq_xid);
147 mcd->mcd_last_result = cpu_to_le32(rc);
148 mcd->mcd_last_data = cpu_to_le32(op_data);
150 fsfilt_add_journal_cb(req->rq_export->exp_obd, transno, handle,
151 mds_commit_cb, NULL);
152 err = fsfilt_write_record(obd, mds->mds_rcvd_filp, mcd, sizeof(*mcd),
161 DEBUG_REQ(log_pri, req,
162 "wrote trans #"LPU64" rc %d client %s at idx %u: err = %d",
163 transno, rc, mcd->mcd_uuid, med->med_idx, err);
165 err = mds_lov_write_objids(obd);
171 CDEBUG(log_pri, "wrote objids: err = %d\n", err);
174 err = fsfilt_commit(obd, inode, handle, 0);
176 CERROR("error committing transaction: %d\n", err);
184 /* this gives the same functionality as the code between
185 * sys_chmod and inode_setattr
186 * chown_common and inode_setattr
187 * utimes and inode_setattr
/* Normalize/authorize the iattr in a client setattr record on the MDS,
 * mirroring the permission and mode-bit fixups the local VFS would have done
 * (timestamp defaults, write-permission checks, setuid/setgid stripping on
 * chown, mode sanitation on chmod).  Only runs the fixups when the client
 * VFS has not already done them (ATTR_RAW).  NOTE(review): this extraction
 * is missing lines (braces, 'int error' declaration, RETURN paths, the
 * CAP_FSETID/immutable checks between the visible lines) -- treat the
 * control flow shown here as partial. */
189 int mds_fix_attr(struct inode *inode, struct mds_update_record *rec)
191 time_t now = CURRENT_SECONDS;
192 struct iattr *attr = &rec->ur_iattr;
193 unsigned int ia_valid = attr->ia_valid;
197 /* only fix up attrs if the client VFS didn't already */
198 if (!(ia_valid & ATTR_RAW))
/* Fill in "now" for any timestamp the client did not set explicitly. */
201 if (!(ia_valid & ATTR_CTIME_SET))
202 LTIME_S(attr->ia_ctime) = now;
203 if (!(ia_valid & ATTR_ATIME_SET))
204 LTIME_S(attr->ia_atime) = now;
205 if (!(ia_valid & ATTR_MTIME_SET))
206 LTIME_S(attr->ia_mtime) = now;
208 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
/* utimes(NULL)-style update: owner or writer may touch both timestamps. */
212 if ((ia_valid & (ATTR_MTIME|ATTR_ATIME)) == (ATTR_MTIME|ATTR_ATIME)) {
213 if (rec->ur_fsuid != inode->i_uid &&
214 (error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
218 if (ia_valid & ATTR_SIZE) {
219 if ((error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
223 if (ia_valid & ATTR_UID) {
226 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
/* -1 means "leave unchanged", as in the chown(2) convention. */
228 if (attr->ia_uid == (uid_t) -1)
229 attr->ia_uid = inode->i_uid;
230 if (attr->ia_gid == (gid_t) -1)
231 attr->ia_gid = inode->i_gid;
232 attr->ia_mode = inode->i_mode;
234 * If the user or group of a non-directory has been
235 * changed by a non-root user, remove the setuid bit.
236 * 19981026 David C Niemi <niemi@tux.org>
238 * Changed this to apply to all users, including root,
239 * to avoid some races. This is the behavior we had in
240 * 2.0. The check for non-root was definitely wrong
241 * for 2.2 anyway, as it should have been using
242 * CAP_FSETID rather than fsuid -- 19990830 SD.
244 if ((inode->i_mode & S_ISUID) == S_ISUID &&
245 !S_ISDIR(inode->i_mode)) {
246 attr->ia_mode &= ~S_ISUID;
247 attr->ia_valid |= ATTR_MODE;
250 * Likewise, if the user or group of a non-directory
251 * has been changed by a non-root user, remove the
252 * setgid bit UNLESS there is no group execute bit
253 * (this would be a file marked for mandatory
254 * locking). 19981026 David C Niemi <niemi@tux.org>
256 * Removed the fsuid check (see the comment above) --
259 if (((inode->i_mode & (S_ISGID | S_IXGRP)) ==
260 (S_ISGID | S_IXGRP)) && !S_ISDIR(inode->i_mode)) {
261 attr->ia_mode &= ~S_ISGID;
262 attr->ia_valid |= ATTR_MODE;
264 } else if (ia_valid & ATTR_MODE) {
265 int mode = attr->ia_mode;
267 if (attr->ia_mode == (umode_t)-1)
268 mode = inode->i_mode;
/* Keep the file-type bits from the inode; take only permission bits
 * from the request. */
270 (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
/* For a resent request, find the outstanding reply state with the same xid
 * on this export and transfer ("steal") its saved DLM locks onto the new
 * request, so the locks are released when the *new* reply is acked.  The old
 * reply is then scheduled for cleanup via the difficult-reply path.
 * Lock ordering (see CAVEAT EMPTOR below): exp_lock (irqsave) is taken
 * before the per-service srv_lock.  NOTE(review): lines are missing from
 * this extraction ('unsigned long flags', 'int i', braces, the loop-exit
 * path between the two unlocks). */
275 void mds_steal_ack_locks(struct ptlrpc_request *req)
277 struct obd_export *exp = req->rq_export;
278 struct list_head *tmp;
279 struct ptlrpc_reply_state *oldrep;
280 struct ptlrpc_service *svc;
282 char str[PTL_NALFMT_SIZE];
285 /* CAVEAT EMPTOR: spinlock order */
286 spin_lock_irqsave (&exp->exp_lock, flags);
287 list_for_each (tmp, &exp->exp_outstanding_replies) {
288 oldrep = list_entry(tmp, struct ptlrpc_reply_state,rs_exp_list);
/* Only the reply matching this resent request's xid is interesting. */
290 if (oldrep->rs_xid != req->rq_xid)
/* Same xid but different opcode is a protocol anomaly worth logging. */
293 if (oldrep->rs_msg.opc != req->rq_reqmsg->opc)
294 CERROR ("Resent req xid "LPX64" has mismatched opc: "
295 "new %d old %d\n", req->rq_xid,
296 req->rq_reqmsg->opc, oldrep->rs_msg.opc);
298 svc = oldrep->rs_srv_ni->sni_service;
299 spin_lock (&svc->srv_lock);
301 list_del_init (&oldrep->rs_exp_list);
303 CWARN("Stealing %d locks from rs %p x"LPD64".t"LPD64
305 oldrep->rs_nlocks, oldrep,
306 oldrep->rs_xid, oldrep->rs_transno, oldrep->rs_msg.opc,
307 ptlrpc_peernid2str(&exp->exp_connection->c_peer, str));
/* Re-save each lock against the new request, then zero the old count
 * so the old reply state no longer owns them. */
309 for (i = 0; i < oldrep->rs_nlocks; i++)
310 ptlrpc_save_lock(req,
311 &oldrep->rs_locks[i],
312 oldrep->rs_modes[i]);
313 oldrep->rs_nlocks = 0;
315 DEBUG_REQ(D_HA, req, "stole locks for");
316 ptlrpc_schedule_difficult_reply (oldrep);
318 spin_unlock (&svc->srv_lock);
319 spin_unlock_irqrestore (&exp->exp_lock, flags);
322 spin_unlock_irqrestore (&exp->exp_lock, flags);
/* Reconstruct a resent request's reply from the client's last_rcvd slot:
 * restore the saved transno and status into both the reply message and the
 * request, then steal any reply-ack locks from the original reply.
 * NOTE(review): braces and any intervening lines are missing from this
 * extraction. */
325 void mds_req_from_mcd(struct ptlrpc_request *req, struct mds_client_data *mcd)
327 DEBUG_REQ(D_HA, req, "restoring transno "LPD64"/status %d",
328 mcd->mcd_last_transno, mcd->mcd_last_result);
329 req->rq_repmsg->transno = req->rq_transno = mcd->mcd_last_transno;
330 req->rq_repmsg->status = req->rq_status = mcd->mcd_last_result;
332 mds_steal_ack_locks(req);
/* Replay path for a resent setattr: restore transno/status from the client
 * data, re-lookup the target by fid, and repack the reply body's fid and
 * attributes exactly as the original reply would have.  OST-specific fields
 * (size/blocks, m/atime) are only marked valid if this setattr actually set
 * them, matching mds_reint_setattr.  NOTE(review): missing lines include
 * 'struct dentry *de', braces, and the dput/RETURN tail. */
335 static void reconstruct_reint_setattr(struct mds_update_record *rec,
336 int offset, struct ptlrpc_request *req)
338 struct mds_export_data *med = &req->rq_export->exp_mds_data;
339 struct mds_obd *obd = &req->rq_export->exp_obd->u.mds;
341 struct mds_body *body;
343 mds_req_from_mcd(req, med->med_mcd);
345 de = mds_fid2dentry(obd, rec->ur_fid1, NULL);
/* On replay, a lookup failure must reproduce the original failure code. */
347 LASSERT(PTR_ERR(de) == req->rq_status);
351 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
352 mds_pack_inode2fid(&body->fid1, de->d_inode);
353 mds_pack_inode2body(body, de->d_inode);
355 /* Don't return OST-specific attributes if we didn't just set them */
356 if (rec->ur_iattr.ia_valid & ATTR_SIZE)
357 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
358 if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
359 body->valid |= OBD_MD_FLMTIME;
360 if (rec->ur_iattr.ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
361 body->valid |= OBD_MD_FLATIME;
366 /* In the raw-setattr case, we lock the child inode.
367 * In the write-back case or if being called from open, the client holds a lock
370 * We use the ATTR_FROM_OPEN flag to tell these cases apart. */
/* Apply a client setattr on the MDS: fix up the iattr (mds_fix_attr), apply
 * it through fsfilt, optionally store a new LOV EA, pack the reply body,
 * and arrange post-commit llog-cookie cancellation via a mds_logcancel_data
 * registered with fsfilt_add_journal_cb.  Resent requests are answered by
 * reconstruct_reint_setattr.  NOTE(review): this extraction omits many
 * lines (braces, ENTRY/RETURN, several cleanup cases, the lock-dentry
 * variables), so the cleanup_phase state machine shown here is partial. */
371 static int mds_reint_setattr(struct mds_update_record *rec, int offset,
372 struct ptlrpc_request *req,
373 struct lustre_handle *lh)
375 struct mds_obd *mds = mds_req2mds(req);
376 struct obd_device *obd = req->rq_export->exp_obd;
377 struct mds_body *body;
379 struct inode *inode = NULL;
380 struct lustre_handle lockh;
382 struct mds_logcancel_data *mlcd = NULL;
383 int rc = 0, cleanup_phase = 0, err, locked = 0;
386 LASSERT(offset == 0);
388 DEBUG_REQ(D_INODE, req, "setattr "LPU64"/%u %x", rec->ur_fid1->id,
389 rec->ur_fid1->generation, rec->ur_iattr.ia_valid);
391 MDS_CHECK_RESENT(req, reconstruct_reint_setattr(rec, offset, req));
/* From-open setattr: client already holds the lock, plain fid lookup.
 * Otherwise take a PW lock on the child ourselves. */
393 if (rec->ur_iattr.ia_valid & ATTR_FROM_OPEN) {
394 de = mds_fid2dentry(mds, rec->ur_fid1, NULL);
396 GOTO(cleanup, rc = PTR_ERR(de));
398 de = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_PW,
401 GOTO(cleanup, rc = PTR_ERR(de));
408 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
409 rec->ur_eadata != NULL)
412 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_SETATTR_WRITE, inode->i_sb);
414 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
416 GOTO(cleanup, rc = PTR_ERR(handle));
418 if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
419 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu\n",
420 LTIME_S(rec->ur_iattr.ia_mtime),
421 LTIME_S(rec->ur_iattr.ia_ctime));
422 rc = mds_fix_attr(inode, rec);
/* ATTR_ATTR_FLAG carries ext2/ext3-style inode flags (chattr). */
426 if (rec->ur_iattr.ia_valid & ATTR_ATTR_FLAG) /* ioctl */
427 rc = fsfilt_iocontrol(obd, inode, NULL, EXT3_IOC_SETFLAGS,
428 (long)&rec->ur_iattr.ia_attr_flags);
430 rc = fsfilt_setattr(obd, de, handle, &rec->ur_iattr, 0);
/* Optional striping EA update: validate it by round-tripping through
 * the LOV before storing it in the inode's EA. */
432 if (rc == 0 && (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
433 rec->ur_eadata != NULL) {
434 struct lov_stripe_md *lsm = NULL;
436 rc = ll_permission(inode, MAY_WRITE, NULL);
440 rc = obd_iocontrol(OBD_IOC_LOV_SETSTRIPE,
441 mds->mds_osc_exp, 0, &lsm, rec->ur_eadata);
445 obd_free_memmd(mds->mds_osc_exp, &lsm);
447 rc = fsfilt_set_md(obd, inode, handle, rec->ur_eadata,
453 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
454 mds_pack_inode2fid(&body->fid1, inode);
455 mds_pack_inode2body(body, inode);
457 /* Don't return OST-specific attributes if we didn't just set them */
458 if (rec->ur_iattr.ia_valid & ATTR_SIZE)
459 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
460 if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
461 body->valid |= OBD_MD_FLMTIME;
462 if (rec->ur_iattr.ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
463 body->valid |= OBD_MD_FLATIME;
/* Defer llog cookie cancellation until the transaction commits: copy
 * cookies + EA into an mlcd consumed by mds_cancel_cookies_cb. */
465 if (rc == 0 && rec->ur_cookielen && !IS_ERR(mds->mds_osc_obd)) {
466 OBD_ALLOC(mlcd, sizeof(*mlcd) + rec->ur_cookielen +
469 mlcd->mlcd_size = sizeof(*mlcd) + rec->ur_cookielen +
471 mlcd->mlcd_eadatalen = rec->ur_eadatalen;
472 mlcd->mlcd_cookielen = rec->ur_cookielen;
473 mlcd->mlcd_lmm = (void *)&mlcd->mlcd_cookies +
474 mlcd->mlcd_cookielen;
475 memcpy(&mlcd->mlcd_cookies, rec->ur_logcookies,
476 mlcd->mlcd_cookielen);
477 memcpy(mlcd->mlcd_lmm, rec->ur_eadata,
478 mlcd->mlcd_eadatalen);
480 CERROR("unable to allocate log cancel data\n");
486 fsfilt_add_journal_cb(req->rq_export->exp_obd, 0, handle,
487 mds_cancel_cookies_cb, mlcd);
488 err = mds_finish_transno(mds, inode, handle, req, rc, 0);
489 switch (cleanup_phase) {
491 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
492 rec->ur_eadata != NULL)
/* On error drop the PW lock; on success save it for the reply ack. */
497 ldlm_lock_decref(&lockh, LCK_PW);
499 ptlrpc_save_lock (req, &lockh, LCK_PW);
/* Replay path for a resent create: restore transno/status, then re-lookup
 * parent and child by name to repack the reply body's fid/attributes.
 * ur_namelen includes the NUL terminator, hence the "- 1" in the lookup.
 * NOTE(review): lines are missing (braces, the early-return for an error
 * status, dput calls). */
514 static void reconstruct_reint_create(struct mds_update_record *rec, int offset,
515 struct ptlrpc_request *req)
517 struct mds_export_data *med = &req->rq_export->exp_mds_data;
518 struct mds_obd *obd = &req->rq_export->exp_obd->u.mds;
519 struct dentry *parent, *child;
520 struct mds_body *body;
522 mds_req_from_mcd(req, med->med_mcd);
527 parent = mds_fid2dentry(obd, rec->ur_fid1, NULL);
528 LASSERT(!IS_ERR(parent));
529 child = ll_lookup_one_len(rec->ur_name, parent, rec->ur_namelen - 1);
530 LASSERT(!IS_ERR(child));
531 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
532 mds_pack_inode2fid(&body->fid1, child->d_inode);
533 mds_pack_inode2body(body, child->d_inode);
/* Create a child (regular file, dir, symlink, or device node) in a locked
 * parent directory.  Flow: PW-lock + lookup parent, lookup child, dispatch
 * on S_IFMT to the matching vfs_*/ll_vfs_* call inside an fsfilt
 * transaction, set the new inode's owner/timestamps, take the inode-reuse
 * lock for a freshly allocated inode (bug 2029), pack the reply, and finish
 * the transno.  On mid-transaction failure the just-created name is rolled
 * back with vfs_rmdir/vfs_unlink.  ur_fid2, when set, requests recreation
 * with a specific inum/generation (replay).  NOTE(review): this extraction
 * omits many lines (braces, ENTRY/RETURN, several switch labels, iattr
 * declaration, parts of the cleanup ladder); the visible control flow is
 * partial. */
538 static int mds_reint_create(struct mds_update_record *rec, int offset,
539 struct ptlrpc_request *req,
540 struct lustre_handle *lh)
542 struct dentry *dparent = NULL;
543 struct mds_obd *mds = mds_req2mds(req);
544 struct obd_device *obd = req->rq_export->exp_obd;
545 struct dentry *dchild = NULL;
546 struct inode *dir = NULL;
548 struct lustre_handle lockh;
549 int rc = 0, err, type = rec->ur_mode & S_IFMT, cleanup_phase = 0;
551 struct dentry_params dp;
554 LASSERT(offset == 0);
555 LASSERT(!strcmp(req->rq_export->exp_obd->obd_type->typ_name, "mds"));
557 DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u name %s mode %o",
558 rec->ur_fid1->id, rec->ur_fid1->generation,
559 rec->ur_name, rec->ur_mode);
561 MDS_CHECK_RESENT(req, reconstruct_reint_create(rec, offset, req));
563 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_CREATE))
564 GOTO(cleanup, rc = -ESTALE);
566 dparent = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_PW, &lockh,
567 rec->ur_name, rec->ur_namelen - 1);
568 if (IS_ERR(dparent)) {
569 rc = PTR_ERR(dparent);
571 CERROR("parent lookup error %d\n", rc);
574 cleanup_phase = 1; /* locked parent dentry */
575 dir = dparent->d_inode;
578 ldlm_lock_dump_handle(D_OTHER, &lockh);
580 dchild = ll_lookup_one_len(rec->ur_name, dparent, rec->ur_namelen - 1);
581 if (IS_ERR(dchild)) {
582 rc = PTR_ERR(dchild);
583 CERROR("child lookup error %d\n", rc);
587 cleanup_phase = 2; /* child dentry */
589 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_CREATE_WRITE, dir->i_sb);
/* BSD semantics: subdirectories of a setgid dir inherit S_ISGID. */
591 if (dir->i_mode & S_ISGID) {
592 if (S_ISDIR(rec->ur_mode))
593 rec->ur_mode |= S_ISGID;
/* Pass the desired inum to the fs via d_fsdata (replay recreation). */
596 dchild->d_fsdata = (void *)&dp;
597 dp.p_inum = (unsigned long)rec->ur_fid2->id;
602 handle = fsfilt_start(obd, dir, FSFILT_OP_CREATE, NULL);
604 GOTO(cleanup, rc = PTR_ERR(handle));
605 rc = ll_vfs_create(dir, dchild, rec->ur_mode, NULL);
610 handle = fsfilt_start(obd, dir, FSFILT_OP_MKDIR, NULL);
612 GOTO(cleanup, rc = PTR_ERR(handle));
613 rc = vfs_mkdir(dir, dchild, rec->ur_mode);
618 handle = fsfilt_start(obd, dir, FSFILT_OP_SYMLINK, NULL);
620 GOTO(cleanup, rc = PTR_ERR(handle));
621 if (rec->ur_tgt == NULL) /* no target supplied */
622 rc = -EINVAL; /* -EPROTO? */
624 rc = ll_vfs_symlink(dir, dchild, rec->ur_tgt, S_IALLUGO);
632 int rdev = rec->ur_rdev;
633 handle = fsfilt_start(obd, dir, FSFILT_OP_MKNOD, NULL);
635 GOTO(cleanup, (handle = NULL, rc = PTR_ERR(handle)));
636 rc = vfs_mknod(dir, dchild, rec->ur_mode, rdev);
641 CERROR("bad file type %o creating %s\n", type, rec->ur_name);
642 dchild->d_fsdata = NULL;
643 GOTO(cleanup, rc = -EINVAL);
646 /* In case we stored the desired inum in here, we want to clean up. */
647 if (dchild->d_fsdata == (void *)(unsigned long)rec->ur_fid2->id)
648 dchild->d_fsdata = NULL;
651 CDEBUG(D_INODE, "error during create: %d\n", rc);
655 struct inode *inode = dchild->d_inode;
656 struct mds_body *body;
/* Stamp the new inode with the client-supplied time and fs-ids; gid
 * comes from the parent when it is setgid. */
659 LTIME_S(iattr.ia_atime) = rec->ur_time;
660 LTIME_S(iattr.ia_ctime) = rec->ur_time;
661 LTIME_S(iattr.ia_mtime) = rec->ur_time;
662 iattr.ia_uid = rec->ur_fsuid;
663 if (dir->i_mode & S_ISGID)
664 iattr.ia_gid = dir->i_gid;
666 iattr.ia_gid = rec->ur_fsgid;
667 iattr.ia_valid = ATTR_UID | ATTR_GID | ATTR_ATIME |
668 ATTR_MTIME | ATTR_CTIME;
670 if (rec->ur_fid2->id) {
671 LASSERT(rec->ur_fid2->id == inode->i_ino);
672 inode->i_generation = rec->ur_fid2->generation;
673 /* Dirtied and committed by the upcoming setattr. */
674 CDEBUG(D_INODE, "recreated ino %lu with gen %u\n",
675 inode->i_ino, inode->i_generation);
677 struct lustre_handle child_ino_lockh;
679 CDEBUG(D_INODE, "created ino %lu with gen %x\n",
680 inode->i_ino, inode->i_generation);
682 /* The inode we were allocated may have just been freed
683 * by an unlink operation. We take this lock to
684 * synchronize against the matching reply-ack-lock taken
685 * in unlink, to avoid replay problems if this reply
686 * makes it out to the client but the unlink's does not.
687 * See bug 2029 for more detail.*/
688 rc = mds_lock_new_child(obd, inode, &child_ino_lockh);
689 if (rc != ELDLM_OK) {
690 CERROR("error locking for unlink/create sync: "
693 ldlm_lock_decref(&child_ino_lockh, LCK_EX);
697 rc = fsfilt_setattr(obd, dchild, handle, &iattr, 0);
699 CERROR("error on child setattr: rc = %d\n", rc);
/* Update the parent's m/ctime for the new directory entry. */
701 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
702 rc = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
704 CERROR("error on parent setattr: rc = %d\n", rc);
706 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
707 mds_pack_inode2fid(&body->fid1, inode);
708 mds_pack_inode2body(body, inode);
713 err = mds_finish_transno(mds, dir, handle, req, rc, 0);
716 /* Destroy the file we just created. This should not need
717 * extra journal credits, as we have already modified all of
718 * the blocks needed in order to create the file in the first
723 err = vfs_rmdir(dir, dchild);
725 CERROR("rmdir in error path: %d\n", err);
728 err = vfs_unlink(dir, dchild);
730 CERROR("unlink in error path: %d\n", err);
736 switch (cleanup_phase) {
737 case 2: /* child dentry */
739 case 1: /* locked parent dentry */
/* On error drop the parent PW lock; on success save it for the ack. */
741 ldlm_lock_decref(&lockh, LCK_PW);
743 ptlrpc_save_lock (req, &lockh, LCK_PW);
749 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
/* Lexicographic "greater than" comparison of two DLM resource ids, used to
 * sort locks into a canonical enqueue order (deadlock avoidance).  An
 * all-zero name component sorts as greater so empty resources end up last.
 * NOTE(review): the 'int i' declaration, return statements and braces are
 * on lines missing from this extraction. */
756 int res_gt(struct ldlm_res_id *res1, struct ldlm_res_id *res2)
760 for (i = 0; i < RES_NAME_SIZE; i++) {
761 /* return 1 here, because enqueue_ordered will skip resources
762 * of all zeroes if they're sorted to the end of the list. */
763 if (res1->name[i] == 0 && res2->name[i] != 0)
765 if (res2->name[i] == 0 && res1->name[i] != 0)
768 if (res1->name[i] > res2->name[i])
770 if (res1->name[i] < res2->name[i])
776 /* This function doesn't use ldlm_match_or_enqueue because we're always called
777 * with EX or PW locks, and the MDS is no longer allowed to match write locks,
778 * because they take the place of local semaphores.
780 * One or two locks are taken in numerical order. A res_id->name[0] of 0 means
781 * no lock is taken for that res_id. Must be at least one non-zero res_id. */
/* Enqueue up to two PLAIN locks in res_gt() order to avoid deadlocks.  If
 * both resources are identical the second handle is a copy of the first
 * with an extra reference rather than a second enqueue.  NOTE(review):
 * 'int rc, flags' declarations, braces, and the rc-check after the first
 * enqueue are on lines missing from this extraction. */
782 int enqueue_ordered_locks(struct obd_device *obd, struct ldlm_res_id *p1_res_id,
783 struct lustre_handle *p1_lockh, int p1_lock_mode,
784 struct ldlm_res_id *p2_res_id,
785 struct lustre_handle *p2_lockh, int p2_lock_mode)
787 struct ldlm_res_id *res_id[2] = { p1_res_id, p2_res_id };
788 struct lustre_handle *handles[2] = { p1_lockh, p2_lockh };
789 int lock_modes[2] = { p1_lock_mode, p2_lock_mode };
793 LASSERT(p1_res_id != NULL && p2_res_id != NULL);
795 CDEBUG(D_INFO, "locks before: "LPU64"/"LPU64"\n",
796 res_id[0]->name[0], res_id[1]->name[0]);
/* Swap so that index 0 always holds the smaller resource. */
798 if (res_gt(p1_res_id, p2_res_id)) {
799 handles[1] = p1_lockh;
800 handles[0] = p2_lockh;
801 res_id[1] = p1_res_id;
802 res_id[0] = p2_res_id;
803 lock_modes[1] = p1_lock_mode;
804 lock_modes[0] = p2_lock_mode;
807 CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"\n",
808 res_id[0]->name[0], res_id[1]->name[0]);
810 flags = LDLM_FL_LOCAL_ONLY;
811 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, *res_id[0],
812 LDLM_PLAIN, NULL, lock_modes[0], &flags,
813 mds_blocking_ast, ldlm_completion_ast, NULL, NULL,
814 NULL, 0, NULL, handles[0]);
817 ldlm_lock_dump_handle(D_OTHER, handles[0]);
/* Same resource twice: duplicate the handle and add a reference
 * instead of enqueueing again. */
819 if (memcmp(res_id[0], res_id[1], sizeof(*res_id[0])) == 0) {
820 memcpy(handles[1], handles[0], sizeof(*(handles[1])));
821 ldlm_lock_addref(handles[1], lock_modes[1]);
822 } else if (res_id[1]->name[0] != 0) {
823 flags = LDLM_FL_LOCAL_ONLY;
824 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
825 *res_id[1], LDLM_PLAIN, NULL,
826 lock_modes[1], &flags, mds_blocking_ast,
827 ldlm_completion_ast, NULL, NULL, NULL, 0,
/* Second enqueue failed: release the first lock before returning. */
829 if (rc != ELDLM_OK) {
830 ldlm_lock_decref(handles[0], lock_modes[0]);
833 ldlm_lock_dump_handle(D_OTHER, handles[1]);
/* Enqueue up to four PLAIN locks (two parents, two children, as used by
 * rename) in res_gt() order.  The arrays are sized 5 so that slot [4] can
 * be used as the insertion-sort scratch element.  Zero res_id names are
 * skipped; duplicate adjacent resources get a handle copy + addref instead
 * of a second enqueue.  On enqueue failure, previously acquired locks are
 * released (out_err path).  NOTE(review): braces, ENTRY/RETURN, the
 * 'sorted' initialisation, parts of the sort loop and the out_err loop
 * header are on lines missing from this extraction. */
839 int enqueue_4ordered_locks(struct obd_device *obd,struct ldlm_res_id *p1_res_id,
840 struct lustre_handle *p1_lockh, int p1_lock_mode,
841 struct ldlm_res_id *p2_res_id,
842 struct lustre_handle *p2_lockh, int p2_lock_mode,
843 struct ldlm_res_id *c1_res_id,
844 struct lustre_handle *c1_lockh, int c1_lock_mode,
845 struct ldlm_res_id *c2_res_id,
846 struct lustre_handle *c2_lockh, int c2_lock_mode)
848 struct ldlm_res_id *res_id[5] = { p1_res_id, p2_res_id,
849 c1_res_id, c2_res_id };
850 struct lustre_handle *dlm_handles[5] = { p1_lockh, p2_lockh,
851 c1_lockh, c2_lockh };
852 int lock_modes[5] = { p1_lock_mode, p2_lock_mode,
853 c1_lock_mode, c2_lock_mode };
854 int rc, i, j, sorted, flags;
857 CDEBUG(D_DLMTRACE, "locks before: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
858 res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
861 /* simple insertion sort - we have at most 4 elements */
862 for (i = 1; i < 4; i++) {
/* Slot [4] is the scratch element being inserted. */
864 dlm_handles[4] = dlm_handles[i];
865 res_id[4] = res_id[i];
866 lock_modes[4] = lock_modes[i];
870 if (res_gt(res_id[j], res_id[4])) {
871 dlm_handles[j + 1] = dlm_handles[j];
872 res_id[j + 1] = res_id[j];
873 lock_modes[j + 1] = lock_modes[j];
878 } while (j >= 0 && !sorted);
880 dlm_handles[j + 1] = dlm_handles[4];
881 res_id[j + 1] = res_id[4];
882 lock_modes[j + 1] = lock_modes[4];
885 CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
886 res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
889 /* XXX we could send ASTs on all these locks first before blocking? */
890 for (i = 0; i < 4; i++) {
/* A zero resource name means "no lock wanted for this slot". */
892 if (res_id[i]->name[0] == 0)
/* Duplicate of the previous (sorted) resource: share the lock. */
895 memcmp(res_id[i], res_id[i-1], sizeof(*res_id[i])) == 0) {
896 memcpy(dlm_handles[i], dlm_handles[i-1],
897 sizeof(*(dlm_handles[i])));
898 ldlm_lock_addref(dlm_handles[i], lock_modes[i]);
900 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
901 *res_id[i], LDLM_PLAIN, NULL,
902 lock_modes[i], &flags,
904 ldlm_completion_ast, NULL, NULL,
905 NULL, 0, NULL, dlm_handles[i]);
907 GOTO(out_err, rc = -EIO);
908 ldlm_lock_dump_handle(D_OTHER, dlm_handles[i]);
/* out_err: unwind the locks acquired so far. */
915 ldlm_lock_decref(dlm_handles[i], lock_modes[i]);
920 /* In the unlikely case that the child changed while we were waiting
921 * on the lock, we need to drop the lock on the old child and either:
922 * - if the child has a lower resource name, then we have to also
923 * drop the parent lock and regain the locks in the right order
924 * - in the rename case, if the child has a lower resource name than one of
925 * the other parent/child resources (maxres) we also need to reget the locks
926 * - if the child has a higher resource name (this is the common case)
927 * we can just get the lock on the new child (still in lock order)
929 * Returns 0 if the child did not change or if it changed but could be locked.
930 * Returns 1 if the child changed and we need to re-lock (no locks held).
931 * Returns -ve error with a valid dchild (no locks held). */
/* NOTE(review): this extraction is missing lines (braces, 'int flags',
 * dput of the old dchild, the cleanup label itself), so the cleanup_phase
 * transitions visible below are partial. */
932 static int mds_verify_child(struct obd_device *obd,
933 struct ldlm_res_id *parent_res_id,
934 struct lustre_handle *parent_lockh,
935 struct dentry *dparent, int parent_mode,
936 struct ldlm_res_id *child_res_id,
937 struct lustre_handle *child_lockh,
938 struct dentry **dchildp, int child_mode,
939 const char *name, int namelen,
940 struct ldlm_res_id *maxres)
942 struct dentry *vchild, *dchild = *dchildp;
943 int rc = 0, cleanup_phase = 2; /* parent, child locks */
/* Re-lookup the name now that the locks are held. */
946 vchild = ll_lookup_one_len(name, dparent, namelen - 1);
948 GOTO(cleanup, rc = PTR_ERR(vchild));
/* Common case: the child is unchanged (same ino/generation, or still
 * nonexistent) -- nothing to do. */
950 if (likely((vchild->d_inode == NULL && child_res_id->name[0] == 0) ||
951 (vchild->d_inode != NULL &&
952 child_res_id->name[0] == vchild->d_inode->i_ino &&
953 child_res_id->name[1] == vchild->d_inode->i_generation))) {
961 CDEBUG(D_DLMTRACE, "child inode changed: %p != %p (%lu != "LPU64")\n",
962 vchild->d_inode, dchild ? dchild->d_inode : 0,
963 vchild->d_inode ? vchild->d_inode->i_ino : 0,
964 child_res_id->name[0]);
965 if (child_res_id->name[0] != 0)
966 ldlm_lock_decref(child_lockh, child_mode);
970 cleanup_phase = 1; /* parent lock only */
/* Adopt the freshly looked-up dentry as the child from here on. */
971 *dchildp = dchild = vchild;
973 if (dchild->d_inode) {
975 child_res_id->name[0] = dchild->d_inode->i_ino;
976 child_res_id->name[1] = dchild->d_inode->i_generation;
/* New child sorts below the parent or maxres: caller must redo the
 * whole ordered-lock sequence (return 1). */
978 if (res_gt(parent_res_id, child_res_id) ||
979 res_gt(maxres, child_res_id)) {
980 CDEBUG(D_DLMTRACE, "relock "LPU64"<("LPU64"|"LPU64")\n",
981 child_res_id->name[0], parent_res_id->name[0],
983 GOTO(cleanup, rc = 1);
986 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
987 *child_res_id, LDLM_PLAIN, NULL,
988 child_mode, &flags, mds_blocking_ast,
989 ldlm_completion_ast, NULL, NULL, NULL, 0,
992 GOTO(cleanup, rc = -EIO);
/* Child disappeared: clear the resource id so callers see "no child". */
994 memset(child_res_id, 0, sizeof(*child_res_id));
1000 switch(cleanup_phase) {
1002 if (child_res_id->name[0] != 0)
1003 ldlm_lock_decref(child_lockh, child_mode);
1005 ldlm_lock_decref(parent_lockh, parent_mode);
/* Look up a parent by fid and a child by name, taking DLM locks on both in
 * resource order, and re-verifying the child after locking via
 * mds_verify_child (looping if it returns 1).  On success *dparentp and
 * *dchildp are set and both locks are held; the child lock is only held
 * when the child exists.  NOTE(review): lines are missing from this
 * extraction ('struct ll_fid *fid' parameter line, braces, ENTRY/RETURN,
 * the retry loop around mds_verify_child, iput of the grabbed inode), so
 * the visible cleanup ladder is partial. */
1011 int mds_get_parent_child_locked(struct obd_device *obd, struct mds_obd *mds,
1013 struct lustre_handle *parent_lockh,
1014 struct dentry **dparentp, int parent_mode,
1015 char *name, int namelen,
1016 struct lustre_handle *child_lockh,
1017 struct dentry **dchildp, int child_mode)
1019 struct ldlm_res_id child_res_id = { .name = {0} };
1020 struct ldlm_res_id parent_res_id = { .name = {0} };
1021 struct inode *inode;
1022 int rc = 0, cleanup_phase = 0;
1025 /* Step 1: Lookup parent */
1026 *dparentp = mds_fid2dentry(mds, fid, NULL);
1027 if (IS_ERR(*dparentp)) {
1028 rc = PTR_ERR(*dparentp);
1033 CDEBUG(D_INODE, "parent ino %lu, name %s\n",
1034 (*dparentp)->d_inode->i_ino, name);
/* DLM resource name for an MDS inode is {ino, generation}. */
1036 parent_res_id.name[0] = (*dparentp)->d_inode->i_ino;
1037 parent_res_id.name[1] = (*dparentp)->d_inode->i_generation;
1039 cleanup_phase = 1; /* parent dentry */
1041 /* Step 2: Lookup child (without DLM lock, to get resource name) */
1042 *dchildp = ll_lookup_one_len(name, *dparentp, namelen - 1);
1043 if (IS_ERR(*dchildp)) {
1044 rc = PTR_ERR(*dchildp);
1045 CDEBUG(D_INODE, "child lookup error %d\n", rc);
1049 cleanup_phase = 2; /* child dentry */
1050 inode = (*dchildp)->d_inode;
1051 if (inode != NULL) {
1052 if (is_bad_inode(inode)) {
1053 CERROR("bad inode returned %lu/%u\n",
1054 inode->i_ino, inode->i_generation);
1055 GOTO(cleanup, rc = -ENOENT);
/* Hold a reference across the lock wait (dropped later, on a
 * missing line of this extraction). */
1057 inode = igrab(inode);
1062 child_res_id.name[0] = inode->i_ino;
1063 child_res_id.name[1] = inode->i_generation;
1067 cleanup_phase = 2; /* child dentry */
1069 /* Step 3: Lock parent and child in resource order. If child doesn't
1070 * exist, we still have to lock the parent and re-lookup. */
1071 rc = enqueue_ordered_locks(obd,&parent_res_id,parent_lockh,parent_mode,
1072 &child_res_id, child_lockh, child_mode);
1076 if (!(*dchildp)->d_inode)
1077 cleanup_phase = 3; /* parent lock */
1079 cleanup_phase = 4; /* child lock */
1081 /* Step 4: Re-lookup child to verify it hasn't changed since locking */
1082 rc = mds_verify_child(obd, &parent_res_id, parent_lockh, *dparentp,
1083 parent_mode, &child_res_id, child_lockh, dchildp,
1084 child_mode, name, namelen, &parent_res_id);
1094 switch (cleanup_phase) {
1096 ldlm_lock_decref(child_lockh, child_mode);
1098 ldlm_lock_decref(parent_lockh, parent_mode);
/* Generic replay handler for resent requests whose reply carries no body
 * beyond transno/status: just restore from the client's last_rcvd slot.
 * NOTE(review): braces are on lines missing from this extraction. */
1109 void mds_reconstruct_generic(struct ptlrpc_request *req)
1111 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1113 mds_req_from_mcd(req, med->med_mcd);
1116 /* If we are unlinking an open file/dir (i.e. creating an orphan) then
1117 * we instead link the inode into the PENDING directory until it is
1118 * finally released. We can't simply call mds_reint_rename() or some
1119 * part thereof, because we don't have the inode to check for link
1120 * count/open status until after it is locked.
1122 * For lock ordering, caller must get child->i_sem first, then pending->i_sem
1123 * before starting journal transaction.
1125 * returns 1 on success
1126 * returns 0 if we lost a race and didn't make a new link
1127 * returns negative on error
1129 static int mds_orphan_add_link(struct mds_update_record *rec,
1130 struct obd_device *obd, struct dentry *dentry)
1132 struct mds_obd *mds = &obd->u.mds;
1133 struct inode *pending_dir = mds->mds_pending_dir->d_inode;
1134 struct inode *inode = dentry->d_inode;
1135 struct dentry *pending_child;
1136 char fidname[LL_FID_NAMELEN];
1137 int fidlen = 0, rc, mode;
1140 LASSERT(inode != NULL);
1141 LASSERT(!mds_inode_is_orphan(inode));
/* Sanity: both semaphores must already be held by the caller (a
 * successful down_trylock would mean they were NOT held). */
1142 #ifndef HAVE_I_ALLOC_SEM
1143 LASSERT(down_trylock(&inode->i_sem) != 0);
1145 LASSERT(down_trylock(&pending_dir->i_sem) != 0);
/* PENDING entry name is the "ino:gen" fid string. */
1147 fidlen = ll_fid2str(fidname, inode->i_ino, inode->i_generation);
1149 CDEBUG(D_INODE, "pending destroy of %dx open %d linked %s %s = %s\n",
1150 mds_orphan_open_count(inode), inode->i_nlink,
1151 S_ISDIR(inode->i_mode) ? "dir" :
1152 S_ISREG(inode->i_mode) ? "file" : "other",rec->ur_name,fidname);
/* Not actually an orphan-in-the-making: nobody has it open, or links
 * remain.  NOTE(review): the return on this branch is on a missing
 * line of this extraction. */
1154 if (mds_orphan_open_count(inode) == 0 || inode->i_nlink != 0)
1157 pending_child = lookup_one_len(fidname, mds->mds_pending_dir, fidlen);
1158 if (IS_ERR(pending_child))
1159 RETURN(PTR_ERR(pending_child));
1161 if (pending_child->d_inode != NULL) {
1162 CERROR("re-destroying orphan file %s?\n", rec->ur_name);
1163 LASSERT(pending_child->d_inode == inode);
1164 GOTO(out_dput, rc = 0);
1167 /* link() is semanticaly-wrong for S_IFDIR, so we set S_IFREG
1168 * for linking and return real mode back then -bzzz */
1169 mode = inode->i_mode;
1170 inode->i_mode = S_IFREG;
1171 rc = vfs_link(dentry, pending_dir, pending_child);
1173 CERROR("error linking orphan %s to PENDING: rc = %d\n",
1176 mds_inode_set_orphan(inode);
1178 /* return mode and correct i_nlink if inode is directory */
1179 inode->i_mode = mode;
1180 LASSERTF(inode->i_nlink == 1, "%s nlink == %d\n",
1181 S_ISDIR(mode) ? "dir" : S_ISREG(mode) ? "file" : "other",
/* A directory gains a ".." back-reference from PENDING's perspective;
 * adjust the link counts that the S_IFREG vfs_link() call did not. */
1183 if (S_ISDIR(mode)) {
1185 pending_dir->i_nlink++;
1186 mark_inode_dirty(inode);
1187 mark_inode_dirty(pending_dir);
1190 GOTO(out_dput, rc = 1);
1192 l_dput(pending_child);
/*
 * mds_reint_unlink(): handle a client unlink()/rmdir() reintegration request.
 *
 * Locking: PW on the parent directory, EX on the child, plus an extra
 * "inode reuse" lock on the child inode to serialize against re-creation
 * (bug 2029).  If the last link is removed while the file is still open
 * somewhere, the inode is kept as an orphan via mds_orphan_add_link();
 * otherwise the striping EA is packed into the reply so the client can
 * destroy the OST objects.  All exits funnel through the cleanup_phase
 * switch, which releases exactly what was acquired.
 *
 * @rec:    unpacked update record (ur_fid1 = parent, ur_name = child name,
 *          ur_mode tells us whether the client called rmdir() or unlink())
 * @offset: reply-buffer offset of the mds_body (0, or 2 when called via
 *          an intent, see the LASSERT below)
 * @req:    incoming ptlrpc request; rq_status is set to the result
 * @lh:     unused here (kept for the common mds_reinter signature)
 */
1196 static int mds_reint_unlink(struct mds_update_record *rec, int offset,
1197 struct ptlrpc_request *req,
1198 struct lustre_handle *lh)
1200 struct dentry *dparent = NULL, *dchild;
1201 struct mds_obd *mds = mds_req2mds(req);
1202 struct obd_device *obd = req->rq_export->exp_obd;
1203 struct mds_body *body = NULL;
1204 struct inode *child_inode = NULL;
1205 struct lustre_handle parent_lockh, child_lockh, child_reuse_lockh;
1206 void *handle = NULL;
1207 int rc = 0, cleanup_phase = 0;
1210 LASSERT(offset == 0 || offset == 2);
1212 DEBUG_REQ(D_INODE, req, "parent ino "LPU64"/%u, child %s",
1213 rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name);
/* Resent request: reconstruct the saved reply instead of re-executing. */
1215 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1217 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNLINK))
1218 GOTO(cleanup, rc = -ENOENT);
/* Take the parent (PW) and child (EX) DLM locks and look both up. */
1220 rc = mds_get_parent_child_locked(obd, mds, rec->ur_fid1,
1221 &parent_lockh, &dparent, LCK_PW,
1222 rec->ur_name, rec->ur_namelen,
1223 &child_lockh, &dchild, LCK_EX);
1227 cleanup_phase = 1; /* dchild, dparent, locks */
1230 child_inode = dchild->d_inode;
1231 if (child_inode == NULL) {
1232 CDEBUG(D_INODE, "child doesn't exist (dir %lu, name %s)\n",
1233 dparent->d_inode->i_ino, rec->ur_name);
1234 GOTO(cleanup, rc = -ENOENT);
1237 cleanup_phase = 2; /* dchild has a lock */
1239 /* Step 4: Get a lock on the ino to sync with creation WRT inode
1240 * reuse (see bug 2029). */
1241 rc = mds_lock_new_child(obd, child_inode, &child_reuse_lockh);
1245 cleanup_phase = 3; /* child inum lock */
1247 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_UNLINK_WRITE, dparent->d_inode->i_sb);
1249 /* ldlm_reply in buf[0] if called via intent */
1253 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
1254 LASSERT(body != NULL);
1256 /* child orphan sem protects orphan_dec_test && is_orphan race */
1257 MDS_DOWN_READ_ORPHAN_SEM(child_inode);
1258 cleanup_phase = 4; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
1260 /* If this is potentially the last reference to this inode, get the
1261 * OBD EA data first so the client can destroy OST objects. We
1262 * only do the object removal later if no open files/links remain. */
1263 if ((S_ISDIR(child_inode->i_mode) && child_inode->i_nlink == 2) ||
1264 child_inode->i_nlink == 1) {
1265 if (mds_orphan_open_count(child_inode) > 0) {
1266 /* need to lock pending_dir before transaction */
1267 down(&mds->mds_pending_dir->d_inode->i_sem);
1268 cleanup_phase = 5; /* up(&pending_dir->i_sem) */
1269 } else if (S_ISREG(child_inode->i_mode)) {
1270 mds_pack_inode2fid(&body->fid1, child_inode);
1271 mds_pack_inode2body(body, child_inode);
1272 mds_pack_md(obd, req->rq_repmsg, offset + 1, body,
1273 child_inode, MDS_PACK_MD_LOCK);
1277 /* We have to do these checks ourselves, in case we are making an
1278 * orphan. The client tells us whether rmdir() or unlink() was called,
1279 * so we need to return appropriate errors (bug 72). */
1280 if ((rec->ur_mode & S_IFMT) == S_IFDIR) {
1281 if (!S_ISDIR(child_inode->i_mode))
1282 GOTO(cleanup, rc = -ENOTDIR);
1284 if (S_ISDIR(child_inode->i_mode))
1285 GOTO(cleanup, rc = -EISDIR);
1288 /* Step 4: Do the unlink: we already verified ur_mode above (bug 72) */
1289 switch (child_inode->i_mode & S_IFMT) {
1291 /* Drop any lingering child directories before we start our
1292 * transaction, to avoid doing multiple inode dirty/delete
1293 * in our compound transaction (bug 1321). */
1294 shrink_dcache_parent(dchild);
1295 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_RMDIR,
1298 GOTO(cleanup, rc = PTR_ERR(handle));
1299 rc = vfs_rmdir(dparent->d_inode, dchild);
/* Regular file: size the journal transaction by the stripe count so the
 * llog unlink records fit in the same transaction. */
1302 struct lov_mds_md *lmm = lustre_msg_buf(req->rq_repmsg,
1304 handle = fsfilt_start_log(obd, dparent->d_inode,
1305 FSFILT_OP_UNLINK, NULL,
1306 le32_to_cpu(lmm->lmm_stripe_count));
1308 GOTO(cleanup, rc = PTR_ERR(handle));
1309 rc = vfs_unlink(dparent->d_inode, dchild);
1317 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_UNLINK,
1320 GOTO(cleanup, rc = PTR_ERR(handle));
1321 rc = vfs_unlink(dparent->d_inode, dchild);
1324 CERROR("bad file type %o unlinking %s\n", rec->ur_mode,
1327 GOTO(cleanup, rc = -EINVAL);
/* Last link gone: either park the inode as an orphan (still open
 * somewhere) or tell the client it may destroy the OST objects. */
1330 if (rc == 0 && child_inode->i_nlink == 0) {
1331 if (mds_orphan_open_count(child_inode) > 0)
1332 rc = mds_orphan_add_link(rec, obd, dchild);
1335 GOTO(cleanup, rc = 0);
1337 if (!S_ISREG(child_inode->i_mode))
1340 if (!(body->valid & OBD_MD_FLEASIZE)) {
1341 body->valid |=(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1342 OBD_MD_FLATIME | OBD_MD_FLMTIME);
1343 } else if (mds_log_op_unlink(obd, child_inode,
1344 lustre_msg_buf(req->rq_repmsg, offset + 1, 0),
1345 req->rq_repmsg->buflens[offset + 1],
1346 lustre_msg_buf(req->rq_repmsg, offset + 2, 0),
1347 req->rq_repmsg->buflens[offset+2]) > 0){
1348 body->valid |= OBD_MD_FLCOOKIE;
/* Bring the parent directory's times up to the client-supplied time. */
1358 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
1359 LTIME_S(iattr.ia_mtime) = rec->ur_time;
1360 LTIME_S(iattr.ia_ctime) = rec->ur_time;
1362 err = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
1364 CERROR("error on parent setattr: rc = %d\n", err);
/* Commit the transno and unwind; each case falls through to the next. */
1367 rc = mds_finish_transno(mds, dparent ? dparent->d_inode : NULL,
1368 handle, req, rc, 0);
1370 (void)obd_set_info(mds->mds_osc_exp, strlen("unlinked"),
1371 "unlinked", 0, NULL);
1372 switch(cleanup_phase) {
1373 case 5: /* pending_dir semaphore */
1374 up(&mds->mds_pending_dir->d_inode->i_sem);
1375 case 4: /* child inode semaphore */
1376 MDS_UP_READ_ORPHAN_SEM(child_inode);
1377 case 3: /* child ino-reuse lock */
1378 if (rc && body != NULL) {
1379 // Don't unlink the OST objects if the MDS unlink failed
1383 ldlm_lock_decref(&child_reuse_lockh, LCK_EX);
1385 ptlrpc_save_lock(req, &child_reuse_lockh, LCK_EX);
1386 case 2: /* child lock */
1387 ldlm_lock_decref(&child_lockh, LCK_EX);
1388 case 1: /* child and parent dentry, parent lock */
1390 ldlm_lock_decref(&parent_lockh, LCK_PW);
1392 ptlrpc_save_lock(req, &parent_lockh, LCK_PW);
1399 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1402 req->rq_status = rc;
/*
 * mds_reint_link(): handle a client link() reintegration request.
 *
 * Looks up the existing source inode (ur_fid1) and the target directory
 * (ur_fid2) by FID, takes EX DLM locks on both resources in canonical
 * order via enqueue_ordered_locks() to avoid deadlock, verifies the new
 * name does not already exist in the target directory, then performs
 * vfs_link() inside an fsfilt transaction.  Cleanup is phase-driven with
 * fallthrough, mirroring the other reint handlers in this file.
 *
 * @rec:    unpacked update record (ur_fid1 = source, ur_fid2 = target dir,
 *          ur_name = new name)
 * @offset: reply-buffer offset; must be 0 (see LASSERT)
 * @req:    incoming ptlrpc request; rq_status is set to the result
 * @lh:     unused here (kept for the common mds_reinter signature)
 */
1406 static int mds_reint_link(struct mds_update_record *rec, int offset,
1407 struct ptlrpc_request *req,
1408 struct lustre_handle *lh)
1410 struct obd_device *obd = req->rq_export->exp_obd;
1411 struct dentry *de_src = NULL;
1412 struct dentry *de_tgt_dir = NULL;
1413 struct dentry *dchild = NULL;
1414 struct mds_obd *mds = mds_req2mds(req);
1415 struct lustre_handle *handle = NULL, tgt_dir_lockh, src_lockh;
1416 struct ldlm_res_id src_res_id = { .name = {0} };
1417 struct ldlm_res_id tgt_dir_res_id = { .name = {0} };
1418 int rc = 0, cleanup_phase = 0;
1421 LASSERT(offset == 0);
1423 DEBUG_REQ(D_INODE, req, "original "LPU64"/%u to "LPU64"/%u %s",
1424 rec->ur_fid1->id, rec->ur_fid1->generation,
1425 rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_name);
/* Resent request: reconstruct the saved reply instead of re-executing. */
1427 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1429 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_LINK))
1430 GOTO(cleanup, rc = -ENOENT);
1432 /* Step 1: Lookup the source inode and target directory by FID */
1433 de_src = mds_fid2dentry(mds, rec->ur_fid1, NULL);
1435 GOTO(cleanup, rc = PTR_ERR(de_src));
1437 cleanup_phase = 1; /* source dentry */
1439 de_tgt_dir = mds_fid2dentry(mds, rec->ur_fid2, NULL);
1440 if (IS_ERR(de_tgt_dir)) {
1441 rc = PTR_ERR(de_tgt_dir);
1446 cleanup_phase = 2; /* target directory dentry */
1448 CDEBUG(D_INODE, "linking %.*s/%s to inode %lu\n",
1449 de_tgt_dir->d_name.len, de_tgt_dir->d_name.name, rec->ur_name,
1450 de_src->d_inode->i_ino);
1452 /* Step 2: Take the two locks */
/* DLM resource id is {inode number, generation}; enqueue_ordered_locks()
 * sorts the two resources so concurrent links can't deadlock. */
1453 src_res_id.name[0] = de_src->d_inode->i_ino;
1454 src_res_id.name[1] = de_src->d_inode->i_generation;
1455 tgt_dir_res_id.name[0] = de_tgt_dir->d_inode->i_ino;
1456 tgt_dir_res_id.name[1] = de_tgt_dir->d_inode->i_generation;
1458 rc = enqueue_ordered_locks(obd, &src_res_id, &src_lockh, LCK_EX,
1459 &tgt_dir_res_id, &tgt_dir_lockh, LCK_EX);
1463 cleanup_phase = 3; /* locks */
1465 /* Step 3: Lookup the child */
/* ur_namelen includes the terminating NUL, hence the -1. */
1466 dchild = ll_lookup_one_len(rec->ur_name, de_tgt_dir, rec->ur_namelen-1);
1467 if (IS_ERR(dchild)) {
1468 rc = PTR_ERR(dchild);
1469 if (rc != -EPERM && rc != -EACCES)
1470 CERROR("child lookup error %d\n", rc);
1474 cleanup_phase = 4; /* child dentry */
1476 if (dchild->d_inode) {
1477 CDEBUG(D_INODE, "child exists (dir %lu, name %s)\n",
1478 de_tgt_dir->d_inode->i_ino, rec->ur_name);
1483 /* Step 4: Do it. */
1484 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_LINK_WRITE, de_src->d_inode->i_sb);
1486 handle = fsfilt_start(obd, de_tgt_dir->d_inode, FSFILT_OP_LINK, NULL);
1487 if (IS_ERR(handle)) {
1488 rc = PTR_ERR(handle);
1492 rc = vfs_link(de_src, de_tgt_dir->d_inode, dchild);
1493 if (rc && rc != -EPERM && rc != -EACCES)
1494 CERROR("vfs_link error %d\n", rc);
/* Commit the transno and unwind; each case falls through to the next. */
1496 rc = mds_finish_transno(mds, de_tgt_dir ? de_tgt_dir->d_inode : NULL,
1497 handle, req, rc, 0);
1500 switch (cleanup_phase) {
1501 case 4: /* child dentry */
1505 ldlm_lock_decref(&src_lockh, LCK_EX);
1506 ldlm_lock_decref(&tgt_dir_lockh, LCK_EX);
1508 ptlrpc_save_lock(req, &src_lockh, LCK_EX);
1509 ptlrpc_save_lock(req, &tgt_dir_lockh, LCK_EX);
1511 case 2: /* target dentry */
1513 case 1: /* source dentry */
1518 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1521 req->rq_status = rc;
1525 /* The idea here is that we need to get four locks in the end:
1526 * one on each parent directory, one on each child. We need to take
1527 * these locks in some kind of order (to avoid deadlocks), and the order
1528 * I selected is "increasing resource number" order. We need to look up
1529 * the children, however, before we know what the resource number(s) are.
1530 * Thus the following plan:
1532 * 1,2. Look up the parents
1533 * 3,4. Look up the children
1534 * 5. Take locks on the parents and children, in order
1535 * 6. Verify that the children haven't changed since they were looked up
1537 * If there was a race and the children changed since they were first looked
1538 * up, it is possible that mds_verify_child() will be able to just grab the
1539 * lock on the new child resource (if it has a higher resource than any other)
1540 * but we need to compare against not only its parent, but also against the
1541 * parent and child of the "other half" of the rename, hence maxres_{src,tgt}.
1543 * We need the fancy igrab() on the child inodes because we aren't holding a
1544 * lock on the parent after the lookup is done, so dentry->d_inode may change
1545 * at any time, and igrab() itself doesn't like getting passed a NULL argument.
/*
 * Look up both parent directories and both children of a rename and take
 * all four DLM locks in increasing-resource order (see the block comment
 * above).  On success the four dentries and dlm_handles[0..3] are held by
 * the caller; on error everything acquired so far is released via the
 * phase-driven cleanup switch.
 *
 * Fix: the "new child lookup error" CERROR below printed old_name/old_len
 * (copy-pasted from Step 3), mislabelling failures of the *target* name
 * lookup; it now prints new_name/new_len.
 */
1547 static int mds_get_parents_children_locked(struct obd_device *obd,
1548 struct mds_obd *mds,
1549 struct ll_fid *p1_fid,
1550 struct dentry **de_srcdirp,
1551 struct ll_fid *p2_fid,
1552 struct dentry **de_tgtdirp,
1554 const char *old_name, int old_len,
1555 struct dentry **de_oldp,
1556 const char *new_name, int new_len,
1557 struct dentry **de_newp,
1558 struct lustre_handle *dlm_handles,
1561 struct ldlm_res_id p1_res_id = { .name = {0} };
1562 struct ldlm_res_id p2_res_id = { .name = {0} };
1563 struct ldlm_res_id c1_res_id = { .name = {0} };
1564 struct ldlm_res_id c2_res_id = { .name = {0} };
1565 struct ldlm_res_id *maxres_src, *maxres_tgt;
1566 struct inode *inode;
1567 int rc = 0, cleanup_phase = 0;
1570 /* Step 1: Lookup the source directory */
1571 *de_srcdirp = mds_fid2dentry(mds, p1_fid, NULL);
1572 if (IS_ERR(*de_srcdirp))
1573 GOTO(cleanup, rc = PTR_ERR(*de_srcdirp));
1575 cleanup_phase = 1; /* source directory dentry */
1577 p1_res_id.name[0] = (*de_srcdirp)->d_inode->i_ino;
1578 p1_res_id.name[1] = (*de_srcdirp)->d_inode->i_generation;
1580 /* Step 2: Lookup the target directory */
/* Same-FID rename-within-a-directory: just take another reference. */
1581 if (memcmp(p1_fid, p2_fid, sizeof(*p1_fid)) == 0) {
1582 *de_tgtdirp = dget(*de_srcdirp);
1584 *de_tgtdirp = mds_fid2dentry(mds, p2_fid, NULL);
1585 if (IS_ERR(*de_tgtdirp)) {
1586 rc = PTR_ERR(*de_tgtdirp);
1592 cleanup_phase = 2; /* target directory dentry */
1594 p2_res_id.name[0] = (*de_tgtdirp)->d_inode->i_ino;
1595 p2_res_id.name[1] = (*de_tgtdirp)->d_inode->i_generation;
1597 /* Step 3: Lookup the source child entry */
1598 *de_oldp = ll_lookup_one_len(old_name, *de_srcdirp, old_len - 1);
1599 if (IS_ERR(*de_oldp)) {
1600 rc = PTR_ERR(*de_oldp);
1601 CERROR("old child lookup error (%.*s): %d\n",
1602 old_len - 1, old_name, rc);
1606 cleanup_phase = 3; /* original name dentry */
/* Pin the child inode (see header comment: the parent is unlocked after
 * lookup, so d_inode may change under us and igrab() rejects NULL). */
1608 inode = (*de_oldp)->d_inode;
1610 inode = igrab(inode);
1612 GOTO(cleanup, rc = -ENOENT);
1614 c1_res_id.name[0] = inode->i_ino;
1615 c1_res_id.name[1] = inode->i_generation;
1619 /* Step 4: Lookup the target child entry */
1620 *de_newp = ll_lookup_one_len(new_name, *de_tgtdirp, new_len - 1);
1621 if (IS_ERR(*de_newp)) {
1622 rc = PTR_ERR(*de_newp);
1623 CERROR("new child lookup error (%.*s): %d\n",
1624 new_len - 1, new_name, rc);
1628 cleanup_phase = 4; /* target dentry */
1630 inode = (*de_newp)->d_inode;
1632 inode = igrab(inode);
1636 c2_res_id.name[0] = inode->i_ino;
1637 c2_res_id.name[1] = inode->i_generation;
1641 /* Step 5: Take locks on the parents and child(ren) */
1642 maxres_src = &p1_res_id;
1643 maxres_tgt = &p2_res_id;
1644 cleanup_phase = 4; /* target dentry */
1646 if (c1_res_id.name[0] != 0 && res_gt(&c1_res_id, &p1_res_id))
1647 maxres_src = &c1_res_id;
1648 if (c2_res_id.name[0] != 0 && res_gt(&c2_res_id, &p2_res_id))
1649 maxres_tgt = &c2_res_id;
1651 rc = enqueue_4ordered_locks(obd, &p1_res_id,&dlm_handles[0],parent_mode,
1652 &p2_res_id, &dlm_handles[1], parent_mode,
1653 &c1_res_id, &dlm_handles[2], child_mode,
1654 &c2_res_id, &dlm_handles[3], child_mode);
1658 cleanup_phase = 6; /* parent and child(ren) locks */
1660 /* Step 6a: Re-lookup source child to verify it hasn't changed */
1661 rc = mds_verify_child(obd, &p1_res_id, &dlm_handles[0], *de_srcdirp,
1662 parent_mode, &c1_res_id, &dlm_handles[2], de_oldp,
1663 child_mode, old_name, old_len, maxres_tgt);
1665 if (c2_res_id.name[0] != 0)
1666 ldlm_lock_decref(&dlm_handles[3], child_mode);
1667 ldlm_lock_decref(&dlm_handles[1], parent_mode);
1674 if ((*de_oldp)->d_inode == NULL)
1675 GOTO(cleanup, rc = -ENOENT);
1677 /* Step 6b: Re-lookup target child to verify it hasn't changed */
1678 rc = mds_verify_child(obd, &p2_res_id, &dlm_handles[1], *de_tgtdirp,
1679 parent_mode, &c2_res_id, &dlm_handles[3], de_newp,
1680 child_mode, new_name, new_len, maxres_src);
1682 ldlm_lock_decref(&dlm_handles[2], child_mode);
1683 ldlm_lock_decref(&dlm_handles[0], parent_mode);
/* Error unwind: release in reverse order of acquisition, falling through. */
1693 switch (cleanup_phase) {
1694 case 6: /* child lock(s) */
1695 if (c2_res_id.name[0] != 0)
1696 ldlm_lock_decref(&dlm_handles[3], child_mode);
1697 if (c1_res_id.name[0] != 0)
1698 ldlm_lock_decref(&dlm_handles[2], child_mode);
1699 case 5: /* parent locks */
1700 ldlm_lock_decref(&dlm_handles[1], parent_mode);
1701 ldlm_lock_decref(&dlm_handles[0], parent_mode);
1702 case 4: /* target dentry */
1704 case 3: /* source dentry */
1706 case 2: /* target directory dentry */
1707 l_dput(*de_tgtdirp);
1708 case 1: /* source directory dentry */
1709 l_dput(*de_srcdirp);
/*
 * mds_reint_rename(): handle a client rename() reintegration request.
 *
 * Uses mds_get_parents_children_locked() to look up both parents and both
 * children and take all four DLM locks (PW on parents, EX on children) in
 * deadlock-free order.  If the rename will drop the last link to an
 * existing target that is still open, the target is parked as an orphan;
 * if it is a regular file being destroyed, its striping EA is packed into
 * the reply so the client can clean up the OST objects.  The actual
 * vfs_rename() runs inside an fsfilt transaction sized by the target's
 * stripe count.  Cleanup is phase-driven with fallthrough.
 *
 * @rec:   unpacked update record (ur_fid1/ur_name = source dir/name,
 *         ur_fid2/ur_tgt = target dir/name)
 * @offset: reply-buffer offset; must be 0 (see LASSERT)
 * @req:   incoming ptlrpc request; rq_status is set to the result
 * @lockh: unused here (kept for the common mds_reinter signature)
 */
1716 static int mds_reint_rename(struct mds_update_record *rec, int offset,
1717 struct ptlrpc_request *req,
1718 struct lustre_handle *lockh)
1720 struct obd_device *obd = req->rq_export->exp_obd;
1721 struct dentry *de_srcdir = NULL;
1722 struct dentry *de_tgtdir = NULL;
1723 struct dentry *de_old = NULL;
1724 struct dentry *de_new = NULL;
1725 struct inode *old_inode = NULL, *new_inode = NULL;
1726 struct mds_obd *mds = mds_req2mds(req);
1727 struct lustre_handle dlm_handles[4];
1728 struct mds_body *body = NULL;
1729 struct lov_mds_md *lmm = NULL;
1730 int rc = 0, lock_count = 3, cleanup_phase = 0;
1731 void *handle = NULL;
1734 LASSERT(offset == 0);
1736 DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u %s to "LPU64"/%u %s",
1737 rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name,
1738 rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_tgt);
/* Resent request: reconstruct the saved reply instead of re-executing. */
1740 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1742 rc = mds_get_parents_children_locked(obd, mds, rec->ur_fid1, &de_srcdir,
1743 rec->ur_fid2, &de_tgtdir, LCK_PW,
1744 rec->ur_name, rec->ur_namelen,
1745 &de_old, rec->ur_tgt,
1746 rec->ur_tgtlen, &de_new,
1747 dlm_handles, LCK_EX);
1751 cleanup_phase = 1; /* parent(s), children, locks */
1753 old_inode = de_old->d_inode;
1754 new_inode = de_new->d_inode;
1756 if (new_inode != NULL)
1759 /* sanity check for src inode */
/* Renaming a directory over itself or its own parent is invalid. */
1760 if (old_inode->i_ino == de_srcdir->d_inode->i_ino ||
1761 old_inode->i_ino == de_tgtdir->d_inode->i_ino)
1762 GOTO(cleanup, rc = -EINVAL);
1764 if (new_inode == NULL)
1768 cleanup_phase = 2; /* iput(new_inode) when finished */
1770 /* sanity check for dest inode */
1771 if (new_inode->i_ino == de_srcdir->d_inode->i_ino ||
1772 new_inode->i_ino == de_tgtdir->d_inode->i_ino)
1773 GOTO(cleanup, rc = -EINVAL);
/* Renaming a file onto itself (hard links to the same inode): no-op. */
1775 if (old_inode == new_inode)
1776 GOTO(cleanup, rc = 0);
1778 /* if we are about to remove the target at first, pass the EA of
1779 * that inode to client to perform and cleanup on OST */
1780 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
1781 LASSERT(body != NULL);
1783 /* child orphan sem protects orphan_dec_test && is_orphan race */
1784 MDS_DOWN_READ_ORPHAN_SEM(new_inode);
1785 cleanup_phase = 3; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
1787 if ((S_ISDIR(new_inode->i_mode) && new_inode->i_nlink == 2) ||
1788 new_inode->i_nlink == 1) {
1789 if (mds_orphan_open_count(new_inode) > 0) {
1790 /* need to lock pending_dir before transaction */
1791 down(&mds->mds_pending_dir->d_inode->i_sem);
1792 cleanup_phase = 4; /* up(&pending_dir->i_sem) */
1793 } else if (S_ISREG(new_inode->i_mode)) {
1794 mds_pack_inode2fid(&body->fid1, new_inode);
1795 mds_pack_inode2body(body, new_inode);
1796 mds_pack_md(obd, req->rq_repmsg, 1, body, new_inode,
1802 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_RENAME_WRITE,
1803 de_srcdir->d_inode->i_sb);
1805 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1806 /* Check if we are moving old entry into its child. 2.6 does not
1807 check for this in vfs_rename() anymore */
1808 if (is_subdir(de_new, de_old))
1809 GOTO(cleanup, rc = -EINVAL);
/* Size the journal transaction by the target's stripe count so llog
 * unlink records for the displaced target fit in the same transaction. */
1812 lmm = lustre_msg_buf(req->rq_repmsg, 1, 0);
1813 handle = fsfilt_start_log(obd, de_tgtdir->d_inode, FSFILT_OP_RENAME,
1814 NULL, le32_to_cpu(lmm->lmm_stripe_count));
1817 GOTO(cleanup, rc = PTR_ERR(handle));
/* NOTE(review): d_fsdata is set to the request here — presumably consumed
 * by a lower-layer hook during vfs_rename(); confirm against mds_internal. */
1820 de_old->d_fsdata = req;
1821 de_new->d_fsdata = req;
1823 rc = vfs_rename(de_srcdir->d_inode, de_old, de_tgtdir->d_inode, de_new);
/* Target lost its last link: park it as an orphan if still open, else
 * let the client destroy its OST objects (EA/cookies in the reply). */
1826 if (rc == 0 && new_inode != NULL && new_inode->i_nlink == 0) {
1827 if (mds_orphan_open_count(new_inode) > 0)
1828 rc = mds_orphan_add_link(rec, obd, de_new);
1831 GOTO(cleanup, rc = 0);
1833 if (!S_ISREG(new_inode->i_mode))
1836 if (!(body->valid & OBD_MD_FLEASIZE)) {
1837 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1838 OBD_MD_FLATIME | OBD_MD_FLMTIME);
1839 } else if (mds_log_op_unlink(obd, new_inode,
1840 lustre_msg_buf(req->rq_repmsg,1,0),
1841 req->rq_repmsg->buflens[1],
1842 lustre_msg_buf(req->rq_repmsg,2,0),
1843 req->rq_repmsg->buflens[2]) > 0) {
1844 body->valid |= OBD_MD_FLCOOKIE;
/* Commit the transno and unwind; each case falls through to the next. */
1850 rc = mds_finish_transno(mds, de_tgtdir ? de_tgtdir->d_inode : NULL,
1851 handle, req, rc, 0);
1853 switch (cleanup_phase) {
1855 up(&mds->mds_pending_dir->d_inode->i_sem);
1857 MDS_UP_READ_ORPHAN_SEM(new_inode);
1862 if (lock_count == 4)
1863 ldlm_lock_decref(&(dlm_handles[3]), LCK_EX);
1864 ldlm_lock_decref(&(dlm_handles[2]), LCK_EX);
1865 ldlm_lock_decref(&(dlm_handles[1]), LCK_PW);
1866 ldlm_lock_decref(&(dlm_handles[0]), LCK_PW);
1868 if (lock_count == 4)
1869 ptlrpc_save_lock(req,&(dlm_handles[3]), LCK_EX);
1870 ptlrpc_save_lock(req, &(dlm_handles[2]), LCK_EX);
1871 ptlrpc_save_lock(req, &(dlm_handles[1]), LCK_PW);
1872 ptlrpc_save_lock(req, &(dlm_handles[0]), LCK_PW);
1881 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1884 req->rq_status = rc;
/* Common signature shared by all reintegration handlers in this file. */
1888 typedef int (*mds_reinter)(struct mds_update_record *, int offset,
1889 struct ptlrpc_request *, struct lustre_handle *);
/* Dispatch table: one handler per REINT_* opcode, indexed by opcode.
 * Uses the old GCC designated-initializer syntax ("[IDX] value", no '='). */
1891 static mds_reinter reinters[REINT_MAX + 1] = {
1892 [REINT_SETATTR] mds_reint_setattr,
1893 [REINT_CREATE] mds_reint_create,
1894 [REINT_LINK] mds_reint_link,
1895 [REINT_UNLINK] mds_reint_unlink,
1896 [REINT_RENAME] mds_reint_rename,
1897 [REINT_OPEN] mds_open
1900 int mds_reint_rec(struct mds_update_record *rec, int offset,
1901 struct ptlrpc_request *req, struct lustre_handle *lockh)
1903 struct obd_device *obd = req->rq_export->exp_obd;
1904 struct obd_run_ctxt saved;
1908 /* checked by unpacker */
1909 LASSERT(rec->ur_opcode <= REINT_MAX &&
1910 reinters[rec->ur_opcode] != NULL);
1912 push_ctxt(&saved, &obd->obd_ctxt, &rec->ur_uc);
1913 rc = reinters[rec->ur_opcode] (rec, offset, req, lockh);
1914 pop_ctxt(&saved, &obd->obd_ctxt, &rec->ur_uc);