1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/mds/mds_reint.c
5 * Lustre Metadata Server (mds) reintegration routines
7 * Copyright (C) 2002-2005 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
12 * This file is part of Lustre, http://www.lustre.org.
14 * Lustre is free software; you can redistribute it and/or
15 * modify it under the terms of version 2 of the GNU General Public
16 * License as published by the Free Software Foundation.
18 * Lustre is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with Lustre; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 # define EXPORT_SYMTAB
31 #define DEBUG_SUBSYSTEM S_MDS
34 #include <linux/obd_support.h>
35 #include <linux/obd_class.h>
36 #include <linux/obd.h>
37 #include <linux/lustre_lib.h>
38 #include <linux/lustre_idl.h>
39 #include <linux/lustre_mds.h>
40 #include <linux/lustre_dlm.h>
41 #include <linux/lustre_fsfilt.h>
43 #include "mds_internal.h"
/* Journal-commit callback for an MDS transaction: forwards the committed
 * transaction number to the generic obd commit notifier so clients can be
 * told their request is on stable storage.
 * NOTE(review): this extract is incomplete -- the parameter list is cut off
 * (an "int error" final argument is implied by the body) and the function
 * braces are missing from the visible text. */
45 void mds_commit_cb(struct obd_device *obd, __u64 transno, void *data,
        /* hand off to the common transno commit bookkeeping */
48         obd_transno_commit_cb(obd, transno, error);
/* Per-transaction payload handed to mds_cancel_cookies_cb(): carries the
 * striping metadata (lmm) plus a trailing array of llog cookies to cancel
 * once the setattr transaction commits.
 * NOTE(review): extract is incomplete -- size/length fields referenced by
 * the callback (mlcd_size, mlcd_cookielen, mlcd_eadatalen) and the closing
 * brace are missing from the visible text. */
51 struct mds_logcancel_data {
52         struct lov_mds_md *mlcd_lmm;
        /* old-style (pre-C99) flexible array member: cookies follow inline */
56         struct llog_cookie mlcd_cookies[0];
/* Commit callback that cancels the llog cookies recorded for a chown/chgrp
 * style setattr: notifies the generic commit path, unpacks the stripe MD,
 * then issues llog_cancel() for every cookie in the mlcd payload and frees
 * the payload.
 * NOTE(review): extract is incomplete -- error-branch conditionals (the
 * "if (rc < 0)" style guards implied by the indented CERROR calls) and the
 * closing braces are missing from the visible text. */
60 static void mds_cancel_cookies_cb(struct obd_device *obd, __u64 transno,
61                                   void *cb_data, int error)
63         struct mds_logcancel_data *mlcd = cb_data;
64         struct lov_stripe_md *lsm = NULL;
65         struct llog_ctxt *ctxt;
68         obd_transno_commit_cb(obd, transno, error);
70         CDEBUG(D_HA, "cancelling %d cookies\n",
71                (int)(mlcd->mlcd_cookielen / sizeof(*mlcd->mlcd_cookies)));
        /* rebuild the in-memory stripe MD from the packed lmm we stashed */
73         rc = obd_unpackmd(obd->u.mds.mds_osc_exp, &lsm, mlcd->mlcd_lmm,
74                           mlcd->mlcd_eadatalen);
76                 CERROR("bad LSM cancelling %d log cookies: rc %d\n",
77                        (int)(mlcd->mlcd_cookielen/sizeof(*mlcd->mlcd_cookies)),
80                 ///* XXX 0 normally, SENDNOW for debug */);
        /* context index is derived from the subsystem recorded in cookie 0 */
81                 ctxt = llog_get_context(obd,mlcd->mlcd_cookies[0].lgc_subsys+1);
82                 rc = llog_cancel(ctxt, lsm, mlcd->mlcd_cookielen /
83                                  sizeof(*mlcd->mlcd_cookies),
84                                  mlcd->mlcd_cookies, OBD_LLOG_FL_SENDNOW);
86                         CERROR("error cancelling %d log cookies: rc %d\n",
87                                (int)(mlcd->mlcd_cookielen /
88                                 sizeof(*mlcd->mlcd_cookies)), rc);
        /* payload was sized/allocated by mds_reint_setattr(); free it here */
91         OBD_FREE(mlcd, mlcd->mlcd_size);
94 /* Assumes caller has already pushed us into the kernel context. */
/* Record the outcome of a reintegration operation in the client's last_rcvd
 * slot: assigns (or replays) a transaction number, updates the per-client
 * mds_client_data record, registers mds_commit_cb on the journal handle and
 * commits.  Returns an error code; rc is the operation's own result that is
 * written into the record.
 * NOTE(review): extract is incomplete -- local declarations (transno, err,
 * log_pri), several conditionals and the closing braces are missing from
 * the visible text; code left byte-identical. */
95 int mds_finish_transno(struct mds_obd *mds, struct inode *inode, void *handle,
96                        struct ptlrpc_request *req, int rc, __u32 op_data)
98         struct mds_export_data *med = &req->rq_export->exp_mds_data;
99         struct mds_client_data *mcd = med->med_mcd;
100         struct obd_device *obd = req->rq_export->exp_obd;
107         /* if the export has already been failed, we have no last_rcvd slot */
108         if (req->rq_export->exp_failed) {
109                 CWARN("committing transaction for disconnected client %s\n",
110                       req->rq_export->exp_client_uuid.uuid);
119         if (handle == NULL) {
120                 /* if we're starting our own xaction, use our own inode */
121                 inode = mds->mds_rcvd_filp->f_dentry->d_inode;
122                 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
123                 if (IS_ERR(handle)) {
124                         CERROR("fsfilt_start: %ld\n", PTR_ERR(handle));
125                         RETURN(PTR_ERR(handle));
        /* replayed requests carry their original transno; new ones get 0 */
131         transno = req->rq_reqmsg->transno;
133                 LASSERT(transno == 0);
134         } else if (transno == 0) {
135                 spin_lock(&mds->mds_transno_lock);
136                 transno = ++mds->mds_last_transno;
137                 spin_unlock(&mds->mds_transno_lock);
        /* replay path: keep mds_last_transno monotonically non-decreasing */
139                 spin_lock(&mds->mds_transno_lock);
140                 if (transno > mds->mds_last_transno)
141                         mds->mds_last_transno = transno;
142                 spin_unlock(&mds->mds_transno_lock);
144         req->rq_repmsg->transno = req->rq_transno = transno;
        /* last_rcvd record is stored little-endian on disk */
145         mcd->mcd_last_transno = cpu_to_le64(transno);
146         mcd->mcd_last_xid = cpu_to_le64(req->rq_xid);
147         mcd->mcd_last_result = cpu_to_le32(rc);
148         mcd->mcd_last_data = cpu_to_le32(op_data);
        /* notify the client of commit once the journal flushes this transno */
150         fsfilt_add_journal_cb(req->rq_export->exp_obd, transno, handle,
151                               mds_commit_cb, NULL);
152         err = fsfilt_write_record(obd, mds->mds_rcvd_filp, mcd, sizeof(*mcd),
161         DEBUG_REQ(log_pri, req,
162                   "wrote trans #"LPU64" rc %d client %s at idx %u: err = %d",
163                   transno, rc, mcd->mcd_uuid, med->med_idx, err);
165         err = mds_lov_write_objids(obd);
171         CDEBUG(log_pri, "wrote objids: err = %d\n", err);
174         err = fsfilt_commit(obd, inode, handle, 0);
176                 CERROR("error committing transaction: %d\n", err);
184 /* this gives the same functionality as the code between
185  * sys_chmod and inode_setattr
186  * chown_common and inode_setattr
187  * utimes and inode_setattr
/* Normalize an incoming iattr the way the local VFS syscall paths would:
 * fill in missing timestamps, enforce immutable/append and permission
 * checks, and clear setuid/setgid on ownership change.
 * NOTE(review): extract is incomplete -- the RETURN/GOTO statements on most
 * error branches, the final ia_mode assignment target, and closing braces
 * are missing from the visible text; code left byte-identical. */
189 int mds_fix_attr(struct inode *inode, struct mds_update_record *rec)
191         time_t now = CURRENT_SECONDS;
192         struct iattr *attr = &rec->ur_iattr;
193         unsigned int ia_valid = attr->ia_valid;
197         /* only fix up attrs if the client VFS didn't already */
198         if (!(ia_valid & ATTR_RAW))
        /* any timestamp the client did not pin explicitly defaults to "now" */
201         if (!(ia_valid & ATTR_CTIME_SET))
202                 LTIME_S(attr->ia_ctime) = now;
203         if (!(ia_valid & ATTR_ATIME_SET))
204                 LTIME_S(attr->ia_atime) = now;
205         if (!(ia_valid & ATTR_MTIME_SET))
206                 LTIME_S(attr->ia_mtime) = now;
208         if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
        /* utimes(NULL)-style update needs write permission unless owner */
212         if ((ia_valid & (ATTR_MTIME|ATTR_ATIME)) == (ATTR_MTIME|ATTR_ATIME)) {
213                 if (rec->ur_fsuid != inode->i_uid &&
214                     (error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
218         if (ia_valid & ATTR_SIZE) {
219                 if ((error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
223         if (ia_valid & ATTR_UID) {
226                 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
        /* -1 means "leave unchanged", mirroring chown(2) semantics */
228                 if (attr->ia_uid == (uid_t) -1)
229                         attr->ia_uid = inode->i_uid;
230                 if (attr->ia_gid == (gid_t) -1)
231                         attr->ia_gid = inode->i_gid;
232                 attr->ia_mode = inode->i_mode;
234                  * If the user or group of a non-directory has been
235                  * changed by a non-root user, remove the setuid bit.
236                  * 19981026 David C Niemi <niemi@tux.org>
238                  * Changed this to apply to all users, including root,
239                  * to avoid some races. This is the behavior we had in
240                  * 2.0. The check for non-root was definitely wrong
241                  * for 2.2 anyway, as it should have been using
242                  * CAP_FSETID rather than fsuid -- 19990830 SD.
244                 if ((inode->i_mode & S_ISUID) == S_ISUID &&
245                     !S_ISDIR(inode->i_mode)) {
246                         attr->ia_mode &= ~S_ISUID;
247                         attr->ia_valid |= ATTR_MODE;
250                  * Likewise, if the user or group of a non-directory
251                  * has been changed by a non-root user, remove the
252                  * setgid bit UNLESS there is no group execute bit
253                  * (this would be a file marked for mandatory
254                  * locking). 19981026 David C Niemi <niemi@tux.org>
256                  * Removed the fsuid check (see the comment above) --
259                 if (((inode->i_mode & (S_ISGID | S_IXGRP)) ==
260                      (S_ISGID | S_IXGRP)) && !S_ISDIR(inode->i_mode)) {
261                         attr->ia_mode &= ~S_ISGID;
262                         attr->ia_valid |= ATTR_MODE;
264         } else if (ia_valid & ATTR_MODE) {
265                 int mode = attr->ia_mode;
267                 if (attr->ia_mode == (umode_t)-1)
268                         mode = inode->i_mode;
        /* keep the file-type bits from the inode, take perm bits from mode */
270                         (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
/* On a resent request, find the outstanding reply state with the same xid
 * on this export and transfer ("steal") its saved DLM locks onto the new
 * request, so the locks are released only when the new reply is acked.
 * NOTE(review): extract is incomplete -- local declarations (flags, i),
 * several continue/break statements and closing braces are missing from the
 * visible text; lock ordering (exp_lock outside svc->srv_lock) is intricate,
 * so code left byte-identical. */
275 void mds_steal_ack_locks(struct ptlrpc_request *req)
277         struct obd_export *exp = req->rq_export;
278         struct list_head *tmp;
279         struct ptlrpc_reply_state *oldrep;
280         struct ptlrpc_service *svc;
282         char str[PTL_NALFMT_SIZE];
285         /* CAVEAT EMPTOR: spinlock order */
286         spin_lock_irqsave (&exp->exp_lock, flags);
287         list_for_each (tmp, &exp->exp_outstanding_replies) {
288                 oldrep = list_entry(tmp, struct ptlrpc_reply_state,rs_exp_list);
        /* only the reply with the matching xid is the one being resent */
290                 if (oldrep->rs_xid != req->rq_xid)
293                 if (oldrep->rs_msg.opc != req->rq_reqmsg->opc)
294                         CERROR ("Resent req xid "LPX64" has mismatched opc: "
295                                 "new %d old %d\n", req->rq_xid,
296                                 req->rq_reqmsg->opc, oldrep->rs_msg.opc);
298                 svc = oldrep->rs_srv_ni->sni_service;
299                 spin_lock (&svc->srv_lock);
301                 list_del_init (&oldrep->rs_exp_list);
303                 CWARN("Stealing %d locks from rs %p x"LPD64".t"LPD64
305                       oldrep->rs_nlocks, oldrep,
306                       oldrep->rs_xid, oldrep->rs_transno, oldrep->rs_msg.opc,
307                       ptlrpc_peernid2str(&exp->exp_connection->c_peer, str));
        /* re-save each lock against the new request, then disown them */
309                 for (i = 0; i < oldrep->rs_nlocks; i++)
310                         ptlrpc_save_lock(req,
311                                          &oldrep->rs_locks[i],
312                                          oldrep->rs_modes[i]);
313                 oldrep->rs_nlocks = 0;
315                 DEBUG_REQ(D_HA, req, "stole locks for");
316                 ptlrpc_schedule_difficult_reply (oldrep);
318                 spin_unlock (&svc->srv_lock);
319                 spin_unlock_irqrestore (&exp->exp_lock, flags);
        /* fall-through unlock for the "no matching reply found" path */
322         spin_unlock_irqrestore (&exp->exp_lock, flags);
/* Reconstruct a reply for a resent/replayed request from the client's
 * persistent last_rcvd record: restore transno and status, then steal the
 * ack locks of the original reply.
 * NOTE(review): extract is incomplete -- braces and any le64/le32
 * conversions of the mcd fields are missing from the visible text. */
325 void mds_req_from_mcd(struct ptlrpc_request *req, struct mds_client_data *mcd)
327         DEBUG_REQ(D_HA, req, "restoring transno "LPD64"/status %d",
328                   mcd->mcd_last_transno, mcd->mcd_last_result);
329         req->rq_repmsg->transno = req->rq_transno = mcd->mcd_last_transno;
330         req->rq_repmsg->status = req->rq_status = mcd->mcd_last_result;
332         mds_steal_ack_locks(req);
/* Replay path for a resent setattr: restore transno/status from the client
 * record, re-look-up the target inode and repack the reply body exactly as
 * the original reply would have.
 * NOTE(review): extract is incomplete -- the dentry declaration, error
 * handling around mds_fid2dentry, dput and closing braces are missing from
 * the visible text. */
335 static void reconstruct_reint_setattr(struct mds_update_record *rec,
336                                       int offset, struct ptlrpc_request *req)
338         struct mds_export_data *med = &req->rq_export->exp_mds_data;
339         struct mds_obd *obd = &req->rq_export->exp_obd->u.mds;
341         struct mds_body *body;
343         mds_req_from_mcd(req, med->med_mcd);
345         de = mds_fid2dentry(obd, rec->ur_fid1, NULL);
347                 LASSERT(PTR_ERR(de) == req->rq_status);
351         body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
352         mds_pack_inode2fid(&body->fid1, de->d_inode);
353         mds_pack_inode2body(body, de->d_inode);
355         /* Don't return OST-specific attributes if we didn't just set them */
356         if (rec->ur_iattr.ia_valid & ATTR_SIZE)
357                 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
358         if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
359                 body->valid |= OBD_MD_FLMTIME;
360         if (rec->ur_iattr.ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
361                 body->valid |= OBD_MD_FLATIME;
/* Propagate a uid/gid change from the MDS inode to the OST objects: unpack
 * the stripe MD, build an obdo carrying id/uid/gid (plus unlink-log cookies
 * when supplied) and fire an asynchronous setattr at the OSCs.
 * NOTE(review): extract is incomplete -- the obdo allocation, several error
 * branches, intermediate cleanup_phase assignments and case labels of the
 * cleanup switch are missing from the visible text. */
366 int mds_osc_setattr_async(struct obd_device *obd, struct inode *inode,
367                           struct lov_mds_md *lmm, int lmm_size,
368                           struct llog_cookie *logcookies)
370         struct mds_obd *mds = &obd->u.mds;
371         struct lov_stripe_md *lsm = NULL;
372         struct obd_trans_info oti = { 0 };
373         struct obdo *oa = NULL;
374         int cleanup_phase = 0, rc = 0;
        /* fault-injection hook for testing the MDS->OST setattr path */
377         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OST_SETATTR))
380         /* first get memory EA */
388         rc = obd_unpackmd(mds->mds_osc_exp, &lsm, lmm, lmm_size);
390                 CERROR("Error unpack md %p\n", lmm);
396         oa->o_id = lsm->lsm_object_id;
397         oa->o_uid = inode->i_uid;
398         oa->o_gid = inode->i_gid;
399         oa->o_valid = OBD_MD_FLID | OBD_MD_FLUID | OBD_MD_FLGID;
        /* cookies present: tell the OST so the chown llog can be cancelled */
401                 oa->o_valid |= OBD_MD_FLCOOKIE;
402                 oti.oti_logcookies = logcookies;
405         /* do setattr from mds to ost asynchronously */
406         rc = obd_setattr_async(mds->mds_osc_exp, oa, lsm, &oti);
408                 CDEBUG(D_INODE, "mds to ost setattr objid 0x"LPX64" on ost error "
409                        "%d\n", lsm->lsm_object_id, rc);
        /* staged cleanup: phases fall through from deepest allocation */
411         switch(cleanup_phase) {
413                 obd_free_memmd(mds->mds_osc_exp, &lsm);
418                 OBD_FREE(logcookies, mds->mds_max_cookiesize);
424 /* In the raw-setattr case, we lock the child inode.
425  * In the write-back case or if being called from open, the client holds a lock
428  * We use the ATTR_FROM_OPEN flag to tell these cases apart. */
/* Main setattr reintegration handler: looks up (and usually DLM-locks) the
 * target inode, fixes up the iattr, applies it through fsfilt, journals
 * chown/chgrp in the llog, optionally replaces striping EA, packs the reply
 * body and finishes the transaction; may trigger an async MDS->OST setattr
 * and quota adjustment.
 * NOTE(review): extract is very incomplete -- the dentry/handle/uid/gid
 * declarations, many error branches, GOTO labels, cleanup cases and closing
 * braces are missing from the visible text; code left byte-identical. */
429 static int mds_reint_setattr(struct mds_update_record *rec, int offset,
430                              struct ptlrpc_request *req,
431                              struct lustre_handle *lh)
433         struct mds_obd *mds = mds_req2mds(req);
434         struct obd_device *obd = req->rq_export->exp_obd;
435         struct mds_body *body;
437         struct inode *inode = NULL;
438         struct lustre_handle lockh;
440         struct mds_logcancel_data *mlcd = NULL;
441         struct lov_mds_md *lmm = NULL;
442         struct llog_cookie *logcookies = NULL;
443         int lmm_size = 0, need_lock = 1;
444         int rc = 0, cleanup_phase = 0, err, locked = 0;
449         LASSERT(offset == 0);
451         DEBUG_REQ(D_INODE, req, "setattr "LPU64"/%u %x", rec->ur_fid1->id,
452                   rec->ur_fid1->generation, rec->ur_iattr.ia_valid);
        /* resent request: rebuild the original reply instead of redoing work */
454         MDS_CHECK_RESENT(req, reconstruct_reint_setattr(rec, offset, req));
456         if (rec->ur_iattr.ia_valid & ATTR_FROM_OPEN ||
457             (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)) {
458                 de = mds_fid2dentry(mds, rec->ur_fid1, NULL);
460                         GOTO(cleanup, rc = PTR_ERR(de));
461                 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
462                         GOTO(cleanup, rc = -EROFS);
        /* raw setattr: we must take the PW lock on the child ourselves */
464                 de = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_PW,
467                         GOTO(cleanup, rc = PTR_ERR(de));
475         /* save uid/gid for quota acq/rel */
476         child_uid = inode->i_uid;
477         child_gid = inode->i_gid;
479         if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
480             rec->ur_eadata != NULL) {
485         OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_SETATTR_WRITE, inode->i_sb);
487         /* start a log jounal handle if needed*/
488         if (S_ISREG(inode->i_mode) &&
489             rec->ur_iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
490                 lmm_size = mds->mds_max_mdsize;
491                 OBD_ALLOC(lmm, lmm_size);
493                         GOTO(cleanup, rc = -ENOMEM);
496                 rc = mds_get_md(obd, inode, lmm, &lmm_size, need_lock);
        /* chown/chgrp path needs llog credits sized by stripe count */
500                         handle = fsfilt_start_log(obd, inode, FSFILT_OP_SETATTR, NULL,
501                                                   le32_to_cpu(lmm->lmm_stripe_count));
503                 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
506                 GOTO(cleanup, rc = PTR_ERR(handle));
508         if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
509                 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu\n",
510                        LTIME_S(rec->ur_iattr.ia_mtime),
511                        LTIME_S(rec->ur_iattr.ia_ctime));
512         rc = mds_fix_attr(inode, rec);
516         if (rec->ur_iattr.ia_valid & ATTR_ATTR_FLAG) /* ioctl */
517                 rc = fsfilt_iocontrol(obd, inode, NULL, EXT3_IOC_SETFLAGS,
518                                       (long)&rec->ur_iattr.ia_attr_flags);
520                 rc = fsfilt_setattr(obd, de, handle, &rec->ur_iattr, 0);
521         /* journal chown/chgrp in llog, just like unlink */
522         if (rc == 0 && S_ISREG(inode->i_mode) &&
523             rec->ur_iattr.ia_valid & (ATTR_UID | ATTR_GID) && lmm_size){
524                 OBD_ALLOC(logcookies, mds->mds_max_cookiesize);
525                 if (logcookies == NULL)
526                         GOTO(cleanup, rc = -ENOMEM);
527                 if (mds_log_op_setattr(obd, inode, lmm, lmm_size,
529                                        mds->mds_max_cookiesize) <= 0) {
530                         OBD_FREE(logcookies, mds->mds_max_cookiesize);
        /* replace or delete the striping EA if the client sent one */
536         if (rc == 0 && (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
537             rec->ur_eadata != NULL) {
538                 struct lov_stripe_md *lsm = NULL;
539                 struct lov_user_md *lum = NULL;
541                 rc = ll_permission(inode, MAY_WRITE, NULL);
545                 lum = rec->ur_eadata;
546                 /* if lmm_stripe_size is -1, then delete the stripe
548                 if (S_ISDIR(inode->i_mode) &&
549                     lum->lmm_stripe_size == (typeof(lum->lmm_stripe_size))(-1)){
550                         rc = fsfilt_set_md(obd, inode, handle, NULL, 0);
        /* validate the user-supplied striping by round-tripping through LOV */
554                         rc = obd_iocontrol(OBD_IOC_LOV_SETSTRIPE,
556                                            &lsm, rec->ur_eadata);
560                         obd_free_memmd(mds->mds_osc_exp, &lsm);
562                         rc = fsfilt_set_md(obd, inode, handle, rec->ur_eadata,
569         body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
570         mds_pack_inode2fid(&body->fid1, inode);
571         mds_pack_inode2body(body, inode);
573         /* Don't return OST-specific attributes if we didn't just set them */
574         if (rec->ur_iattr.ia_valid & ATTR_SIZE)
575                 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
576         if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
577                 body->valid |= OBD_MD_FLMTIME;
578         if (rec->ur_iattr.ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
579                 body->valid |= OBD_MD_FLATIME;
        /* stash cookies+EA for cancellation once this transaction commits */
581         if (rc == 0 && rec->ur_cookielen && !IS_ERR(mds->mds_osc_obd)) {
582                 OBD_ALLOC(mlcd, sizeof(*mlcd) + rec->ur_cookielen +
585                         mlcd->mlcd_size = sizeof(*mlcd) + rec->ur_cookielen +
587                         mlcd->mlcd_eadatalen = rec->ur_eadatalen;
588                         mlcd->mlcd_cookielen = rec->ur_cookielen;
589                         mlcd->mlcd_lmm = (void *)&mlcd->mlcd_cookies +
590                                 mlcd->mlcd_cookielen;
591                         memcpy(&mlcd->mlcd_cookies, rec->ur_logcookies,
592                                mlcd->mlcd_cookielen);
593                         memcpy(mlcd->mlcd_lmm, rec->ur_eadata,
594                                mlcd->mlcd_eadatalen);
596                         CERROR("unable to allocate log cancel data\n");
602         fsfilt_add_journal_cb(req->rq_export->exp_obd, 0, handle,
603                               mds_cancel_cookies_cb, mlcd);
604         err = mds_finish_transno(mds, inode, handle, req, rc, 0);
605         /* do mds to ost setattr if needed */
606         if (!rc && !err && lmm_size)
607                 mds_osc_setattr_async(obd, inode, lmm, lmm_size, logcookies);
609         switch (cleanup_phase) {
611                 OBD_FREE(lmm, mds->mds_max_mdsize);
613                 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
614                     rec->ur_eadata != NULL)
        /* on error drop our PW lock; on success save it against the reply */
619                         ldlm_lock_decref(&lockh, LCK_PW);
621                         ptlrpc_save_lock (req, &lockh, LCK_PW);
634         /* trigger dqrel/dqacq for original owner and new owner */
635         if (rec->ur_iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
636                 mds_adjust_qunit(obd, rec->ur_iattr.ia_uid,
637                                  rec->ur_iattr.ia_gid, 0, 0, rc);
638                 mds_adjust_qunit(obd, child_uid, child_gid, 0, 0, rc);
/* Replay path for a resent create: restore transno/status, then re-look-up
 * parent and child to repack the reply body with the created inode's fid
 * and attributes.
 * NOTE(review): extract is incomplete -- the early-exit on error status,
 * dput calls and closing braces are missing from the visible text. */
643 static void reconstruct_reint_create(struct mds_update_record *rec, int offset,
644                                      struct ptlrpc_request *req)
646         struct mds_export_data *med = &req->rq_export->exp_mds_data;
647         struct mds_obd *obd = &req->rq_export->exp_obd->u.mds;
648         struct dentry *parent, *child;
649         struct mds_body *body;
651         mds_req_from_mcd(req, med->med_mcd);
656         parent = mds_fid2dentry(obd, rec->ur_fid1, NULL);
657         LASSERT(!IS_ERR(parent));
        /* ur_namelen includes the trailing NUL, hence the -1 */
658         child = ll_lookup_one_len(rec->ur_name, parent, rec->ur_namelen - 1);
659         LASSERT(!IS_ERR(child));
660         body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
661         mds_pack_inode2fid(&body->fid1, child->d_inode);
662         mds_pack_inode2body(body, child->d_inode);
/* Create reintegration handler for mkdir/create/symlink/mknod: locks the
 * parent, looks up the child name, dispatches on the file type to the
 * corresponding vfs_* call, applies the client-supplied timestamps and
 * ownership, optionally inherits directory striping, packs the reply and
 * finishes the transaction; on error the just-created object is destroyed.
 * NOTE(review): extract is very incomplete -- handle/iattr/created
 * declarations, the type switch labels, many error branches, GOTO labels,
 * cleanup cases and closing braces are missing from the visible text; code
 * left byte-identical. */
667 static int mds_reint_create(struct mds_update_record *rec, int offset,
668                             struct ptlrpc_request *req,
669                             struct lustre_handle *lh)
671         struct dentry *dparent = NULL;
672         struct mds_obd *mds = mds_req2mds(req);
673         struct obd_device *obd = req->rq_export->exp_obd;
674         struct dentry *dchild = NULL;
675         struct inode *dir = NULL;
677         struct lustre_handle lockh;
678         int rc = 0, err, type = rec->ur_mode & S_IFMT, cleanup_phase = 0;
680         uid_t parent_uid = 0;
681         gid_t parent_gid = 0;
682         struct dentry_params dp;
685         LASSERT(offset == 0);
686         LASSERT(!strcmp(req->rq_export->exp_obd->obd_type->typ_name, "mds"));
688         DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u name %s mode %o",
689                   rec->ur_fid1->id, rec->ur_fid1->generation,
690                   rec->ur_name, rec->ur_mode);
        /* resent request: rebuild the original reply instead of re-creating */
692         MDS_CHECK_RESENT(req, reconstruct_reint_create(rec, offset, req));
694         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_CREATE))
695                 GOTO(cleanup, rc = -ESTALE);
697         dparent = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_PW, &lockh,
698                                         rec->ur_name, rec->ur_namelen - 1);
699         if (IS_ERR(dparent)) {
700                 rc = PTR_ERR(dparent);
702                 CERROR("parent lookup error %d\n", rc);
705         cleanup_phase = 1; /* locked parent dentry */
706         dir = dparent->d_inode;
709         ldlm_lock_dump_handle(D_OTHER, &lockh);
711         dchild = ll_lookup_one_len(rec->ur_name, dparent, rec->ur_namelen - 1);
712         if (IS_ERR(dchild)) {
713                 rc = PTR_ERR(dchild);
714                 CERROR("child lookup error %d\n", rc);
718         cleanup_phase = 2; /* child dentry */
720         OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_CREATE_WRITE, dir->i_sb);
        /* read-only export: existing name -> EEXIST, otherwise EROFS */
722         if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY) {
724                         GOTO(cleanup, rc = -EEXIST);
725                 GOTO(cleanup, rc = -EROFS);
        /* BSD-style setgid directory: subdirectories inherit S_ISGID */
728         if (dir->i_mode & S_ISGID) {
729                 if (S_ISDIR(rec->ur_mode))
730                         rec->ur_mode |= S_ISGID;
        /* pass the desired inode number down via d_fsdata for replay */
733         dchild->d_fsdata = (void *)&dp;
734         dp.p_inum = (unsigned long)rec->ur_fid2->id;
739                 handle = fsfilt_start(obd, dir, FSFILT_OP_CREATE, NULL);
741                         GOTO(cleanup, rc = PTR_ERR(handle));
742                 rc = ll_vfs_create(dir, dchild, rec->ur_mode, NULL);
747                 handle = fsfilt_start(obd, dir, FSFILT_OP_MKDIR, NULL);
749                         GOTO(cleanup, rc = PTR_ERR(handle));
750                 rc = vfs_mkdir(dir, dchild, rec->ur_mode);
755                 handle = fsfilt_start(obd, dir, FSFILT_OP_SYMLINK, NULL);
757                         GOTO(cleanup, rc = PTR_ERR(handle));
758                 if (rec->ur_tgt == NULL) /* no target supplied */
759                         rc = -EINVAL; /* -EPROTO? */
761                         rc = ll_vfs_symlink(dir, dchild, rec->ur_tgt, S_IALLUGO);
769                 int rdev = rec->ur_rdev;
770                 handle = fsfilt_start(obd, dir, FSFILT_OP_MKNOD, NULL);
        /* NOTE(review): as shown, PTR_ERR(handle) is evaluated after handle
         * is nulled -- likely a transcription artifact of the extract */
772                         GOTO(cleanup, (handle = NULL, rc = PTR_ERR(handle)));
773                 rc = vfs_mknod(dir, dchild, rec->ur_mode, rdev);
778                 CERROR("bad file type %o creating %s\n", type, rec->ur_name);
779                 dchild->d_fsdata = NULL;
780                 GOTO(cleanup, rc = -EINVAL);
783         /* In case we stored the desired inum in here, we want to clean up. */
784         if (dchild->d_fsdata == (void *)(unsigned long)rec->ur_fid2->id)
785                 dchild->d_fsdata = NULL;
788                 CDEBUG(D_INODE, "error during create: %d\n", rc);
792                 struct inode *inode = dchild->d_inode;
793                 struct mds_body *body;
        /* stamp the new inode with the client's time and fs credentials */
796                 LTIME_S(iattr.ia_atime) = rec->ur_time;
797                 LTIME_S(iattr.ia_ctime) = rec->ur_time;
798                 LTIME_S(iattr.ia_mtime) = rec->ur_time;
799                 iattr.ia_uid = rec->ur_fsuid;
800                 if (dir->i_mode & S_ISGID)
801                         iattr.ia_gid = dir->i_gid;
803                         iattr.ia_gid = rec->ur_fsgid;
804                 iattr.ia_valid = ATTR_UID | ATTR_GID | ATTR_ATIME |
805                         ATTR_MTIME | ATTR_CTIME;
807                 if (rec->ur_fid2->id) {
808                         LASSERT(rec->ur_fid2->id == inode->i_ino);
809                         inode->i_generation = rec->ur_fid2->generation;
810                         /* Dirtied and committed by the upcoming setattr. */
811                         CDEBUG(D_INODE, "recreated ino %lu with gen %u\n",
812                                inode->i_ino, inode->i_generation);
814                         struct lustre_handle child_ino_lockh;
816                         CDEBUG(D_INODE, "created ino %lu with gen %x\n",
817                                inode->i_ino, inode->i_generation);
819                         /* The inode we were allocated may have just been freed
820                          * by an unlink operation. We take this lock to
821                          * synchronize against the matching reply-ack-lock taken
822                          * in unlink, to avoid replay problems if this reply
823                          * makes it out to the client but the unlink's does not.
824                          * See bug 2029 for more detail.*/
825                         rc = mds_lock_new_child(obd, inode, &child_ino_lockh);
826                         if (rc != ELDLM_OK) {
827                                 CERROR("error locking for unlink/create sync: "
830                                 ldlm_lock_decref(&child_ino_lockh, LCK_EX);
834                 rc = fsfilt_setattr(obd, dchild, handle, &iattr, 0);
836                         CERROR("error on child setattr: rc = %d\n", rc);
        /* bump the parent's m/ctime to reflect the new directory entry */
838                 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
839                 rc = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
841                         CERROR("error on parent setattr: rc = %d\n", rc);
843                 if (S_ISDIR(inode->i_mode)) {
844                         struct lov_mds_md lmm;
845                         int lmm_size = sizeof(lmm);
846                         rc = mds_get_md(obd, dir, &lmm, &lmm_size, 1);
849                                 rc = fsfilt_set_md(obd, inode, handle,
854                                         CERROR("error on copy stripe info: rc = %d\n",
858                 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
859                 mds_pack_inode2fid(&body->fid1, inode);
860                 mds_pack_inode2body(body, inode);
865         err = mds_finish_transno(mds, dir, handle, req, rc, 0);
868                 /* Destroy the file we just created. This should not need
869                  * extra journal credits, as we have already modified all of
870                  * the blocks needed in order to create the file in the first
875                         err = vfs_rmdir(dir, dchild);
877                                 CERROR("rmdir in error path: %d\n", err);
880                         err = vfs_unlink(dir, dchild);
882                                 CERROR("unlink in error path: %d\n", err);
885         } else if (created) {
886                 /* save uid/gid of create inode and parent */
887                 parent_uid = dir->i_uid;
888                 parent_gid = dir->i_gid;
893         switch (cleanup_phase) {
894         case 2: /* child dentry */
896         case 1: /* locked parent dentry */
        /* on error drop the parent lock; on success pin it to the reply */
898                         ldlm_lock_decref(&lockh, LCK_PW);
900                         ptlrpc_save_lock (req, &lockh, LCK_PW);
906                 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
911         /* trigger dqacq on the owner of child and parent */
912         mds_adjust_qunit(obd, current->fsuid, current->fsgid,
913                          parent_uid, parent_gid, rc);
/* Lexicographic "greater-than" comparison of two DLM resource ids, used to
 * establish a global lock-acquisition order and avoid deadlocks.  All-zero
 * resources sort last so they can be skipped by the enqueue helpers.
 * NOTE(review): extract is incomplete -- the return statements implied by
 * each comparison and the final return are missing from the visible text. */
917 int res_gt(struct ldlm_res_id *res1, struct ldlm_res_id *res2)
921         for (i = 0; i < RES_NAME_SIZE; i++) {
922                 /* return 1 here, because enqueue_ordered will skip resources
923                  * of all zeroes if they're sorted to the end of the list. */
924                 if (res1->name[i] == 0 && res2->name[i] != 0)
926                 if (res2->name[i] == 0 && res1->name[i] != 0)
929                 if (res1->name[i] > res2->name[i])
931                 if (res1->name[i] < res2->name[i])
937 /* This function doesn't use ldlm_match_or_enqueue because we're always called
938  * with EX or PW locks, and the MDS is no longer allowed to match write locks,
939  * because they take the place of local semaphores.
941  * One or two locks are taken in numerical order. A res_id->name[0] of 0 means
942  * no lock is taken for that res_id. Must be at least one non-zero res_id. */
/* Acquire up to two local DLM locks in resource order (per res_gt) to avoid
 * deadlock; identical resources share a single lock with an extra reference.
 * NOTE(review): extract is incomplete -- flags/rc declarations, the error
 * check after the first enqueue, the second enqueue's final arguments and
 * closing braces are missing from the visible text. */
943 int enqueue_ordered_locks(struct obd_device *obd, struct ldlm_res_id *p1_res_id,
944                           struct lustre_handle *p1_lockh, int p1_lock_mode,
945                           struct ldlm_res_id *p2_res_id,
946                           struct lustre_handle *p2_lockh, int p2_lock_mode)
948         struct ldlm_res_id *res_id[2] = { p1_res_id, p2_res_id };
949         struct lustre_handle *handles[2] = { p1_lockh, p2_lockh };
950         int lock_modes[2] = { p1_lock_mode, p2_lock_mode };
954         LASSERT(p1_res_id != NULL && p2_res_id != NULL);
956         CDEBUG(D_INFO, "locks before: "LPU64"/"LPU64"\n",
957                res_id[0]->name[0], res_id[1]->name[0]);
        /* swap so the numerically smaller resource is locked first */
959         if (res_gt(p1_res_id, p2_res_id)) {
960                 handles[1] = p1_lockh;
961                 handles[0] = p2_lockh;
962                 res_id[1] = p1_res_id;
963                 res_id[0] = p2_res_id;
964                 lock_modes[1] = p1_lock_mode;
965                 lock_modes[0] = p2_lock_mode;
968         CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"\n",
969                res_id[0]->name[0], res_id[1]->name[0]);
971         flags = LDLM_FL_LOCAL_ONLY;
972         rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, *res_id[0],
973                               LDLM_PLAIN, NULL, lock_modes[0], &flags,
974                               mds_blocking_ast, ldlm_completion_ast, NULL, NULL,
975                               NULL, 0, NULL, handles[0]);
978         ldlm_lock_dump_handle(D_OTHER, handles[0]);
        /* same resource twice: just add a reference instead of re-enqueuing */
980         if (memcmp(res_id[0], res_id[1], sizeof(*res_id[0])) == 0) {
981                 memcpy(handles[1], handles[0], sizeof(*(handles[1])));
982                 ldlm_lock_addref(handles[1], lock_modes[1]);
983         } else if (res_id[1]->name[0] != 0) {
984                 flags = LDLM_FL_LOCAL_ONLY;
985                 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
986                                       *res_id[1], LDLM_PLAIN, NULL,
987                                       lock_modes[1], &flags, mds_blocking_ast,
988                                       ldlm_completion_ast, NULL, NULL, NULL, 0,
        /* unwind the first lock if the second enqueue fails */
990                 if (rc != ELDLM_OK) {
991                         ldlm_lock_decref(handles[0], lock_modes[0]);
994                 ldlm_lock_dump_handle(D_OTHER, handles[1]);
/* Acquire up to four local DLM locks (two parents, two children -- the
 * rename case) in global resource order.  Element [4] of each array is a
 * scratch slot for the insertion sort.  Duplicate adjacent resources share
 * one lock with an extra reference; all-zero resources are skipped.
 * NOTE(review): extract is incomplete -- the inner do/while framing of the
 * sort, the flags initialization per iteration, enqueue error check and the
 * out_err unwind loop bounds are missing from the visible text. */
1000 int enqueue_4ordered_locks(struct obd_device *obd,struct ldlm_res_id *p1_res_id,
1001                            struct lustre_handle *p1_lockh, int p1_lock_mode,
1002                            struct ldlm_res_id *p2_res_id,
1003                            struct lustre_handle *p2_lockh, int p2_lock_mode,
1004                            struct ldlm_res_id *c1_res_id,
1005                            struct lustre_handle *c1_lockh, int c1_lock_mode,
1006                            struct ldlm_res_id *c2_res_id,
1007                            struct lustre_handle *c2_lockh, int c2_lock_mode)
1009         struct ldlm_res_id *res_id[5] = { p1_res_id, p2_res_id,
1010                                           c1_res_id, c2_res_id };
1011         struct lustre_handle *dlm_handles[5] = { p1_lockh, p2_lockh,
1012                                                  c1_lockh, c2_lockh };
1013         int lock_modes[5] = { p1_lock_mode, p2_lock_mode,
1014                               c1_lock_mode, c2_lock_mode };
1015         int rc, i, j, sorted, flags;
1018         CDEBUG(D_DLMTRACE, "locks before: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
1019                res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
1020                res_id[3]->name[0]);
1022         /* simple insertion sort - we have at most 4 elements */
1023         for (i = 1; i < 4; i++) {
1025                 dlm_handles[4] = dlm_handles[i];
1026                 res_id[4] = res_id[i];
1027                 lock_modes[4] = lock_modes[i];
1031                         if (res_gt(res_id[j], res_id[4])) {
1032                                 dlm_handles[j + 1] = dlm_handles[j];
1033                                 res_id[j + 1] = res_id[j];
1034                                 lock_modes[j + 1] = lock_modes[j];
1039                 } while (j >= 0 && !sorted);
1041                 dlm_handles[j + 1] = dlm_handles[4];
1042                 res_id[j + 1] = res_id[4];
1043                 lock_modes[j + 1] = lock_modes[4];
1046         CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
1047                res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
1048                res_id[3]->name[0]);
1050         /* XXX we could send ASTs on all these locks first before blocking? */
1051         for (i = 0; i < 4; i++) {
        /* name[0] == 0 is the "no lock wanted for this slot" sentinel */
1053                 if (res_id[i]->name[0] == 0)
        /* duplicate of the previous (sorted) resource: share its lock */
1056                     memcmp(res_id[i], res_id[i-1], sizeof(*res_id[i])) == 0) {
1057                         memcpy(dlm_handles[i], dlm_handles[i-1],
1058                                sizeof(*(dlm_handles[i])));
1059                         ldlm_lock_addref(dlm_handles[i], lock_modes[i]);
1061                         rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
1062                                               *res_id[i], LDLM_PLAIN, NULL,
1063                                               lock_modes[i], &flags,
1065                                               ldlm_completion_ast, NULL, NULL,
1066                                               NULL, 0, NULL, dlm_handles[i]);
1068                                 GOTO(out_err, rc = -EIO);
1069                         ldlm_lock_dump_handle(D_OTHER, dlm_handles[i]);
        /* out_err: release every lock acquired so far, in reverse */
1076                 ldlm_lock_decref(dlm_handles[i], lock_modes[i]);
1081 /* In the unlikely case that the child changed while we were waiting
1082  * on the lock, we need to drop the lock on the old child and either:
1083  * - if the child has a lower resource name, then we have to also
1084  * drop the parent lock and regain the locks in the right order
1085  * - in the rename case, if the child has a lower resource name than one of
1086  * the other parent/child resources (maxres) we also need to reget the locks
1087  * - if the child has a higher resource name (this is the common case)
1088  * we can just get the lock on the new child (still in lock order)
1090  * Returns 0 if the child did not change or if it changed but could be locked.
1091  * Returns 1 if the child changed and we need to re-lock (no locks held).
1092  * Returns -ve error with a valid dchild (no locks held). */
/* NOTE(review): extract is incomplete -- the flags declaration, the dput of
 * the stale dchild, the "else" branch clearing child_res_id, and several
 * GOTO targets/braces are missing from the visible text. */
1093 static int mds_verify_child(struct obd_device *obd,
1094                             struct ldlm_res_id *parent_res_id,
1095                             struct lustre_handle *parent_lockh,
1096                             struct dentry *dparent, int parent_mode,
1097                             struct ldlm_res_id *child_res_id,
1098                             struct lustre_handle *child_lockh,
1099                             struct dentry **dchildp, int child_mode,
1100                             const char *name, int namelen,
1101                             struct ldlm_res_id *maxres)
1103         struct dentry *vchild, *dchild = *dchildp;
1104         int rc = 0, cleanup_phase = 2; /* parent, child locks */
        /* re-lookup under the parent lock to see if the name still maps to
         * the same inode we locked */
1107         vchild = ll_lookup_one_len(name, dparent, namelen - 1);
1109                 GOTO(cleanup, rc = PTR_ERR(vchild));
1111         if (likely((vchild->d_inode == NULL && child_res_id->name[0] == 0) ||
1112                    (vchild->d_inode != NULL &&
1113                     child_res_id->name[0] == vchild->d_inode->i_ino &&
1114                     child_res_id->name[1] == vchild->d_inode->i_generation))) {
1122         CDEBUG(D_DLMTRACE, "child inode changed: %p != %p (%lu != "LPU64")\n",
1123                vchild->d_inode, dchild ? dchild->d_inode : 0,
1124                vchild->d_inode ? vchild->d_inode->i_ino : 0,
1125                child_res_id->name[0]);
1126         if (child_res_id->name[0] != 0)
1127                 ldlm_lock_decref(child_lockh, child_mode);
1131         cleanup_phase = 1; /* parent lock only */
1132         *dchildp = dchild = vchild;
1134         if (dchild->d_inode) {
        /* child exists: lock its (new) resource, relocking from scratch if
         * ordering against parent/maxres would be violated */
1136                 child_res_id->name[0] = dchild->d_inode->i_ino;
1137                 child_res_id->name[1] = dchild->d_inode->i_generation;
1139                 if (res_gt(parent_res_id, child_res_id) ||
1140                     res_gt(maxres, child_res_id)) {
1141                         CDEBUG(D_DLMTRACE, "relock "LPU64"<("LPU64"|"LPU64")\n",
1142                                child_res_id->name[0], parent_res_id->name[0],
1144                         GOTO(cleanup, rc = 1);
1147                 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
1148                                       *child_res_id, LDLM_PLAIN, NULL,
1149                                       child_mode, &flags, mds_blocking_ast,
1150                                       ldlm_completion_ast, NULL, NULL, NULL, 0,
1153                         GOTO(cleanup, rc = -EIO);
1155                 memset(child_res_id, 0, sizeof(*child_res_id));
1161         switch(cleanup_phase) {
1163                 if (child_res_id->name[0] != 0)
1164                         ldlm_lock_decref(child_lockh, child_mode);
1166                 ldlm_lock_decref(parent_lockh, parent_mode);
/* Look up a parent by fid and a child by name, then take DLM locks on both
 * in resource order, re-verifying the child after locking (it may have been
 * created/unlinked while we waited); retries via mds_verify_child's rc==1.
 * NOTE(review): extract is incomplete -- the fid parameter line, igrab
 * failure handling, the retry loop framing, RETURN statements, and several
 * cleanup cases are missing from the visible text. */
1172 int mds_get_parent_child_locked(struct obd_device *obd, struct mds_obd *mds,
1174                                 struct lustre_handle *parent_lockh,
1175                                 struct dentry **dparentp, int parent_mode,
1176                                 char *name, int namelen,
1177                                 struct lustre_handle *child_lockh,
1178                                 struct dentry **dchildp, int child_mode)
1180         struct ldlm_res_id child_res_id = { .name = {0} };
1181         struct ldlm_res_id parent_res_id = { .name = {0} };
1182         struct inode *inode;
1183         int rc = 0, cleanup_phase = 0;
1186         /* Step 1: Lookup parent */
1187         *dparentp = mds_fid2dentry(mds, fid, NULL);
1188         if (IS_ERR(*dparentp)) {
1189                 rc = PTR_ERR(*dparentp);
1194         CDEBUG(D_INODE, "parent ino %lu, name %s\n",
1195                (*dparentp)->d_inode->i_ino, name);
        /* resource name = { ino, generation } of the inode */
1197         parent_res_id.name[0] = (*dparentp)->d_inode->i_ino;
1198         parent_res_id.name[1] = (*dparentp)->d_inode->i_generation;
1200         cleanup_phase = 1; /* parent dentry */
1202         /* Step 2: Lookup child (without DLM lock, to get resource name) */
1203         *dchildp = ll_lookup_one_len(name, *dparentp, namelen - 1);
1204         if (IS_ERR(*dchildp)) {
1205                 rc = PTR_ERR(*dchildp);
1206                 CDEBUG(D_INODE, "child lookup error %d\n", rc);
1210         cleanup_phase = 2; /* child dentry */
1211         inode = (*dchildp)->d_inode;
1212         if (inode != NULL) {
1213                 if (is_bad_inode(inode)) {
1214                         CERROR("bad inode returned %lu/%u\n",
1215                                inode->i_ino, inode->i_generation);
1216                         GOTO(cleanup, rc = -ENOENT);
        /* hold an inode reference while we drop into the lock enqueue */
1218                 inode = igrab(inode);
1223                 child_res_id.name[0] = inode->i_ino;
1224                 child_res_id.name[1] = inode->i_generation;
1228         cleanup_phase = 2; /* child dentry */
1230         /* Step 3: Lock parent and child in resource order. If child doesn't
1231          * exist, we still have to lock the parent and re-lookup. */
1232         rc = enqueue_ordered_locks(obd,&parent_res_id,parent_lockh,parent_mode,
1233                                    &child_res_id, child_lockh, child_mode);
1237         if (!(*dchildp)->d_inode)
1238                 cleanup_phase = 3; /* parent lock */
1240                 cleanup_phase = 4; /* child lock */
1242         /* Step 4: Re-lookup child to verify it hasn't changed since locking */
1243         rc = mds_verify_child(obd, &parent_res_id, parent_lockh, *dparentp,
1244                               parent_mode, &child_res_id, child_lockh, dchildp,
1245                               child_mode, name, namelen, &parent_res_id);
1255         switch (cleanup_phase) {
1257                 ldlm_lock_decref(child_lockh, child_mode);
1259                 ldlm_lock_decref(parent_lockh, parent_mode);
/*
 * Reconstruct the reply for a resent/replayed update request from the
 * per-client state saved in the export (med_mcd), instead of re-executing
 * the operation.  Invoked via MDS_CHECK_RESENT() at the top of the reint
 * handlers below.
 *
 * NOTE(review): mds_req_from_mcd() presumably restores the saved
 * transno/status into req — confirm against its definition.
 */
1270 void mds_reconstruct_generic(struct ptlrpc_request *req)
1272 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1274 mds_req_from_mcd(req, med->med_mcd);
1277 /* If we are unlinking an open file/dir (i.e. creating an orphan) then
1278 * we instead link the inode into the PENDING directory until it is
1279 * finally released. We can't simply call mds_reint_rename() or some
1280 * part thereof, because we don't have the inode to check for link
1281 * count/open status until after it is locked.
1283 * For lock ordering, caller must get child->i_sem first, then pending->i_sem
1284 * before starting journal transaction.
1286 * returns 1 on success
1287 * returns 0 if we lost a race and didn't make a new link
1288 * returns negative on error
1290 static int mds_orphan_add_link(struct mds_update_record *rec,
1291 struct obd_device *obd, struct dentry *dentry)
1293 struct mds_obd *mds = &obd->u.mds;
1294 struct inode *pending_dir = mds->mds_pending_dir->d_inode;
1295 struct inode *inode = dentry->d_inode;
1296 struct dentry *pending_child;
1297 char fidname[LL_FID_NAMELEN];
1298 int fidlen = 0, rc, mode;
1301 LASSERT(inode != NULL);
1302 LASSERT(!mds_inode_is_orphan(inode));
1303 #ifndef HAVE_I_ALLOC_SEM
/* down_trylock() returning non-zero means the semaphore is already
 * held: these asserts verify the caller took child->i_sem and
 * pending->i_sem, per the lock-ordering contract documented above. */
1304 LASSERT(down_trylock(&inode->i_sem) != 0);
1306 LASSERT(down_trylock(&pending_dir->i_sem) != 0);
/* The orphan's name in PENDING is its "ino:generation" fid string. */
1308 fidlen = ll_fid2str(fidname, inode->i_ino, inode->i_generation);
1310 CDEBUG(D_INODE, "pending destroy of %dx open %d linked %s %s = %s\n",
1311 mds_orphan_open_count(inode), inode->i_nlink,
1312 S_ISDIR(inode->i_mode) ? "dir" :
1313 S_ISREG(inode->i_mode) ? "file" : "other",rec->ur_name,fidname);
/* Not an orphan candidate: nobody has it open, or links remain. */
1315 if (mds_orphan_open_count(inode) == 0 || inode->i_nlink != 0)
1318 pending_child = lookup_one_len(fidname, mds->mds_pending_dir, fidlen);
1319 if (IS_ERR(pending_child))
1320 RETURN(PTR_ERR(pending_child));
1322 if (pending_child->d_inode != NULL) {
1323 CERROR("re-destroying orphan file %s?\n", rec->ur_name);
1324 LASSERT(pending_child->d_inode == inode);
1325 GOTO(out_dput, rc = 0);
1328 /* link() is semantically-wrong for S_IFDIR, so we set S_IFREG
1329 * for linking and return real mode back then -bzzz */
1330 mode = inode->i_mode;
1331 inode->i_mode = S_IFREG;
1332 rc = vfs_link(dentry, pending_dir, pending_child);
1334 CERROR("error linking orphan %s to PENDING: rc = %d\n",
1337 mds_inode_set_orphan(inode);
1339 /* return mode and correct i_nlink if inode is directory */
1340 inode->i_mode = mode;
1341 LASSERTF(inode->i_nlink == 1, "%s nlink == %d\n",
1342 S_ISDIR(mode) ? "dir" : S_ISREG(mode) ? "file" : "other",
1344 if (S_ISDIR(mode)) {
/* A directory in PENDING adds a ".." link to the PENDING dir. */
1346 pending_dir->i_nlink++;
1347 mark_inode_dirty(inode);
1348 mark_inode_dirty(pending_dir);
1351 GOTO(out_dput, rc = 1);
1353 l_dput(pending_child);
/*
 * Handle a client REINT_UNLINK (unlink or rmdir) request.
 *
 * Takes the parent (PW) and child (EX) DLM locks via
 * mds_get_parent_child_locked(), validates rmdir-vs-unlink mode (bug 72),
 * takes an inode-reuse lock (bug 2029), and — when this may be the last
 * link to a regular file — packs the striping EA into the reply so the
 * client can destroy the OST objects.  The VFS unlink/rmdir runs inside a
 * fsfilt transaction; files still held open are linked into PENDING as
 * orphans via mds_orphan_add_link().  cleanup_phase 1-5 unwinds the
 * semaphores/locks in reverse order; on success the child-reuse and
 * parent locks are handed to ptlrpc_save_lock() until the reply is sent.
 */
1357 static int mds_reint_unlink(struct mds_update_record *rec, int offset,
1358 struct ptlrpc_request *req,
1359 struct lustre_handle *lh)
1361 struct dentry *dparent = NULL, *dchild;
1362 struct mds_obd *mds = mds_req2mds(req);
1363 struct obd_device *obd = req->rq_export->exp_obd;
1364 struct mds_body *body = NULL;
1365 struct inode *child_inode = NULL;
1366 struct lustre_handle parent_lockh, child_lockh, child_reuse_lockh;
1367 void *handle = NULL;
1368 int rc = 0, cleanup_phase = 0;
1369 uid_t child_uid = 0, parent_uid = 0;
1370 gid_t child_gid = 0, parent_gid = 0;
/* offset == 2 when called via intent (ldlm_reply occupies buf[0]). */
1373 LASSERT(offset == 0 || offset == 2);
1375 DEBUG_REQ(D_INODE, req, "parent ino "LPU64"/%u, child %s",
1376 rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name);
1378 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1380 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNLINK))
1381 GOTO(cleanup, rc = -ENOENT);
1383 rc = mds_get_parent_child_locked(obd, mds, rec->ur_fid1,
1384 &parent_lockh, &dparent, LCK_PW,
1385 rec->ur_name, rec->ur_namelen,
1386 &child_lockh, &dchild, LCK_EX);
1390 cleanup_phase = 1; /* dchild, dparent, locks */
1393 child_inode = dchild->d_inode;
1394 if (child_inode == NULL) {
1395 CDEBUG(D_INODE, "child doesn't exist (dir %lu, name %s)\n",
1396 dparent->d_inode->i_ino, rec->ur_name);
1397 GOTO(cleanup, rc = -ENOENT);
1400 /* save uid/gid for quota acquire/release */
1401 child_uid = child_inode->i_uid;
1402 child_gid = child_inode->i_gid;
1403 parent_uid = dparent->d_inode->i_uid;
1404 parent_gid = dparent->d_inode->i_gid;
1406 cleanup_phase = 2; /* dchild has a lock */
1408 /* We have to do these checks ourselves, in case we are making an
1409 * orphan. The client tells us whether rmdir() or unlink() was called,
1410 * so we need to return appropriate errors (bug 72). */
1411 if ((rec->ur_mode & S_IFMT) == S_IFDIR) {
1412 if (!S_ISDIR(child_inode->i_mode))
1413 GOTO(cleanup, rc = -ENOTDIR);
1415 if (S_ISDIR(child_inode->i_mode))
1416 GOTO(cleanup, rc = -EISDIR);
1419 /* Check for EROFS after we check ENOENT, ENOTDIR, and EISDIR */
1420 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
1421 GOTO(cleanup, rc = -EROFS);
1423 /* Step 3: Get a lock on the ino to sync with creation WRT inode
1424 * reuse (see bug 2029). */
1425 rc = mds_lock_new_child(obd, child_inode, &child_reuse_lockh);
1429 cleanup_phase = 3; /* child inum lock */
1431 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_UNLINK_WRITE, dparent->d_inode->i_sb);
1433 /* ldlm_reply in buf[0] if called via intent */
1437 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
1438 LASSERT(body != NULL);
1440 /* child orphan sem protects orphan_dec_test && is_orphan race */
1441 MDS_DOWN_READ_ORPHAN_SEM(child_inode);
1442 cleanup_phase = 4; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
1444 /* If this is potentially the last reference to this inode, get the
1445 * OBD EA data first so the client can destroy OST objects. We
1446 * only do the object removal later if no open files/links remain. */
1447 if ((S_ISDIR(child_inode->i_mode) && child_inode->i_nlink == 2) ||
1448 child_inode->i_nlink == 1) {
1449 if (mds_orphan_open_count(child_inode) > 0) {
1450 /* need to lock pending_dir before transaction */
1451 down(&mds->mds_pending_dir->d_inode->i_sem);
1452 cleanup_phase = 5; /* up(&pending_dir->i_sem) */
1453 } else if (S_ISREG(child_inode->i_mode)) {
1454 mds_pack_inode2fid(&body->fid1, child_inode);
1455 mds_pack_inode2body(body, child_inode);
1456 mds_pack_md(obd, req->rq_repmsg, offset + 1, body,
1457 child_inode, MDS_PACK_MD_LOCK);
1461 /* Step 4: Do the unlink: we already verified ur_mode above (bug 72) */
1462 switch (child_inode->i_mode & S_IFMT) {
1464 /* Drop any lingering child directories before we start our
1465 * transaction, to avoid doing multiple inode dirty/delete
1466 * in our compound transaction (bug 1321). */
1467 shrink_dcache_parent(dchild);
1468 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_RMDIR,
1471 GOTO(cleanup, rc = PTR_ERR(handle));
1472 rc = vfs_rmdir(dparent->d_inode, dchild);
/* Regular file: size the transaction by the stripe count so the
 * llog unlink records fit in the same transaction. */
1475 struct lov_mds_md *lmm = lustre_msg_buf(req->rq_repmsg,
1477 handle = fsfilt_start_log(obd, dparent->d_inode,
1478 FSFILT_OP_UNLINK, NULL,
1479 le32_to_cpu(lmm->lmm_stripe_count));
1481 GOTO(cleanup, rc = PTR_ERR(handle));
1482 rc = vfs_unlink(dparent->d_inode, dchild);
1490 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_UNLINK,
1493 GOTO(cleanup, rc = PTR_ERR(handle));
1494 rc = vfs_unlink(dparent->d_inode, dchild);
1497 CERROR("bad file type %o unlinking %s\n", rec->ur_mode,
1500 GOTO(cleanup, rc = -EINVAL);
/* Last link is gone: orphan it if still open, otherwise let the
 * client destroy the OST objects using the EA packed above. */
1503 if (rc == 0 && child_inode->i_nlink == 0) {
1504 if (mds_orphan_open_count(child_inode) > 0)
1505 rc = mds_orphan_add_link(rec, obd, dchild);
1508 GOTO(cleanup, rc = 0);
1510 if (!S_ISREG(child_inode->i_mode))
1513 if (!(body->valid & OBD_MD_FLEASIZE)) {
1514 body->valid |=(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1515 OBD_MD_FLATIME | OBD_MD_FLMTIME);
1516 } else if (mds_log_op_unlink(obd, child_inode,
1517 lustre_msg_buf(req->rq_repmsg, offset + 1, 0),
1518 req->rq_repmsg->buflens[offset + 1],
1519 lustre_msg_buf(req->rq_repmsg, offset + 2, 0),
1520 req->rq_repmsg->buflens[offset+2]) > 0){
1521 body->valid |= OBD_MD_FLCOOKIE;
/* Propagate the client-supplied mtime/ctime to the parent dir. */
1531 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
1532 LTIME_S(iattr.ia_mtime) = rec->ur_time;
1533 LTIME_S(iattr.ia_ctime) = rec->ur_time;
1535 err = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
1537 CERROR("error on parent setattr: rc = %d\n", err);
1540 rc = mds_finish_transno(mds, dparent ? dparent->d_inode : NULL,
1541 handle, req, rc, 0);
1543 (void)obd_set_info(mds->mds_osc_exp, strlen("unlinked"),
1544 "unlinked", 0, NULL);
/* Unwind: each case falls through to release earlier resources. */
1545 switch(cleanup_phase) {
1546 case 5: /* pending_dir semaphore */
1547 up(&mds->mds_pending_dir->d_inode->i_sem);
1548 case 4: /* child inode semaphore */
1549 MDS_UP_READ_ORPHAN_SEM(child_inode);
1550 case 3: /* child ino-reuse lock */
1551 if (rc && body != NULL) {
1552 // Don't unlink the OST objects if the MDS unlink failed
1556 ldlm_lock_decref(&child_reuse_lockh, LCK_EX);
1558 ptlrpc_save_lock(req, &child_reuse_lockh, LCK_EX);
1559 case 2: /* child lock */
1560 ldlm_lock_decref(&child_lockh, LCK_EX);
1561 case 1: /* child and parent dentry, parent lock */
1563 ldlm_lock_decref(&parent_lockh, LCK_PW);
1565 ptlrpc_save_lock(req, &parent_lockh, LCK_PW);
1572 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1575 req->rq_status = rc;
1577 /* trigger dqrel on the owner of child and parent */
1578 mds_adjust_qunit(obd, child_uid, child_gid, parent_uid, parent_gid, rc);
/*
 * Handle a client REINT_LINK request: create a hard link named
 * rec->ur_name in the directory rec->ur_fid2 pointing at the inode
 * rec->ur_fid1.
 *
 * Both the source inode and the target directory are located by FID and
 * locked EX in resource order (enqueue_ordered_locks); the new name is
 * then looked up under the locks and must not already exist.  The
 * vfs_link() runs inside a fsfilt transaction committed through
 * mds_finish_transno().  On success both locks are handed to
 * ptlrpc_save_lock() until the reply is sent.
 */
1582 static int mds_reint_link(struct mds_update_record *rec, int offset,
1583 struct ptlrpc_request *req,
1584 struct lustre_handle *lh)
1586 struct obd_device *obd = req->rq_export->exp_obd;
1587 struct dentry *de_src = NULL;
1588 struct dentry *de_tgt_dir = NULL;
1589 struct dentry *dchild = NULL;
1590 struct mds_obd *mds = mds_req2mds(req);
1591 struct lustre_handle *handle = NULL, tgt_dir_lockh, src_lockh;
1592 struct ldlm_res_id src_res_id = { .name = {0} };
1593 struct ldlm_res_id tgt_dir_res_id = { .name = {0} };
1594 int rc = 0, cleanup_phase = 0;
1597 LASSERT(offset == 0);
1599 DEBUG_REQ(D_INODE, req, "original "LPU64"/%u to "LPU64"/%u %s",
1600 rec->ur_fid1->id, rec->ur_fid1->generation,
1601 rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_name);
1603 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1605 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_LINK))
1606 GOTO(cleanup, rc = -ENOENT);
1608 /* Step 1: Lookup the source inode and target directory by FID */
1609 de_src = mds_fid2dentry(mds, rec->ur_fid1, NULL);
1611 GOTO(cleanup, rc = PTR_ERR(de_src));
1613 cleanup_phase = 1; /* source dentry */
1615 de_tgt_dir = mds_fid2dentry(mds, rec->ur_fid2, NULL);
1616 if (IS_ERR(de_tgt_dir)) {
1617 rc = PTR_ERR(de_tgt_dir);
1622 cleanup_phase = 2; /* target directory dentry */
1624 CDEBUG(D_INODE, "linking %.*s/%s to inode %lu\n",
1625 de_tgt_dir->d_name.len, de_tgt_dir->d_name.name, rec->ur_name,
1626 de_src->d_inode->i_ino);
1628 /* Step 2: Take the two locks */
1629 src_res_id.name[0] = de_src->d_inode->i_ino;
1630 src_res_id.name[1] = de_src->d_inode->i_generation;
1631 tgt_dir_res_id.name[0] = de_tgt_dir->d_inode->i_ino;
1632 tgt_dir_res_id.name[1] = de_tgt_dir->d_inode->i_generation;
1634 rc = enqueue_ordered_locks(obd, &src_res_id, &src_lockh, LCK_EX,
1635 &tgt_dir_res_id, &tgt_dir_lockh, LCK_EX);
1639 cleanup_phase = 3; /* locks */
1641 /* Step 3: Lookup the child */
1642 dchild = ll_lookup_one_len(rec->ur_name, de_tgt_dir, rec->ur_namelen-1);
1643 if (IS_ERR(dchild)) {
1644 rc = PTR_ERR(dchild);
/* EPERM/EACCES are expected permission denials; don't spam logs. */
1645 if (rc != -EPERM && rc != -EACCES)
1646 CERROR("child lookup error %d\n", rc);
1650 cleanup_phase = 4; /* child dentry */
1652 if (dchild->d_inode) {
1653 CDEBUG(D_INODE, "child exists (dir %lu, name %s)\n",
1654 de_tgt_dir->d_inode->i_ino, rec->ur_name);
1659 /* Step 4: Do it. */
1660 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_LINK_WRITE, de_src->d_inode->i_sb);
1662 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
1663 GOTO(cleanup, rc = -EROFS);
1665 handle = fsfilt_start(obd, de_tgt_dir->d_inode, FSFILT_OP_LINK, NULL);
1666 if (IS_ERR(handle)) {
1667 rc = PTR_ERR(handle);
1671 rc = vfs_link(de_src, de_tgt_dir->d_inode, dchild);
1672 if (rc && rc != -EPERM && rc != -EACCES)
1673 CERROR("vfs_link error %d\n", rc);
1675 rc = mds_finish_transno(mds, de_tgt_dir ? de_tgt_dir->d_inode : NULL,
1676 handle, req, rc, 0);
/* Unwind: cases fall through to release earlier resources. */
1679 switch (cleanup_phase) {
1680 case 4: /* child dentry */
1684 ldlm_lock_decref(&src_lockh, LCK_EX);
1685 ldlm_lock_decref(&tgt_dir_lockh, LCK_EX);
1687 ptlrpc_save_lock(req, &src_lockh, LCK_EX);
1688 ptlrpc_save_lock(req, &tgt_dir_lockh, LCK_EX);
1690 case 2: /* target dentry */
1692 case 1: /* source dentry */
1697 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1700 req->rq_status = rc;
1704 /* The idea here is that we need to get four locks in the end:
1705 * one on each parent directory, one on each child. We need to take
1706 * these locks in some kind of order (to avoid deadlocks), and the order
1707 * I selected is "increasing resource number" order. We need to look up
1708 * the children, however, before we know what the resource number(s) are.
1709 * Thus the following plan:
1711 * 1,2. Look up the parents
1712 * 3,4. Look up the children
1713 * 5. Take locks on the parents and children, in order
1714 * 6. Verify that the children haven't changed since they were looked up
1716 * If there was a race and the children changed since they were first looked
1717 * up, it is possible that mds_verify_child() will be able to just grab the
1718 * lock on the new child resource (if it has a higher resource than any other)
1719 * but we need to compare against not only its parent, but also against the
1720 * parent and child of the "other half" of the rename, hence maxres_{src,tgt}.
1722 * We need the fancy igrab() on the child inodes because we aren't holding a
1723 * lock on the parent after the lookup is done, so dentry->d_inode may change
1724 * at any time, and igrab() itself doesn't like getting passed a NULL argument.
/*
 * Look up both parent directories and both children of a rename, then take
 * all four DLM locks in resource order (see the plan in the comment block
 * above).  After locking, each child is re-verified with mds_verify_child()
 * against the maximum resource of the other half of the rename.  On
 * success, dentry references are returned through the four out-pointers and
 * the locks through dlm_handles[0..3]; cleanup phases unwind on error.
 */
1726 static int mds_get_parents_children_locked(struct obd_device *obd,
1727 struct mds_obd *mds,
1728 struct ll_fid *p1_fid,
1729 struct dentry **de_srcdirp,
1730 struct ll_fid *p2_fid,
1731 struct dentry **de_tgtdirp,
1733 const char *old_name, int old_len,
1734 struct dentry **de_oldp,
1735 const char *new_name, int new_len,
1736 struct dentry **de_newp,
1737 struct lustre_handle *dlm_handles,
1740 struct ldlm_res_id p1_res_id = { .name = {0} };
1741 struct ldlm_res_id p2_res_id = { .name = {0} };
1742 struct ldlm_res_id c1_res_id = { .name = {0} };
1743 struct ldlm_res_id c2_res_id = { .name = {0} };
1744 struct ldlm_res_id *maxres_src, *maxres_tgt;
1745 struct inode *inode;
1746 int rc = 0, cleanup_phase = 0;
1749 /* Step 1: Lookup the source directory */
1750 *de_srcdirp = mds_fid2dentry(mds, p1_fid, NULL);
1751 if (IS_ERR(*de_srcdirp))
1752 GOTO(cleanup, rc = PTR_ERR(*de_srcdirp));
1754 cleanup_phase = 1; /* source directory dentry */
1756 p1_res_id.name[0] = (*de_srcdirp)->d_inode->i_ino;
1757 p1_res_id.name[1] = (*de_srcdirp)->d_inode->i_generation;
1759 /* Step 2: Lookup the target directory */
1760 if (memcmp(p1_fid, p2_fid, sizeof(*p1_fid)) == 0) {
1761 *de_tgtdirp = dget(*de_srcdirp);
1763 *de_tgtdirp = mds_fid2dentry(mds, p2_fid, NULL);
1764 if (IS_ERR(*de_tgtdirp)) {
1765 rc = PTR_ERR(*de_tgtdirp);
1771 cleanup_phase = 2; /* target directory dentry */
1773 p2_res_id.name[0] = (*de_tgtdirp)->d_inode->i_ino;
1774 p2_res_id.name[1] = (*de_tgtdirp)->d_inode->i_generation;
1776 /* Step 3: Lookup the source child entry */
1777 *de_oldp = ll_lookup_one_len(old_name, *de_srcdirp, old_len - 1);
1778 if (IS_ERR(*de_oldp)) {
1779 rc = PTR_ERR(*de_oldp);
1780 CERROR("old child lookup error (%.*s): %d\n",
1781 old_len - 1, old_name, rc);
1785 cleanup_phase = 3; /* original name dentry */
/* igrab() pins the source child; without a parent lock d_inode may
 * change at any time (see the comment block above this function). */
1787 inode = (*de_oldp)->d_inode;
1789 inode = igrab(inode);
1791 GOTO(cleanup, rc = -ENOENT);
1793 c1_res_id.name[0] = inode->i_ino;
1794 c1_res_id.name[1] = inode->i_generation;
1798 /* Step 4: Lookup the target child entry */
1799 *de_newp = ll_lookup_one_len(new_name, *de_tgtdirp, new_len - 1);
1800 if (IS_ERR(*de_newp)) {
1801 rc = PTR_ERR(*de_newp);
/* Fixed: report the NEW name here; this previously printed the
 * old name/length by copy-paste from Step 3. */
1802 CERROR("new child lookup error (%.*s): %d\n",
1803 new_len - 1, new_name, rc);
1807 cleanup_phase = 4; /* target dentry */
1809 inode = (*de_newp)->d_inode;
1811 inode = igrab(inode);
1815 c2_res_id.name[0] = inode->i_ino;
1816 c2_res_id.name[1] = inode->i_generation;
1820 /* Step 5: Take locks on the parents and child(ren) */
1821 maxres_src = &p1_res_id;
1822 maxres_tgt = &p2_res_id;
1823 cleanup_phase = 4; /* target dentry */
1825 if (c1_res_id.name[0] != 0 && res_gt(&c1_res_id, &p1_res_id))
1826 maxres_src = &c1_res_id;
1827 if (c2_res_id.name[0] != 0 && res_gt(&c2_res_id, &p2_res_id))
1828 maxres_tgt = &c2_res_id;
1830 rc = enqueue_4ordered_locks(obd, &p1_res_id,&dlm_handles[0],parent_mode,
1831 &p2_res_id, &dlm_handles[1], parent_mode,
1832 &c1_res_id, &dlm_handles[2], child_mode,
1833 &c2_res_id, &dlm_handles[3], child_mode);
1837 cleanup_phase = 6; /* parent and child(ren) locks */
1839 /* Step 6a: Re-lookup source child to verify it hasn't changed */
1840 rc = mds_verify_child(obd, &p1_res_id, &dlm_handles[0], *de_srcdirp,
1841 parent_mode, &c1_res_id, &dlm_handles[2], de_oldp,
1842 child_mode, old_name, old_len, maxres_tgt);
1844 if (c2_res_id.name[0] != 0)
1845 ldlm_lock_decref(&dlm_handles[3], child_mode);
1846 ldlm_lock_decref(&dlm_handles[1], parent_mode);
/* The rename source must exist under the lock. */
1853 if ((*de_oldp)->d_inode == NULL)
1854 GOTO(cleanup, rc = -ENOENT);
1856 /* Step 6b: Re-lookup target child to verify it hasn't changed */
1857 rc = mds_verify_child(obd, &p2_res_id, &dlm_handles[1], *de_tgtdirp,
1858 parent_mode, &c2_res_id, &dlm_handles[3], de_newp,
1859 child_mode, new_name, new_len, maxres_src);
1861 ldlm_lock_decref(&dlm_handles[2], child_mode);
1862 ldlm_lock_decref(&dlm_handles[0], parent_mode);
/* Error unwind: fall through to release in reverse order. */
1872 switch (cleanup_phase) {
1873 case 6: /* child lock(s) */
1874 if (c2_res_id.name[0] != 0)
1875 ldlm_lock_decref(&dlm_handles[3], child_mode);
1876 if (c1_res_id.name[0] != 0)
1877 ldlm_lock_decref(&dlm_handles[2], child_mode);
1878 case 5: /* parent locks */
1879 ldlm_lock_decref(&dlm_handles[1], parent_mode);
1880 ldlm_lock_decref(&dlm_handles[0], parent_mode);
1881 case 4: /* target dentry */
1883 case 3: /* source dentry */
1885 case 2: /* target directory dentry */
1886 l_dput(*de_tgtdirp);
1887 case 1: /* source directory dentry */
1888 l_dput(*de_srcdirp);
/*
 * Handle a client REINT_RENAME request: move rec->ur_name from directory
 * rec->ur_fid1 to rec->ur_tgt in directory rec->ur_fid2.
 *
 * All four locks (both parents PW, both children EX) are taken via
 * mds_get_parents_children_locked().  If the rename will remove an
 * existing target, its striping EA is packed into the reply so the client
 * can destroy the OST objects, and a still-open target is moved into
 * PENDING as an orphan after the vfs_rename().  The rename runs inside a
 * fsfilt transaction committed through mds_finish_transno(); on success
 * the locks are handed to ptlrpc_save_lock() until the reply is sent.
 */
1895 static int mds_reint_rename(struct mds_update_record *rec, int offset,
1896 struct ptlrpc_request *req,
1897 struct lustre_handle *lockh)
1899 struct obd_device *obd = req->rq_export->exp_obd;
1900 struct dentry *de_srcdir = NULL;
1901 struct dentry *de_tgtdir = NULL;
1902 struct dentry *de_old = NULL;
1903 struct dentry *de_new = NULL;
1904 struct inode *old_inode = NULL, *new_inode = NULL;
1905 struct mds_obd *mds = mds_req2mds(req);
1906 struct lustre_handle dlm_handles[4];
1907 struct mds_body *body = NULL;
1908 struct lov_mds_md *lmm = NULL;
1909 int rc = 0, lock_count = 3, cleanup_phase = 0;
1910 void *handle = NULL;
1913 LASSERT(offset == 0);
1915 DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u %s to "LPU64"/%u %s",
1916 rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name,
1917 rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_tgt);
1919 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1921 rc = mds_get_parents_children_locked(obd, mds, rec->ur_fid1, &de_srcdir,
1922 rec->ur_fid2, &de_tgtdir, LCK_PW,
1923 rec->ur_name, rec->ur_namelen,
1924 &de_old, rec->ur_tgt,
1925 rec->ur_tgtlen, &de_new,
1926 dlm_handles, LCK_EX);
1930 cleanup_phase = 1; /* parent(s), children, locks */
1932 old_inode = de_old->d_inode;
1933 new_inode = de_new->d_inode;
1935 if (new_inode != NULL)
1938 /* sanity check for src inode */
1939 if (old_inode->i_ino == de_srcdir->d_inode->i_ino ||
1940 old_inode->i_ino == de_tgtdir->d_inode->i_ino)
1941 GOTO(cleanup, rc = -EINVAL);
1943 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
1944 GOTO(cleanup, rc = -EROFS);
1946 if (new_inode == NULL)
1950 cleanup_phase = 2; /* iput(new_inode) when finished */
1952 /* sanity check for dest inode */
1953 if (new_inode->i_ino == de_srcdir->d_inode->i_ino ||
1954 new_inode->i_ino == de_tgtdir->d_inode->i_ino)
1955 GOTO(cleanup, rc = -EINVAL);
/* Renaming a file onto itself is a no-op that succeeds. */
1957 if (old_inode == new_inode)
1958 GOTO(cleanup, rc = 0);
1960 /* if we are about to remove the target at first, pass the EA of
1961 * that inode to client to perform and cleanup on OST */
1962 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
1963 LASSERT(body != NULL);
1965 /* child orphan sem protects orphan_dec_test && is_orphan race */
1966 MDS_DOWN_READ_ORPHAN_SEM(new_inode);
1967 cleanup_phase = 3; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
/* Target is about to lose its last link (nlink 2 for a dir counts
 * "." as well): either orphan it or pack its EA for OST cleanup. */
1969 if ((S_ISDIR(new_inode->i_mode) && new_inode->i_nlink == 2) ||
1970 new_inode->i_nlink == 1) {
1971 if (mds_orphan_open_count(new_inode) > 0) {
1972 /* need to lock pending_dir before transaction */
1973 down(&mds->mds_pending_dir->d_inode->i_sem);
1974 cleanup_phase = 4; /* up(&pending_dir->i_sem) */
1975 } else if (S_ISREG(new_inode->i_mode)) {
1976 mds_pack_inode2fid(&body->fid1, new_inode);
1977 mds_pack_inode2body(body, new_inode);
1978 mds_pack_md(obd, req->rq_repmsg, 1, body, new_inode,
1984 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_RENAME_WRITE,
1985 de_srcdir->d_inode->i_sb);
1987 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1988 /* Check if we are moving old entry into its child. 2.6 does not
1989 check for this in vfs_rename() anymore */
1990 if (is_subdir(de_new, de_old))
1991 GOTO(cleanup, rc = -EINVAL);
1994 lmm = lustre_msg_buf(req->rq_repmsg, 1, 0);
1995 handle = fsfilt_start_log(obd, de_tgtdir->d_inode, FSFILT_OP_RENAME,
1996 NULL, le32_to_cpu(lmm->lmm_stripe_count));
1999 GOTO(cleanup, rc = PTR_ERR(handle));
/* NOTE(review): d_fsdata appears to pass the request down to the
 * fs hooks during vfs_rename() — confirm against the fsfilt code. */
2002 de_old->d_fsdata = req;
2003 de_new->d_fsdata = req;
2005 rc = vfs_rename(de_srcdir->d_inode, de_old, de_tgtdir->d_inode, de_new);
/* Target's last link is gone: orphan it if still open, otherwise
 * let the client destroy the OST objects via the packed EA. */
2008 if (rc == 0 && new_inode != NULL && new_inode->i_nlink == 0) {
2009 if (mds_orphan_open_count(new_inode) > 0)
2010 rc = mds_orphan_add_link(rec, obd, de_new);
2013 GOTO(cleanup, rc = 0);
2015 if (!S_ISREG(new_inode->i_mode))
2018 if (!(body->valid & OBD_MD_FLEASIZE)) {
2019 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
2020 OBD_MD_FLATIME | OBD_MD_FLMTIME);
2021 } else if (mds_log_op_unlink(obd, new_inode,
2022 lustre_msg_buf(req->rq_repmsg,1,0),
2023 req->rq_repmsg->buflens[1],
2024 lustre_msg_buf(req->rq_repmsg,2,0),
2025 req->rq_repmsg->buflens[2]) > 0) {
2026 body->valid |= OBD_MD_FLCOOKIE;
2032 rc = mds_finish_transno(mds, de_tgtdir ? de_tgtdir->d_inode : NULL,
2033 handle, req, rc, 0);
/* Unwind: cases fall through; lock_count == 4 only when a distinct
 * target child lock was taken in dlm_handles[3]. */
2035 switch (cleanup_phase) {
2037 up(&mds->mds_pending_dir->d_inode->i_sem);
2039 MDS_UP_READ_ORPHAN_SEM(new_inode);
2044 if (lock_count == 4)
2045 ldlm_lock_decref(&(dlm_handles[3]), LCK_EX);
2046 ldlm_lock_decref(&(dlm_handles[2]), LCK_EX);
2047 ldlm_lock_decref(&(dlm_handles[1]), LCK_PW);
2048 ldlm_lock_decref(&(dlm_handles[0]), LCK_PW);
2050 if (lock_count == 4)
2051 ptlrpc_save_lock(req,&(dlm_handles[3]), LCK_EX);
2052 ptlrpc_save_lock(req, &(dlm_handles[2]), LCK_EX);
2053 ptlrpc_save_lock(req, &(dlm_handles[1]), LCK_PW);
2054 ptlrpc_save_lock(req, &(dlm_handles[0]), LCK_PW);
2063 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
2066 req->rq_status = rc;
/* Handler signature for a reintegration operation. */
2070 typedef int (*mds_reinter)(struct mds_update_record *, int offset,
2071 struct ptlrpc_request *, struct lustre_handle *);
/* Dispatch table: one handler per REINT_* opcode, indexed by
 * rec->ur_opcode in mds_reint_rec() below.  Uses the standard C99
 * designated-initializer form ("[idx] = val"); the previous obsolete
 * GNU form without '=' is rejected by modern compilers. */
2073 static mds_reinter reinters[REINT_MAX] = {
2074 [REINT_SETATTR] = mds_reint_setattr,
2075 [REINT_CREATE] = mds_reint_create,
2076 [REINT_LINK] = mds_reint_link,
2077 [REINT_UNLINK] = mds_reint_unlink,
2078 [REINT_RENAME] = mds_reint_rename,
2079 [REINT_OPEN] = mds_open
2082 int mds_reint_rec(struct mds_update_record *rec, int offset,
2083 struct ptlrpc_request *req, struct lustre_handle *lockh)
2085 struct obd_device *obd = req->rq_export->exp_obd;
2086 struct obd_run_ctxt saved;
2090 /* checked by unpacker */
2091 LASSERT(rec->ur_opcode < REINT_MAX && reinters[rec->ur_opcode] != NULL);
2093 push_ctxt(&saved, &obd->obd_ctxt, &rec->ur_uc);
2094 rc = reinters[rec->ur_opcode] (rec, offset, req, lockh);
2095 pop_ctxt(&saved, &obd->obd_ctxt, &rec->ur_uc);