1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/mds/mds_reint.c
5 * Lustre Metadata Server (mds) reintegration routines
7 * Copyright (C) 2002-2005 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
12 * This file is part of the Lustre file system, http://www.lustre.org
13 * Lustre is a trademark of Cluster File Systems, Inc.
15 * You may have signed or agreed to another license before downloading
16 * this software. If so, you are bound by the terms and conditions
17 * of that agreement, and the following does not apply to you. See the
18 * LICENSE file included with this distribution for more information.
20 * If you did not agree to a different license, then this copy of Lustre
21 * is open source software; you can redistribute it and/or modify it
22 * under the terms of version 2 of the GNU General Public License as
23 * published by the Free Software Foundation.
25 * In either case, Lustre is distributed in the hope that it will be
26 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
27 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * license text for more details.
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_MDS
37 #include <obd_support.h>
38 #include <obd_class.h>
40 #include <lustre_lib.h>
41 #include <lustre/lustre_idl.h>
42 #include <lustre_mds.h>
43 #include <lustre_dlm.h>
44 #include <lustre_fsfilt.h>
45 #include <lustre_ucache.h>
47 #include "mds_internal.h"
/* Journal-commit callback: when the backing filesystem commits the
 * transaction, forward the committed transno to the generic obd layer
 * (obd_transno_commit_cb) so the last-committed value sent to clients
 * advances.  NOTE(review): this listing is incomplete — the tail of the
 * parameter list (presumably "int error") and the braces are missing. */
49 void mds_commit_cb(struct obd_device *obd, __u64 transno, void *data,
52         obd_transno_commit_cb(obd, transno, error);
/* Deferred llog-cancel payload: carries the striping EA (mlcd_lmm) and a
 * trailing variable-length array of llog cookies to be cancelled from the
 * commit callback (mds_cancel_cookies_cb).  The zero-length-array idiom
 * ("[0]") is the pre-C99 flexible-array-member form.
 * NOTE(review): listing incomplete — size/length fields referenced
 * elsewhere (mlcd_size, mlcd_cookielen, mlcd_eadatalen) are missing here. */
55 struct mds_logcancel_data {
56         struct lov_mds_md *mlcd_lmm;
60         struct llog_cookie mlcd_cookies[0];
/* Commit callback that cancels the llog cookies recorded for a setattr
 * (chown/chgrp) once the MDS transaction is safely on disk.  Flow, as far
 * as visible: forward the transno, unpack the on-disk LOV EA into an
 * in-memory lsm, revalidate it, then llog_cancel() the cookie array and
 * free the mds_logcancel_data allocation.
 * NOTE(review): listing is incomplete — error-path branches and braces
 * between the visible statements are missing. */
64 static void mds_cancel_cookies_cb(struct obd_device *obd, __u64 transno,
65 void *cb_data, int error)
67 struct mds_logcancel_data *mlcd = cb_data;
68 struct lov_stripe_md *lsm = NULL;
69 struct llog_ctxt *ctxt;
72 obd_transno_commit_cb(obd, transno, error);
74 CDEBUG(D_HA, "cancelling %d cookies\n",
75 (int)(mlcd->mlcd_cookielen / sizeof(*mlcd->mlcd_cookies)));
/* Unpack the packed LOV EA so llog_cancel can address per-stripe logs. */
77 rc = obd_unpackmd(obd->u.mds.mds_osc_exp, &lsm, mlcd->mlcd_lmm,
78 mlcd->mlcd_eadatalen);
80 CERROR("bad LSM cancelling %d log cookies: rc %d\n",
81 (int)(mlcd->mlcd_cookielen/sizeof(*mlcd->mlcd_cookies)),
84 ///* XXX 0 normally, SENDNOW for debug */);
85 rc = obd_checkmd(obd->u.mds.mds_osc_exp, obd->obd_self_export,
88 CERROR("Can not revalidate lsm %p \n", lsm);
/* Context index: subsystem of the first cookie + 1 (Lustre convention). */
90 ctxt = llog_get_context(obd,mlcd->mlcd_cookies[0].lgc_subsys+1);
91 rc = llog_cancel(ctxt, lsm, mlcd->mlcd_cookielen /
92 sizeof(*mlcd->mlcd_cookies),
93 mlcd->mlcd_cookies, OBD_LLOG_FL_SENDNOW);
95 CERROR("error cancelling %d log cookies: rc %d\n",
96 (int)(mlcd->mlcd_cookielen /
97 sizeof(*mlcd->mlcd_cookies)), rc);
/* mlcd was OBD_ALLOC'ed by the setattr path; this is its single free. */
100 OBD_FREE(mlcd, mlcd->mlcd_size);
/* Finish an MDS update transaction: assign/record the transaction number,
 * update the per-client last_rcvd slot (mcd) for replay, register the
 * commit callback, and commit the journal handle.
 *
 * Visible behavior: a NULL handle means "start our own transaction" against
 * the last_rcvd file's inode; transno 0 means allocate a fresh one under
 * mds_transno_lock, otherwise (replay) only advance mds_last_transno
 * monotonically.  NOTE(review): listing incomplete — locals (off, transno,
 * err, log_pri), several branches, and the function's closing/return lines
 * are missing from this extraction. */
103 /* Assumes caller has already pushed us into the kernel context. */
104 int mds_finish_transno(struct mds_obd *mds, struct inode *inode, void *handle,
105 struct ptlrpc_request *req, int rc, __u32 op_data)
107 struct mds_export_data *med = &req->rq_export->exp_mds_data;
108 struct mds_client_data *mcd = med->med_mcd;
109 struct obd_device *obd = req->rq_export->exp_obd;
116 if (IS_ERR(handle)) {
121 /* if the export has already been failed, we have no last_rcvd slot */
122 if (req->rq_export->exp_failed) {
123 CWARN("commit transaction for disconnected client %s: rc %d\n",
124 req->rq_export->exp_client_uuid.uuid, rc);
132 if (handle == NULL) {
133 /* if we're starting our own xaction, use our own inode */
134 inode = mds->mds_rcvd_filp->f_dentry->d_inode;
135 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
136 if (IS_ERR(handle)) {
137 CERROR("fsfilt_start: %ld\n", PTR_ERR(handle));
138 RETURN(PTR_ERR(handle));
142 off = med->med_lr_off;
/* Replay requests arrive with a pre-assigned transno in the request. */
144 transno = req->rq_reqmsg->transno;
147 CERROR("%s: replay %s transno "LPU64" failed: rc %d\n",
149 libcfs_nid2str(req->rq_export->exp_connection->c_peer.nid),
153 } else if (transno == 0) {
154 spin_lock(&mds->mds_transno_lock);
155 transno = ++mds->mds_last_transno;
156 spin_unlock(&mds->mds_transno_lock);
/* Replay case: keep mds_last_transno monotonically non-decreasing. */
158 spin_lock(&mds->mds_transno_lock);
159 if (transno > mds->mds_last_transno)
160 mds->mds_last_transno = transno;
161 spin_unlock(&mds->mds_transno_lock);
/* Publish the transno in the reply and persist it in the client slot
 * (little-endian on disk) so the client can replay after recovery. */
163 req->rq_repmsg->transno = req->rq_transno = transno;
164 mcd->mcd_last_transno = cpu_to_le64(transno);
165 mcd->mcd_last_xid = cpu_to_le64(req->rq_xid);
166 mcd->mcd_last_result = cpu_to_le32(rc);
167 mcd->mcd_last_data = cpu_to_le32(op_data);
170 CERROR("client idx %d has offset %lld\n", med->med_lr_idx, off);
/* Commit callback fires once the journal commits this transno. */
173 fsfilt_add_journal_cb(req->rq_export->exp_obd, transno, handle,
174 mds_commit_cb, NULL);
175 err = fsfilt_write_record(obd, mds->mds_rcvd_filp, mcd,
176 sizeof(*mcd), &off, 0);
185 DEBUG_REQ_EX(log_pri, req,
186 "wrote trans #"LPU64" rc %d client %s at idx %u: err = %d",
187 transno, rc, mcd->mcd_uuid, med->med_lr_idx, err);
189 err = mds_lov_write_objids(obd);
195 CDEBUG_EX(log_pri, "wrote objids: err = %d\n", err);
198 err = fsfilt_commit(obd, inode, handle, 0);
200 CERROR("error committing transaction: %d\n", err);
/* Normalize and permission-check an incoming iattr before applying it,
 * mirroring the kernel's sys_chmod/chown_common/utimes preamble to
 * inode_setattr: default missing c/m/atime to "now", enforce
 * immutable/append restrictions, and clear setuid/setgid on ownership
 * change.  NOTE(review): listing incomplete — the RETURN paths for the
 * permission checks, the "error" local declaration, and the closing of
 * several branches are missing from this extraction. */
208 /* this gives the same functionality as the code between
209 * sys_chmod and inode_setattr
210 * chown_common and inode_setattr
211 * utimes and inode_setattr
213 int mds_fix_attr(struct inode *inode, struct mds_update_record *rec)
215 time_t now = CURRENT_SECONDS;
216 struct iattr *attr = &rec->ur_iattr;
217 unsigned int ia_valid = attr->ia_valid;
221 if (ia_valid & ATTR_RAW)
222 attr->ia_valid &= ~ATTR_RAW;
/* Any time not explicitly supplied by the client defaults to "now". */
224 if (!(ia_valid & ATTR_CTIME_SET))
225 LTIME_S(attr->ia_ctime) = now;
227 attr->ia_valid &= ~ATTR_CTIME_SET;
228 if (!(ia_valid & ATTR_ATIME_SET))
229 LTIME_S(attr->ia_atime) = now;
230 if (!(ia_valid & ATTR_MTIME_SET))
231 LTIME_S(attr->ia_mtime) = now;
/* Immutable/append-only inodes admit only flag changes, nothing else. */
233 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
234 RETURN((attr->ia_valid & ~ATTR_ATTR_FLAG) ? -EPERM : 0);
237 if ((ia_valid & (ATTR_MTIME|ATTR_ATIME)) == (ATTR_MTIME|ATTR_ATIME)) {
238 if (current->fsuid != inode->i_uid &&
239 (error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
243 if (ia_valid & ATTR_SIZE &&
244 /* NFSD hack for open(O_CREAT|O_TRUNC)=mknod+truncate (bug 5781) */
245 !(rec->ur_uc.luc_fsuid == inode->i_uid &&
246 ia_valid & MDS_OPEN_OWNEROVERRIDE)) {
247 if ((error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
251 if (ia_valid & (ATTR_UID | ATTR_GID)) {
254 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
/* -1 means "leave unchanged": substitute current owner/group/mode. */
256 if (attr->ia_uid == (uid_t) -1)
257 attr->ia_uid = inode->i_uid;
258 if (attr->ia_gid == (gid_t) -1)
259 attr->ia_gid = inode->i_gid;
260 if (!(ia_valid & ATTR_MODE))
261 attr->ia_mode = inode->i_mode;
263 * If the user or group of a non-directory has been
264 * changed by a non-root user, remove the setuid bit.
265 * 19981026 David C Niemi <niemi@tux.org>
267 * Changed this to apply to all users, including root,
268 * to avoid some races. This is the behavior we had in
269 * 2.0. The check for non-root was definitely wrong
270 * for 2.2 anyway, as it should have been using
271 * CAP_FSETID rather than fsuid -- 19990830 SD.
273 if ((inode->i_mode & S_ISUID) == S_ISUID &&
274 !S_ISDIR(inode->i_mode)) {
275 attr->ia_mode &= ~S_ISUID;
276 attr->ia_valid |= ATTR_MODE;
279 * Likewise, if the user or group of a non-directory
280 * has been changed by a non-root user, remove the
281 * setgid bit UNLESS there is no group execute bit
282 * (this would be a file marked for mandatory
283 * locking). 19981026 David C Niemi <niemi@tux.org>
285 * Removed the fsuid check (see the comment above) --
288 if (((inode->i_mode & (S_ISGID | S_IXGRP)) ==
289 (S_ISGID | S_IXGRP)) && !S_ISDIR(inode->i_mode)) {
290 attr->ia_mode &= ~S_ISGID;
291 attr->ia_valid |= ATTR_MODE;
293 } else if (ia_valid & ATTR_MODE) {
294 int mode = attr->ia_mode;
296 if (attr->ia_mode == (umode_t)-1)
297 mode = inode->i_mode;
/* Keep the non-permission bits of i_mode; take rwx bits from the caller. */
299 (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
/* On a resent request, find the export's outstanding reply state with the
 * same xid, move its saved DLM locks onto the new request (so the resent
 * reply carries the same ack-locks), and schedule the old "difficult"
 * reply for completion.  Lock order is exp_lock then srv_lock (see CAVEAT
 * below).  NOTE(review): listing incomplete — loop-exit/continue lines,
 * the "i"/"flags" declarations, and some braces are missing. */
304 void mds_steal_ack_locks(struct ptlrpc_request *req)
306 struct obd_export *exp = req->rq_export;
307 struct list_head *tmp;
308 struct ptlrpc_reply_state *oldrep;
309 struct ptlrpc_service *svc;
313 /* CAVEAT EMPTOR: spinlock order */
314 spin_lock_irqsave (&exp->exp_lock, flags);
315 list_for_each (tmp, &exp->exp_outstanding_replies) {
316 oldrep = list_entry(tmp, struct ptlrpc_reply_state,rs_exp_list);
/* Only the reply whose xid matches the resent request is a candidate. */
318 if (oldrep->rs_xid != req->rq_xid)
321 if (oldrep->rs_msg.opc != req->rq_reqmsg->opc)
322 CERROR ("Resent req xid "LPX64" has mismatched opc: "
323 "new %d old %d\n", req->rq_xid,
324 req->rq_reqmsg->opc, oldrep->rs_msg.opc);
326 svc = oldrep->rs_service;
327 spin_lock (&svc->srv_lock);
329 list_del_init (&oldrep->rs_exp_list);
331 CWARN("Stealing %d locks from rs %p x"LPD64".t"LPD64
333 oldrep->rs_nlocks, oldrep,
334 oldrep->rs_xid, oldrep->rs_transno, oldrep->rs_msg.opc,
335 libcfs_nid2str(exp->exp_connection->c_peer.nid));
/* Transfer every saved lock/mode pair to the new request, then zero the
 * count so the old reply state releases nothing. */
337 for (i = 0; i < oldrep->rs_nlocks; i++)
338 ptlrpc_save_lock(req,
339 &oldrep->rs_locks[i],
340 oldrep->rs_modes[i]);
341 oldrep->rs_nlocks = 0;
343 DEBUG_REQ(D_HA, req, "stole locks for");
344 ptlrpc_schedule_difficult_reply (oldrep);
346 spin_unlock (&svc->srv_lock);
349 spin_unlock_irqrestore (&exp->exp_lock, flags);
/* Reconstruct a reply for a resent/replayed request from the client's
 * persistent last_rcvd slot: restore transno and status into the reply,
 * then steal the ack-locks from the previous reply state.
 * NOTE(review): mcd_last_transno/result are stored little-endian on disk
 * (see mds_finish_transno); the byte-swap, if any, is on missing lines. */
352 void mds_req_from_mcd(struct ptlrpc_request *req, struct mds_client_data *mcd)
354 DEBUG_REQ(D_HA, req, "restoring transno "LPD64"/status %d",
355 mcd->mcd_last_transno, mcd->mcd_last_result);
356 req->rq_repmsg->transno = req->rq_transno = mcd->mcd_last_transno;
357 req->rq_repmsg->status = req->rq_status = mcd->mcd_last_result;
359 mds_steal_ack_locks(req);
/* Rebuild the reply for a resent setattr without redoing the operation:
 * restore transno/status from the client slot, look the inode back up by
 * fid, and repack its attributes into the reply body — returning the
 * OST-held attributes (size/times) only if this setattr actually set them.
 * NOTE(review): listing incomplete — the dentry declaration, error
 * handling, and dput/cleanup are on missing lines. */
362 static void reconstruct_reint_setattr(struct mds_update_record *rec,
363 int offset, struct ptlrpc_request *req)
365 struct mds_export_data *med = &req->rq_export->exp_mds_data;
366 struct mds_obd *obd = &req->rq_export->exp_obd->u.mds;
368 struct mds_body *body;
370 mds_req_from_mcd(req, med->med_mcd);
372 de = mds_fid2dentry(obd, rec->ur_fid1, NULL);
374 LASSERT(PTR_ERR(de) == req->rq_status);
378 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
379 mds_pack_inode2fid(&body->fid1, de->d_inode);
380 mds_pack_inode2body(body, de->d_inode);
382 /* Don't return OST-specific attributes if we didn't just set them */
383 if (rec->ur_iattr.ia_valid & ATTR_SIZE)
384 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
385 if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
386 body->valid |= OBD_MD_FLMTIME;
387 if (rec->ur_iattr.ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
388 body->valid |= OBD_MD_FLATIME;
/* Propagate an MDS-side owner change to the OSTs asynchronously: unpack
 * the file's LOV EA into an lsm, build an obdo carrying id/uid/gid (plus
 * fid/generation and llog cookies when supplied), and fire
 * obd_setattr_async through the OSC export.  The lsm is freed before
 * return; the cookies are cancelled later by the OSTs' commit path.
 * NOTE(review): listing incomplete — obdo allocation, several error
 * branches, oa cleanup, and the RETURN are on missing lines. */
393 int mds_osc_setattr_async(struct obd_device *obd, struct inode *inode,
394 struct lov_mds_md *lmm, int lmm_size,
395 struct llog_cookie *logcookies, struct ll_fid *fid)
397 struct mds_obd *mds = &obd->u.mds;
398 struct lov_stripe_md *lsm = NULL;
399 struct obd_trans_info oti = { 0 };
400 struct obdo *oa = NULL;
/* Fault-injection hook for testing the OST-setattr failure path. */
404 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OST_SETATTR))
407 /* first get memory EA */
414 rc = obd_unpackmd(mds->mds_osc_exp, &lsm, lmm, lmm_size);
416 CERROR("Error unpack md %p for inode %lu\n", lmm, inode->i_ino);
420 rc = obd_checkmd(mds->mds_osc_exp, obd->obd_self_export, lsm);
422 CERROR("Error revalidate lsm %p \n", lsm);
427 oa->o_id = lsm->lsm_object_id;
428 oa->o_uid = inode->i_uid;
429 oa->o_gid = inode->i_gid;
430 oa->o_valid = OBD_MD_FLID | OBD_MD_FLUID | OBD_MD_FLGID;
/* Cookies let the OSTs ack the llog records once their setattr commits. */
432 oa->o_valid |= OBD_MD_FLCOOKIE;
433 oti.oti_logcookies = logcookies;
436 LASSERT(fid != NULL);
438 oa->o_generation = fid->generation;
439 oa->o_valid |= OBD_MD_FLFID | OBD_MD_FLGENER;
441 /* do setattr from mds to ost asynchronously */
442 rc = obd_setattr_async(mds->mds_osc_exp, oa, lsm, &oti);
444 CDEBUG(D_INODE, "mds to ost setattr objid 0x"LPX64
445 " on ost error %d\n", lsm->lsm_object_id, rc);
448 obd_free_memmd(mds->mds_osc_exp, &lsm);
/* Reintegrate a client setattr on the MDS.  Visible flow: handle resend
 * via MDS_CHECK_RESENT; look up the inode (taking an EX inodebits lock
 * unless ATTR_FROM_OPEN, where the client already holds one); start a
 * journal (with llog credits when chown/chgrp needs per-stripe records);
 * normalize attrs via mds_fix_attr; apply flags/attrs via fsfilt; handle
 * a striping-EA update in the request; pack the reply body; register the
 * cookie-cancel commit callback; finish the transno; and kick the async
 * MDS->OST setattr.  Quota ids for the old (qcids) and new (qpids)
 * owners are adjusted at the end.
 * NOTE(review): listing incomplete — many error branches, cleanup_phase
 * assignments, fallthrough case labels in the cleanup switch, and the
 * final RETURN are on missing lines; read with the full source at hand. */
453 /* In the raw-setattr case, we lock the child inode.
454 * In the write-back case or if being called from open, the client holds a lock
457 * We use the ATTR_FROM_OPEN flag to tell these cases apart. */
458 static int mds_reint_setattr(struct mds_update_record *rec, int offset,
459 struct ptlrpc_request *req,
460 struct lustre_handle *lh)
462 unsigned int ia_valid = rec->ur_iattr.ia_valid;
463 struct mds_obd *mds = mds_req2mds(req);
464 struct obd_device *obd = req->rq_export->exp_obd;
465 struct mds_body *body;
467 struct inode *inode = NULL;
468 struct lustre_handle lockh;
470 struct mds_logcancel_data *mlcd = NULL;
471 struct lov_mds_md *lmm = NULL;
472 struct llog_cookie *logcookies = NULL;
473 int lmm_size = 0, need_lock = 1, cookie_size = 0;
474 int rc = 0, cleanup_phase = 0, err, locked = 0;
475 unsigned int qcids[MAXQUOTAS] = {0, 0};
476 unsigned int qpids[MAXQUOTAS] = {rec->ur_iattr.ia_uid,
477 rec->ur_iattr.ia_gid};
480 LASSERT(offset == MDS_REQ_REC_OFF);
482 DEBUG_REQ(D_INODE, req, "setattr "LPU64"/%u %x", rec->ur_fid1->id,
483 rec->ur_fid1->generation, rec->ur_iattr.ia_valid);
/* Resent request: rebuild the reply from last_rcvd instead of redoing. */
485 MDS_CHECK_RESENT(req, reconstruct_reint_setattr(rec, offset, req));
487 if (rec->ur_iattr.ia_valid & ATTR_FROM_OPEN ||
488 (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)) {
489 de = mds_fid2dentry(mds, rec->ur_fid1, NULL);
491 GOTO(cleanup, rc = PTR_ERR(de));
492 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
493 GOTO(cleanup, rc = -EROFS);
/* Normal path: take an EX lock; mode/owner changes also invalidate
 * client LOOKUP bits (permission caching). */
495 __u64 lockpart = MDS_INODELOCK_UPDATE;
496 if (rec->ur_iattr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
497 lockpart |= MDS_INODELOCK_LOOKUP;
499 de = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_EX,
502 GOTO(cleanup, rc = PTR_ERR(de));
510 /* save uid/gid for quota acq/rel */
511 qcids[USRQUOTA] = inode->i_uid;
512 qcids[GRPQUOTA] = inode->i_gid;
514 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
515 rec->ur_eadata != NULL) {
516 LOCK_INODE_MUTEX(inode);
520 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_SETATTR_WRITE, inode->i_sb);
522 /* start a log jounal handle if needed */
523 if (S_ISREG(inode->i_mode) &&
524 rec->ur_iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
/* chown/chgrp on a regular file: fetch the LOV EA now so the journal
 * can reserve credits for one llog record per stripe. */
525 lmm_size = mds->mds_max_mdsize;
526 OBD_ALLOC(lmm, lmm_size);
528 GOTO(cleanup, rc = -ENOMEM);
531 rc = mds_get_md(obd, inode, lmm, &lmm_size, need_lock);
536 handle = fsfilt_start_log(obd, inode, FSFILT_OP_SETATTR, NULL,
537 le32_to_cpu(lmm->lmm_stripe_count));
539 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
542 GOTO(cleanup, rc = PTR_ERR(handle));
544 if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
545 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu\n",
546 LTIME_S(rec->ur_iattr.ia_mtime),
547 LTIME_S(rec->ur_iattr.ia_ctime));
548 rc = mds_fix_attr(inode, rec);
552 if (rec->ur_iattr.ia_valid & ATTR_ATTR_FLAG) { /* ioctl */
553 rc = fsfilt_iocontrol(obd, inode, NULL, EXT3_IOC_SETFLAGS,
554 (long)&rec->ur_iattr.ia_attr_flags);
555 } else if (rec->ur_iattr.ia_valid) { /* setattr */
556 rc = fsfilt_setattr(obd, de, handle, &rec->ur_iattr, 0);
557 /* journal chown/chgrp in llog, just like unlink */
558 if (rc == 0 && lmm_size){
559 cookie_size = mds_get_cookie_size(obd, lmm);
560 OBD_ALLOC(logcookies, cookie_size);
561 if (logcookies == NULL)
562 GOTO(cleanup, rc = -ENOMEM);
564 if (mds_log_op_setattr(obd, inode, lmm, lmm_size,
565 logcookies, cookie_size) <= 0) {
566 OBD_FREE(logcookies, cookie_size);
/* Client supplied a striping EA: validate write permission, then either
 * delete the default striping (all-default sentinel on a dir) or store
 * the new EA after letting the LOV layer sanity-check it. */
572 if (rc == 0 && (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
573 rec->ur_eadata != NULL) {
574 struct lov_stripe_md *lsm = NULL;
575 struct lov_user_md *lum = NULL;
577 rc = ll_permission(inode, MAY_WRITE, NULL);
581 lum = rec->ur_eadata;
582 /* if { size, offset, count } = { 0, -1, 0 } (i.e. all default
583 * values specified) then delete default striping from dir. */
584 if (S_ISDIR(inode->i_mode) &&
585 ((lum->lmm_stripe_size == 0 &&
586 lum->lmm_stripe_offset == (typeof(lum->lmm_stripe_offset))(-1) &&
587 lum->lmm_stripe_count == 0) ||
588 /* lmm_stripe_size == -1 is deprecated in 1.4.6 */
589 lum->lmm_stripe_size == (typeof(lum->lmm_stripe_size))(-1))){
590 rc = fsfilt_set_md(obd, inode, handle, NULL, 0, "lov");
594 rc = obd_iocontrol(OBD_IOC_LOV_SETSTRIPE,
596 &lsm, rec->ur_eadata);
600 obd_free_memmd(mds->mds_osc_exp, &lsm);
602 rc = fsfilt_set_md(obd, inode, handle, rec->ur_eadata,
603 rec->ur_eadatalen, "lov");
609 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
610 mds_pack_inode2fid(&body->fid1, inode);
611 mds_pack_inode2body(body, inode);
613 /* don't return OST-specific attributes if we didn't just set them. */
614 if (ia_valid & ATTR_SIZE)
615 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
616 if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
617 body->valid |= OBD_MD_FLMTIME;
618 if (ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
619 body->valid |= OBD_MD_FLATIME;
/* Defer cancellation of the request's unlink/setattr cookies until this
 * transaction commits: stash cookies + EA in an mlcd freed by the
 * commit callback (mds_cancel_cookies_cb). */
621 if (rc == 0 && rec->ur_cookielen && !IS_ERR(mds->mds_osc_obd)) {
622 OBD_ALLOC(mlcd, sizeof(*mlcd) + rec->ur_cookielen +
625 mlcd->mlcd_size = sizeof(*mlcd) + rec->ur_cookielen +
627 mlcd->mlcd_eadatalen = rec->ur_eadatalen;
628 mlcd->mlcd_cookielen = rec->ur_cookielen;
629 mlcd->mlcd_lmm = (void *)&mlcd->mlcd_cookies +
630 mlcd->mlcd_cookielen;
631 memcpy(&mlcd->mlcd_cookies, rec->ur_logcookies,
632 mlcd->mlcd_cookielen);
633 memcpy(mlcd->mlcd_lmm, rec->ur_eadata,
634 mlcd->mlcd_eadatalen);
636 CERROR("unable to allocate log cancel data\n");
642 fsfilt_add_journal_cb(req->rq_export->exp_obd, 0, handle,
643 mds_cancel_cookies_cb, mlcd);
644 err = mds_finish_transno(mds, inode, handle, req, rc, 0);
645 /* do mds to ost setattr if needed */
646 if (!rc && !err && lmm_size)
647 mds_osc_setattr_async(obd, inode, lmm, lmm_size,
648 logcookies, rec->ur_fid1);
/* Staged cleanup: phases fall through from highest resource acquired.
 * NOTE(review): the case labels themselves are on missing lines. */
650 switch (cleanup_phase) {
652 OBD_FREE(lmm, mds->mds_max_mdsize);
654 OBD_FREE(logcookies, cookie_size);
656 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
657 rec->ur_eadata != NULL)
658 UNLOCK_INODE_MUTEX(inode);
/* On success the EX lock is saved for the reply ack; on failure it is
 * dropped immediately. */
662 ldlm_lock_decref(&lockh, LCK_EX);
664 ptlrpc_save_lock (req, &lockh, LCK_EX);
677 /* trigger dqrel/dqacq for original owner and new owner */
678 if (ia_valid & (ATTR_UID | ATTR_GID))
679 lquota_adjust(quota_interface, obd, qcids, qpids, rc,
/* Rebuild the reply for a resent create: restore transno/status from the
 * client slot, re-resolve parent by fid and child by name, and repack the
 * child's fid/attributes into the reply body.  The LASSERTs encode that a
 * replayed create must find what the original created.
 * NOTE(review): listing incomplete — dput/l_dput cleanup lines missing. */
685 static void reconstruct_reint_create(struct mds_update_record *rec, int offset,
686 struct ptlrpc_request *req)
688 struct mds_export_data *med = &req->rq_export->exp_mds_data;
689 struct mds_obd *obd = &req->rq_export->exp_obd->u.mds;
690 struct dentry *parent, *child;
691 struct mds_body *body;
693 mds_req_from_mcd(req, med->med_mcd);
698 parent = mds_fid2dentry(obd, rec->ur_fid1, NULL);
699 LASSERT(!IS_ERR(parent));
/* ur_namelen includes the trailing NUL, hence the "- 1". */
700 child = ll_lookup_one_len(rec->ur_name, parent, rec->ur_namelen - 1);
701 LASSERT(!IS_ERR(child));
702 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
703 mds_pack_inode2fid(&body->fid1, child->d_inode);
704 mds_pack_inode2body(body, child->d_inode);
/* Reintegrate a client create (regular file, dir, symlink, or device
 * node).  Visible flow: handle resend; lock the parent (EX, UPDATE bits);
 * look up the child name; dispatch on S_IFMT to the matching vfs_* call
 * under a freshly started journal; on success apply times/owner via
 * setattr (honoring parent setgid), propagate the parent's default
 * striping to a new directory, and pack the reply; on failure undo the
 * create; finally finish the transno, run staged cleanup, and adjust
 * quota for child owner and parent.  For replay, the desired inum is
 * passed to the fs through dchild->d_fsdata (struct dentry_params).
 * NOTE(review): listing incomplete — several if-conditions, case labels,
 * the "created" flag management, iattr declaration, and the final RETURN
 * are on missing lines. */
709 static int mds_reint_create(struct mds_update_record *rec, int offset,
710 struct ptlrpc_request *req,
711 struct lustre_handle *lh)
713 struct dentry *dparent = NULL;
714 struct mds_obd *mds = mds_req2mds(req);
715 struct obd_device *obd = req->rq_export->exp_obd;
716 struct dentry *dchild = NULL;
717 struct inode *dir = NULL;
719 struct lustre_handle lockh;
720 int rc = 0, err, type = rec->ur_mode & S_IFMT, cleanup_phase = 0;
722 unsigned int qcids[MAXQUOTAS] = {current->fsuid, current->fsgid};
723 unsigned int qpids[MAXQUOTAS] = {0, 0};
724 struct dentry_params dp;
727 LASSERT(offset == MDS_REQ_REC_OFF);
728 LASSERT(!strcmp(req->rq_export->exp_obd->obd_type->typ_name,
731 DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u name %s mode %o",
732 rec->ur_fid1->id, rec->ur_fid1->generation,
733 rec->ur_name, rec->ur_mode);
735 MDS_CHECK_RESENT(req, reconstruct_reint_create(rec, offset, req));
737 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_CREATE))
738 GOTO(cleanup, rc = -ESTALE);
740 dparent = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_EX, &lockh,
741 MDS_INODELOCK_UPDATE);
742 if (IS_ERR(dparent)) {
743 rc = PTR_ERR(dparent);
745 CERROR("parent "LPU64"/%u lookup error %d\n",
746 rec->ur_fid1->id, rec->ur_fid1->generation, rc);
749 cleanup_phase = 1; /* locked parent dentry */
750 dir = dparent->d_inode;
753 ldlm_lock_dump_handle(D_OTHER, &lockh);
755 dchild = ll_lookup_one_len(rec->ur_name, dparent, rec->ur_namelen - 1);
756 if (IS_ERR(dchild)) {
757 rc = PTR_ERR(dchild);
758 if (rc != -ENAMETOOLONG)
759 CERROR("child lookup error %d\n", rc);
763 cleanup_phase = 2; /* child dentry */
765 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_CREATE_WRITE, dir->i_sb);
/* Read-only export: report -EEXIST if the name is already there
 * (idempotent replay), -EROFS otherwise. */
767 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY) {
769 GOTO(cleanup, rc = -EEXIST);
770 GOTO(cleanup, rc = -EROFS);
/* SysV semantics: a setgid parent makes new subdirectories setgid too. */
773 if (dir->i_mode & S_ISGID && S_ISDIR(rec->ur_mode))
774 rec->ur_mode |= S_ISGID;
/* Pass the replay-desired inode number down to the fs via d_fsdata. */
776 dchild->d_fsdata = (void *)&dp;
777 dp.p_inum = (unsigned long)rec->ur_fid2->id;
/* Dispatch on file type; each branch starts its own journal handle.
 * NOTE(review): the switch statement and case labels are on missing
 * lines — only the branch bodies are visible below. */
782 handle = fsfilt_start(obd, dir, FSFILT_OP_CREATE, NULL);
784 GOTO(cleanup, rc = PTR_ERR(handle));
785 rc = ll_vfs_create(dir, dchild, rec->ur_mode, NULL);
790 handle = fsfilt_start(obd, dir, FSFILT_OP_MKDIR, NULL);
792 GOTO(cleanup, rc = PTR_ERR(handle));
793 rc = vfs_mkdir(dir, dchild, rec->ur_mode);
798 handle = fsfilt_start(obd, dir, FSFILT_OP_SYMLINK, NULL);
800 GOTO(cleanup, rc = PTR_ERR(handle));
801 if (rec->ur_tgt == NULL) /* no target supplied */
802 rc = -EINVAL; /* -EPROTO? */
804 rc = ll_vfs_symlink(dir, dchild, rec->ur_tgt, S_IALLUGO);
812 int rdev = rec->ur_rdev;
813 handle = fsfilt_start(obd, dir, FSFILT_OP_MKNOD, NULL);
815 GOTO(cleanup, rc = PTR_ERR(handle));
816 rc = vfs_mknod(dir, dchild, rec->ur_mode, rdev);
821 CERROR("bad file type %o creating %s\n", type, rec->ur_name);
822 dchild->d_fsdata = NULL;
823 GOTO(cleanup, rc = -EINVAL);
826 /* In case we stored the desired inum in here, we want to clean up. */
827 if (dchild->d_fsdata == (void *)(unsigned long)rec->ur_fid2->id)
828 dchild->d_fsdata = NULL;
831 CDEBUG(D_INODE, "error during create: %d\n", rc);
835 struct inode *inode = dchild->d_inode;
836 struct mds_body *body;
/* Stamp client-supplied times and credentials onto the new inode. */
839 LTIME_S(iattr.ia_atime) = rec->ur_time;
840 LTIME_S(iattr.ia_ctime) = rec->ur_time;
841 LTIME_S(iattr.ia_mtime) = rec->ur_time;
842 iattr.ia_uid = current->fsuid; /* set by push_ctxt already */
843 if (dir->i_mode & S_ISGID)
844 iattr.ia_gid = dir->i_gid;
846 iattr.ia_gid = current->fsgid;
847 iattr.ia_valid = ATTR_UID | ATTR_GID | ATTR_ATIME |
848 ATTR_MTIME | ATTR_CTIME;
850 if (rec->ur_fid2->id) {
851 LASSERT(rec->ur_fid2->id == inode->i_ino);
852 inode->i_generation = rec->ur_fid2->generation;
853 /* Dirtied and committed by the upcoming setattr. */
854 CDEBUG(D_INODE, "recreated ino %lu with gen %u\n",
855 inode->i_ino, inode->i_generation);
857 CDEBUG(D_INODE, "created ino %lu with gen %x\n",
858 inode->i_ino, inode->i_generation);
861 rc = fsfilt_setattr(obd, dchild, handle, &iattr, 0);
863 CERROR("error on child setattr: rc = %d\n", rc);
/* Update the parent directory's m/ctime in the same transaction. */
865 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
866 rc = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
868 CERROR("error on parent setattr: rc = %d\n", rc);
/* A new directory inherits the parent's default "lov" striping EA. */
870 if (S_ISDIR(inode->i_mode)) {
871 struct lov_mds_md lmm;
872 int lmm_size = sizeof(lmm);
873 rc = mds_get_md(obd, dir, &lmm, &lmm_size, 1);
875 LOCK_INODE_MUTEX(inode);
876 rc = fsfilt_set_md(obd, inode, handle,
877 &lmm, lmm_size, "lov");
878 UNLOCK_INODE_MUTEX(inode);
881 CERROR("error on copy stripe info: rc = %d\n",
885 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
886 mds_pack_inode2fid(&body->fid1, inode);
887 mds_pack_inode2body(body, inode);
892 err = mds_finish_transno(mds, dir, handle, req, rc, 0);
895 /* Destroy the file we just created. This should not need
896 * extra journal credits, as we have already modified all of
897 * the blocks needed in order to create the file in the first
902 err = vfs_rmdir(dir, dchild);
904 CERROR("rmdir in error path: %d\n", err);
907 err = vfs_unlink(dir, dchild);
909 CERROR("unlink in error path: %d\n", err);
912 } else if (created) {
913 /* The inode we were allocated may have just been freed
914 * by an unlink operation. We take this lock to
915 * synchronize against the matching reply-ack-lock taken
916 * in unlink, to avoid replay problems if this reply
917 * makes it out to the client but the unlink's does not.
918 * See bug 2029 for more detail.*/
919 mds_lock_new_child(obd, dchild->d_inode, NULL);
920 /* save uid/gid of create inode and parent */
921 qpids[USRQUOTA] = dir->i_uid;
922 qpids[GRPQUOTA] = dir->i_gid;
927 switch (cleanup_phase) {
928 case 2: /* child dentry */
930 case 1: /* locked parent dentry */
932 ldlm_lock_decref(&lockh, LCK_EX);
934 ptlrpc_save_lock (req, &lockh, LCK_EX);
940 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
945 /* trigger dqacq on the owner of child and parent */
946 lquota_adjust(quota_interface, obd, qcids, qpids, rc, FSFILT_OP_CREATE);
/* Total-order comparison on two DLM resource ids (and, tied, on their
 * policy data), used to sort lock acquisitions into a deadlock-free
 * order.  All-zero resources deliberately sort as "greater" so they land
 * at the end of the list and are skipped by the enqueue helpers.
 * NOTE(review): return statements between the comparisons are on missing
 * lines; only the comparison skeleton is visible. */
950 int res_gt(struct ldlm_res_id *res1, struct ldlm_res_id *res2,
951 ldlm_policy_data_t *p1, ldlm_policy_data_t *p2)
955 for (i = 0; i < RES_NAME_SIZE; i++) {
956 /* return 1 here, because enqueue_ordered will skip resources
957 * of all zeroes if they're sorted to the end of the list. */
958 if (res1->name[i] == 0 && res2->name[i] != 0)
960 if (res2->name[i] == 0 && res1->name[i] != 0)
963 if (res1->name[i] > res2->name[i])
965 if (res1->name[i] < res2->name[i])
/* Names equal: break the tie on the raw policy bytes. */
970 if (memcmp(p1, p2, sizeof(*p1)) < 0)
/* Take one or two LDLM inodebits locks in canonical (res_gt) order to
 * avoid deadlock.  If both requests resolve to the same resource with
 * overlapping bits, the second handle is a copy of the first with an
 * extra reference instead of a second enqueue.  A res_id->name[0] of 0
 * means "no lock for this slot".
 * NOTE(review): listing incomplete — rc/flags declarations, the
 * ELDLM_OK check after the first enqueue, and the RETURN are missing. */
975 /* This function doesn't use ldlm_match_or_enqueue because we're always called
976 * with EX or PW locks, and the MDS is no longer allowed to match write locks,
977 * because they take the place of local semaphores.
979 * One or two locks are taken in numerical order. A res_id->name[0] of 0 means
980 * no lock is taken for that res_id. Must be at least one non-zero res_id. */
981 int enqueue_ordered_locks(struct obd_device *obd, struct ldlm_res_id *p1_res_id,
982 struct lustre_handle *p1_lockh, int p1_lock_mode,
983 ldlm_policy_data_t *p1_policy,
984 struct ldlm_res_id *p2_res_id,
985 struct lustre_handle *p2_lockh, int p2_lock_mode,
986 ldlm_policy_data_t *p2_policy)
988 struct ldlm_res_id *res_id[2] = { p1_res_id, p2_res_id };
989 struct lustre_handle *handles[2] = { p1_lockh, p2_lockh };
990 int lock_modes[2] = { p1_lock_mode, p2_lock_mode };
991 ldlm_policy_data_t *policies[2] = {p1_policy, p2_policy};
995 LASSERT(p1_res_id != NULL && p2_res_id != NULL);
997 CDEBUG(D_INFO, "locks before: "LPU64"/"LPU64"\n",
998 res_id[0]->name[0], res_id[1]->name[0]);
/* Swap the pair so slot 0 always holds the "lesser" resource. */
1000 if (res_gt(p1_res_id, p2_res_id, p1_policy, p2_policy)) {
1001 handles[1] = p1_lockh;
1002 handles[0] = p2_lockh;
1003 res_id[1] = p1_res_id;
1004 res_id[0] = p2_res_id;
1005 lock_modes[1] = p1_lock_mode;
1006 lock_modes[0] = p2_lock_mode;
1007 policies[1] = p1_policy;
1008 policies[0] = p2_policy;
1011 CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"\n",
1012 res_id[0]->name[0], res_id[1]->name[0]);
/* LOCAL_ONLY: these are server-side locks, never sent to a client. */
1014 flags = LDLM_FL_LOCAL_ONLY;
1015 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, *res_id[0],
1016 LDLM_IBITS, policies[0], lock_modes[0], &flags,
1017 ldlm_blocking_ast, ldlm_completion_ast,
1018 NULL, NULL, NULL, 0, NULL, handles[0]);
1021 ldlm_lock_dump_handle(D_OTHER, handles[0]);
/* Same resource + overlapping bits: reference the first lock twice
 * rather than deadlocking on a second enqueue. */
1023 if (memcmp(res_id[0], res_id[1], sizeof(*res_id[0])) == 0 &&
1024 (policies[0]->l_inodebits.bits & policies[1]->l_inodebits.bits)) {
1025 memcpy(handles[1], handles[0], sizeof(*(handles[1])));
1026 ldlm_lock_addref(handles[1], lock_modes[1]);
1027 } else if (res_id[1]->name[0] != 0) {
1028 flags = LDLM_FL_LOCAL_ONLY;
1029 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
1030 *res_id[1], LDLM_IBITS, policies[1],
1031 lock_modes[1], &flags,
1032 ldlm_blocking_ast, ldlm_completion_ast,
1033 NULL, NULL, NULL, 0, NULL, handles[1]);
1034 if (rc != ELDLM_OK) {
/* Second enqueue failed: release the first lock before erroring out. */
1035 ldlm_lock_decref(handles[0], lock_modes[0]);
1038 ldlm_lock_dump_handle(D_OTHER, handles[1]);
/* True iff two DLM resource ids are bytewise identical. */
1044 static inline int res_eq(struct ldlm_res_id *res1, struct ldlm_res_id *res2)
1046 return !memcmp(res1, res2, sizeof(*res1));
/* If two lock requests target the same resource, merge the second's
 * inodebits into the first so a single enqueue covers both (closes the
 * two-enqueue race window; see bug 10360 in enqueue_4ordered_locks).
 * NOTE(review): the return type/linkage line and the return statements
 * of both paths are on missing lines. */
1050 try_to_aggregate_locks(struct ldlm_res_id *res1, ldlm_policy_data_t *p1,
1051 struct ldlm_res_id *res2, ldlm_policy_data_t *p2)
1053 if (!res_eq(res1, res2))
1055 /* XXX: any additional inodebits (to current LOOKUP and UPDATE)
1056 * should be taken with great care here */
1057 p1->l_inodebits.bits |= p2->l_inodebits.bits;
/* Take up to four LDLM inodebits locks (two parents, two children — the
 * rename case) in canonical res_gt order.  Slot index 4 of each array is
 * scratch space for the insertion sort.  Zero-named resources are
 * skipped; equal adjacent resources share one lock via an extra addref,
 * with bits aggregated ahead of the enqueue to close the race window of
 * two back-to-back enqueues on the same resource (bug 10360).  On
 * enqueue failure, already-acquired locks are released in the error
 * path (out_err).
 * NOTE(review): listing incomplete — flags assignment, policy argument in
 * the enqueue call, rc check after enqueue, loop closers, the out_err
 * loop header, and the RETURNs are on missing lines. */
1060 int enqueue_4ordered_locks(struct obd_device *obd,struct ldlm_res_id *p1_res_id,
1061 struct lustre_handle *p1_lockh, int p1_lock_mode,
1062 ldlm_policy_data_t *p1_policy,
1063 struct ldlm_res_id *p2_res_id,
1064 struct lustre_handle *p2_lockh, int p2_lock_mode,
1065 ldlm_policy_data_t *p2_policy,
1066 struct ldlm_res_id *c1_res_id,
1067 struct lustre_handle *c1_lockh, int c1_lock_mode,
1068 ldlm_policy_data_t *c1_policy,
1069 struct ldlm_res_id *c2_res_id,
1070 struct lustre_handle *c2_lockh, int c2_lock_mode,
1071 ldlm_policy_data_t *c2_policy)
1073 struct ldlm_res_id *res_id[5] = { p1_res_id, p2_res_id,
1074 c1_res_id, c2_res_id };
1075 struct lustre_handle *dlm_handles[5] = { p1_lockh, p2_lockh,
1076 c1_lockh, c2_lockh };
1077 int lock_modes[5] = { p1_lock_mode, p2_lock_mode,
1078 c1_lock_mode, c2_lock_mode };
1079 ldlm_policy_data_t *policies[5] = {p1_policy, p2_policy,
1080 c1_policy, c2_policy};
1081 int rc, i, j, sorted, flags;
1084 CDEBUG(D_DLMTRACE, "locks before: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
1085 res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
1086 res_id[3]->name[0]);
1088 /* simple insertion sort - we have at most 4 elements */
1089 for (i = 1; i < 4; i++) {
1091 dlm_handles[4] = dlm_handles[i];
1092 res_id[4] = res_id[i];
1093 lock_modes[4] = lock_modes[i];
1094 policies[4] = policies[i];
1098 if (res_gt(res_id[j], res_id[4], policies[j],
1100 dlm_handles[j + 1] = dlm_handles[j];
1101 res_id[j + 1] = res_id[j];
1102 lock_modes[j + 1] = lock_modes[j];
1103 policies[j + 1] = policies[j];
1108 } while (j >= 0 && !sorted);
1110 dlm_handles[j + 1] = dlm_handles[4];
1111 res_id[j + 1] = res_id[4];
1112 lock_modes[j + 1] = lock_modes[4];
1113 policies[j + 1] = policies[4];
1116 CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
1117 res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
1118 res_id[3]->name[0]);
1120 /* XXX we could send ASTs on all these locks first before blocking? */
1121 for (i = 0; i < 4; i++) {
1123 if (res_id[i]->name[0] == 0)
/* Duplicate of the previous (sorted) resource: share its lock. */
1125 if (i && res_eq(res_id[i], res_id[i-1])) {
1126 memcpy(dlm_handles[i], dlm_handles[i-1],
1127 sizeof(*(dlm_handles[i])));
1128 ldlm_lock_addref(dlm_handles[i], lock_modes[i]);
1130 /* we need to enqueue locks with different inodebits
1131 * at once, because otherwise concurrent thread can
1132 * hit the windown between these two locks and we'll
1133 * get to deadlock. see bug 10360. note also, that it
1134 * is impossible to have >2 equal res. */
1136 try_to_aggregate_locks(res_id[i], policies[i],
1137 res_id[i+1], policies[i+1]);
1138 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
1139 *res_id[i], LDLM_IBITS,
1141 lock_modes[i], &flags,
1143 ldlm_completion_ast, NULL, NULL,
1144 NULL, 0, NULL, dlm_handles[i]);
1146 GOTO(out_err, rc = -EIO);
1147 ldlm_lock_dump_handle(D_OTHER, dlm_handles[i]);
/* out_err path: release whatever was already acquired. */
1154 ldlm_lock_decref(dlm_handles[i], lock_modes[i]);
1159 /* In the unlikely case that the child changed while we were waiting
1160 * on the lock, we need to drop the lock on the old child and either:
1161 * - if the child has a lower resource name, then we have to also
1162 * drop the parent lock and regain the locks in the right order
1163 * - in the rename case, if the child has a lower resource name than one of
1164 * the other parent/child resources (maxres) we also need to reget the locks
1165 * - if the child has a higher resource name (this is the common case)
1166 * we can just get the lock on the new child (still in lock order)
1168 * Returns 0 if the child did not change or if it changed but could be locked.
1169 * Returns 1 if the child changed and we need to re-lock (no locks held).
1170 * Returns -ve error with a valid dchild (no locks held). */
1171 static int mds_verify_child(struct obd_device *obd,
1172 struct ldlm_res_id *parent_res_id,
1173 struct lustre_handle *parent_lockh,
1174 struct dentry *dparent, int parent_mode,
1175 struct ldlm_res_id *child_res_id,
1176 struct lustre_handle *child_lockh,
1177 struct dentry **dchildp, int child_mode,
1178 ldlm_policy_data_t *child_policy,
1179 const char *name, int namelen,
1180 struct ldlm_res_id *maxres)
1182 struct dentry *vchild, *dchild = *dchildp;
1183 int rc = 0, cleanup_phase = 2; /* parent, child locks */
1186 vchild = ll_lookup_one_len(name, dparent, namelen - 1);
1188 GOTO(cleanup, rc = PTR_ERR(vchild));
1190 if (likely((vchild->d_inode == NULL && child_res_id->name[0] == 0) ||
1191 (vchild->d_inode != NULL &&
1192 child_res_id->name[0] == vchild->d_inode->i_ino &&
1193 child_res_id->name[1] == vchild->d_inode->i_generation))) {
1201 CDEBUG(D_DLMTRACE, "child inode changed: %p != %p (%lu != "LPU64")\n",
1202 vchild->d_inode, dchild ? dchild->d_inode : 0,
1203 vchild->d_inode ? vchild->d_inode->i_ino : 0,
1204 child_res_id->name[0]);
1205 if (child_res_id->name[0] != 0)
1206 ldlm_lock_decref(child_lockh, child_mode);
1210 cleanup_phase = 1; /* parent lock only */
1211 *dchildp = dchild = vchild;
1213 if (dchild->d_inode) {
1215 child_res_id->name[0] = dchild->d_inode->i_ino;
1216 child_res_id->name[1] = dchild->d_inode->i_generation;
1218 /* Make sure that we don't try to re-enqueue a lock on the
1219 * same resource if it happens that the source is renamed to
1220 * the target by another thread (bug 9974, thanks racer :-) */
1221 if (!res_gt(child_res_id, parent_res_id, NULL, NULL) ||
1222 !res_gt(child_res_id, maxres, NULL, NULL)) {
1223 CDEBUG(D_DLMTRACE, "relock "LPU64"<("LPU64"|"LPU64")\n",
1224 child_res_id->name[0], parent_res_id->name[0],
1226 GOTO(cleanup, rc = 1);
1229 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
1230 *child_res_id, LDLM_IBITS, child_policy,
1231 child_mode, &flags, ldlm_blocking_ast,
1232 ldlm_completion_ast, NULL, NULL, NULL, 0,
1235 GOTO(cleanup, rc = -EIO);
1237 memset(child_res_id, 0, sizeof(*child_res_id));
1243 switch(cleanup_phase) {
1245 if (child_res_id->name[0] != 0)
1246 ldlm_lock_decref(child_lockh, child_mode);
1248 ldlm_lock_decref(parent_lockh, parent_mode);
1254 int mds_get_parent_child_locked(struct obd_device *obd, struct mds_obd *mds,
1256 struct lustre_handle *parent_lockh,
1257 struct dentry **dparentp, int parent_mode,
1258 __u64 parent_lockpart,
1259 char *name, int namelen,
1260 struct lustre_handle *child_lockh,
1261 struct dentry **dchildp, int child_mode,
1262 __u64 child_lockpart)
1264 struct ldlm_res_id child_res_id = { .name = {0} };
1265 struct ldlm_res_id parent_res_id = { .name = {0} };
1266 ldlm_policy_data_t parent_policy = {.l_inodebits = { parent_lockpart }};
1267 ldlm_policy_data_t child_policy = {.l_inodebits = { child_lockpart }};
1268 struct inode *inode;
1269 int rc = 0, cleanup_phase = 0;
1272 /* Step 1: Lookup parent */
1273 *dparentp = mds_fid2dentry(mds, fid, NULL);
1274 if (IS_ERR(*dparentp)) {
1275 rc = PTR_ERR(*dparentp);
1280 CDEBUG(D_INODE, "parent ino %lu, name %s\n",
1281 (*dparentp)->d_inode->i_ino, name);
1283 parent_res_id.name[0] = (*dparentp)->d_inode->i_ino;
1284 parent_res_id.name[1] = (*dparentp)->d_inode->i_generation;
1286 cleanup_phase = 1; /* parent dentry */
1288 /* Step 2: Lookup child (without DLM lock, to get resource name) */
1289 *dchildp = ll_lookup_one_len(name, *dparentp, namelen - 1);
1290 if (IS_ERR(*dchildp)) {
1291 rc = PTR_ERR(*dchildp);
1292 CDEBUG(D_INODE, "child lookup error %d\n", rc);
1296 cleanup_phase = 2; /* child dentry */
1297 inode = (*dchildp)->d_inode;
1298 if (inode != NULL) {
1299 if (is_bad_inode(inode)) {
1300 CERROR("bad inode returned %lu/%u\n",
1301 inode->i_ino, inode->i_generation);
1302 GOTO(cleanup, rc = -ENOENT);
1304 inode = igrab(inode);
1309 child_res_id.name[0] = inode->i_ino;
1310 child_res_id.name[1] = inode->i_generation;
1314 cleanup_phase = 2; /* child dentry */
1316 /* Step 3: Lock parent and child in resource order. If child doesn't
1317 * exist, we still have to lock the parent and re-lookup. */
1318 rc = enqueue_ordered_locks(obd,&parent_res_id,parent_lockh,parent_mode,
1320 &child_res_id, child_lockh, child_mode,
1325 if (!(*dchildp)->d_inode)
1326 cleanup_phase = 3; /* parent lock */
1328 cleanup_phase = 4; /* child lock */
1330 /* Step 4: Re-lookup child to verify it hasn't changed since locking */
1331 rc = mds_verify_child(obd, &parent_res_id, parent_lockh, *dparentp,
1332 parent_mode, &child_res_id, child_lockh, dchildp,
1333 child_mode,&child_policy, name, namelen, &parent_res_id);
1343 switch (cleanup_phase) {
1345 ldlm_lock_decref(child_lockh, child_mode);
1347 ldlm_lock_decref(parent_lockh, parent_mode);
1358 void mds_reconstruct_generic(struct ptlrpc_request *req)
1360 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1362 mds_req_from_mcd(req, med->med_mcd);
1365 /* If we are unlinking an open file/dir (i.e. creating an orphan) then
1366 * we instead link the inode into the PENDING directory until it is
1367 * finally released. We can't simply call mds_reint_rename() or some
1368 * part thereof, because we don't have the inode to check for link
1369 * count/open status until after it is locked.
1371 * For lock ordering, caller must get child->i_mutex first, then
1372 * pending->i_mutex before starting journal transaction.
1374 * returns 1 on success
1375 * returns 0 if we lost a race and didn't make a new link
1376 * returns negative on error
1378 static int mds_orphan_add_link(struct mds_update_record *rec,
1379 struct obd_device *obd, struct dentry *dentry)
1381 struct mds_obd *mds = &obd->u.mds;
1382 struct inode *pending_dir = mds->mds_pending_dir->d_inode;
1383 struct inode *inode = dentry->d_inode;
1384 struct dentry *pending_child;
1385 char fidname[LL_FID_NAMELEN];
1386 int fidlen = 0, rc, mode;
1389 LASSERT(inode != NULL);
1390 LASSERT(!mds_inode_is_orphan(inode));
1391 #ifndef HAVE_I_ALLOC_SEM
1392 LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);
1394 LASSERT(TRYLOCK_INODE_MUTEX(pending_dir) == 0);
1396 fidlen = mds_fid2str(fidname, inode->i_ino, inode->i_generation);
1398 CDEBUG(D_INODE, "pending destroy of %dx open %d linked %s %s = %s\n",
1399 mds_orphan_open_count(inode), inode->i_nlink,
1400 S_ISDIR(inode->i_mode) ? "dir" :
1401 S_ISREG(inode->i_mode) ? "file" : "other",rec->ur_name,fidname);
1403 if (mds_orphan_open_count(inode) == 0 || inode->i_nlink != 0)
1406 pending_child = lookup_one_len(fidname, mds->mds_pending_dir, fidlen);
1407 if (IS_ERR(pending_child))
1408 RETURN(PTR_ERR(pending_child));
1410 if (pending_child->d_inode != NULL) {
1411 CERROR("re-destroying orphan file %s?\n", rec->ur_name);
1412 LASSERT(pending_child->d_inode == inode);
1413 GOTO(out_dput, rc = 0);
1416 /* link() is semanticaly-wrong for S_IFDIR, so we set S_IFREG
1417 * for linking and return real mode back then -bzzz */
1418 mode = inode->i_mode;
1419 inode->i_mode = S_IFREG;
1420 rc = vfs_link(dentry, pending_dir, pending_child);
1422 CERROR("error linking orphan %s to PENDING: rc = %d\n",
1425 mds_inode_set_orphan(inode);
1427 /* return mode and correct i_nlink if inode is directory */
1428 inode->i_mode = mode;
1429 LASSERTF(inode->i_nlink == 1, "%s nlink == %d\n",
1430 S_ISDIR(mode) ? "dir" : S_ISREG(mode) ? "file" : "other",
1432 if (S_ISDIR(mode)) {
1434 pending_dir->i_nlink++;
1435 mark_inode_dirty(inode);
1436 mark_inode_dirty(pending_dir);
1439 GOTO(out_dput, rc = 1);
1441 l_dput(pending_child);
1445 int mds_get_cookie_size(struct obd_device *obd, struct lov_mds_md *lmm)
1447 int count = le32_to_cpu(lmm->lmm_stripe_count);
1448 int real_csize = count * sizeof(struct llog_cookie);
1452 void mds_shrink_reply(struct obd_device *obd, struct ptlrpc_request *req,
1453 struct mds_body *body)
1455 int cookie_size = 0, md_size = 0;
1457 if (body && body->valid & OBD_MD_FLEASIZE) {
1458 md_size = body->eadatasize;
1460 if (body && body->valid & OBD_MD_FLCOOKIE) {
1461 LASSERT(body->valid & OBD_MD_FLEASIZE);
1462 cookie_size = mds_get_cookie_size(obd, lustre_msg_buf(
1463 req->rq_repmsg, 1, 0));
1466 CDEBUG(D_INFO, "Shrink to md_size %d cookie_size %d \n", md_size,
1469 lustre_shrink_reply(req, 1, md_size, 1);
1471 lustre_shrink_reply(req, md_size? 2:1, cookie_size, 0);
1474 static int mds_reint_unlink(struct mds_update_record *rec, int offset,
1475 struct ptlrpc_request *req,
1476 struct lustre_handle *lh)
1478 struct dentry *dparent = NULL, *dchild;
1479 struct mds_obd *mds = mds_req2mds(req);
1480 struct obd_device *obd = req->rq_export->exp_obd;
1481 struct mds_body *body = NULL;
1482 struct inode *child_inode = NULL;
1483 struct lustre_handle parent_lockh, child_lockh, child_reuse_lockh;
1484 void *handle = NULL;
1485 int rc = 0, cleanup_phase = 0;
1486 unsigned int qcids [MAXQUOTAS] = {0, 0};
1487 unsigned int qpids [MAXQUOTAS] = {0, 0};
1490 LASSERT(offset == MDS_REQ_REC_OFF || offset == 2);
1492 DEBUG_REQ(D_INODE, req, "parent ino "LPU64"/%u, child %s",
1493 rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name);
1495 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1497 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNLINK))
1498 GOTO(cleanup, rc = -ENOENT);
1500 rc = mds_get_parent_child_locked(obd, mds, rec->ur_fid1,
1501 &parent_lockh, &dparent, LCK_EX,
1502 MDS_INODELOCK_UPDATE,
1503 rec->ur_name, rec->ur_namelen,
1504 &child_lockh, &dchild, LCK_EX,
1505 MDS_INODELOCK_FULL);
1509 cleanup_phase = 1; /* dchild, dparent, locks */
1512 child_inode = dchild->d_inode;
1513 if (child_inode == NULL) {
1514 CDEBUG(D_INODE, "child doesn't exist (dir %lu, name %s)\n",
1515 dparent->d_inode->i_ino, rec->ur_name);
1516 GOTO(cleanup, rc = -ENOENT);
1519 /* save uid/gid for quota acquire/release */
1520 qcids[USRQUOTA] = child_inode->i_uid;
1521 qcids[GRPQUOTA] = child_inode->i_gid;
1522 qpids[USRQUOTA] = dparent->d_inode->i_uid;
1523 qpids[GRPQUOTA] = dparent->d_inode->i_gid;
1525 cleanup_phase = 2; /* dchild has a lock */
1527 /* We have to do these checks ourselves, in case we are making an
1528 * orphan. The client tells us whether rmdir() or unlink() was called,
1529 * so we need to return appropriate errors (bug 72). */
1530 if ((rec->ur_mode & S_IFMT) == S_IFDIR) {
1531 if (!S_ISDIR(child_inode->i_mode))
1532 GOTO(cleanup, rc = -ENOTDIR);
1534 if (S_ISDIR(child_inode->i_mode))
1535 GOTO(cleanup, rc = -EISDIR);
1538 /* Check for EROFS after we check ENODENT, ENOTDIR, and EISDIR */
1539 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
1540 GOTO(cleanup, rc = -EROFS);
1542 /* Step 3: Get a lock on the ino to sync with creation WRT inode
1543 * reuse (see bug 2029). */
1544 rc = mds_lock_new_child(obd, child_inode, &child_reuse_lockh);
1548 cleanup_phase = 3; /* child inum lock */
1550 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_UNLINK_WRITE, dparent->d_inode->i_sb);
1552 /* ldlm_reply in buf[0] if called via intent */
1556 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof (*body));
1557 LASSERT(body != NULL);
1559 /* child orphan sem protects orphan_dec_test && is_orphan race */
1560 MDS_DOWN_READ_ORPHAN_SEM(child_inode);
1561 cleanup_phase = 4; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
1563 /* If this is potentially the last reference to this inode, get the
1564 * OBD EA data first so the client can destroy OST objects. We
1565 * only do the object removal later if no open files/links remain. */
1566 if ((S_ISDIR(child_inode->i_mode) && child_inode->i_nlink == 2) ||
1567 child_inode->i_nlink == 1) {
1568 if (mds_orphan_open_count(child_inode) > 0) {
1569 /* need to lock pending_dir before transaction */
1570 LOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
1571 cleanup_phase = 5; /* UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode); */
1572 } else if (S_ISREG(child_inode->i_mode)) {
1573 mds_pack_inode2fid(&body->fid1, child_inode);
1574 mds_pack_inode2body(body, child_inode);
1575 mds_pack_md(obd, req->rq_repmsg, offset + 1, body,
1576 child_inode, MDS_PACK_MD_LOCK);
1580 /* Step 4: Do the unlink: we already verified ur_mode above (bug 72) */
1581 switch (child_inode->i_mode & S_IFMT) {
1583 /* Drop any lingering child directories before we start our
1584 * transaction, to avoid doing multiple inode dirty/delete
1585 * in our compound transaction (bug 1321). */
1586 shrink_dcache_parent(dchild);
1587 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_RMDIR,
1590 GOTO(cleanup, rc = PTR_ERR(handle));
1591 rc = vfs_rmdir(dparent->d_inode, dchild);
1594 struct lov_mds_md *lmm = lustre_msg_buf(req->rq_repmsg,
1596 handle = fsfilt_start_log(obd, dparent->d_inode,
1597 FSFILT_OP_UNLINK, NULL,
1598 le32_to_cpu(lmm->lmm_stripe_count));
1600 GOTO(cleanup, rc = PTR_ERR(handle));
1601 rc = vfs_unlink(dparent->d_inode, dchild);
1609 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_UNLINK,
1612 GOTO(cleanup, rc = PTR_ERR(handle));
1613 rc = vfs_unlink(dparent->d_inode, dchild);
1616 CERROR("bad file type %o unlinking %s\n", rec->ur_mode,
1619 GOTO(cleanup, rc = -EINVAL);
1622 if (rc == 0 && child_inode->i_nlink == 0) {
1623 if (mds_orphan_open_count(child_inode) > 0)
1624 rc = mds_orphan_add_link(rec, obd, dchild);
1627 GOTO(cleanup, rc = 0);
1629 if (!S_ISREG(child_inode->i_mode))
1632 if (!(body->valid & OBD_MD_FLEASIZE)) {
1633 body->valid |=(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1634 OBD_MD_FLATIME | OBD_MD_FLMTIME);
1635 } else if (mds_log_op_unlink(obd, child_inode,
1636 lustre_msg_buf(req->rq_repmsg, offset + 1, 0),
1637 req->rq_repmsg->buflens[offset + 1],
1638 lustre_msg_buf(req->rq_repmsg, offset + 2, 0),
1639 req->rq_repmsg->buflens[offset+2]) > 0){
1640 body->valid |= OBD_MD_FLCOOKIE;
1650 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
1651 LTIME_S(iattr.ia_mtime) = rec->ur_time;
1652 LTIME_S(iattr.ia_ctime) = rec->ur_time;
1654 err = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
1656 CERROR("error on parent setattr: rc = %d\n", err);
1659 rc = mds_finish_transno(mds, dparent ? dparent->d_inode : NULL,
1660 handle, req, rc, 0);
1662 (void)obd_set_info_async(mds->mds_osc_exp, strlen("unlinked"),
1663 "unlinked", 0, NULL, NULL);
1664 switch(cleanup_phase) {
1665 case 5: /* pending_dir semaphore */
1666 UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
1667 case 4: /* child inode semaphore */
1668 MDS_UP_READ_ORPHAN_SEM(child_inode);
1669 case 3: /* child ino-reuse lock */
1670 if (rc && body != NULL) {
1671 // Don't unlink the OST objects if the MDS unlink failed
1675 ldlm_lock_decref(&child_reuse_lockh, LCK_EX);
1677 ptlrpc_save_lock(req, &child_reuse_lockh, LCK_EX);
1678 case 2: /* child lock */
1679 ldlm_lock_decref(&child_lockh, LCK_EX);
1680 case 1: /* child and parent dentry, parent lock */
1682 ldlm_lock_decref(&parent_lockh, LCK_EX);
1684 ptlrpc_save_lock(req, &parent_lockh, LCK_EX);
1691 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1694 req->rq_status = rc;
1696 mds_shrink_reply(obd, req, body);
1698 /* trigger dqrel on the owner of child and parent */
1699 lquota_adjust(quota_interface, obd, qcids, qpids, rc, FSFILT_OP_UNLINK);
1703 static int mds_reint_link(struct mds_update_record *rec, int offset,
1704 struct ptlrpc_request *req,
1705 struct lustre_handle *lh)
1707 struct obd_device *obd = req->rq_export->exp_obd;
1708 struct dentry *de_src = NULL;
1709 struct dentry *de_tgt_dir = NULL;
1710 struct dentry *dchild = NULL;
1711 struct mds_obd *mds = mds_req2mds(req);
1712 struct lustre_handle *handle = NULL, tgt_dir_lockh, src_lockh;
1713 struct ldlm_res_id src_res_id = { .name = {0} };
1714 struct ldlm_res_id tgt_dir_res_id = { .name = {0} };
1715 ldlm_policy_data_t src_policy ={.l_inodebits = {MDS_INODELOCK_UPDATE}};
1716 ldlm_policy_data_t tgt_dir_policy =
1717 {.l_inodebits = {MDS_INODELOCK_UPDATE}};
1718 int rc = 0, cleanup_phase = 0;
1721 LASSERT(offset == MDS_REQ_REC_OFF);
1723 DEBUG_REQ(D_INODE, req, "original "LPU64"/%u to "LPU64"/%u %s",
1724 rec->ur_fid1->id, rec->ur_fid1->generation,
1725 rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_name);
1727 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1729 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_LINK))
1730 GOTO(cleanup, rc = -ENOENT);
1732 /* Step 1: Lookup the source inode and target directory by FID */
1733 de_src = mds_fid2dentry(mds, rec->ur_fid1, NULL);
1735 GOTO(cleanup, rc = PTR_ERR(de_src));
1737 cleanup_phase = 1; /* source dentry */
1739 de_tgt_dir = mds_fid2dentry(mds, rec->ur_fid2, NULL);
1740 if (IS_ERR(de_tgt_dir)) {
1741 rc = PTR_ERR(de_tgt_dir);
1746 cleanup_phase = 2; /* target directory dentry */
1748 CDEBUG(D_INODE, "linking %.*s/%s to inode %lu\n",
1749 de_tgt_dir->d_name.len, de_tgt_dir->d_name.name, rec->ur_name,
1750 de_src->d_inode->i_ino);
1752 /* Step 2: Take the two locks */
1753 src_res_id.name[0] = de_src->d_inode->i_ino;
1754 src_res_id.name[1] = de_src->d_inode->i_generation;
1755 tgt_dir_res_id.name[0] = de_tgt_dir->d_inode->i_ino;
1756 tgt_dir_res_id.name[1] = de_tgt_dir->d_inode->i_generation;
1758 rc = enqueue_ordered_locks(obd, &src_res_id, &src_lockh, LCK_EX,
1760 &tgt_dir_res_id, &tgt_dir_lockh, LCK_EX,
1765 cleanup_phase = 3; /* locks */
1767 if (mds_inode_is_orphan(de_src->d_inode)) {
1768 CDEBUG(D_INODE, "an attempt to link an orphan inode %lu/%u\n",
1769 de_src->d_inode->i_ino,
1770 de_src->d_inode->i_generation);
1771 GOTO(cleanup, rc = -ENOENT);
1774 /* Step 3: Lookup the child */
1775 dchild = ll_lookup_one_len(rec->ur_name, de_tgt_dir, rec->ur_namelen-1);
1776 if (IS_ERR(dchild)) {
1777 rc = PTR_ERR(dchild);
1778 if (rc != -EPERM && rc != -EACCES && rc != -ENAMETOOLONG)
1779 CERROR("child lookup error %d\n", rc);
1783 cleanup_phase = 4; /* child dentry */
1785 if (dchild->d_inode) {
1786 CDEBUG(D_INODE, "child exists (dir %lu, name %s)\n",
1787 de_tgt_dir->d_inode->i_ino, rec->ur_name);
1792 /* Step 4: Do it. */
1793 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_LINK_WRITE, de_src->d_inode->i_sb);
1795 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
1796 GOTO(cleanup, rc = -EROFS);
1798 handle = fsfilt_start(obd, de_tgt_dir->d_inode, FSFILT_OP_LINK, NULL);
1800 GOTO(cleanup, rc = PTR_ERR(handle));
1802 rc = vfs_link(de_src, de_tgt_dir->d_inode, dchild);
1803 if (rc && rc != -EPERM && rc != -EACCES)
1804 CERROR("vfs_link error %d\n", rc);
1806 rc = mds_finish_transno(mds, de_tgt_dir ? de_tgt_dir->d_inode : NULL,
1807 handle, req, rc, 0);
1810 switch (cleanup_phase) {
1811 case 4: /* child dentry */
1815 ldlm_lock_decref(&src_lockh, LCK_EX);
1816 ldlm_lock_decref(&tgt_dir_lockh, LCK_EX);
1818 ptlrpc_save_lock(req, &src_lockh, LCK_EX);
1819 ptlrpc_save_lock(req, &tgt_dir_lockh, LCK_EX);
1821 case 2: /* target dentry */
1823 case 1: /* source dentry */
1828 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1831 req->rq_status = rc;
1835 /* The idea here is that we need to get four locks in the end:
1836 * one on each parent directory, one on each child. We need to take
1837 * these locks in some kind of order (to avoid deadlocks), and the order
1838 * I selected is "increasing resource number" order. We need to look up
1839 * the children, however, before we know what the resource number(s) are.
1840 * Thus the following plan:
1842 * 1,2. Look up the parents
1843 * 3,4. Look up the children
1844 * 5. Take locks on the parents and children, in order
1845 * 6. Verify that the children haven't changed since they were looked up
1847 * If there was a race and the children changed since they were first looked
1848 * up, it is possible that mds_verify_child() will be able to just grab the
1849 * lock on the new child resource (if it has a higher resource than any other)
1850 * but we need to compare against not only its parent, but also against the
1851 * parent and child of the "other half" of the rename, hence maxres_{src,tgt}.
1853 * We need the fancy igrab() on the child inodes because we aren't holding a
1854 * lock on the parent after the lookup is done, so dentry->d_inode may change
1855 * at any time, and igrab() itself doesn't like getting passed a NULL argument.
1857 int mds_get_parents_children_locked(struct obd_device *obd,
1858 struct mds_obd *mds,
1859 struct ll_fid *p1_fid,
1860 struct dentry **de_srcdirp,
1861 struct ll_fid *p2_fid,
1862 struct dentry **de_tgtdirp,
1864 const char *old_name, int old_len,
1865 struct dentry **de_oldp,
1866 const char *new_name, int new_len,
1867 struct dentry **de_newp,
1868 struct lustre_handle *dlm_handles,
1871 struct ldlm_res_id p1_res_id = { .name = {0} };
1872 struct ldlm_res_id p2_res_id = { .name = {0} };
1873 struct ldlm_res_id c1_res_id = { .name = {0} };
1874 struct ldlm_res_id c2_res_id = { .name = {0} };
1875 ldlm_policy_data_t p_policy = {.l_inodebits = {MDS_INODELOCK_UPDATE}};
1876 /* Only dentry should disappear, but the inode itself would be
1877 intact otherwise. */
1878 ldlm_policy_data_t c1_policy = {.l_inodebits = {MDS_INODELOCK_LOOKUP}};
1879 /* If something is going to be replaced, both dentry and inode locks are needed */
1880 ldlm_policy_data_t c2_policy = {.l_inodebits = {MDS_INODELOCK_FULL}};
1881 struct ldlm_res_id *maxres_src, *maxres_tgt;
1882 struct inode *inode;
1883 int rc = 0, cleanup_phase = 0;
1886 /* Step 1: Lookup the source directory */
1887 *de_srcdirp = mds_fid2dentry(mds, p1_fid, NULL);
1888 if (IS_ERR(*de_srcdirp))
1889 GOTO(cleanup, rc = PTR_ERR(*de_srcdirp));
1891 cleanup_phase = 1; /* source directory dentry */
1893 p1_res_id.name[0] = (*de_srcdirp)->d_inode->i_ino;
1894 p1_res_id.name[1] = (*de_srcdirp)->d_inode->i_generation;
1896 /* Step 2: Lookup the target directory */
1897 if (memcmp(p1_fid, p2_fid, sizeof(*p1_fid)) == 0) {
1898 *de_tgtdirp = dget(*de_srcdirp);
1900 *de_tgtdirp = mds_fid2dentry(mds, p2_fid, NULL);
1901 if (IS_ERR(*de_tgtdirp)) {
1902 rc = PTR_ERR(*de_tgtdirp);
1908 cleanup_phase = 2; /* target directory dentry */
1910 p2_res_id.name[0] = (*de_tgtdirp)->d_inode->i_ino;
1911 p2_res_id.name[1] = (*de_tgtdirp)->d_inode->i_generation;
1913 /* Step 3: Lookup the source child entry */
1914 *de_oldp = ll_lookup_one_len(old_name, *de_srcdirp, old_len - 1);
1915 if (IS_ERR(*de_oldp)) {
1916 rc = PTR_ERR(*de_oldp);
1917 CERROR("old child lookup error (%.*s): %d\n",
1918 old_len - 1, old_name, rc);
1922 cleanup_phase = 3; /* original name dentry */
1924 inode = (*de_oldp)->d_inode;
1926 inode = igrab(inode);
1928 GOTO(cleanup, rc = -ENOENT);
1930 c1_res_id.name[0] = inode->i_ino;
1931 c1_res_id.name[1] = inode->i_generation;
1935 /* Step 4: Lookup the target child entry */
1937 GOTO(retry_locks, rc);
1938 *de_newp = ll_lookup_one_len(new_name, *de_tgtdirp, new_len - 1);
1939 if (IS_ERR(*de_newp)) {
1940 rc = PTR_ERR(*de_newp);
1941 if (rc != -ENAMETOOLONG)
1942 CERROR("new child lookup error (%.*s): %d\n",
1943 old_len - 1, old_name, rc);
1947 cleanup_phase = 4; /* target dentry */
1949 inode = (*de_newp)->d_inode;
1951 inode = igrab(inode);
1955 c2_res_id.name[0] = inode->i_ino;
1956 c2_res_id.name[1] = inode->i_generation;
1960 /* Step 5: Take locks on the parents and child(ren) */
1961 maxres_src = &p1_res_id;
1962 maxres_tgt = &p2_res_id;
1963 cleanup_phase = 4; /* target dentry */
1965 if (c1_res_id.name[0] != 0 && res_gt(&c1_res_id, &p1_res_id,NULL,NULL))
1966 maxres_src = &c1_res_id;
1967 if (c2_res_id.name[0] != 0 && res_gt(&c2_res_id, &p2_res_id,NULL,NULL))
1968 maxres_tgt = &c2_res_id;
1970 rc = enqueue_4ordered_locks(obd, &p1_res_id,&dlm_handles[0],parent_mode,
1972 &p2_res_id, &dlm_handles[1], parent_mode,
1974 &c1_res_id, &dlm_handles[2], child_mode,
1976 &c2_res_id, &dlm_handles[3], child_mode,
1981 cleanup_phase = 6; /* parent and child(ren) locks */
1983 /* Step 6a: Re-lookup source child to verify it hasn't changed */
1984 rc = mds_verify_child(obd, &p1_res_id, &dlm_handles[0], *de_srcdirp,
1985 parent_mode, &c1_res_id, &dlm_handles[2], de_oldp,
1986 child_mode, &c1_policy, old_name, old_len,
1989 if (c2_res_id.name[0] != 0)
1990 ldlm_lock_decref(&dlm_handles[3], child_mode);
1991 ldlm_lock_decref(&dlm_handles[1], parent_mode);
1998 if ((*de_oldp)->d_inode == NULL)
1999 GOTO(cleanup, rc = -ENOENT);
2003 /* Step 6b: Re-lookup target child to verify it hasn't changed */
2004 rc = mds_verify_child(obd, &p2_res_id, &dlm_handles[1], *de_tgtdirp,
2005 parent_mode, &c2_res_id, &dlm_handles[3], de_newp,
2006 child_mode, &c2_policy, new_name, new_len,
2009 ldlm_lock_decref(&dlm_handles[2], child_mode);
2010 ldlm_lock_decref(&dlm_handles[0], parent_mode);
2020 switch (cleanup_phase) {
2021 case 6: /* child lock(s) */
2022 if (c2_res_id.name[0] != 0)
2023 ldlm_lock_decref(&dlm_handles[3], child_mode);
2024 if (c1_res_id.name[0] != 0)
2025 ldlm_lock_decref(&dlm_handles[2], child_mode);
2026 case 5: /* parent locks */
2027 ldlm_lock_decref(&dlm_handles[1], parent_mode);
2028 ldlm_lock_decref(&dlm_handles[0], parent_mode);
2029 case 4: /* target dentry */
2031 case 3: /* source dentry */
2033 case 2: /* target directory dentry */
2034 l_dput(*de_tgtdirp);
2035 case 1: /* source directry dentry */
2036 l_dput(*de_srcdirp);
2043 static int mds_reint_rename(struct mds_update_record *rec, int offset,
2044 struct ptlrpc_request *req,
2045 struct lustre_handle *lockh)
2047 struct obd_device *obd = req->rq_export->exp_obd;
2048 struct dentry *de_srcdir = NULL;
2049 struct dentry *de_tgtdir = NULL;
2050 struct dentry *de_old = NULL;
2051 struct dentry *de_new = NULL;
2052 struct inode *old_inode = NULL, *new_inode = NULL;
2053 struct mds_obd *mds = mds_req2mds(req);
2054 struct lustre_handle dlm_handles[4];
2055 struct mds_body *body = NULL;
2056 struct lov_mds_md *lmm = NULL;
2057 int rc = 0, lock_count = 3, cleanup_phase = 0;
2058 void *handle = NULL;
2059 unsigned int qcids[MAXQUOTAS] = {0, 0};
2060 unsigned int qpids[4] = {0, 0, 0, 0};
2063 LASSERT(offset == MDS_REQ_REC_OFF);
2065 DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u %s to "LPU64"/%u %s",
2066 rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name,
2067 rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_tgt);
2069 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
2071 rc = mds_get_parents_children_locked(obd, mds, rec->ur_fid1, &de_srcdir,
2072 rec->ur_fid2, &de_tgtdir, LCK_EX,
2073 rec->ur_name, rec->ur_namelen,
2074 &de_old, rec->ur_tgt,
2075 rec->ur_tgtlen, &de_new,
2076 dlm_handles, LCK_EX);
2080 cleanup_phase = 1; /* parent(s), children, locks */
2082 old_inode = de_old->d_inode;
2083 new_inode = de_new->d_inode;
2085 if (new_inode != NULL)
2088 /* sanity check for src inode */
2089 if (old_inode->i_ino == de_srcdir->d_inode->i_ino ||
2090 old_inode->i_ino == de_tgtdir->d_inode->i_ino)
2091 GOTO(cleanup, rc = -EINVAL);
2093 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2094 GOTO(cleanup, rc = -EROFS);
2096 if (new_inode == NULL)
2100 cleanup_phase = 2; /* iput(new_inode) when finished */
2102 /* sanity check for dest inode */
2103 if (new_inode->i_ino == de_srcdir->d_inode->i_ino ||
2104 new_inode->i_ino == de_tgtdir->d_inode->i_ino)
2105 GOTO(cleanup, rc = -EINVAL);
2107 if (old_inode == new_inode)
2108 GOTO(cleanup, rc = 0);
2110 /* save uids/gids for qunit acquire/release */
2111 qcids[USRQUOTA] = old_inode->i_uid;
2112 qcids[GRPQUOTA] = old_inode->i_gid;
2113 qpids[USRQUOTA] = de_tgtdir->d_inode->i_uid;
2114 qpids[GRPQUOTA] = de_tgtdir->d_inode->i_gid;
2115 qpids[2] = de_srcdir->d_inode->i_uid;
2116 qpids[3] = de_srcdir->d_inode->i_gid;
2118 /* if we are about to remove the target at first, pass the EA of
2119 * that inode to client to perform and cleanup on OST */
2120 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
2121 LASSERT(body != NULL);
2123 /* child orphan sem protects orphan_dec_test && is_orphan race */
2124 MDS_DOWN_READ_ORPHAN_SEM(new_inode);
2125 cleanup_phase = 3; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
2127 if ((S_ISDIR(new_inode->i_mode) && new_inode->i_nlink == 2) ||
2128 new_inode->i_nlink == 1) {
2129 if (mds_orphan_open_count(new_inode) > 0) {
2130 /* need to lock pending_dir before transaction */
2131 LOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
2132 cleanup_phase = 4; /* UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode); */
2133 } else if (S_ISREG(new_inode->i_mode)) {
2134 mds_pack_inode2fid(&body->fid1, new_inode);
2135 mds_pack_inode2body(body, new_inode);
2136 mds_pack_md(obd, req->rq_repmsg, 1, body, new_inode,
2142 OBD_FAIL_WRITE(OBD_FAIL_MDS_REINT_RENAME_WRITE,
2143 de_srcdir->d_inode->i_sb);
2145 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2146 /* Check if we are moving old entry into its child. 2.6 does not
2147 check for this in vfs_rename() anymore */
2148 if (is_subdir(de_new, de_old))
2149 GOTO(cleanup, rc = -EINVAL);
2152 lmm = lustre_msg_buf(req->rq_repmsg, 1, 0);
2153 handle = fsfilt_start_log(obd, de_tgtdir->d_inode, FSFILT_OP_RENAME,
2154 NULL, le32_to_cpu(lmm->lmm_stripe_count));
2157 GOTO(cleanup, rc = PTR_ERR(handle));
2160 de_old->d_fsdata = req;
2161 de_new->d_fsdata = req;
2163 rc = vfs_rename(de_srcdir->d_inode, de_old, de_tgtdir->d_inode, de_new);
2166 if (rc == 0 && new_inode != NULL && new_inode->i_nlink == 0) {
2167 if (mds_orphan_open_count(new_inode) > 0)
2168 rc = mds_orphan_add_link(rec, obd, de_new);
2171 GOTO(cleanup, rc = 0);
2173 if (!S_ISREG(new_inode->i_mode))
2176 if (!(body->valid & OBD_MD_FLEASIZE)) {
2177 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
2178 OBD_MD_FLATIME | OBD_MD_FLMTIME);
2179 } else if (mds_log_op_unlink(obd, new_inode,
2180 lustre_msg_buf(req->rq_repmsg,1,0),
2181 req->rq_repmsg->buflens[1],
2182 lustre_msg_buf(req->rq_repmsg,2,0),
2183 req->rq_repmsg->buflens[2]) > 0) {
2184 body->valid |= OBD_MD_FLCOOKIE;
2190 rc = mds_finish_transno(mds, de_tgtdir ? de_tgtdir->d_inode : NULL,
2191 handle, req, rc, 0);
2193 switch (cleanup_phase) {
2195 UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
2197 MDS_UP_READ_ORPHAN_SEM(new_inode);
2202 if (lock_count == 4)
2203 ldlm_lock_decref(&(dlm_handles[3]), LCK_EX);
2204 ldlm_lock_decref(&(dlm_handles[2]), LCK_EX);
2205 ldlm_lock_decref(&(dlm_handles[1]), LCK_EX);
2206 ldlm_lock_decref(&(dlm_handles[0]), LCK_EX);
2208 if (lock_count == 4)
2209 ptlrpc_save_lock(req,&(dlm_handles[3]), LCK_EX);
2210 ptlrpc_save_lock(req, &(dlm_handles[2]), LCK_EX);
2211 ptlrpc_save_lock(req, &(dlm_handles[1]), LCK_EX);
2212 ptlrpc_save_lock(req, &(dlm_handles[0]), LCK_EX);
2221 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
2224 req->rq_status = rc;
2226 /* acquire/release qunit */
2227 lquota_adjust(quota_interface, obd, qcids, qpids, rc, FSFILT_OP_RENAME);
2231 typedef int (*mds_reinter)(struct mds_update_record *, int offset,
2232 struct ptlrpc_request *, struct lustre_handle *);
2234 static mds_reinter reinters[REINT_MAX] = {
2235 [REINT_SETATTR] mds_reint_setattr,
2236 [REINT_CREATE] mds_reint_create,
2237 [REINT_LINK] mds_reint_link,
2238 [REINT_UNLINK] mds_reint_unlink,
2239 [REINT_RENAME] mds_reint_rename,
2240 [REINT_OPEN] mds_open
2243 int mds_reint_rec(struct mds_update_record *rec, int offset,
2244 struct ptlrpc_request *req, struct lustre_handle *lockh)
2246 struct obd_device *obd = req->rq_export->exp_obd;
2247 struct mds_obd *mds = &obd->u.mds;
2248 struct lvfs_run_ctxt saved;
2253 if (req->rq_uid != LNET_UID_ANY) {
2254 /* non-root local cluster client
2255 * NB root's creds are believed... */
2256 LASSERT (req->rq_uid != 0);
2257 rec->ur_uc.luc_fsuid = req->rq_uid;
2258 rec->ur_uc.luc_cap = 0;
2262 /* get group info of this user */
2263 rec->ur_uc.luc_uce = upcall_cache_get_entry(mds->mds_group_hash,
2264 rec->ur_uc.luc_fsuid,
2265 rec->ur_uc.luc_fsgid, 2,
2266 &rec->ur_uc.luc_suppgid1);
2268 if (IS_ERR(rec->ur_uc.luc_uce)) {
2269 rc = PTR_ERR(rec->ur_uc.luc_uce);
2270 rec->ur_uc.luc_uce = NULL;
2274 /* checked by unpacker */
2275 LASSERT(rec->ur_opcode < REINT_MAX && reinters[rec->ur_opcode] != NULL);
2278 if (rec->ur_uc.luc_uce)
2279 rec->ur_uc.luc_fsgid = rec->ur_uc.luc_uce->ue_primary;
2282 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &rec->ur_uc);
2283 rc = reinters[rec->ur_opcode] (rec, offset, req, lockh);
2284 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &rec->ur_uc);
2286 upcall_cache_put_entry(mds->mds_group_hash, rec->ur_uc.luc_uce);