1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/mds/mds_reint.c
5 * Lustre Metadata Server (mds) reintegration routines
7 * Copyright (C) 2002-2005 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
12 * This file is part of the Lustre file system, http://www.lustre.org
13 * Lustre is a trademark of Cluster File Systems, Inc.
15 * You may have signed or agreed to another license before downloading
16 * this software. If so, you are bound by the terms and conditions
17 * of that agreement, and the following does not apply to you. See the
18 * LICENSE file included with this distribution for more information.
20 * If you did not agree to a different license, then this copy of Lustre
21 * is open source software; you can redistribute it and/or modify it
22 * under the terms of version 2 of the GNU General Public License as
23 * published by the Free Software Foundation.
25 * In either case, Lustre is distributed in the hope that it will be
26 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
27 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * license text for more details.
32 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_MDS
37 #include <obd_support.h>
38 #include <obd_class.h>
40 #include <lustre_lib.h>
41 #include <lustre/lustre_idl.h>
42 #include <lustre_mds.h>
43 #include <lustre_dlm.h>
44 #include <lustre_fsfilt.h>
45 #include <lustre_ucache.h>
47 #include "mds_internal.h"
/*
 * Journal-commit callback for a generic MDS transaction: forwards the
 * committed transno to obd_transno_commit_cb() so clients can be told
 * their request is on stable storage.
 * NOTE(review): this listing elides original lines 50-51/53-54, so the
 * trailing parameter(s) (presumably "int error") and braces are not
 * visible here — confirm against the full file.
 */
49 void mds_commit_cb(struct obd_device *obd, __u64 transno, void *data,
52         obd_transno_commit_cb(obd, transno, error);
/*
 * Payload handed to the mds_cancel_cookies_cb() commit callback: the
 * striping EA (mlcd_lmm) plus the llog cookies to cancel, stored inline
 * via the trailing zero-length array (old-style flexible array member).
 * NOTE(review): size/length members (mlcd_size, mlcd_cookielen,
 * mlcd_eadatalen — referenced by the callback below) are elided from
 * this listing (original lines 57-59).
 */
55 struct mds_logcancel_data {
56 struct lov_mds_md *mlcd_lmm;
60 struct llog_cookie mlcd_cookies[0];
/*
 * Commit callback that cancels the llog cookies recorded for a setattr
 * once the transaction is on disk: notifies the client of the commit,
 * unpacks the in-memory LSM from the saved EA, revalidates it, cancels
 * the cookies via the llog context, and frees the callback payload.
 * NOTE(review): error-path lines (rc checks, GOTO targets, lsm free)
 * are partially elided from this listing — do not assume the visible
 * lines are contiguous.
 */
64 static void mds_cancel_cookies_cb(struct obd_device *obd, __u64 transno,
65 void *cb_data, int error)
67 struct mds_logcancel_data *mlcd = cb_data;
68 struct lov_stripe_md *lsm = NULL;
69 struct llog_ctxt *ctxt;
/* first let the client know the transno committed */
72 obd_transno_commit_cb(obd, transno, error);
74 CDEBUG(D_RPCTRACE, "cancelling %d cookies\n",
75 (int)(mlcd->mlcd_cookielen / sizeof(*mlcd->mlcd_cookies)));
/* rebuild the in-memory stripe MD from the EA saved at setattr time */
77 rc = obd_unpackmd(obd->u.mds.mds_osc_exp, &lsm, mlcd->mlcd_lmm,
78 mlcd->mlcd_eadatalen);
80 CERROR("bad LSM cancelling %d log cookies: rc %d\n",
81 (int)(mlcd->mlcd_cookielen/sizeof(*mlcd->mlcd_cookies)),
84 ///* XXX 0 normally, SENDNOW for debug */);
85 rc = obd_checkmd(obd->u.mds.mds_osc_exp, obd->obd_self_export,
88 CERROR("Can not revalidate lsm %p \n", lsm);
/* lgc_subsys+1 selects the llog context the cookies belong to */
90 ctxt = llog_get_context(obd,mlcd->mlcd_cookies[0].lgc_subsys+1);
91 rc = llog_cancel(ctxt, lsm, mlcd->mlcd_cookielen /
92 sizeof(*mlcd->mlcd_cookies),
93 mlcd->mlcd_cookies, OBD_LLOG_FL_SENDNOW);
97 CERROR("error cancelling %d log cookies: rc %d\n",
98 (int)(mlcd->mlcd_cookielen /
99 sizeof(*mlcd->mlcd_cookies)), rc);
/* payload was sized/allocated by mds_reint_setattr(); freed here */
102 OBD_FREE(mlcd, mlcd->mlcd_size);
105 /* Assumes caller has already pushed us into the kernel context. */
/*
 * Finish an MDS reint transaction: assign (or validate) the transno,
 * record the result in the per-client last_rcvd slot (lsd_client_data),
 * register a commit callback, and commit the journal handle.
 * Returns an error code from starting/committing the transaction;
 * NOTE(review): many lines are elided in this listing (declarations of
 * off/err/force_sync, several closing braces, the RETURN path), so the
 * exact control flow between the visible statements is not verifiable
 * from here.
 */
106 int mds_finish_transno(struct mds_obd *mds, struct inode *inode, void *handle,
107 struct ptlrpc_request *req, int rc, __u32 op_data,
110 struct mds_export_data *med = &req->rq_export->exp_mds_data;
111 struct lsd_client_data *lcd = med->med_lcd;
112 struct obd_device *obd = req->rq_export->exp_obd;
113 __u64 transno, prev_transno;
116 int log_pri = D_RPCTRACE;
119 if (IS_ERR(handle)) {
124 /* if the export has already been failed, we have no last_rcvd slot */
125 if (req->rq_export->exp_failed || obd->obd_fail) {
126 CWARN("commit transaction for disconnected client %s: rc %d\n",
127 req->rq_export->exp_client_uuid.uuid, rc);
135 if (handle == NULL) {
136 /* if we're starting our own xaction, use our own inode */
137 inode = mds->mds_rcvd_filp->f_dentry->d_inode;
138 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
139 if (IS_ERR(handle)) {
140 CERROR("fsfilt_start: %ld\n", PTR_ERR(handle));
141 RETURN(PTR_ERR(handle));
145 off = med->med_lr_off;
/* replayed requests carry their original transno; new requests get 0 */
147 transno = lustre_msg_get_transno(req->rq_reqmsg);
150 CERROR("%s: replay %s transno "LPU64" failed: rc %d\n",
152 libcfs_nid2str(req->rq_export->exp_connection->c_peer.nid),
156 } else if (transno == 0) {
/* fresh request: allocate the next transno under the lock */
157 spin_lock(&mds->mds_transno_lock);
158 transno = ++mds->mds_last_transno;
159 spin_unlock(&mds->mds_transno_lock);
/* replay: keep the client's transno, but never let the server's
 * last_transno move backwards */
161 spin_lock(&mds->mds_transno_lock);
162 if (transno > mds->mds_last_transno)
163 mds->mds_last_transno = transno;
164 spin_unlock(&mds->mds_transno_lock);
167 req->rq_transno = transno;
168 lustre_msg_set_transno(req->rq_repmsg, transno);
/* MDS_CLOSE results live in a separate last_rcvd slot so an open
 * and its close can both be reconstructed after recovery */
169 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
170 prev_transno = le64_to_cpu(lcd->lcd_last_close_transno);
171 lcd->lcd_last_close_transno = cpu_to_le64(transno);
172 lcd->lcd_last_close_xid = cpu_to_le64(req->rq_xid);
173 lcd->lcd_last_close_result = cpu_to_le32(rc);
174 lcd->lcd_last_close_data = cpu_to_le32(op_data);
176 prev_transno = le64_to_cpu(lcd->lcd_last_transno);
/* only overwrite the slot for non-resent/replay requests, or when
 * the transno actually advances */
177 if (((lustre_msg_get_flags(req->rq_reqmsg) &
178 (MSG_RESENT | MSG_REPLAY)) == 0) ||
179 (transno > prev_transno)) {
180 lcd->lcd_last_transno = cpu_to_le64(transno);
181 lcd->lcd_last_xid = cpu_to_le64(req->rq_xid);
182 lcd->lcd_last_result = cpu_to_le32(rc);
183 lcd->lcd_last_data = cpu_to_le32(op_data);
186 /* update the server data to not lose the greatest transno. Bug 11125 */
187 if ((transno == 0) && (prev_transno == mds->mds_last_transno))
188 mds_update_server_data(obd, 0);
191 CERROR("client idx %d has offset %lld\n", med->med_lr_idx, off);
194 struct obd_export *exp = req->rq_export;
/* ask the journal to call mds_commit_cb when this handle commits;
 * a non-zero return apparently forces a synchronous write — TODO
 * confirm fsfilt_add_journal_cb() semantics */
197 force_sync = fsfilt_add_journal_cb(exp->exp_obd,transno,
198 handle, mds_commit_cb,
/* persist this client's last_rcvd record at its fixed offset */
201 err = fsfilt_write_record(obd, mds->mds_rcvd_filp, lcd,
203 force_sync | exp->exp_need_sync);
205 mds_commit_cb(obd, transno, NULL, err);
214 DEBUG_REQ(log_pri, req,
215 "wrote trans #"LPU64" rc %d client %s at idx %u: err = %d",
216 transno, rc, lcd->lcd_uuid, med->med_lr_idx, err);
218 err = mds_lov_write_objids(obd);
224 CDEBUG(log_pri, "wrote objids: err = %d\n", err);
227 err = fsfilt_commit(obd, inode, handle, 0);
229 CERROR("error committing transaction: %d\n", err);
237 /* this gives the same functionality as the code between
238 * sys_chmod and inode_setattr
239 * chown_common and inode_setattr
240 * utimes and inode_setattr
/*
 * Normalize and permission-check the client-supplied iattr before it is
 * applied: fill in missing timestamps, validate chmod/chown/truncate
 * permissions the way the kernel VFS would, and strip setuid/setgid on
 * ownership changes.  Returns 0 or a negative errno (via RETURN).
 * NOTE(review): several lines (declaration of `error`, some GOTO/return
 * statements, closing braces) are elided from this listing.
 */
242 int mds_fix_attr(struct inode *inode, struct mds_update_record *rec)
244 time_t now = CURRENT_SECONDS;
245 struct iattr *attr = &rec->ur_iattr;
246 unsigned int ia_valid = attr->ia_valid;
/* ATTR_RAW is an internal flag; never pass it down to the fs */
250 if (ia_valid & ATTR_RAW)
251 attr->ia_valid &= ~ATTR_RAW;
/* default any time the client did not explicitly set to "now" */
253 if (!(ia_valid & ATTR_CTIME_SET))
254 LTIME_S(attr->ia_ctime) = now;
256 attr->ia_valid &= ~ATTR_CTIME_SET;
257 if (!(ia_valid & ATTR_ATIME_SET))
258 LTIME_S(attr->ia_atime) = now;
259 if (!(ia_valid & ATTR_MTIME_SET))
260 LTIME_S(attr->ia_mtime) = now;
/* immutable/append-only inodes: only flag changes are allowed */
262 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
263 RETURN((attr->ia_valid & ~ATTR_ATTR_FLAG) ? -EPERM : 0);
/* utimes(NULL)-style update: owner or MAY_WRITE permission needed */
266 if ((ia_valid & (ATTR_MTIME|ATTR_ATIME)) == (ATTR_MTIME|ATTR_ATIME)) {
267 if (current->fsuid != inode->i_uid &&
268 (error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
272 if (ia_valid & ATTR_SIZE &&
273 /* NFSD hack for open(O_CREAT|O_TRUNC)=mknod+truncate (bug 5781) */
274 !(rec->ur_uc.luc_fsuid == inode->i_uid &&
275 ia_valid & MDS_OPEN_OWNEROVERRIDE)) {
276 if ((error = ll_permission(inode, MAY_WRITE, NULL)) != 0)
280 if (ia_valid & (ATTR_UID | ATTR_GID)) {
283 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
/* -1 means "leave unchanged" for chown(2) */
285 if (attr->ia_uid == (uid_t) -1)
286 attr->ia_uid = inode->i_uid;
287 if (attr->ia_gid == (gid_t) -1)
288 attr->ia_gid = inode->i_gid;
289 if (!(ia_valid & ATTR_MODE))
290 attr->ia_mode = inode->i_mode;
292 * If the user or group of a non-directory has been
293 * changed by a non-root user, remove the setuid bit.
294 * 19981026 David C Niemi <niemi@tux.org>
296 * Changed this to apply to all users, including root,
297 * to avoid some races. This is the behavior we had in
298 * 2.0. The check for non-root was definitely wrong
299 * for 2.2 anyway, as it should have been using
300 * CAP_FSETID rather than fsuid -- 19990830 SD.
302 if ((inode->i_mode & S_ISUID) == S_ISUID &&
303 !S_ISDIR(inode->i_mode)) {
304 attr->ia_mode &= ~S_ISUID;
305 attr->ia_valid |= ATTR_MODE;
308 * Likewise, if the user or group of a non-directory
309 * has been changed by a non-root user, remove the
310 * setgid bit UNLESS there is no group execute bit
311 * (this would be a file marked for mandatory
312 * locking). 19981026 David C Niemi <niemi@tux.org>
314 * Removed the fsuid check (see the comment above) --
317 if (((inode->i_mode & (S_ISGID | S_IXGRP)) ==
318 (S_ISGID | S_IXGRP)) && !S_ISDIR(inode->i_mode)) {
319 attr->ia_mode &= ~S_ISGID;
320 attr->ia_valid |= ATTR_MODE;
322 } else if (ia_valid & ATTR_MODE) {
323 int mode = attr->ia_mode;
/* chmod(-1) means "keep current mode" */
325 if (attr->ia_mode == (umode_t)-1)
326 mode = inode->i_mode;
/* keep non-permission bits of the current mode intact */
328 (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
/*
 * For a resent request, find the still-unacked reply state with the
 * same xid on this export and transfer ("steal") its saved DLM locks to
 * the new request, so the reconstructed reply carries the same locks.
 * Locking: exp_lock is taken first, then the service srv_lock for the
 * matching reply state ("CAVEAT EMPTOR" below).
 * NOTE(review): the loop's continue/break statements and some braces
 * are elided from this listing.
 */
333 void mds_steal_ack_locks(struct ptlrpc_request *req)
335 struct obd_export *exp = req->rq_export;
336 struct list_head *tmp;
337 struct ptlrpc_reply_state *oldrep;
338 struct ptlrpc_service *svc;
341 /* CAVEAT EMPTOR: spinlock order */
342 spin_lock(&exp->exp_lock);
343 list_for_each (tmp, &exp->exp_outstanding_replies) {
344 oldrep = list_entry(tmp, struct ptlrpc_reply_state,rs_exp_list);
/* only the reply with the matching xid is interesting */
346 if (oldrep->rs_xid != req->rq_xid)
/* same xid but different opcode is a protocol anomaly — log it */
349 if (lustre_msg_get_opc(oldrep->rs_msg) !=
350 lustre_msg_get_opc(req->rq_reqmsg))
351 CERROR ("Resent req xid "LPX64" has mismatched opc: "
352 "new %d old %d\n", req->rq_xid,
353 lustre_msg_get_opc(req->rq_reqmsg),
354 lustre_msg_get_opc(oldrep->rs_msg));
356 svc = oldrep->rs_service;
357 spin_lock (&svc->srv_lock);
359 list_del_init (&oldrep->rs_exp_list);
361 CWARN("Stealing %d locks from rs %p x"LPD64".t"LPD64
363 oldrep->rs_nlocks, oldrep,
364 oldrep->rs_xid, oldrep->rs_transno,
365 lustre_msg_get_opc(oldrep->rs_msg),
366 libcfs_nid2str(exp->exp_connection->c_peer.nid));
/* move each saved lock onto the new request's reply state */
368 for (i = 0; i < oldrep->rs_nlocks; i++)
369 ptlrpc_save_lock(req,
370 &oldrep->rs_locks[i],
371 oldrep->rs_modes[i]);
372 oldrep->rs_nlocks = 0;
374 DEBUG_REQ(D_DLMTRACE, req, "stole locks for");
/* let the old (now lockless) reply be finalized */
375 ptlrpc_schedule_difficult_reply (oldrep);
377 spin_unlock (&svc->srv_lock);
380 spin_unlock(&exp->exp_lock);
/*
 * Reconstruct a resent request's reply from the client's last_rcvd
 * slot: restore transno and status into both the request and the reply
 * message (MDS_CLOSE uses its dedicated slot — see mds_finish_transno),
 * then re-attach the DLM locks saved with the original reply.
 */
383 void mds_req_from_lcd(struct ptlrpc_request *req, struct lsd_client_data *lcd)
385 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
386 req->rq_transno = le64_to_cpu(lcd->lcd_last_close_transno);
387 lustre_msg_set_transno(req->rq_repmsg, req->rq_transno);
388 req->rq_status = le32_to_cpu(lcd->lcd_last_close_result);
389 lustre_msg_set_status(req->rq_repmsg, req->rq_status);
391 req->rq_transno = le64_to_cpu(lcd->lcd_last_transno);
392 lustre_msg_set_transno(req->rq_repmsg, req->rq_transno);
393 req->rq_status = le32_to_cpu(lcd->lcd_last_result);
394 lustre_msg_set_status(req->rq_repmsg, req->rq_status);
396 DEBUG_REQ(D_RPCTRACE, req, "restoring transno "LPD64"/status %d",
397 req->rq_transno, req->rq_status);
399 mds_steal_ack_locks(req);
/*
 * Rebuild the reply for a resent/replayed setattr without re-executing
 * it: restore transno/status/locks from last_rcvd, look the inode back
 * up, and repack the reply body.  If the FID no longer resolves the
 * client is evicted — its state cannot be reconstructed.
 * NOTE(review): declarations of `de`/`rc` and the error-branch braces
 * are elided from this listing.
 */
402 static void reconstruct_reint_setattr(struct mds_update_record *rec,
403 int offset, struct ptlrpc_request *req)
405 struct obd_export *exp = req->rq_export;
406 struct mds_export_data *med = &exp->exp_mds_data;
407 struct mds_obd *obd = &exp->exp_obd->u.mds;
409 struct mds_body *body;
411 mds_req_from_lcd(req, med->med_lcd);
413 de = mds_fid2dentry(obd, rec->ur_fid1, NULL);
417 LCONSOLE_WARN("FID "LPU64"/%u lookup error %d."
418 " Evicting client %s with export %s.\n",
419 rec->ur_fid1->id, rec->ur_fid1->generation, rc,
420 obd_uuid2str(&exp->exp_client_uuid),
421 obd_export_nid2str(exp));
422 mds_export_evict(exp);
427 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof(*body));
428 mds_pack_inode2fid(&body->fid1, de->d_inode);
429 mds_pack_inode2body(body, de->d_inode);
431 /* Don't return OST-specific attributes if we didn't just set them */
432 if (rec->ur_iattr.ia_valid & ATTR_SIZE)
433 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
434 if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
435 body->valid |= OBD_MD_FLMTIME;
436 if (rec->ur_iattr.ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
437 body->valid |= OBD_MD_FLATIME;
/*
 * Propagate an MDS-side uid/gid change to the file's OST objects via an
 * asynchronous setattr on the OSC export: unpack/validate the striping
 * MD, build an obdo carrying uid/gid (+FID and llog cookies), fire the
 * async setattr, then free the temporary MD and obdo.
 * NOTE(review): rc declaration, GOTO labels and several error branches
 * are elided from this listing.
 */
442 int mds_osc_setattr_async(struct obd_device *obd, struct inode *inode,
443 struct lov_mds_md *lmm, int lmm_size,
444 struct llog_cookie *logcookies, struct ll_fid *fid)
446 struct mds_obd *mds = &obd->u.mds;
447 struct obd_trans_info oti = { 0 };
448 struct obd_info oinfo = { { { 0 } } };
/* fault-injection hook for testing the MDS->OST setattr path */
452 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OST_SETATTR))
455 /* first get memory EA */
456 OBDO_ALLOC(oinfo.oi_oa);
462 rc = obd_unpackmd(mds->mds_osc_exp, &oinfo.oi_md, lmm, lmm_size);
464 CERROR("Error unpack md %p for inode %lu\n", lmm, inode->i_ino);
468 rc = obd_checkmd(mds->mds_osc_exp, obd->obd_self_export, oinfo.oi_md);
470 CERROR("Error revalidate lsm %p \n", oinfo.oi_md);
/* only ownership is forwarded to the OSTs here */
475 obdo_from_inode(oinfo.oi_oa, inode, OBD_MD_FLUID | OBD_MD_FLGID);
476 oinfo.oi_oa->o_valid |= OBD_MD_FLID;
477 oinfo.oi_oa->o_id = oinfo.oi_md->lsm_object_id;
/* cookies let the OST-side llog records be cancelled on commit */
479 oinfo.oi_oa->o_valid |= OBD_MD_FLCOOKIE;
480 oti.oti_logcookies = logcookies;
483 LASSERT(fid != NULL);
484 oinfo.oi_oa->o_fid = fid->id;
485 oinfo.oi_oa->o_generation = fid->generation;
486 oinfo.oi_oa->o_valid |= OBD_MD_FLFID | OBD_MD_FLGENER;
488 /* do async setattr from mds to ost not waiting for responses. */
489 rc = obd_setattr_async(mds->mds_osc_exp, &oinfo, &oti, NULL);
491 CDEBUG(D_INODE, "mds to ost setattr objid 0x"LPX64
492 " on ost error %d\n", oinfo.oi_md->lsm_object_id, rc);
495 obd_free_memmd(mds->mds_osc_exp, &oinfo.oi_md);
496 OBDO_FREE(oinfo.oi_oa);
500 /* In the raw-setattr case, we lock the child inode.
501 * In the write-back case or if being called from open, the client holds a lock
504 * We use the ATTR_FROM_OPEN flag to tell these cases apart. */
/*
 * Main setattr reint handler: resolve (and if necessary DLM-lock) the
 * target inode, normalize the attrs (mds_fix_attr), apply them in a
 * journaled transaction, optionally journal chown/chgrp in the llog,
 * update striping EA if supplied, pack the reply body, register a
 * log-cancel commit callback, and finish the transno.  Cleanup is a
 * phase-numbered fallthrough switch.  Returns 0 or negative errno to
 * the request status.
 * NOTE(review): this listing elides many lines — `de`/`handle`
 * declarations, most `if (rc)` checks, GOTO targets, case labels in the
 * cleanup switch, and closing braces — so the visible statements are
 * not contiguous; verify control flow against the full file.
 */
505 static int mds_reint_setattr(struct mds_update_record *rec, int offset,
506 struct ptlrpc_request *req,
507 struct lustre_handle *lh)
509 unsigned int ia_valid = rec->ur_iattr.ia_valid;
510 struct mds_obd *mds = mds_req2mds(req);
511 struct obd_device *obd = req->rq_export->exp_obd;
512 struct mds_body *body;
514 struct inode *inode = NULL;
515 struct lustre_handle lockh;
517 struct mds_logcancel_data *mlcd = NULL;
518 struct lov_mds_md *lmm = NULL;
519 struct llog_cookie *logcookies = NULL;
520 int lmm_size = 0, need_lock = 1, cookie_size = 0;
521 int rc = 0, cleanup_phase = 0, err, locked = 0, sync = 0;
/* quota ids: qcids = current owner (filled below), qpids = new owner */
522 unsigned int qcids[MAXQUOTAS] = { 0, 0 };
523 unsigned int qpids[MAXQUOTAS] = { rec->ur_iattr.ia_uid,
524 rec->ur_iattr.ia_gid };
527 LASSERT(offset == REQ_REC_OFF);
528 offset = REPLY_REC_OFF;
530 DEBUG_REQ(D_INODE, req, "setattr "LPU64"/%u %x", rec->ur_fid1->id,
531 rec->ur_fid1->generation, rec->ur_iattr.ia_valid);
532 OBD_COUNTER_INCREMENT(obd, setattr);
/* resent request: reconstruct the old reply instead of re-executing */
534 MDS_CHECK_RESENT(req, reconstruct_reint_setattr(rec, offset, req));
537 ldlm_request_cancel(req, rec->ur_dlm, 0);
/* from-open (client already holds lock) or read-only export: plain
 * lookup; otherwise take an EX inode lock ourselves */
539 if (rec->ur_iattr.ia_valid & ATTR_FROM_OPEN ||
540 (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)) {
541 de = mds_fid2dentry(mds, rec->ur_fid1, NULL);
543 GOTO(cleanup, rc = PTR_ERR(de));
544 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
545 GOTO(cleanup, rc = -EROFS);
547 __u64 lockpart = MDS_INODELOCK_UPDATE;
/* mode/owner changes are visible to lookup, so lock that bit too */
548 if (rec->ur_iattr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
549 lockpart |= MDS_INODELOCK_LOOKUP;
551 de = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_EX,
552 &lockh, NULL, 0, lockpart);
554 GOTO(cleanup, rc = PTR_ERR(de));
562 if ((rec->ur_iattr.ia_valid & ATTR_FROM_OPEN) ||
563 (rec->ur_iattr.ia_valid & ATTR_SIZE)) {
564 /* Check write access for the O_TRUNC case */
565 if (mds_query_write_access(inode) < 0)
566 GOTO(cleanup, rc = -ETXTBSY);
569 /* save uid/gid for quota acq/rel */
570 qcids[USRQUOTA] = inode->i_uid;
571 qcids[GRPQUOTA] = inode->i_gid;
/* striping EA update needs the inode mutex held across set_md */
573 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
574 rec->ur_eadata != NULL) {
575 LOCK_INODE_MUTEX(inode);
579 OBD_FAIL_WRITE(obd, OBD_FAIL_MDS_REINT_SETATTR_WRITE, inode->i_sb);
581 /* start a log jounal handle if needed */
/* chown/chgrp on a regular file: fetch the striping EA so the
 * change can be journaled and forwarded to the OSTs */
582 if (S_ISREG(inode->i_mode) &&
583 rec->ur_iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
584 lmm_size = mds->mds_max_mdsize;
585 OBD_ALLOC(lmm, lmm_size);
587 GOTO(cleanup, rc = -ENOMEM);
590 rc = mds_get_md(obd, inode, lmm, &lmm_size, need_lock, 0);
/* extra journal credits sized by the stripe count */
595 handle = fsfilt_start_log(obd, inode, FSFILT_OP_SETATTR, NULL,
596 le32_to_cpu(lmm->lmm_stripe_count));
598 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
601 GOTO(cleanup, rc = PTR_ERR(handle));
603 if (rec->ur_iattr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
604 CDEBUG(D_INODE, "setting mtime %lu, ctime %lu\n",
605 LTIME_S(rec->ur_iattr.ia_mtime),
606 LTIME_S(rec->ur_iattr.ia_ctime));
607 rc = mds_fix_attr(inode, rec);
611 if (rec->ur_iattr.ia_valid & ATTR_ATTR_FLAG) { /* ioctl */
612 rc = fsfilt_iocontrol(obd, inode, NULL, EXT3_IOC_SETFLAGS,
613 (long)&rec->ur_flags);
614 } else if (rec->ur_iattr.ia_valid) { /* setattr */
615 rc = fsfilt_setattr(obd, de, handle, &rec->ur_iattr, 0);
616 /* journal chown/chgrp in llog, just like unlink */
617 if (rc == 0 && lmm_size){
618 cookie_size = mds_get_cookie_size(obd, lmm);
619 OBD_ALLOC(logcookies, cookie_size);
620 if (logcookies == NULL)
621 GOTO(cleanup, rc = -ENOMEM);
623 if (mds_log_op_setattr(obd, inode, lmm, lmm_size,
624 logcookies, cookie_size) <= 0) {
625 OBD_FREE(logcookies, cookie_size);
/* client supplied a new striping EA (lov) for a file or dir */
631 if (rc == 0 && (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
632 rec->ur_eadata != NULL) {
633 struct lov_stripe_md *lsm = NULL;
634 struct lov_user_md *lum = NULL;
636 rc = ll_permission(inode, MAY_WRITE, NULL);
640 lum = rec->ur_eadata;
641 /* if { size, offset, count } = { 0, -1, 0 } (i.e. all default
642 * values specified) then delete default striping from dir. */
643 if (S_ISDIR(inode->i_mode) &&
644 ((lum->lmm_stripe_size == 0 &&
645 lum->lmm_stripe_offset ==
646 (typeof(lum->lmm_stripe_offset))(-1) &&
647 lum->lmm_stripe_count == 0) ||
648 /* lmm_stripe_size == -1 is deprecated in 1.4.6 */
649 lum->lmm_stripe_size ==
650 (typeof(lum->lmm_stripe_size))(-1))){
651 rc = fsfilt_set_md(obd, inode, handle, NULL, 0, "lov");
/* validate the user EA by round-tripping through the LOV */
655 rc = obd_iocontrol(OBD_IOC_LOV_SETSTRIPE,
657 &lsm, rec->ur_eadata);
661 obd_free_memmd(mds->mds_osc_exp, &lsm);
663 rc = fsfilt_set_md(obd, inode, handle, rec->ur_eadata,
664 rec->ur_eadatalen, "lov");
670 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof(*body));
671 mds_pack_inode2fid(&body->fid1, inode);
672 mds_pack_inode2body(body, inode);
674 /* don't return OST-specific attributes if we didn't just set them. */
675 if (ia_valid & ATTR_SIZE)
676 body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
677 if (ia_valid & (ATTR_MTIME | ATTR_MTIME_SET))
678 body->valid |= OBD_MD_FLMTIME;
679 if (ia_valid & (ATTR_ATIME | ATTR_ATIME_SET))
680 body->valid |= OBD_MD_FLATIME;
/* build the commit-time llog-cancel payload (consumed and freed by
 * mds_cancel_cookies_cb) */
682 if (rc == 0 && rec->ur_cookielen && !IS_ERR(mds->mds_osc_obd)) {
683 OBD_ALLOC(mlcd, sizeof(*mlcd) + rec->ur_cookielen +
686 mlcd->mlcd_size = sizeof(*mlcd) + rec->ur_cookielen +
688 mlcd->mlcd_eadatalen = rec->ur_eadatalen;
689 mlcd->mlcd_cookielen = rec->ur_cookielen;
/* lmm is laid out immediately after the inline cookie array */
690 mlcd->mlcd_lmm = (void *)&mlcd->mlcd_cookies +
691 mlcd->mlcd_cookielen;
692 memcpy(&mlcd->mlcd_cookies, rec->ur_logcookies,
693 mlcd->mlcd_cookielen);
694 memcpy(mlcd->mlcd_lmm, rec->ur_eadata,
695 mlcd->mlcd_eadatalen);
697 CERROR("unable to allocate log cancel data\n");
703 sync = fsfilt_add_journal_cb(req->rq_export->exp_obd, 0, handle,
704 mds_cancel_cookies_cb, mlcd);
705 err = mds_finish_transno(mds, inode, handle, req, rc, 0, sync);
706 /* do mds to ost setattr if needed */
707 if (!rc && !err && lmm_size)
708 mds_osc_setattr_async(obd, inode, lmm, lmm_size,
709 logcookies, rec->ur_fid1);
/* phase-numbered cleanup; cases fall through (labels elided here) */
711 switch (cleanup_phase) {
713 OBD_FREE(lmm, mds->mds_max_mdsize);
715 OBD_FREE(logcookies, cookie_size);
717 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) &&
718 rec->ur_eadata != NULL)
719 UNLOCK_INODE_MUTEX(inode);
/* on failure drop our lock; on success hand it to the reply so it
 * is held until the client acks */
723 ldlm_lock_decref(&lockh, LCK_EX);
725 ptlrpc_save_lock (req, &lockh, LCK_EX);
738 /* trigger dqrel/dqacq for original owner and new owner */
739 if (ia_valid & (ATTR_UID | ATTR_GID))
740 lquota_adjust(mds_quota_interface_ref, obd, qcids, qpids, rc,
/*
 * Rebuild the reply for a resent/replayed create: restore
 * transno/status/locks from last_rcvd, then re-lookup the parent and
 * the created child by name so the reply body can be repacked.  If
 * either lookup fails the client is evicted — the original reply
 * cannot be reconstructed.
 * NOTE(review): `rc` for the child branch, early returns and braces
 * are elided from this listing.
 */
746 static void reconstruct_reint_create(struct mds_update_record *rec, int offset,
747 struct ptlrpc_request *req)
749 struct obd_export *exp = req->rq_export;
750 struct mds_export_data *med = &exp->exp_mds_data;
751 struct mds_obd *obd = &exp->exp_obd->u.mds;
752 struct dentry *parent, *child;
753 struct mds_body *body;
756 mds_req_from_lcd(req, med->med_lcd);
761 parent = mds_fid2dentry(obd, rec->ur_fid1, NULL);
762 if (IS_ERR(parent)) {
763 rc = PTR_ERR(parent);
764 LCONSOLE_WARN("Parent "LPU64"/%u lookup error %d."
765 " Evicting client %s with export %s.\n",
766 rec->ur_fid1->id, rec->ur_fid1->generation, rc,
767 obd_uuid2str(&exp->exp_client_uuid),
768 obd_export_nid2str(exp));
769 mds_export_evict(exp);
/* ur_namelen includes the terminating NUL, hence the -1 */
773 child = ll_lookup_one_len(rec->ur_name, parent, rec->ur_namelen - 1);
776 LCONSOLE_WARN("Child "LPU64"/%u lookup error %d."
777 " Evicting client %s with export %s.\n",
778 rec->ur_fid1->id, rec->ur_fid1->generation, rc,
779 obd_uuid2str(&exp->exp_client_uuid),
780 obd_export_nid2str(exp));
781 mds_export_evict(exp);
786 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof(*body));
787 mds_pack_inode2fid(&body->fid1, child->d_inode);
788 mds_pack_inode2body(body, child->d_inode);
/*
 * Main create reint handler: lock the parent directory, look up the
 * (expected-negative) child name, dispatch by file type to
 * create/mkdir/symlink/mknod inside a journaled transaction, stamp the
 * client-supplied times/ownership on the new inode (supporting replay
 * with a fixed ino/generation via ur_fid2), pack the reply body, and
 * finish the transno.  On mid-transaction failure the just-created
 * object is destroyed again.  Quota is checked before and committed
 * after the operation.
 * NOTE(review): this listing elides many lines — `handle`, `created`,
 * `rec_pending`, `iattr` declarations, the switch(type) case labels,
 * most `if (rc)` / IS_ERR checks, GOTO targets and closing braces — so
 * the visible statements are not contiguous; verify control flow
 * against the full file.
 */
794 static int mds_reint_create(struct mds_update_record *rec, int offset,
795 struct ptlrpc_request *req,
796 struct lustre_handle *lh)
798 struct dentry *dparent = NULL;
799 struct mds_obd *mds = mds_req2mds(req);
800 struct obd_device *obd = req->rq_export->exp_obd;
801 struct dentry *dchild = NULL;
802 struct inode *dir = NULL;
804 struct lustre_handle lockh;
805 int rc = 0, err, type = rec->ur_mode & S_IFMT, cleanup_phase = 0;
/* quota ids: qcids = creating user, qpids = parent dir owner */
807 unsigned int qcids[MAXQUOTAS] = { current->fsuid, current->fsgid };
808 unsigned int qpids[MAXQUOTAS] = { 0, 0 };
809 struct lvfs_dentry_params dp = LVFS_DENTRY_PARAMS_INIT;
811 unsigned int gid = current->fsgid;
814 LASSERT(offset == REQ_REC_OFF);
815 offset = REPLY_REC_OFF;
817 LASSERT(!strcmp(req->rq_export->exp_obd->obd_type->typ_name,
820 DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u name %s mode %o",
821 rec->ur_fid1->id, rec->ur_fid1->generation,
822 rec->ur_name, rec->ur_mode);
/* resent request: reconstruct the old reply instead of re-executing */
824 MDS_CHECK_RESENT(req, reconstruct_reint_create(rec, offset, req));
826 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_CREATE))
827 GOTO(cleanup, rc = -ESTALE);
830 ldlm_request_cancel(req, rec->ur_dlm, 0);
/* EX lock on the parent dir's UPDATE bit for the duration */
832 dparent = mds_fid2locked_dentry(obd, rec->ur_fid1, NULL, LCK_EX, &lockh,
833 rec->ur_name, rec->ur_namelen - 1,
834 MDS_INODELOCK_UPDATE);
835 if (IS_ERR(dparent)) {
836 rc = PTR_ERR(dparent);
837 CDEBUG(D_DENTRY, "parent "LPU64"/%u lookup error %d\n",
838 rec->ur_fid1->id, rec->ur_fid1->generation, rc);
841 cleanup_phase = 1; /* locked parent dentry */
842 dir = dparent->d_inode;
845 ldlm_lock_dump_handle(D_OTHER, &lockh);
847 dchild = ll_lookup_one_len(rec->ur_name, dparent, rec->ur_namelen - 1);
848 if (IS_ERR(dchild)) {
849 rc = PTR_ERR(dchild);
850 CDEBUG(D_DENTRY, "child lookup error %d\n", rc);
854 cleanup_phase = 2; /* child dentry */
856 OBD_FAIL_WRITE(obd, OBD_FAIL_MDS_REINT_CREATE_WRITE, dir->i_sb);
/* read-only export: report EEXIST if the name exists, else EROFS */
858 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY) {
860 GOTO(cleanup, rc = -EEXIST);
861 GOTO(cleanup, rc = -EROFS);
/* new subdirectory of a setgid dir inherits the setgid bit */
864 if (dir->i_mode & S_ISGID && S_ISDIR(rec->ur_mode))
865 rec->ur_mode |= S_ISGID;
/* pass the desired inode number (replay) down via d_fsdata */
867 dchild->d_fsdata = (void *)&dp;
868 dp.ldp_inum = (unsigned long)rec->ur_fid2->id;
871 if (dir->i_mode & S_ISGID)
874 gid = current->fsgid;
876 /* we try to get enough quota to write here, and let ldiskfs
877 * decide if it is out of quota or not b=14783 */
878 lquota_chkquota(mds_quota_interface_ref, obd,
879 current->fsuid, gid, 1, &rec_pending);
/* dispatch on (rec->ur_mode & S_IFMT); case labels elided in this
 * listing: regular file / dir / symlink / device-fifo-socket */
883 handle = fsfilt_start(obd, dir, FSFILT_OP_CREATE, NULL);
885 GOTO(cleanup, rc = PTR_ERR(handle));
886 rc = ll_vfs_create(dir, dchild, rec->ur_mode, NULL);
887 mds_counter_incr(req->rq_export, LPROC_MDS_MKNOD);
892 handle = fsfilt_start(obd, dir, FSFILT_OP_MKDIR, NULL);
894 GOTO(cleanup, rc = PTR_ERR(handle));
895 rc = vfs_mkdir(dir, dchild, rec->ur_mode);
896 mds_counter_incr(req->rq_export, LPROC_MDS_MKDIR);
901 handle = fsfilt_start(obd, dir, FSFILT_OP_SYMLINK, NULL);
903 GOTO(cleanup, rc = PTR_ERR(handle));
904 if (rec->ur_tgt == NULL) /* no target supplied */
905 rc = -EINVAL; /* -EPROTO? */
907 rc = ll_vfs_symlink(dir, dchild, rec->ur_tgt, S_IALLUGO);
908 mds_counter_incr(req->rq_export, LPROC_MDS_MKNOD);
916 int rdev = rec->ur_rdev;
917 handle = fsfilt_start(obd, dir, FSFILT_OP_MKNOD, NULL);
919 GOTO(cleanup, rc = PTR_ERR(handle));
920 rc = vfs_mknod(dir, dchild, rec->ur_mode, rdev);
921 mds_counter_incr(req->rq_export, LPROC_MDS_MKNOD);
926 CERROR("bad file type %o creating %s\n", type, rec->ur_name);
927 dchild->d_fsdata = NULL;
928 GOTO(cleanup, rc = -EINVAL);
931 /* In case we stored the desired inum in here, we want to clean up. */
932 if (dchild->d_fsdata == (void *)(unsigned long)rec->ur_fid2->id)
933 dchild->d_fsdata = NULL;
936 CDEBUG(D_INODE, "error during create: %d\n", rc);
940 struct inode *inode = dchild->d_inode;
941 struct mds_body *body;
/* stamp client-supplied times so create is idempotent on replay */
944 LTIME_S(iattr.ia_atime) = rec->ur_time;
945 LTIME_S(iattr.ia_ctime) = rec->ur_time;
946 LTIME_S(iattr.ia_mtime) = rec->ur_time;
947 iattr.ia_uid = current->fsuid; /* set by push_ctxt already */
949 iattr.ia_valid = ATTR_UID | ATTR_GID | ATTR_ATIME |
950 ATTR_MTIME | ATTR_CTIME;
952 if (rec->ur_fid2->id) {
953 LASSERT(rec->ur_fid2->id == inode->i_ino);
954 inode->i_generation = rec->ur_fid2->generation;
955 /* Dirtied and committed by the upcoming setattr. */
956 CDEBUG(D_INODE, "recreated ino %lu with gen %u\n",
957 inode->i_ino, inode->i_generation);
959 CDEBUG(D_INODE, "created ino %lu with gen %x\n",
960 inode->i_ino, inode->i_generation);
963 rc = fsfilt_setattr(obd, dchild, handle, &iattr, 0);
965 CERROR("error on child setattr: rc = %d\n", rc);
/* parent dir's mtime/ctime change because a name was added */
967 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
968 rc = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
970 CERROR("error on parent setattr: rc = %d\n", rc);
/* new subdirectory inherits the parent's default striping EA */
972 if (S_ISDIR(inode->i_mode)) {
973 struct lov_mds_md lmm;
974 int lmm_size = sizeof(lmm);
975 rc = mds_get_md(obd, dir, &lmm, &lmm_size, 1, 0);
977 LOCK_INODE_MUTEX(inode);
978 rc = fsfilt_set_md(obd, inode, handle,
979 &lmm, lmm_size, "lov");
980 UNLOCK_INODE_MUTEX(inode);
983 CERROR("error on copy stripe info: rc = %d\n",
987 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof(*body));
988 mds_pack_inode2fid(&body->fid1, inode);
989 mds_pack_inode2body(body, inode);
994 err = mds_finish_transno(mds, dir, handle, req, rc, 0, 0);
996 lquota_pending_commit(mds_quota_interface_ref, obd,
997 current->fsuid, gid, 1);
1000 /* Destroy the file we just created. This should not need
1001 * extra journal credits, as we have already modified all of
1002 * the blocks needed in order to create the file in the first
1007 err = vfs_rmdir(dir, dchild);
1009 CERROR("rmdir in error path: %d\n", err);
1012 err = vfs_unlink(dir, dchild);
1014 CERROR("unlink in error path: %d\n", err);
1017 } else if (created) {
1018 /* The inode we were allocated may have just been freed
1019 * by an unlink operation. We take this lock to
1020 * synchronize against the matching reply-ack-lock taken
1021 * in unlink, to avoid replay problems if this reply
1022 * makes it out to the client but the unlink's does not.
1023 * See bug 2029 for more detail.*/
1024 mds_lock_new_child(obd, dchild->d_inode, NULL);
1025 /* save uid/gid of create inode and parent */
1026 qpids[USRQUOTA] = dir->i_uid;
1027 qpids[GRPQUOTA] = dir->i_gid;
/* phase-numbered cleanup; cases fall through */
1032 switch (cleanup_phase) {
1033 case 2: /* child dentry */
1035 case 1: /* locked parent dentry */
/* on failure drop the parent lock; on success save it with the
 * reply until the client acks */
1037 ldlm_lock_decref(&lockh, LCK_EX);
1039 ptlrpc_save_lock (req, &lockh, LCK_EX);
1045 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1048 req->rq_status = rc;
1050 /* trigger dqacq on the owner of child and parent */
1051 lquota_adjust(mds_quota_interface_ref, obd, qcids, qpids, rc,
/*
 * Ordering predicate for resource ids (with policies as tie-breaker),
 * used to take DLM locks in a canonical order and avoid deadlock.
 * Appears to return non-zero when res1 should sort after res2; all-zero
 * resources sort last so enqueue_ordered* can skip them.
 * NOTE(review): declaration of `i` and the individual return statements
 * are elided from this listing.
 */
1056 int res_gt(struct ldlm_res_id *res1, struct ldlm_res_id *res2,
1057 ldlm_policy_data_t *p1, ldlm_policy_data_t *p2)
1061 for (i = 0; i < RES_NAME_SIZE; i++) {
1062 /* return 1 here, because enqueue_ordered will skip resources
1063 * of all zeroes if they're sorted to the end of the list. */
1064 if (res1->name[i] == 0 && res2->name[i] != 0)
1066 if (res2->name[i] == 0 && res1->name[i] != 0)
1069 if (res1->name[i] > res2->name[i])
1071 if (res1->name[i] < res2->name[i])
/* names equal: fall back to comparing the policy data bytes */
1076 if (memcmp(p1, p2, sizeof(*p1)) < 0)
1081 /* This function doesn't use ldlm_match_or_enqueue because we're always called
1082 * with EX or PW locks, and the MDS is no longer allowed to match write locks,
1083 * because they take the place of local semaphores.
1085 * One or two locks are taken in numerical order. A res_id->name[0] of 0 means
1086 * no lock is taken for that res_id. Must be at least one non-zero res_id. */
/*
 * Take one or two local (IBITS) DLM locks in canonical resource order
 * (see res_gt) to avoid deadlock.  If both requests name the same
 * resource with overlapping inodebits, the second handle just takes an
 * extra reference on the first lock instead of enqueueing again.
 * NOTE(review): declarations of rc/flags, several rc checks and a few
 * argument lines of the second enqueue are elided from this listing.
 */
1087 int enqueue_ordered_locks(struct obd_device *obd, struct ldlm_res_id *p1_res_id,
1088 struct lustre_handle *p1_lockh, int p1_lock_mode,
1089 ldlm_policy_data_t *p1_policy,
1090 struct ldlm_res_id *p2_res_id,
1091 struct lustre_handle *p2_lockh, int p2_lock_mode,
1092 ldlm_policy_data_t *p2_policy)
1094 struct ldlm_res_id *res_id[2] = { p1_res_id, p2_res_id };
1095 struct lustre_handle *handles[2] = { p1_lockh, p2_lockh };
1096 int lock_modes[2] = { p1_lock_mode, p2_lock_mode };
1097 ldlm_policy_data_t *policies[2] = {p1_policy, p2_policy};
1101 LASSERT(p1_res_id != NULL && p2_res_id != NULL);
1103 CDEBUG(D_INFO, "locks before: "LPU64"/"LPU64"\n",
1104 res_id[0]->name[0], res_id[1]->name[0]);
/* swap so index 0 is the lower-ordered resource */
1106 if (res_gt(p1_res_id, p2_res_id, p1_policy, p2_policy)) {
1107 handles[1] = p1_lockh;
1108 handles[0] = p2_lockh;
1109 res_id[1] = p1_res_id;
1110 res_id[0] = p2_res_id;
1111 lock_modes[1] = p1_lock_mode;
1112 lock_modes[0] = p2_lock_mode;
1113 policies[1] = p1_policy;
1114 policies[0] = p2_policy;
1117 CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"\n",
1118 res_id[0]->name[0], res_id[1]->name[0]);
1120 flags = LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB;
1121 rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id[0],
1122 LDLM_IBITS, policies[0], lock_modes[0],
1123 &flags, ldlm_blocking_ast,
1124 ldlm_completion_ast, NULL, NULL, 0,
1128 ldlm_lock_dump_handle(D_OTHER, handles[0]);
/* same resource and overlapping bits: share the first lock */
1130 if (memcmp(res_id[0], res_id[1], sizeof(*res_id[0])) == 0 &&
1131 (policies[0]->l_inodebits.bits & policies[1]->l_inodebits.bits)) {
1132 memcpy(handles[1], handles[0], sizeof(*(handles[1])));
1133 ldlm_lock_addref(handles[1], lock_modes[1]);
1134 } else if (res_id[1]->name[0] != 0) {
1135 flags = LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB;
1136 rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id[1],
1137 LDLM_IBITS, policies[1],
1138 lock_modes[1], &flags,
1140 ldlm_completion_ast, NULL, NULL,
1141 0, NULL, handles[1]);
1142 if (rc != ELDLM_OK) {
/* second enqueue failed: release the first lock too */
1143 ldlm_lock_decref(handles[0], lock_modes[0]);
1146 ldlm_lock_dump_handle(D_OTHER, handles[1]);
/* Byte-wise equality of two DLM resource ids. */
1152 static inline int res_eq(struct ldlm_res_id *res1, struct ldlm_res_id *res2)
1154 return !memcmp(res1, res2, sizeof(*res1));
/*
 * If two lock requests target the same resource, merge the second
 * request's inodebits into the first so a single enqueue can cover
 * both (used by enqueue_4ordered_locks to close the two-lock race,
 * bug 10360).  NOTE(review): the return type/line and the early-return
 * body are elided from this listing.
 */
1158 try_to_aggregate_locks(struct ldlm_res_id *res1, ldlm_policy_data_t *p1,
1159 struct ldlm_res_id *res2, ldlm_policy_data_t *p2)
1161 if (!res_eq(res1, res2))
1163 /* XXX: any additional inodebits (to current LOOKUP and UPDATE)
1164 * should be taken with great care here */
1165 p1->l_inodebits.bits |= p2->l_inodebits.bits;
/* Take up to four local DLM inodebits locks (two parents, two children) in
 * increasing-resource order to avoid deadlocks with other threads doing the
 * same (see the rename-path comment further below).  A resource with
 * name[0] == 0 means "no lock needed" and is skipped; two equal resources
 * share one lock via an extra reference instead of a second enqueue. */
1168 int enqueue_4ordered_locks(struct obd_device *obd,struct ldlm_res_id *p1_res_id,
1169                            struct lustre_handle *p1_lockh, int p1_lock_mode,
1170                            ldlm_policy_data_t *p1_policy,
1171                            struct ldlm_res_id *p2_res_id,
1172                            struct lustre_handle *p2_lockh, int p2_lock_mode,
1173                            ldlm_policy_data_t *p2_policy,
1174                            struct ldlm_res_id *c1_res_id,
1175                            struct lustre_handle *c1_lockh, int c1_lock_mode,
1176                            ldlm_policy_data_t *c1_policy,
1177                            struct ldlm_res_id *c2_res_id,
1178                            struct lustre_handle *c2_lockh, int c2_lock_mode,
1179                            ldlm_policy_data_t *c2_policy)
/* Slot [4] of each parallel array below is scratch space for the sort. */
1181         struct ldlm_res_id *res_id[5] = { p1_res_id, p2_res_id,
1182                                           c1_res_id, c2_res_id };
1183         struct lustre_handle *dlm_handles[5] = { p1_lockh, p2_lockh,
1184                                                  c1_lockh, c2_lockh };
1185         int lock_modes[5] = { p1_lock_mode, p2_lock_mode,
1186                               c1_lock_mode, c2_lock_mode };
1187         ldlm_policy_data_t *policies[5] = {p1_policy, p2_policy,
1188                                            c1_policy, c2_policy};
1189         int rc, i, j, sorted, flags;
1192         CDEBUG(D_DLMTRACE, "locks before: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
1193                res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
1194                res_id[3]->name[0]);
1196         /* simple insertion sort - we have at most 4 elements */
1197         for (i = 1; i < 4; i++) {
1199                 dlm_handles[4] = dlm_handles[i];
1200                 res_id[4] = res_id[i];
1201                 lock_modes[4] = lock_modes[i];
1202                 policies[4] = policies[i];
/* res_gt() orders by (resource, policy) -- keeps the four parallel arrays
 * permuted consistently. */
1206                         if (res_gt(res_id[j], res_id[4], policies[j],
1208                                 dlm_handles[j + 1] = dlm_handles[j];
1209                                 res_id[j + 1] = res_id[j];
1210                                 lock_modes[j + 1] = lock_modes[j];
1211                                 policies[j + 1] = policies[j];
1216                 } while (j >= 0 && !sorted);
1218                 dlm_handles[j + 1] = dlm_handles[4];
1219                 res_id[j + 1] = res_id[4];
1220                 lock_modes[j + 1] = lock_modes[4];
1221                 policies[j + 1] = policies[4];
1224         CDEBUG(D_DLMTRACE, "lock order: "LPU64"/"LPU64"/"LPU64"/"LPU64"\n",
1225                res_id[0]->name[0], res_id[1]->name[0], res_id[2]->name[0],
1226                res_id[3]->name[0]);
1228         /* XXX we could send ASTs on all these locks first before blocking? */
1229         for (i = 0; i < 4; i++) {
1230                 flags = LDLM_FL_ATOMIC_CB;
/* name[0] == 0 marks an unused slot -- nothing to lock. */
1231                 if (res_id[i]->name[0] == 0)
/* Equal adjacent resources: reuse the previous handle, just add a ref. */
1233                 if (i && res_eq(res_id[i], res_id[i-1])) {
1234                         memcpy(dlm_handles[i], dlm_handles[i-1],
1235                                sizeof(*(dlm_handles[i])));
1236                         ldlm_lock_addref(dlm_handles[i], lock_modes[i]);
1238                         /* we need to enqueue locks with different inodebits
1239                          * at once, because otherwise a concurrent thread can
1240                          * hit the window between these two locks and we'll
1241                          * get to deadlock. see bug 10360. note also, that it
1242                          * is impossible to have >2 equal res. */
1244                                 try_to_aggregate_locks(res_id[i], policies[i],
1245                                                        res_id[i+1], policies[i+1]);
1246                         rc = ldlm_cli_enqueue_local(obd->obd_namespace,
1247                                                     res_id[i], LDLM_IBITS,
1248                                                     policies[i], lock_modes[i],
1249                                                     &flags, ldlm_blocking_ast,
1250                                                     ldlm_completion_ast, NULL,
/* Enqueue failure: report -EIO to the caller; out_err (below, partially
 * elided here) unwinds the locks already taken. */
1254                                 GOTO(out_err, rc = -EIO);
1255                         ldlm_lock_dump_handle(D_OTHER, dlm_handles[i]);
1262                 ldlm_lock_decref(dlm_handles[i], lock_modes[i]);
1267 /* In the unlikely case that the child changed while we were waiting
1268 * on the lock, we need to drop the lock on the old child and either:
1269 * - if the child has a lower resource name, then we have to also
1270 * drop the parent lock and regain the locks in the right order
1271 * - in the rename case, if the child has a lower resource name than one of
1272 * the other parent/child resources (maxres) we also need to reget the locks
1273 * - if the child has a higher resource name (this is the common case)
1274 * we can just get the lock on the new child (still in lock order)
1276 * Returns 0 if the child did not change or if it changed but could be locked.
1277 * Returns 1 if the child changed and we need to re-lock (no locks held).
1278 * Returns -ve error with a valid dchild (no locks held). */
/* See the block comment above for the full contract: re-lookup 'name' under
 * the (now locked) parent and verify the child resource has not changed
 * since the unlocked lookup; relock or report as described there. */
1279 static int mds_verify_child(struct obd_device *obd,
1280                             struct ldlm_res_id *parent_res_id,
1281                             struct lustre_handle *parent_lockh,
1282                             struct dentry *dparent, int parent_mode,
1283                             struct ldlm_res_id *child_res_id,
1284                             struct lustre_handle *child_lockh,
1285                             struct dentry **dchildp, int child_mode,
1286                             ldlm_policy_data_t *child_policy,
1287                             const char *name, int namelen,
1288                             struct ldlm_res_id *maxres)
1290         struct dentry *vchild, *dchild = *dchildp;
1291         int rc = 0, cleanup_phase = 2; /* parent, child locks */
/* namelen includes the NUL terminator, hence the -1 (same convention as the
 * other ll_lookup_one_len() callers in this file). */
1294         vchild = ll_lookup_one_len(name, dparent, namelen - 1);
1296                 GOTO(cleanup, rc = PTR_ERR(vchild));
/* Common case: the child (or its absence) matches what we locked -- done. */
1298         if (likely((vchild->d_inode == NULL && child_res_id->name[0] == 0) ||
1299                    (vchild->d_inode != NULL &&
1300                     child_res_id->name[0] == vchild->d_inode->i_ino &&
1301                     child_res_id->name[1] == vchild->d_inode->i_generation))) {
1309         CDEBUG(D_DLMTRACE, "child inode changed: %p != %p (%lu != "LPU64")\n",
1310                vchild->d_inode, dchild ? dchild->d_inode : 0,
1311                vchild->d_inode ? vchild->d_inode->i_ino : 0,
1312                child_res_id->name[0]);
/* Drop the stale child lock; only the parent lock is still held. */
1313         if (child_res_id->name[0] != 0)
1314                 ldlm_lock_decref(child_lockh, child_mode);
1318         cleanup_phase = 1; /* parent lock only */
1319         *dchildp = dchild = vchild;
1321         if (dchild->d_inode) {
1322                 int flags = LDLM_FL_ATOMIC_CB;
1323                 child_res_id->name[0] = dchild->d_inode->i_ino;
1324                 child_res_id->name[1] = dchild->d_inode->i_generation;
1326                 /* Make sure that we don't try to re-enqueue a lock on the
1327                  * same resource if it happens that the source is renamed to
1328                  * the target by another thread (bug 9974, thanks racer :-) */
/* New child sorts at or below a resource we already hold -- taking it now
 * would violate lock ordering, so return 1 and let the caller restart. */
1329                 if (!res_gt(child_res_id, parent_res_id, NULL, NULL) ||
1330                     !res_gt(child_res_id, maxres, NULL, NULL)) {
1331                         CDEBUG(D_DLMTRACE, "relock "LPU64"<("LPU64"|"LPU64")\n",
1332                                child_res_id->name[0], parent_res_id->name[0],
1334                         GOTO(cleanup, rc = 1);
1337                 rc = ldlm_cli_enqueue_local(obd->obd_namespace, child_res_id,
1338                                             LDLM_IBITS, child_policy,
1341                                             ldlm_completion_ast, NULL,
1342                                             NULL, 0, NULL, child_lockh);
1344                         GOTO(cleanup, rc = -EIO);
/* Child disappeared: clear the resource id so cleanup skips the decref. */
1346                 memset(child_res_id, 0, sizeof(*child_res_id));
1352         switch(cleanup_phase) {
1354                 if (child_res_id->name[0] != 0)
1355                         ldlm_lock_decref(child_lockh, child_mode);
1357                 ldlm_lock_decref(parent_lockh, parent_mode);
/* An inode whose ctime is more than INODE_CTIME_AGE seconds in the past is
 * considered "rarely changed"; mds_get_parent_child_locked() uses this to
 * decide when to hand back an extra UPDATE bit along with the LOOKUP lock. */
1363 #define INODE_CTIME_AGE (10)
1364 #define INODE_CTIME_OLD(inode) (LTIME_S(inode->i_ctime) + \
1365                                 INODE_CTIME_AGE < CURRENT_SECONDS)
/* Look up a parent by FID and a named child under it, then take DLM
 * inodebits locks on both in resource order and re-verify the child did
 * not change while unlocked (via mds_verify_child()).  On success the
 * caller owns *dparentp/*dchildp references and both lock handles; the
 * child lock is only held when the child exists. */
1367 int mds_get_parent_child_locked(struct obd_device *obd, struct mds_obd *mds,
1369                                 struct lustre_handle *parent_lockh,
1370                                 struct dentry **dparentp, int parent_mode,
1371                                 __u64 parent_lockpart,
1372                                 char *name, int namelen,
1373                                 struct lustre_handle *child_lockh,
1374                                 struct dentry **dchildp, int child_mode,
1375                                 __u64 child_lockpart)
1377         struct ldlm_res_id child_res_id = { .name = {0} };
1378         struct ldlm_res_id parent_res_id = { .name = {0} };
1379         ldlm_policy_data_t parent_policy = {.l_inodebits = { parent_lockpart }};
1380         ldlm_policy_data_t child_policy = {.l_inodebits = { child_lockpart }};
1381         struct inode *inode;
1382         int rc = 0, cleanup_phase = 0;
1385         /* Step 1: Lookup parent */
1386         *dparentp = mds_fid2dentry(mds, fid, NULL);
1387         if (IS_ERR(*dparentp)) {
1388                 rc = PTR_ERR(*dparentp);
1393         CDEBUG(D_INODE, "parent ino %lu, name %s\n",
1394                (*dparentp)->d_inode->i_ino, name);
1396         parent_res_id.name[0] = (*dparentp)->d_inode->i_ino;
1397         parent_res_id.name[1] = (*dparentp)->d_inode->i_generation;
1399         cleanup_phase = 1; /* parent dentry */
1401         /* Step 2: Lookup child (without DLM lock, to get resource name) */
1402         *dchildp = ll_lookup_one_len(name, *dparentp, namelen - 1);
1403         if (IS_ERR(*dchildp)) {
1404                 rc = PTR_ERR(*dchildp);
1405                 CDEBUG(D_INODE, "child lookup error %d\n", rc);
1409         cleanup_phase = 2; /* child dentry */
1410         inode = (*dchildp)->d_inode;
1411         if (inode != NULL) {
1412                 if (is_bad_inode(inode)) {
1413                         CERROR("bad inode returned %lu/%u\n",
1414                                inode->i_ino, inode->i_generation);
1415                         GOTO(cleanup, rc = -ENOENT);
/* Pin the inode: no parent lock is held yet, so d_inode may change under us
 * (see the rename-path comment later in this file). */
1417                 inode = igrab(inode);
1422                 child_res_id.name[0] = inode->i_ino;
1423                 child_res_id.name[1] = inode->i_generation;
1425                 /* If we want a LCK_CR for a directory, and this directory has not been
1426                    changed for some time, we return not only a LOOKUP lock, but also an
1427                    UPDATE lock so that negative dentries start working for this dir.
1428                    Also we apply same logic to non-directories. If the file is rarely
1429                    changed - we return both locks and this might save us RPC on
1431                 if ((child_mode & (LCK_CR|LCK_PR|LCK_CW)) && INODE_CTIME_OLD(inode))
1432                         child_policy.l_inodebits.bits |= MDS_INODELOCK_UPDATE;
1437         cleanup_phase = 2; /* child dentry */
1439         /* Step 3: Lock parent and child in resource order. If child doesn't
1440          * exist, we still have to lock the parent and re-lookup. */
1441         rc = enqueue_ordered_locks(obd,&parent_res_id,parent_lockh,parent_mode,
1443                                    &child_res_id, child_lockh, child_mode,
1448         if (!(*dchildp)->d_inode)
1449                 cleanup_phase = 3; /* parent lock */
1451                 cleanup_phase = 4; /* child lock */
1453         /* Step 4: Re-lookup child to verify it hasn't changed since locking */
1454         rc = mds_verify_child(obd, &parent_res_id, parent_lockh, *dparentp,
1455                               parent_mode, &child_res_id, child_lockh, dchildp,
1456                               child_mode,&child_policy, name, namelen, &parent_res_id);
1466         switch (cleanup_phase) {
1468                 ldlm_lock_decref(child_lockh, child_mode);
1470                 ldlm_lock_decref(parent_lockh, parent_mode);
/* Reconstruct a reply for a resent/replayed request from the per-client
 * last-received data (med_lcd) instead of re-executing the operation. */
1481 void mds_reconstruct_generic(struct ptlrpc_request *req)
1483         struct mds_export_data *med = &req->rq_export->exp_mds_data;
1485         mds_req_from_lcd(req, med->med_lcd);
1488 /* If we are unlinking an open file/dir (i.e. creating an orphan) then
1489 * we instead link the inode into the PENDING directory until it is
1490 * finally released. We can't simply call mds_reint_rename() or some
1491 * part thereof, because we don't have the inode to check for link
1492 * count/open status until after it is locked.
1494 * For lock ordering, caller must get child->i_mutex first, then
1495 * pending->i_mutex before starting journal transaction.
1497 * returns 1 on success
1498 * returns 0 if we lost a race and didn't make a new link
1499 * returns negative on error
/* See the block comment above: link a still-open, zero-nlink inode into the
 * PENDING directory so its destruction is deferred until last close.
 * Returns 1 on success, 0 if we lost the race, negative on error. */
1501 static int mds_orphan_add_link(struct mds_update_record *rec,
1502                                struct obd_device *obd, struct dentry *dentry)
1504         struct mds_obd *mds = &obd->u.mds;
1505         struct inode *pending_dir = mds->mds_pending_dir->d_inode;
1506         struct inode *inode = dentry->d_inode;
1507         struct dentry *pending_child;
1508         char fidname[LL_FID_NAMELEN];
1509         int fidlen = 0, rc, mode;
1512         LASSERT(inode != NULL);
1513         LASSERT(!mds_inode_is_orphan(inode));
/* Caller must already hold both mutexes (see locking comment above). */
1514 #ifndef HAVE_I_ALLOC_SEM
1515         LASSERT(TRYLOCK_INODE_MUTEX(inode) == 0);
1517         LASSERT(TRYLOCK_INODE_MUTEX(pending_dir) == 0);
/* The PENDING entry is named after the inode's ino/generation fid. */
1519         fidlen = ll_fid2str(fidname, inode->i_ino, inode->i_generation);
1521         CDEBUG(D_INODE, "pending destroy of %dx open %d linked %s %s = %s\n",
1522                mds_orphan_open_count(inode), inode->i_nlink,
1523                S_ISDIR(inode->i_mode) ? "dir" :
1524                S_ISREG(inode->i_mode) ? "file" : "other",rec->ur_name,fidname);
/* Not an orphan candidate: either nobody holds it open or links remain. */
1526         if (mds_orphan_open_count(inode) == 0 || inode->i_nlink != 0)
1529         pending_child = lookup_one_len(fidname, mds->mds_pending_dir, fidlen);
1530         if (IS_ERR(pending_child))
1531                 RETURN(PTR_ERR(pending_child));
1533         if (pending_child->d_inode != NULL) {
1534                 CERROR("re-destroying orphan file %s?\n", rec->ur_name);
1535                 LASSERT(pending_child->d_inode == inode);
1536                 GOTO(out_dput, rc = 0);
1539         /* link() is semantically-wrong for S_IFDIR, so we set S_IFREG
1540          * for linking and return real mode back then -bzzz */
1541         mode = inode->i_mode;
1542         inode->i_mode = S_IFREG;
1543         rc = vfs_link(dentry, pending_dir, pending_child);
1545                 CERROR("error linking orphan %s to PENDING: rc = %d\n",
1548         mds_inode_set_orphan(inode);
1550         /* return mode and correct i_nlink if inode is directory */
1551         inode->i_mode = mode;
1552         LASSERTF(inode->i_nlink == 1, "%s nlink == %d\n",
1553                  S_ISDIR(mode) ? "dir" : S_ISREG(mode) ? "file" : "other",
/* Directories: fix up the link counts the S_IFREG trick skipped. */
1554         if (S_ISDIR(mode)) {
1557                 pending_dir->i_nlink++;
1558                 mark_inode_dirty(inode);
1559                 mark_inode_dirty(pending_dir);
1562         GOTO(out_dput, rc = 1);
1564         l_dput(pending_child);
/* Size of the llog-cookie array needed to cancel the unlink log records
 * for a file with this striping (one cookie per stripe).
 * NOTE(review): the return statement is on lines not visible here --
 * confirm whether real_csize is returned as-is or rounded/clamped. */
1568 int mds_get_cookie_size(struct obd_device *obd, struct lov_mds_md *lmm)
1570         int count = le32_to_cpu(lmm->lmm_stripe_count);
1571         int real_csize = count * sizeof(struct llog_cookie);
/* Trim the reply's EA (md) and llog-cookie buffers down to the sizes
 * actually used, as indicated by body->valid; md lives at md_off, the
 * cookies in the following buffer.  A NULL body shrinks both to zero. */
1575 void mds_shrink_reply(struct obd_device *obd, struct ptlrpc_request *req,
1576                       struct mds_body *body, int md_off)
1578         int cookie_size = 0, md_size = 0;
1580         if (body && body->valid & OBD_MD_FLEASIZE) {
1581                 md_size = body->eadatasize;
/* Cookies are only valid together with EA data. */
1583         if (body && body->valid & OBD_MD_FLCOOKIE) {
1584                 LASSERT(body->valid & OBD_MD_FLEASIZE);
1585                 cookie_size = mds_get_cookie_size(obd, lustre_msg_buf(
1586                                                   req->rq_repmsg, md_off, 0));
1589         CDEBUG(D_INFO, "Shrink to md_size %d cookie_size %d \n", md_size,
1592         lustre_shrink_reply(req, md_off, md_size, 1);
/* If md was shrunk to nothing the cookie buffer index shifts down by one. */
1594         lustre_shrink_reply(req, md_off + (md_size > 0), cookie_size, 0);
/* Handle a client unlink/rmdir reintegration request: lock parent+child,
 * validate the request against the actual inode type, perform the VFS
 * unlink inside an fsfilt transaction, and -- if the victim is still open
 * -- park it in PENDING via mds_orphan_add_link() instead of destroying
 * its OST objects.  Cleanup is staged through cleanup_phase (see the
 * switch at the end). */
1597 static int mds_reint_unlink(struct mds_update_record *rec, int offset,
1598                             struct ptlrpc_request *req,
1599                             struct lustre_handle *lh)
1601         struct dentry *dparent = NULL, *dchild;
1602         struct mds_obd *mds = mds_req2mds(req);
1603         struct obd_device *obd = req->rq_export->exp_obd;
1604         struct mds_body *body = NULL;
1605         struct inode *child_inode = NULL;
1606         struct lustre_handle parent_lockh, child_lockh, child_reuse_lockh;
1607         void *handle = NULL;
1608         int rc = 0, cleanup_phase = 0;
/* qcids: child owner, qpids: parent owner -- used for quota adjustment. */
1609         unsigned int qcids[MAXQUOTAS] = { 0, 0 };
1610         unsigned int qpids[MAXQUOTAS] = { 0, 0 };
1613         LASSERT(offset == REQ_REC_OFF); /* || offset == DLM_INTENT_REC_OFF); */
1614         offset = REPLY_REC_OFF;
1616         DEBUG_REQ(D_INODE, req, "parent ino "LPU64"/%u, child %s",
1617                   rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name);
1619         MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1621         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNLINK))
1622                 GOTO(cleanup, rc = -ENOENT);
1625         ldlm_request_cancel(req, rec->ur_dlm, 0);
/* Steps 1-2 (lookup) and lock-in-order are delegated to this helper. */
1627         rc = mds_get_parent_child_locked(obd, mds, rec->ur_fid1,
1628                                          &parent_lockh, &dparent, LCK_EX,
1629                                          MDS_INODELOCK_UPDATE,
1630                                          rec->ur_name, rec->ur_namelen,
1631                                          &child_lockh, &dchild, LCK_EX,
1632                                          MDS_INODELOCK_FULL);
1636         cleanup_phase = 1; /* dchild, dparent, locks */
1639         child_inode = dchild->d_inode;
1640         if (child_inode == NULL) {
1641                 CDEBUG(D_INODE, "child doesn't exist (dir %lu, name %s)\n",
1642                        dparent->d_inode->i_ino, rec->ur_name);
1643                 GOTO(cleanup, rc = -ENOENT);
1646         /* save uid/gid for quota acquire/release */
1647         qcids[USRQUOTA] = child_inode->i_uid;
1648         qcids[GRPQUOTA] = child_inode->i_gid;
1649         qpids[USRQUOTA] = dparent->d_inode->i_uid;
1650         qpids[GRPQUOTA] = dparent->d_inode->i_gid;
1652         cleanup_phase = 2; /* dchild has a lock */
1654         /* We have to do these checks ourselves, in case we are making an
1655          * orphan. The client tells us whether rmdir() or unlink() was called,
1656          * so we need to return appropriate errors (bug 72). */
1657         if ((rec->ur_mode & S_IFMT) == S_IFDIR) {
1658                 if (!S_ISDIR(child_inode->i_mode))
1659                         GOTO(cleanup, rc = -ENOTDIR);
1661                 if (S_ISDIR(child_inode->i_mode))
1662                         GOTO(cleanup, rc = -EISDIR);
1665         /* Check for EROFS after we check ENODENT, ENOTDIR, and EISDIR */
1666         if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
1667                 GOTO(cleanup, rc = -EROFS);
1669         /* Step 3: Get a lock on the ino to sync with creation WRT inode
1670          * reuse (see bug 2029). */
1671         rc = mds_lock_new_child(obd, child_inode, &child_reuse_lockh);
1675         cleanup_phase = 3; /* child inum lock */
1677         OBD_FAIL_WRITE(obd, OBD_FAIL_MDS_REINT_UNLINK_WRITE, dparent->d_inode->i_sb);
1679         /* ldlm_reply in buf[0] if called via intent */
1680         if (offset == DLM_INTENT_REC_OFF)
1681                 offset = DLM_REPLY_REC_OFF;
1683         body = lustre_msg_buf(req->rq_repmsg, offset, sizeof(*body));
1684         LASSERT(body != NULL);
1686         /* child orphan sem protects orphan_dec_test && is_orphan race */
1687         MDS_DOWN_READ_ORPHAN_SEM(child_inode);
1688         cleanup_phase = 4; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
1690         /* If this is potentially the last reference to this inode, get the
1691          * OBD EA data first so the client can destroy OST objects. We
1692          * only do the object removal later if no open files/links remain. */
1693         if ((S_ISDIR(child_inode->i_mode) && child_inode->i_nlink == 2) ||
1694             child_inode->i_nlink == 1) {
1695                 if (mds_orphan_open_count(child_inode) > 0) {
1696                         /* need to lock pending_dir before transaction */
1697                         LOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
1698                         cleanup_phase = 5; /* UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode); */
1699                 } else if (S_ISREG(child_inode->i_mode)) {
1700                         mds_pack_inode2fid(&body->fid1, child_inode);
1701                         mds_pack_inode2body(body, child_inode);
1702                         mds_pack_md(obd, req->rq_repmsg, offset + 1, body,
1703                                     child_inode, MDS_PACK_MD_LOCK, 0);
1707         /* Step 4: Do the unlink: we already verified ur_mode above (bug 72) */
1708         switch (child_inode->i_mode & S_IFMT) {
1710                 /* Drop any lingering child directories before we start our
1711                  * transaction, to avoid doing multiple inode dirty/delete
1712                  * in our compound transaction (bug 1321). */
1713                 shrink_dcache_parent(dchild);
1714                 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_RMDIR,
1717                         GOTO(cleanup, rc = PTR_ERR(handle));
1718                 rc = vfs_rmdir(dparent->d_inode, dchild);
1719                 mds_counter_incr(req->rq_export, LPROC_MDS_RMDIR);
/* Regular file: the transaction must also cover the unlink llog records,
 * sized by the file's stripe count. */
1722                 struct lov_mds_md *lmm = lustre_msg_buf(req->rq_repmsg,
1724                 handle = fsfilt_start_log(obd, dparent->d_inode,
1725                                           FSFILT_OP_UNLINK, NULL,
1726                                           le32_to_cpu(lmm->lmm_stripe_count));
1728                         GOTO(cleanup, rc = PTR_ERR(handle));
1729                 rc = vfs_unlink(dparent->d_inode, dchild);
1730                 mds_counter_incr(req->rq_export, LPROC_MDS_UNLINK);
/* Other types (symlink/device/fifo/socket): plain unlink, no llog. */
1738                 handle = fsfilt_start(obd, dparent->d_inode, FSFILT_OP_UNLINK,
1741                         GOTO(cleanup, rc = PTR_ERR(handle));
1742                 rc = vfs_unlink(dparent->d_inode, dchild);
1743                 mds_counter_incr(req->rq_export, LPROC_MDS_UNLINK);
1746                 CERROR("bad file type %o unlinking %s\n", rec->ur_mode,
1749                 GOTO(cleanup, rc = -EINVAL);
/* Last link gone but the file is still open somewhere: orphan it. */
1752         if (rc == 0 && child_inode->i_nlink == 0) {
1753                 if (mds_orphan_open_count(child_inode) > 0)
1754                         rc = mds_orphan_add_link(rec, obd, dchild);
1757                         GOTO(cleanup, rc = 0);
1759                 if (!S_ISREG(child_inode->i_mode))
1762                 if (!(body->valid & OBD_MD_FLEASIZE)) {
1763                         body->valid |=(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1764                                        OBD_MD_FLATIME | OBD_MD_FLMTIME);
1765                 } else if (mds_log_op_unlink(obd,
1766                                 lustre_msg_buf(req->rq_repmsg, offset + 1, 0),
1767                                 lustre_msg_buflen(req->rq_repmsg, offset + 1),
1768                                 lustre_msg_buf(req->rq_repmsg, offset + 2, 0),
1769                                 lustre_msg_buflen(req->rq_repmsg, offset+2)) >
/* Cookies were logged: tell the client it may cancel OST objects. */
1771                         body->valid |= OBD_MD_FLCOOKIE;
/* Propagate the client-supplied mtime/ctime to the parent directory. */
1781                 iattr.ia_valid = ATTR_MTIME | ATTR_CTIME;
1782                 LTIME_S(iattr.ia_mtime) = rec->ur_time;
1783                 LTIME_S(iattr.ia_ctime) = rec->ur_time;
1785                 err = fsfilt_setattr(obd, dparent, handle, &iattr, 0);
1787                         CERROR("error on parent setattr: rc = %d\n", err);
1790         rc = mds_finish_transno(mds, dparent ? dparent->d_inode : NULL,
1791                                 handle, req, rc, 0, 0);
1793         (void)obd_set_info_async(mds->mds_osc_exp, strlen("unlinked"),
1794                                  "unlinked", 0, NULL, NULL);
/* Staged teardown: each case falls through to release earlier resources. */
1795         switch(cleanup_phase) {
1796         case 5: /* pending_dir semaphore */
1797                 UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
1798         case 4: /* child inode semaphore */
1799                 MDS_UP_READ_ORPHAN_SEM(child_inode);
1800         case 3: /* child ino-reuse lock */
1801                 if (rc && body != NULL) {
1802                         // Don't unlink the OST objects if the MDS unlink failed
1806                         ldlm_lock_decref(&child_reuse_lockh, LCK_EX);
1808                         ptlrpc_save_lock(req, &child_reuse_lockh, LCK_EX);
1809         case 2: /* child lock */
1810                 ldlm_lock_decref(&child_lockh, LCK_EX);
1811         case 1: /* child and parent dentry, parent lock */
1813                         ldlm_lock_decref(&parent_lockh, LCK_EX);
1815                         ptlrpc_save_lock(req, &parent_lockh, LCK_EX);
1822                 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1825         req->rq_status = rc;
1827         mds_shrink_reply(obd, req, body, REPLY_REC_OFF + 1);
1829         /* trigger dqrel on the owner of child and parent */
1830         lquota_adjust(mds_quota_interface_ref, obd, qcids, qpids, rc,
/* Handle a client hard-link reintegration request: look up the source
 * inode (ur_fid1) and target directory (ur_fid2), lock both in resource
 * order, refuse links to orphans or over existing names, then vfs_link()
 * inside an fsfilt transaction.  Cleanup is staged via cleanup_phase. */
1835 static int mds_reint_link(struct mds_update_record *rec, int offset,
1836                           struct ptlrpc_request *req,
1837                           struct lustre_handle *lh)
1839         struct obd_device *obd = req->rq_export->exp_obd;
1840         struct dentry *de_src = NULL;
1841         struct dentry *de_tgt_dir = NULL;
1842         struct dentry *dchild = NULL;
1843         struct mds_obd *mds = mds_req2mds(req);
1844         struct lustre_handle *handle = NULL, tgt_dir_lockh, src_lockh;
1845         struct ldlm_res_id src_res_id = { .name = {0} };
1846         struct ldlm_res_id tgt_dir_res_id = { .name = {0} };
1847         ldlm_policy_data_t src_policy ={.l_inodebits = {MDS_INODELOCK_UPDATE}};
1848         ldlm_policy_data_t tgt_dir_policy =
1849                                        {.l_inodebits = {MDS_INODELOCK_UPDATE}};
1850         int rc = 0, cleanup_phase = 0;
1853         LASSERT(offset == REQ_REC_OFF);
1855         DEBUG_REQ(D_INODE, req, "original "LPU64"/%u to "LPU64"/%u %s",
1856                   rec->ur_fid1->id, rec->ur_fid1->generation,
1857                   rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_name);
1859         mds_counter_incr(req->rq_export, LPROC_MDS_LINK);
1861         MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
1863         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_LINK))
1864                 GOTO(cleanup, rc = -ENOENT);
1867         ldlm_request_cancel(req, rec->ur_dlm, 0);
1869         /* Step 1: Lookup the source inode and target directory by FID */
1870         de_src = mds_fid2dentry(mds, rec->ur_fid1, NULL);
1872                 GOTO(cleanup, rc = PTR_ERR(de_src));
1874         cleanup_phase = 1; /* source dentry */
1876         de_tgt_dir = mds_fid2dentry(mds, rec->ur_fid2, NULL);
1877         if (IS_ERR(de_tgt_dir)) {
1878                 rc = PTR_ERR(de_tgt_dir);
1883         cleanup_phase = 2; /* target directory dentry */
1885         CDEBUG(D_INODE, "linking %.*s/%s to inode %lu\n",
1886                de_tgt_dir->d_name.len, de_tgt_dir->d_name.name, rec->ur_name,
1887                de_src->d_inode->i_ino);
1889         /* Step 2: Take the two locks */
1890         src_res_id.name[0] = de_src->d_inode->i_ino;
1891         src_res_id.name[1] = de_src->d_inode->i_generation;
1892         tgt_dir_res_id.name[0] = de_tgt_dir->d_inode->i_ino;
1893         tgt_dir_res_id.name[1] = de_tgt_dir->d_inode->i_generation;
/* enqueue_ordered_locks() sorts the two resources to avoid AB/BA deadlock. */
1895         rc = enqueue_ordered_locks(obd, &src_res_id, &src_lockh, LCK_EX,
1897                                    &tgt_dir_res_id, &tgt_dir_lockh, LCK_EX,
1902         cleanup_phase = 3; /* locks */
/* Linking an orphan (unlinked-but-open inode) would resurrect it. */
1904         if (mds_inode_is_orphan(de_src->d_inode)) {
1905                 CDEBUG(D_INODE, "an attempt to link an orphan inode %lu/%u\n",
1906                        de_src->d_inode->i_ino,
1907                        de_src->d_inode->i_generation);
1908                 GOTO(cleanup, rc = -ENOENT);
1911         /* Step 3: Lookup the child */
1912         dchild = ll_lookup_one_len(rec->ur_name, de_tgt_dir, rec->ur_namelen-1);
1913         if (IS_ERR(dchild)) {
1914                 rc = PTR_ERR(dchild);
1915                 if (rc != -EPERM && rc != -EACCES && rc != -ENAMETOOLONG)
1916                         CERROR("child lookup error %d\n", rc);
1920         cleanup_phase = 4; /* child dentry */
/* Target name already exists -- cannot link over it. */
1922         if (dchild->d_inode) {
1923                 CDEBUG(D_INODE, "child exists (dir %lu, name %s)\n",
1924                        de_tgt_dir->d_inode->i_ino, rec->ur_name);
1929         /* Step 4: Do it. */
1930         OBD_FAIL_WRITE(obd, OBD_FAIL_MDS_REINT_LINK_WRITE, de_src->d_inode->i_sb);
1932         if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
1933                 GOTO(cleanup, rc = -EROFS);
1935         handle = fsfilt_start(obd, de_tgt_dir->d_inode, FSFILT_OP_LINK, NULL);
1937                 GOTO(cleanup, rc = PTR_ERR(handle));
1939         rc = vfs_link(de_src, de_tgt_dir->d_inode, dchild);
1940         if (rc && rc != -EPERM && rc != -EACCES)
1941                 CERROR("vfs_link error %d\n", rc);
1943         rc = mds_finish_transno(mds, de_tgt_dir ? de_tgt_dir->d_inode : NULL,
1944                                 handle, req, rc, 0, 0);
/* Staged teardown: each case falls through to release earlier resources. */
1947         switch (cleanup_phase) {
1948         case 4: /* child dentry */
1952                         ldlm_lock_decref(&src_lockh, LCK_EX);
1953                         ldlm_lock_decref(&tgt_dir_lockh, LCK_EX);
1955                         ptlrpc_save_lock(req, &src_lockh, LCK_EX);
1956                         ptlrpc_save_lock(req, &tgt_dir_lockh, LCK_EX);
1958         case 2: /* target dentry */
1960         case 1: /* source dentry */
1965                 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
1968         req->rq_status = rc;
1972 /* The idea here is that we need to get four locks in the end:
1973 * one on each parent directory, one on each child. We need to take
1974 * these locks in some kind of order (to avoid deadlocks), and the order
1975 * I selected is "increasing resource number" order. We need to look up
1976 * the children, however, before we know what the resource number(s) are.
1977 * Thus the following plan:
1979 * 1,2. Look up the parents
1980 * 3,4. Look up the children
1981 * 5. Take locks on the parents and children, in order
1982 * 6. Verify that the children haven't changed since they were looked up
1984 * If there was a race and the children changed since they were first looked
1985 * up, it is possible that mds_verify_child() will be able to just grab the
1986 * lock on the new child resource (if it has a higher resource than any other)
1987 * but we need to compare against not only its parent, but also against the
1988 * parent and child of the "other half" of the rename, hence maxres_{src,tgt}.
1990 * We need the fancy igrab() on the child inodes because we aren't holding a
1991 * lock on the parent after the lookup is done, so dentry->d_inode may change
1992 * at any time, and igrab() itself doesn't like getting passed a NULL argument.
/* See the plan in the block comment above: look up both parents and both
 * children of a rename, take all four locks in resource order via
 * enqueue_4ordered_locks(), then re-verify each child with
 * mds_verify_child(), retrying (retry_locks) when a child changed in a way
 * that breaks the lock ordering. */
1994 int mds_get_parents_children_locked(struct obd_device *obd,
1995                                     struct mds_obd *mds,
1996                                     struct ll_fid *p1_fid,
1997                                     struct dentry **de_srcdirp,
1998                                     struct ll_fid *p2_fid,
1999                                     struct dentry **de_tgtdirp,
2001                                     const char *old_name, int old_len,
2002                                     struct dentry **de_oldp,
2003                                     const char *new_name, int new_len,
2004                                     struct dentry **de_newp,
2005                                     struct lustre_handle *dlm_handles,
2008         struct ldlm_res_id p1_res_id = { .name = {0} };
2009         struct ldlm_res_id p2_res_id = { .name = {0} };
2010         struct ldlm_res_id c1_res_id = { .name = {0} };
2011         struct ldlm_res_id c2_res_id = { .name = {0} };
2012         ldlm_policy_data_t p_policy = {.l_inodebits = {MDS_INODELOCK_UPDATE}};
2013         /* Only dentry should disappear, but the inode itself would be
2014            intact otherwise. */
2015         ldlm_policy_data_t c1_policy = {.l_inodebits = {MDS_INODELOCK_LOOKUP}};
2016         /* If something is going to be replaced, both dentry and inode locks are needed */
2017         ldlm_policy_data_t c2_policy = {.l_inodebits = {MDS_INODELOCK_FULL}};
2018         struct ldlm_res_id *maxres_src, *maxres_tgt;
2019         struct inode *inode;
2020         int rc = 0, cleanup_phase = 0;
2023         /* Step 1: Lookup the source directory */
2024         *de_srcdirp = mds_fid2dentry(mds, p1_fid, NULL);
2025         if (IS_ERR(*de_srcdirp))
2026                 GOTO(cleanup, rc = PTR_ERR(*de_srcdirp));
2028         cleanup_phase = 1; /* source directory dentry */
2030         p1_res_id.name[0] = (*de_srcdirp)->d_inode->i_ino;
2031         p1_res_id.name[1] = (*de_srcdirp)->d_inode->i_generation;
2033         /* Step 2: Lookup the target directory */
/* Same-directory rename: share the dentry instead of a second fid lookup. */
2034         if (memcmp(p1_fid, p2_fid, sizeof(*p1_fid)) == 0) {
2035                 *de_tgtdirp = dget(*de_srcdirp);
2037                 *de_tgtdirp = mds_fid2dentry(mds, p2_fid, NULL);
2038                 if (IS_ERR(*de_tgtdirp)) {
2039                         rc = PTR_ERR(*de_tgtdirp);
2045         cleanup_phase = 2; /* target directory dentry */
2047         p2_res_id.name[0] = (*de_tgtdirp)->d_inode->i_ino;
2048         p2_res_id.name[1] = (*de_tgtdirp)->d_inode->i_generation;
2050         /* Step 3: Lookup the source child entry */
2051         *de_oldp = ll_lookup_one_len(old_name, *de_srcdirp, old_len - 1);
2052         if (IS_ERR(*de_oldp)) {
2053                 rc = PTR_ERR(*de_oldp);
2054                 CDEBUG(D_INODE, "old child lookup error (%.*s): rc %d\n",
2055                        old_len - 1, old_name, rc);
2059         cleanup_phase = 3; /* original name dentry */
2061         inode = (*de_oldp)->d_inode;
/* igrab() pins the inode: no lock is held yet, so d_inode may change
 * (see the header comment about the "fancy igrab"). */
2063                 inode = igrab(inode);
2065                 GOTO(cleanup, rc = -ENOENT);
2067                 c1_res_id.name[0] = inode->i_ino;
2068                 c1_res_id.name[1] = inode->i_generation;
2072         /* Step 4: Lookup the target child entry */
2074                 GOTO(retry_locks, rc);
2075         *de_newp = ll_lookup_one_len(new_name, *de_tgtdirp, new_len - 1);
2076         if (IS_ERR(*de_newp)) {
2077                 rc = PTR_ERR(*de_newp);
2078                 CDEBUG(D_DENTRY, "new child lookup error (%.*s): rc %d\n",
2079                        old_len - 1, old_name, rc);
2083         cleanup_phase = 4; /* target dentry */
2085         inode = (*de_newp)->d_inode;
2087                 inode = igrab(inode);
2091                 c2_res_id.name[0] = inode->i_ino;
2092                 c2_res_id.name[1] = inode->i_generation;
2096         /* Step 5: Take locks on the parents and child(ren) */
/* maxres_{src,tgt} track the largest resource on each side, so the later
 * re-verification can tell whether a changed child still sorts last. */
2097         maxres_src = &p1_res_id;
2098         maxres_tgt = &p2_res_id;
2099         cleanup_phase = 4; /* target dentry */
2101         if (c1_res_id.name[0] != 0 && res_gt(&c1_res_id, &p1_res_id,NULL,NULL))
2102                 maxres_src = &c1_res_id;
2103         if (c2_res_id.name[0] != 0 && res_gt(&c2_res_id, &p2_res_id,NULL,NULL))
2104                 maxres_tgt = &c2_res_id;
2106         rc = enqueue_4ordered_locks(obd, &p1_res_id,&dlm_handles[0],parent_mode,
2108                                     &p2_res_id, &dlm_handles[1], parent_mode,
2110                                     &c1_res_id, &dlm_handles[2], child_mode,
2112                                     &c2_res_id, &dlm_handles[3], child_mode,
2117         cleanup_phase = 6; /* parent and child(ren) locks */
2119         /* Step 6a: Re-lookup source child to verify it hasn't changed */
2120         rc = mds_verify_child(obd, &p1_res_id, &dlm_handles[0], *de_srcdirp,
2121                               parent_mode, &c1_res_id, &dlm_handles[2], de_oldp,
2122                               child_mode, &c1_policy, old_name, old_len,
/* Child changed: drop all remaining locks and retry from retry_locks. */
2125                 if (c2_res_id.name[0] != 0)
2126                         ldlm_lock_decref(&dlm_handles[3], child_mode);
2127                 ldlm_lock_decref(&dlm_handles[1], parent_mode);
2134         if ((*de_oldp)->d_inode == NULL)
2135                 GOTO(cleanup, rc = -ENOENT);
2139         /* Step 6b: Re-lookup target child to verify it hasn't changed */
2140         rc = mds_verify_child(obd, &p2_res_id, &dlm_handles[1], *de_tgtdirp,
2141                               parent_mode, &c2_res_id, &dlm_handles[3], de_newp,
2142                               child_mode, &c2_policy, new_name, new_len,
2145                 ldlm_lock_decref(&dlm_handles[2], child_mode);
2146                 ldlm_lock_decref(&dlm_handles[0], parent_mode);
/* Staged teardown: each case falls through to release earlier resources. */
2156         switch (cleanup_phase) {
2157         case 6: /* child lock(s) */
2158                 if (c2_res_id.name[0] != 0)
2159                         ldlm_lock_decref(&dlm_handles[3], child_mode);
2160                 if (c1_res_id.name[0] != 0)
2161                         ldlm_lock_decref(&dlm_handles[2], child_mode);
2162         case 5: /* parent locks */
2163                 ldlm_lock_decref(&dlm_handles[1], parent_mode);
2164                 ldlm_lock_decref(&dlm_handles[0], parent_mode);
2165         case 4: /* target dentry */
2167         case 3: /* source dentry */
2169         case 2: /* target directory dentry */
2170                 l_dput(*de_tgtdirp);
2171         case 1: /* source directory dentry */
2172                 l_dput(*de_srcdirp);
2179 static int mds_reint_rename(struct mds_update_record *rec, int offset,
2180 struct ptlrpc_request *req,
2181 struct lustre_handle *lockh)
2183 struct obd_device *obd = req->rq_export->exp_obd;
2184 struct dentry *de_srcdir = NULL;
2185 struct dentry *de_tgtdir = NULL;
2186 struct dentry *de_old = NULL;
2187 struct dentry *de_new = NULL;
2188 struct inode *old_inode = NULL, *new_inode = NULL;
2189 struct mds_obd *mds = mds_req2mds(req);
2190 struct lustre_handle dlm_handles[4];
2191 struct mds_body *body = NULL;
2192 struct lov_mds_md *lmm = NULL;
2193 int rc = 0, lock_count = 3, cleanup_phase = 0;
2194 void *handle = NULL;
2195 unsigned int qcids[MAXQUOTAS] = { 0, 0 };
2196 unsigned int qpids[4] = { 0, 0, 0, 0 };
2199 LASSERT(offset == REQ_REC_OFF);
2200 offset = REPLY_REC_OFF;
2202 DEBUG_REQ(D_INODE, req, "parent "LPU64"/%u %s to "LPU64"/%u %s",
2203 rec->ur_fid1->id, rec->ur_fid1->generation, rec->ur_name,
2204 rec->ur_fid2->id, rec->ur_fid2->generation, rec->ur_tgt);
2206 mds_counter_incr(req->rq_export, LPROC_MDS_RENAME);
2208 MDS_CHECK_RESENT(req, mds_reconstruct_generic(req));
2211 ldlm_request_cancel(req, rec->ur_dlm, 0);
2213 rc = mds_get_parents_children_locked(obd, mds, rec->ur_fid1, &de_srcdir,
2214 rec->ur_fid2, &de_tgtdir, LCK_EX,
2215 rec->ur_name, rec->ur_namelen,
2216 &de_old, rec->ur_tgt,
2217 rec->ur_tgtlen, &de_new,
2218 dlm_handles, LCK_EX);
2222 cleanup_phase = 1; /* parent(s), children, locks */
2224 old_inode = de_old->d_inode;
2225 new_inode = de_new->d_inode;
2227 if (new_inode != NULL)
2230 /* sanity check for src inode */
2231 if (old_inode->i_ino == de_srcdir->d_inode->i_ino ||
2232 old_inode->i_ino == de_tgtdir->d_inode->i_ino)
2233 GOTO(cleanup, rc = -EINVAL);
2235 if (req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2236 GOTO(cleanup, rc = -EROFS);
2238 if (new_inode == NULL)
2242 cleanup_phase = 2; /* iput(new_inode) when finished */
2244 /* sanity check for dest inode */
2245 if (new_inode->i_ino == de_srcdir->d_inode->i_ino ||
2246 new_inode->i_ino == de_tgtdir->d_inode->i_ino)
2247 GOTO(cleanup, rc = -EINVAL);
2249 if (old_inode == new_inode)
2250 GOTO(cleanup, rc = 0);
2252 /* save uids/gids for qunit acquire/release */
2253 qcids[USRQUOTA] = old_inode->i_uid;
2254 qcids[GRPQUOTA] = old_inode->i_gid;
2255 qpids[USRQUOTA] = de_tgtdir->d_inode->i_uid;
2256 qpids[GRPQUOTA] = de_tgtdir->d_inode->i_gid;
2257 qpids[2] = de_srcdir->d_inode->i_uid;
2258 qpids[3] = de_srcdir->d_inode->i_gid;
2260 /* if we are about to remove the target at first, pass the EA of
2261 * that inode to client to perform and cleanup on OST */
2262 body = lustre_msg_buf(req->rq_repmsg, offset, sizeof(*body));
2263 LASSERT(body != NULL);
2265 /* child orphan sem protects orphan_dec_test && is_orphan race */
2266 MDS_DOWN_READ_ORPHAN_SEM(new_inode);
2267 cleanup_phase = 3; /* MDS_UP_READ_ORPHAN_SEM(new_inode) when finished */
2269 if ((S_ISDIR(new_inode->i_mode) && new_inode->i_nlink == 2) ||
2270 new_inode->i_nlink == 1) {
2271 if (mds_orphan_open_count(new_inode) > 0) {
2272 /* need to lock pending_dir before transaction */
2273 LOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
2274 cleanup_phase = 4; /* UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode); */
2275 } else if (S_ISREG(new_inode->i_mode)) {
2276 mds_pack_inode2fid(&body->fid1, new_inode);
2277 mds_pack_inode2body(body, new_inode);
2278 mds_pack_md(obd, req->rq_repmsg, offset + 1, body,
2279 new_inode, MDS_PACK_MD_LOCK, 0);
2284 OBD_FAIL_WRITE(obd, OBD_FAIL_MDS_REINT_RENAME_WRITE,
2285 de_srcdir->d_inode->i_sb);
2287 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2288 /* Check if we are moving old entry into its child. 2.6 does not
2289 check for this in vfs_rename() anymore */
2290 if (is_subdir(de_new, de_old))
2291 GOTO(cleanup, rc = -EINVAL);
2294 lmm = lustre_msg_buf(req->rq_repmsg, offset + 1, 0);
2295 handle = fsfilt_start_log(obd, de_tgtdir->d_inode, FSFILT_OP_RENAME,
2296 NULL, le32_to_cpu(lmm->lmm_stripe_count));
2299 GOTO(cleanup, rc = PTR_ERR(handle));
2302 de_old->d_fsdata = req;
2303 de_new->d_fsdata = req;
2305 rc = vfs_rename(de_srcdir->d_inode, de_old, de_tgtdir->d_inode, de_new);
2308 if (rc == 0 && new_inode != NULL && new_inode->i_nlink == 0) {
2309 if (mds_orphan_open_count(new_inode) > 0)
2310 rc = mds_orphan_add_link(rec, obd, de_new);
2313 GOTO(cleanup, rc = 0);
2315 if (!S_ISREG(new_inode->i_mode))
2318 if (!(body->valid & OBD_MD_FLEASIZE)) {
2319 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
2320 OBD_MD_FLATIME | OBD_MD_FLMTIME);
2321 } else if (mds_log_op_unlink(obd,
2322 lustre_msg_buf(req->rq_repmsg,
2324 lustre_msg_buflen(req->rq_repmsg,
2326 lustre_msg_buf(req->rq_repmsg,
2328 lustre_msg_buflen(req->rq_repmsg,
2331 body->valid |= OBD_MD_FLCOOKIE;
2337 rc = mds_finish_transno(mds, de_tgtdir ? de_tgtdir->d_inode : NULL,
2338 handle, req, rc, 0, 0);
2340 switch (cleanup_phase) {
2342 UNLOCK_INODE_MUTEX(mds->mds_pending_dir->d_inode);
2344 MDS_UP_READ_ORPHAN_SEM(new_inode);
2349 if (lock_count == 4)
2350 ldlm_lock_decref(&(dlm_handles[3]), LCK_EX);
2351 ldlm_lock_decref(&(dlm_handles[2]), LCK_EX);
2352 ldlm_lock_decref(&(dlm_handles[1]), LCK_EX);
2353 ldlm_lock_decref(&(dlm_handles[0]), LCK_EX);
2355 if (lock_count == 4)
2356 ptlrpc_save_lock(req,&(dlm_handles[3]), LCK_EX);
2357 ptlrpc_save_lock(req, &(dlm_handles[2]), LCK_EX);
2358 ptlrpc_save_lock(req, &(dlm_handles[1]), LCK_EX);
2359 ptlrpc_save_lock(req, &(dlm_handles[0]), LCK_EX);
2368 CERROR("invalid cleanup_phase %d\n", cleanup_phase);
2371 req->rq_status = rc;
2373 /* acquire/release qunit */
2374 lquota_adjust(mds_quota_interface_ref, obd, qcids, qpids, rc,
/*
 * Signature shared by every MDS reintegration handler dispatched through
 * the reinters[] table by mds_reint_rec(): the unpacked update record,
 * the reply-buffer offset, the incoming RPC request, and a DLM lock
 * handle.  The returned status code is propagated back to the caller
 * (and ultimately into req->rq_status).
 */
typedef int (*mds_reinter)(struct mds_update_record *, int offset,
                           struct ptlrpc_request *, struct lustre_handle *);
2382 static mds_reinter reinters[REINT_MAX] = {
2383 [REINT_SETATTR] mds_reint_setattr,
2384 [REINT_CREATE] mds_reint_create,
2385 [REINT_LINK] mds_reint_link,
2386 [REINT_UNLINK] mds_reint_unlink,
2387 [REINT_RENAME] mds_reint_rename,
2388 [REINT_OPEN] mds_open
2391 int mds_reint_rec(struct mds_update_record *rec, int offset,
2392 struct ptlrpc_request *req, struct lustre_handle *lockh)
2394 struct obd_device *obd = req->rq_export->exp_obd;
2395 struct mds_obd *mds = &obd->u.mds;
2396 struct lvfs_run_ctxt saved;
2399 gid_t fsgid = rec->ur_uc.luc_fsgid;
2404 if (req->rq_uid != LNET_UID_ANY) {
2405 /* non-root local cluster client
2406 * NB root's creds are believed... */
2407 LASSERT (req->rq_uid != 0);
2408 rec->ur_uc.luc_fsuid = req->rq_uid;
2409 rec->ur_uc.luc_cap = 0;
2413 /* get group info of this user */
2414 rec->ur_uc.luc_uce = upcall_cache_get_entry(mds->mds_group_hash,
2415 rec->ur_uc.luc_fsuid,
2416 rec->ur_uc.luc_fsgid, 2,
2417 &rec->ur_uc.luc_suppgid1);
2419 if (IS_ERR(rec->ur_uc.luc_uce)) {
2420 rc = PTR_ERR(rec->ur_uc.luc_uce);
2421 rec->ur_uc.luc_uce = NULL;
2425 /* checked by unpacker */
2426 LASSERT(rec->ur_opcode < REINT_MAX && reinters[rec->ur_opcode] != NULL);
2429 if (rec->ur_uc.luc_uce)
2430 rec->ur_uc.luc_fsgid = rec->ur_uc.luc_uce->ue_primary;
2433 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &rec->ur_uc);
2436 if (rec->ur_uc.luc_uce && fsgid != rec->ur_uc.luc_fsgid &&
2437 in_group_p(fsgid)) {
2438 rec->ur_uc.luc_fsgid = fsgid;
2439 current->fsgid = saved.luc.luc_fsgid = fsgid;
2443 rc = reinters[rec->ur_opcode] (rec, offset, req, lockh);
2444 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &rec->ur_uc);
2446 upcall_cache_put_entry(mds->mds_group_hash, rec->ur_uc.luc_uce);