1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001-2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.sf.net/projects/lustre/
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 # define EXPORT_SYMTAB
25 #define DEBUG_SUBSYSTEM S_MDC
28 # include <linux/module.h>
29 # include <linux/pagemap.h>
30 # include <linux/miscdevice.h>
31 # include <linux/init.h>
33 # include <liblustre.h>
36 #include <linux/obd_class.h>
37 #include <linux/lustre_mds.h>
38 #include <linux/lustre_dlm.h>
39 #include <linux/lprocfs_status.h>
40 #include "mdc_internal.h"
42 int it_disposition(struct lookup_intent *it, int flag)
44 return it->d.lustre.it_disposition & flag;
46 EXPORT_SYMBOL(it_disposition);
48 void it_set_disposition(struct lookup_intent *it, int flag)
50 it->d.lustre.it_disposition |= flag;
52 EXPORT_SYMBOL(it_set_disposition);
/*
 * mdc_fid2mdc_op_data(): populate a struct mdc_op_data describing an MDS
 * operation on the name @name under fid @f1, optionally involving a second
 * fid @f2.  A NULL @f2 leaves data->fid2 zeroed (the memset below).
 * NOTE(review): this excerpt is truncated -- the assignments for ctxt,
 * fid1 and name presumably precede the memset; confirm against the full
 * file before relying on this description.
 */
54 static void mdc_fid2mdc_op_data(struct mdc_op_data *data, struct ll_uctxt *ctxt,
55 struct ll_fid *f1, struct ll_fid *f2,
56 const char *name, int namelen, int mode)
67 memset(&data->fid2, 0, sizeof(data->fid2));
69 data->namelen = namelen;
70 data->create_mode = mode;
/* stamp the operation with the current wall-clock seconds for the server */
71 data->mod_time = CURRENT_SECONDS;
74 static int it_to_lock_mode(struct lookup_intent *it)
76 /* CREAT needs to be tested before open (both could be set) */
77 if (it->it_op & IT_CREAT)
79 else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP))
86 int it_open_error(int phase, struct lookup_intent *it)
88 if (it_disposition(it, DISP_OPEN_OPEN)) {
89 if (phase == DISP_OPEN_OPEN)
90 return it->d.lustre.it_status;
95 if (it_disposition(it, DISP_OPEN_CREATE)) {
96 if (phase == DISP_OPEN_CREATE)
97 return it->d.lustre.it_status;
102 if (it_disposition(it, DISP_LOOKUP_EXECD)) {
103 if (phase == DISP_LOOKUP_EXECD)
104 return it->d.lustre.it_status;
109 if (it_disposition(it, DISP_IT_EXECD)) {
110 if (phase == DISP_IT_EXECD)
111 return it->d.lustre.it_status;
115 CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
116 it->d.lustre.it_status);
120 EXPORT_SYMBOL(it_open_error);
122 /* this must be called on a lockh that is known to have a referenced lock */
/*
 * mdc_set_lock_data(): store @data (an inode pointer) as l_ast_data of the
 * DLM lock behind handle @l, so blocking ASTs can find the inode later.
 * Runs under the namespace lock.  If the lock already carries a different
 * inode, assert that the old one is being torn down (I_FREEING) before
 * overwriting it.
 * NOTE(review): this excerpt elides a few lines (e.g. the old_inode->i_state
 * argument feeding the "state %lu" conversion in the LASSERTF, and the
 * lock put / closing brace); confirm against the full file.
 */
123 void mdc_set_lock_data(__u64 *l, void *data)
125 struct ldlm_lock *lock;
126 struct lustre_handle *lockh = (struct lustre_handle *)l;
/* handle is known valid and referenced, per the contract comment above */
134 lock = ldlm_handle2lock(lockh);
136 LASSERT(lock != NULL);
137 l_lock(&lock->l_resource->lr_namespace->ns_lock);
/* replacing an existing back-pointer is only legal if that inode is dying */
139 if (lock->l_ast_data && lock->l_ast_data != data) {
140 struct inode *new_inode = data;
141 struct inode *old_inode = lock->l_ast_data;
142 LASSERTF(old_inode->i_state & I_FREEING,
143 "Found existing inode %p/%lu/%u state %lu in lock: "
144 "setting data to %p/%lu/%u\n", old_inode,
145 old_inode->i_ino, old_inode->i_generation,
147 new_inode, new_inode->i_ino, new_inode->i_generation);
150 lock->l_ast_data = data;
151 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
156 EXPORT_SYMBOL(mdc_set_lock_data);
158 int mdc_change_cbdata(struct obd_export *exp, struct ll_fid *fid,
159 ldlm_iterator_t it, void *data)
161 struct ldlm_res_id res_id = { .name = {0} };
164 res_id.name[0] = fid->id;
165 res_id.name[1] = fid->generation;
167 ldlm_change_cbdata(class_exp2obd(exp)->obd_namespace, &res_id, it,
/*
 * mdc_clear_replay_flag(): drop @req from the replay list once we know it
 * failed (@rc != 0), so a failed open/create is not re-executed after
 * recovery.  Also warns if the server assigned a transno to a failed
 * request, which should not happen.
 * NOTE(review): the excerpt elides the statement between the lock/unlock
 * pair -- presumably "req->rq_replay = 0;" -- confirm against the full
 * file.
 */
174 static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
176 /* Don't hold error requests for replay. */
177 if (req->rq_replay) {
178 unsigned long irqflags;
/* rq_replay is cleared under rq_lock so ptlrpc sees a consistent flag */
179 spin_lock_irqsave(&req->rq_lock, irqflags);
181 spin_unlock_irqrestore(&req->rq_lock, irqflags);
/* a transno on an error reply indicates a server-side inconsistency */
183 if (rc && req->rq_transno != 0) {
184 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
/*
 * round_up(): smallest power of two strictly greater than @val (returns 1
 * for val == 0).  Used by mdc_enqueue() to pad the open request buffer up
 * to the size the slab allocator would hand back anyway, so open replay
 * can usually store the reply LOV EA without reallocating (see
 * mdc_realloc_openmsg()).
 *
 * The body was missing/truncated in the source under review; restored to
 * the canonical shift-doubling form implied by the "next power-of-two"
 * comment at the call site.
 */
static int round_up(int val)
{
        int ret = 1;

        while (val) {
                val >>= 1;
                ret <<= 1;
        }
        return ret;
}
199 /* Save a large LOV EA into the request buffer so that it is available
200 * for replay. We don't do this in the initial request because the
201 * original request doesn't need this buffer (at most it sends just the
202 * lov_mds_md) and it is a waste of RAM/bandwidth to send the empty
203 * buffer and may also be difficult to allocate and save a very large
204 * request buffer for each open. (bug 5707)
206 * OOM here may cause recovery failure if lmm is needed (only for the
207 * original open if the MDS crashed just when this client also OOM'd)
208 * but this is incredibly unlikely, and questionable whether the client
209 * could do MDS recovery under OOM anyways... */
/*
 * mdc_realloc_openmsg(): grow the saved open request so the reply's LOV EA
 * fits in reqmsg buffer 4 for later open replay (see the block comment
 * above).  On allocation failure the EA is simply not saved: the
 * OBD_MD_FLEASIZE bit is cleared from the reply body so replay will not
 * expect one.
 * NOTE(review): this excerpt elides some lines (e.g. the DEBUG_REQ format
 * argument, irqflags declaration, closing braces); comments describe only
 * what is visible.
 */
210 static void mdc_realloc_openmsg(struct ptlrpc_request *req,
211 struct mds_body *body, int size[5])
213 int new_size, old_size;
214 struct lustre_msg *new_msg;
/* message size before/after substituting the real EA size into slot 4 */
217 old_size = lustre_msg_size(5, size);
219 size[4] = body->eadatasize;
220 new_size = lustre_msg_size(5, size);
221 OBD_ALLOC(new_msg, new_size);
222 if (new_msg != NULL) {
223 struct lustre_msg *old_msg = req->rq_reqmsg;
226 DEBUG_REQ(D_INFO, req, "replace reqmsg for larger EA %u\n",
/* copy the old message wholesale, then widen buffer 4 to the real EA size */
228 memcpy(new_msg, old_msg, old_size);
229 new_msg->buflens[4] = body->eadatasize;
/* swap the request message under rq_lock so ptlrpc sees a consistent view */
231 spin_lock_irqsave(&req->rq_lock, irqflags);
232 req->rq_reqmsg = new_msg;
233 req->rq_reqlen = new_size;
234 spin_unlock_irqrestore(&req->rq_lock, irqflags);
236 OBD_FREE(old_msg, old_size);
/* OOM fallback branch: drop the EA from the reply so it is not replayed */
238 body->valid &= ~OBD_MD_FLEASIZE;
239 body->eadatasize = 0;
243 /* We always reserve enough space in the reply packet for a stripe MD, because
244 * we don't know in advance the file type. */
/*
 * mdc_enqueue(): send an intent-enqueue RPC to the MDS.
 *
 * Builds a request whose buffer layout depends on the intent type
 * (open / unlink / getattr-lookup / readdir), enqueues it through the DLM
 * with LDLM_FL_HAS_INTENT, then unpacks the intent result (disposition,
 * status, lock mode) into @it for mdc_intent_lock() to interpret.  For
 * opens, the reply LOV EA is copied back into the request so a replayed
 * open can recreate the file's striping (bug 5707, see above).
 *
 * NOTE(review): many lines are elided in this excerpt (ENTRY/RETURN
 * tracing, error paths, some call arguments, closing braces); the
 * comments below describe only what is visible -- confirm details against
 * the full file.
 */
245 int mdc_enqueue(struct obd_export *exp,
247 struct lookup_intent *it,
249 struct mdc_op_data *data,
250 struct lustre_handle *lockh,
253 ldlm_completion_callback cb_completion,
254 ldlm_blocking_callback cb_blocking,
257 struct ptlrpc_request *req;
258 struct obd_device *obddev = class_exp2obd(exp);
/* DLM resource for the parent fid: named by object id + generation */
259 struct ldlm_res_id res_id =
260 { .name = {data->fid1.id, data->fid1.generation} };
/* slots 0/1 are fixed (lock request + intent); 2..4 vary per intent type */
261 int size[5] = {sizeof(struct ldlm_request), sizeof(struct ldlm_intent)};
262 int rc, flags = LDLM_FL_HAS_INTENT;
/* reply always reserves room for a stripe MD + cookies (see comment above) */
263 int repsize[4] = {sizeof(struct ldlm_reply),
264 sizeof(struct mds_body),
265 obddev->u.cli.cl_max_mds_easize,
266 obddev->u.cli.cl_max_mds_cookiesize};
267 struct ldlm_reply *dlm_rep;
268 struct ldlm_intent *lit;
269 struct ldlm_request *lockreq;
271 unsigned long irqflags;
272 int reply_buffers = 0;
275 // LDLM_DEBUG_NOLOCK("mdsintent=%s,name=%s,dir=%lu",
276 // ldlm_it2str(it->it_op), it_name, it_inode->i_ino);
/* Build the request: buffer layout and packing depend on the intent. */
278 if (it->it_op & IT_OPEN) {
/* opens via intents are regular-file creates; apply the process umask */
279 it->it_create_mode |= S_IFREG;
280 it->it_create_mode &= ~current->fs->umask;
282 size[2] = sizeof(struct mds_rec_create);
283 size[3] = data->namelen + 1;
284 /* As an optimization, we allocate an RPC request buffer for
285 * at least a default-sized LOV EA even if we aren't sending
286 * one. We grow the whole request to the next power-of-two
287 * size since we get that much from a slab allocation anyways.
288 * This avoids an allocation below in the common case where
289 * we need to save a default-sized LOV EA for open replay. */
290 size[4] = max(lmmsize, obddev->u.cli.cl_default_mds_easize);
291 rc = lustre_msg_size(5, size);
293 size[4] = min(size[4] + round_up(rc) - rc,
294 obddev->u.cli.cl_max_mds_easize);
295 req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE,
/* mark for replay under rq_lock (elided statement between lock/unlock) */
300 spin_lock_irqsave (&req->rq_lock, irqflags);
302 spin_unlock_irqrestore (&req->rq_lock, irqflags);
304 /* pack the intent */
305 lit = lustre_msg_buf(req->rq_reqmsg, 1, sizeof (*lit));
306 lit->opc = (__u64)it->it_op;
308 /* pack the intended request */
309 mdc_open_pack(req, 2, data, it->it_create_mode, 0,
310 it->it_flags, lmm, lmmsize);
311 /* get ready for the reply */
313 req->rq_replen = lustre_msg_size(3, repsize);
314 } else if (it->it_op & IT_UNLINK) {
315 size[2] = sizeof(struct mds_rec_unlink);
316 size[3] = data->namelen + 1;
317 req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 4,
322 /* pack the intent */
323 lit = lustre_msg_buf(req->rq_reqmsg, 1, sizeof (*lit));
324 lit->opc = (__u64)it->it_op;
326 /* pack the intended request */
327 mdc_unlink_pack(req, 2, data);
328 /* get ready for the reply */
330 req->rq_replen = lustre_msg_size(4, repsize);
331 } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
332 int valid = OBD_MD_FLNOTOBD | OBD_MD_FLEASIZE;
333 size[2] = sizeof(struct mds_body);
334 size[3] = data->namelen + 1;
336 req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 4,
341 /* pack the intent */
342 lit = lustre_msg_buf(req->rq_reqmsg, 1, sizeof (*lit));
343 lit->opc = (__u64)it->it_op;
345 /* pack the intended request */
346 mdc_getattr_pack(req, valid, 2, it->it_flags, data);
347 /* get ready for the reply */
349 req->rq_replen = lustre_msg_size(3, repsize);
350 } else if (it->it_op == IT_READDIR) {
/* readdir carries no intent payload: lock request buffer only */
351 req = ptlrpc_prep_req(class_exp2cliimp(exp), LDLM_ENQUEUE, 1,
356 /* get ready for the reply */
358 req->rq_replen = lustre_msg_size(1, repsize);
/* Serialize intent RPCs through the client rpc lock, then enqueue. */
364 mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
365 rc = ldlm_cli_enqueue(exp, req, obddev->obd_namespace, res_id,
366 lock_type, NULL, lock_mode, &flags, cb_blocking,
367 cb_completion, NULL, cb_data, NULL, 0, NULL,
369 mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
371 /* Similarly, if we're going to replay this request, we don't want to
372 * actually get a lock, just perform the intent. */
373 if (req->rq_transno || req->rq_replay) {
374 lockreq = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*lockreq));
375 lockreq->lock_flags |= LDLM_FL_INTENT_ONLY;
378 /* This can go when we're sure that this can never happen */
379 LASSERT(rc != -ENOENT);
/* ABORTED means the server executed the intent but granted no lock */
380 if (rc == ELDLM_LOCK_ABORTED) {
382 memset(lockh, 0, sizeof(*lockh));
384 } else if (rc != 0) {
385 CERROR("ldlm_cli_enqueue: %d\n", rc);
386 LASSERTF(rc < 0, "rc %d\n", rc);
/* failed request must not be replayed; drop it here */
387 mdc_clear_replay_flag(req, rc);
388 ptlrpc_req_finished(req);
390 } else { /* rc = 0 */
391 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
394 /* If the server gave us back a different lock mode, we should
395 * fix up our variables. */
396 if (lock->l_req_mode != lock_mode) {
397 ldlm_lock_addref(lockh, lock->l_req_mode);
398 ldlm_lock_decref(lockh, lock_mode);
399 lock_mode = lock->l_req_mode;
402 ldlm_lock_allow_match(lock);
/* Unpack the intent result into @it: disposition/status drive the caller. */
406 dlm_rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*dlm_rep));
407 LASSERT(dlm_rep != NULL); /* checked by ldlm_cli_enqueue() */
408 LASSERT_REPSWABBED(req, 0); /* swabbed by ldlm_cli_enqueue() */
410 it->d.lustre.it_disposition = (int) dlm_rep->lock_policy_res1;
411 it->d.lustre.it_status = (int) dlm_rep->lock_policy_res2;
412 it->d.lustre.it_lock_mode = lock_mode;
/* the request stays attached to the intent until the caller releases it */
413 it->d.lustre.it_data = req;
415 if (it->d.lustre.it_status < 0 && req->rq_replay)
416 mdc_clear_replay_flag(req, it->d.lustre.it_status);
418 DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
419 it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status);
421 /* We know what to expect, so we do any byte flipping required here */
422 LASSERT(reply_buffers == 4 || reply_buffers == 3 || reply_buffers == 1);
423 if (reply_buffers >= 3) {
424 struct mds_body *body;
426 body = lustre_swab_repbuf(req, 1, sizeof (*body),
427 lustre_swab_mds_body);
429 CERROR ("Can't swab mds_body\n");
433 if ((body->valid & OBD_MD_FLEASIZE) != 0) {
434 /* The eadata is opaque; just check that it is
435 * there. Eventually, obd_unpackmd() will check
437 eadata = lustre_swab_repbuf(req, 2, body->eadatasize,
439 if (eadata == NULL) {
440 CERROR ("Missing/short eadata\n");
443 /* We save the reply LOV EA in case we have to replay
444 * a create for recovery. If we didn't allocate a
445 * large enough request buffer above we need to
446 * reallocate it here to hold the actual LOV EA. */
447 if (it->it_op & IT_OPEN) {
448 if (req->rq_reqmsg->buflens[4] <
450 mdc_realloc_openmsg(req, body, size);
452 lmm = lustre_msg_buf(req->rq_reqmsg, 4,
455 memcpy(lmm, eadata, body->eadatasize);
462 EXPORT_SYMBOL(mdc_enqueue);
465 * This long block is all about fixing up the lock and request state
466 * so that it is correct as of the moment _before_ the operation was
467 * applied; that way, the VFS will think that everything is normal and
468 * call Lustre's regular VFS methods.
470 * If we're performing a creation, that means that unless the creation
471 * failed with EEXIST, we should fake up a negative dentry.
473 * For everything else, we want to lookup to succeed.
475 * One additional note: if CREATE or OPEN succeeded, we add an extra
476 * reference to the request because we need to keep it around until
477 * ll_create/ll_open gets called.
479 * The server will return to us, in it_disposition, an indication of
480 * exactly what d.lustre.it_status refers to.
482 * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
483 * otherwise if DISP_OPEN_CREATE is set, then it status is the
484 * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
485 * DISP_LOOKUP_POS will be set, indicating whether the child lookup
488 * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
/*
 * mdc_intent_lock(): top-level entry for llite's intent-based lookup /
 * revalidate / open paths (see the long block comment above for the
 * disposition contract).  Revalidations (@cfid set, LOOKUP/GETATTR) are
 * first tried against locally cached PR/PW locks; otherwise the intent is
 * shipped to the MDS via mdc_enqueue() and the reply's disposition/status
 * is interpreted here.
 *
 * NOTE(review): many lines are elided in this excerpt (ENTRY/RETURN
 * tracing, error returns after mdc_enqueue, closing braces, some memcpy
 * size arguments); comments describe only what is visible -- confirm
 * against the full file.
 */
491 int mdc_intent_lock(struct obd_export *exp, struct ll_uctxt *uctxt,
492 struct ll_fid *pfid, const char *name, int len,
493 void *lmm, int lmmsize,
494 struct ll_fid *cfid, struct lookup_intent *it,
495 int lookup_flags, struct ptlrpc_request **reqp,
496 ldlm_blocking_callback cb_blocking)
498 struct lustre_handle lockh;
499 struct ptlrpc_request *request;
501 struct mds_body *mds_body;
502 struct lustre_handle old_lock;
503 struct ldlm_lock *lock;
507 CDEBUG(D_DLMTRACE,"name: %.*s in inode "LPU64", intent: %s flags %#o\n",
508 len, name, pfid->id, ldlm_it2str(it->it_op), it->it_flags);
/* Revalidate fast path: look for an already-cached lock on the child fid. */
510 if (cfid && (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR)) {
511 /* We could just return 1 immediately, but since we should only
512 * be called in revalidate_it if we already have a lock, let's
514 struct ldlm_res_id res_id = { .name = { cfid->id,
516 struct lustre_handle lockh;
/* try a read lock first, then fall back to a write lock */
519 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
520 LDLM_FL_BLOCK_GRANTED, &res_id,
521 LDLM_PLAIN, NULL, LCK_PR, &lockh);
524 rc = ldlm_lock_match(exp->exp_obd->obd_namespace,
525 LDLM_FL_BLOCK_GRANTED, &res_id,
526 LDLM_PLAIN, NULL, LCK_PW, &lockh);
/* a matched lock satisfies the intent without an RPC */
529 memcpy(&it->d.lustre.it_lock_handle, &lockh,
531 it->d.lustre.it_lock_mode = mode;
536 /* lookup_it may be called only after revalidate_it has run, because
537 * revalidate_it cannot return errors, only zero. Returning zero causes
538 * this call to lookup, which *can* return an error.
540 * We only want to execute the request associated with the intent one
541 * time, however, so don't send the request again. Instead, skip past
542 * this and use the request from revalidate. In this case, revalidate
543 * never dropped its reference, so the refcounts are all OK */
544 if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
545 struct mdc_op_data op_data;
546 mdc_fid2mdc_op_data(&op_data, uctxt, pfid, cfid, name, len, 0);
548 rc = mdc_enqueue(exp, LDLM_PLAIN, it, it_to_lock_mode(it),
549 &op_data, &lockh, lmm, lmmsize,
550 ldlm_completion_ast, cb_blocking, NULL);
/* the intent now owns the RPC; hand it back to the caller via *reqp */
553 memcpy(&it->d.lustre.it_lock_handle, &lockh, sizeof(lockh));
555 request = *reqp = it->d.lustre.it_data;
556 LASSERT(request != NULL);
557 LASSERT(request != LP_POISON);
559 /* If we're doing an IT_OPEN which did not result in an actual
560 * successful open, then we need to remove the bit which saves
561 * this request for unconditional replay.
563 * It's important that we do this first! Otherwise we might exit the
564 * function without doing so, and try to replay a failed create
566 if (it->it_op & IT_OPEN && request->rq_replay &&
567 (!it_disposition(it, DISP_OPEN_OPEN) ||it->d.lustre.it_status != 0))
568 mdc_clear_replay_flag(request, it->d.lustre.it_status);
570 if (!it_disposition(it, DISP_IT_EXECD)) {
571 /* The server failed before it even started executing the
572 * intent, i.e. because it couldn't unpack the request. */
573 LASSERT(it->d.lustre.it_status != 0);
574 RETURN(it->d.lustre.it_status);
576 rc = it_open_error(DISP_IT_EXECD, it);
/* buffer 1 of the reply carries the mds_body for the looked-up object */
580 mds_body = lustre_msg_buf(request->rq_repmsg, 1, sizeof(*mds_body));
581 LASSERT(mds_body != NULL); /* mdc_enqueue checked */
582 LASSERT_REPSWABBED(request, 1); /* mdc_enqueue swabbed */
584 /* If we were revalidating a fid/name pair, mark the intent in
585 * case we fail and get called again from lookup */
587 it_set_disposition(it, DISP_ENQ_COMPLETE);
588 /* Also: did we find the same inode? */
589 if (memcmp(cfid, &mds_body->fid1, sizeof(*cfid)))
593 rc = it_open_error(DISP_LOOKUP_EXECD, it);
597 /* keep requests around for the multiple phases of the call
598 * this shows the DISP_XX must guarantee we make it into the call
600 if (it_disposition(it, DISP_OPEN_CREATE) &&
601 !it_open_error(DISP_OPEN_CREATE, it))
602 ptlrpc_request_addref(request); /* balanced in ll_create_node */
603 if (it_disposition(it, DISP_OPEN_OPEN) &&
604 !it_open_error(DISP_OPEN_OPEN, it))
605 ptlrpc_request_addref(request); /* balanced in ll_file_open */
607 if (it->it_op & IT_CREAT) {
608 /* XXX this belongs in ll_create_it */
609 } else if (it->it_op == IT_OPEN) {
610 LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
612 LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP));
615 /* If we already have a matching lock, then cancel the new
616 * one. We have to set the data here instead of in
617 * mdc_enqueue, because we need to use the child's inode as
618 * the l_ast_data to match, and that's not available until
619 * intent_finish has performed the iget().) */
620 lock = ldlm_handle2lock(&lockh);
622 LDLM_DEBUG(lock, "matching against this");
/* keep the pre-existing lock; drop the redundant one we just acquired */
624 memcpy(&old_lock, &lockh, sizeof(lockh));
625 if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
626 LDLM_PLAIN, NULL, LCK_NL, &old_lock)) {
627 ldlm_lock_decref_and_cancel(&lockh,
628 it->d.lustre.it_lock_mode);
629 memcpy(&lockh, &old_lock, sizeof(old_lock));
630 memcpy(&it->d.lustre.it_lock_handle, &lockh,
634 CDEBUG(D_DENTRY, "D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
635 len, name, ldlm_it2str(it->it_op), it->d.lustre.it_status,
636 it->d.lustre.it_disposition, rc);
640 EXPORT_SYMBOL(mdc_intent_lock);