lustre/mdc/mdc_locks.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  */
36
37 #ifndef EXPORT_SYMTAB
38 # define EXPORT_SYMTAB
39 #endif
40 #define DEBUG_SUBSYSTEM S_MDC
41
42 #ifdef __KERNEL__
43 # include <linux/module.h>
44 # include <linux/pagemap.h>
45 # include <linux/miscdevice.h>
46 # include <linux/init.h>
47 #else
48 # include <liblustre.h>
49 #endif
50
51 #include <lustre_acl.h>
52 #include <obd_class.h>
53 #include <lustre_dlm.h>
54 /* fid_res_name_eq() */
55 #include <lustre_fid.h>
56 #include <lprocfs_status.h>
57 #include "mdc_internal.h"
58
59 int it_disposition(struct lookup_intent *it, int flag)
60 {
61         return it->d.lustre.it_disposition & flag;
62 }
63 EXPORT_SYMBOL(it_disposition);
64
65 void it_set_disposition(struct lookup_intent *it, int flag)
66 {
67         it->d.lustre.it_disposition |= flag;
68 }
69 EXPORT_SYMBOL(it_set_disposition);
70
71 void it_clear_disposition(struct lookup_intent *it, int flag)
72 {
73         it->d.lustre.it_disposition &= ~flag;
74 }
75 EXPORT_SYMBOL(it_clear_disposition);
76
77 int it_open_error(int phase, struct lookup_intent *it)
78 {
79         if (it_disposition(it, DISP_OPEN_OPEN)) {
80                 if (phase >= DISP_OPEN_OPEN)
81                         return it->d.lustre.it_status;
82                 else
83                         return 0;
84         }
85
86         if (it_disposition(it, DISP_OPEN_CREATE)) {
87                 if (phase >= DISP_OPEN_CREATE)
88                         return it->d.lustre.it_status;
89                 else
90                         return 0;
91         }
92
93         if (it_disposition(it, DISP_LOOKUP_EXECD)) {
94                 if (phase >= DISP_LOOKUP_EXECD)
95                         return it->d.lustre.it_status;
96                 else
97                         return 0;
98         }
99
100         if (it_disposition(it, DISP_IT_EXECD)) {
101                 if (phase >= DISP_IT_EXECD)
102                         return it->d.lustre.it_status;
103                 else
104                         return 0;
105         }
106         CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
107                it->d.lustre.it_status);
108         LBUG();
109         return 0;
110 }
111 EXPORT_SYMBOL(it_open_error);
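
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It shows how a caller would use the phased helpers
 * above to attribute an intent-open failure to the right phase; the caller
 * context is hypothetical.
 *
 *      int rc;
 *
 *      if (!it_disposition(it, DISP_IT_EXECD))
 *              return it->d.lustre.it_status;  // MDS never ran the intent
 *
 *      rc = it_open_error(DISP_LOOKUP_EXECD, it);      // lookup phase error?
 *      if (rc)
 *              return rc;
 *
 *      rc = it_open_error(DISP_OPEN_OPEN, it);         // open phase error?
 *      if (rc)
 *              return rc;
 *
 * Querying an earlier phase returns 0 when the failure belongs to a later
 * phase, which is how "lookup succeeded but open failed" is distinguished
 * from "lookup itself failed".  mdc_finish_intent_lock() below follows the
 * same pattern.
 */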
112
113 /* this must be called on a lockh that is known to have a referenced lock */
114 int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
115                       __u32 *bits)
116 {
117         struct ldlm_lock *lock;
118         ENTRY;
119
120         if (bits)
121                 *bits = 0;
122
123         if (!*lockh) {
124                 EXIT;
125                 RETURN(0);
126         }
127
128         lock = ldlm_handle2lock((struct lustre_handle *)lockh);
129
130         LASSERT(lock != NULL);
131         lock_res_and_lock(lock);
132 #ifdef __KERNEL__
133         if (lock->l_ast_data && lock->l_ast_data != data) {
134                 struct inode *new_inode = data;
135                 struct inode *old_inode = lock->l_ast_data;
136                 LASSERTF(old_inode->i_state & I_FREEING,
137                          "Found existing inode %p/%lu/%u state %lu in lock: "
138                          "setting data to %p/%lu/%u\n", old_inode,
139                          old_inode->i_ino, old_inode->i_generation,
140                          old_inode->i_state,
141                          new_inode, new_inode->i_ino, new_inode->i_generation);
142         }
143 #endif
144         lock->l_ast_data = data;
145         if (bits)
146                 *bits = lock->l_policy_data.l_inodebits.bits;
147
148         unlock_res_and_lock(lock);
149         LDLM_LOCK_PUT(lock);
150
151         RETURN(0);
152 }
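
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * The caller (in practice the llite layer) attaches the freshly instantiated
 * inode to the intent lock and reads back which inodebits the lock covers;
 * the surrounding variables are hypothetical.
 *
 *      __u32 bits = 0;
 *
 *      mdc_set_lock_data(exp, &it->d.lustre.it_lock_handle, inode, &bits);
 *      if (bits & MDS_INODELOCK_LOOKUP)
 *              ;       // the lock also protects the name->fid mapping
 */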
153
154 ldlm_mode_t mdc_lock_match(struct obd_export *exp, int flags,
155                            const struct lu_fid *fid, ldlm_type_t type,
156                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
157                            struct lustre_handle *lockh)
158 {
159         struct ldlm_res_id res_id;
160         ldlm_mode_t rc;
161         ENTRY;
162
163         fid_build_reg_res_name(fid, &res_id);
164         rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
165                              &res_id, type, policy, mode, lockh, 0);
166         RETURN(rc);
167 }
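
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * mdc_lock_match() is a fid-aware wrapper around ldlm_lock_match(); a caller
 * checking for a cached inodebits lock before enqueueing a new one might do
 * the following (compare mdc_intent_lock() and mdc_revalidate_lock() below):
 *
 *      ldlm_policy_data_t policy = {
 *              .l_inodebits = { MDS_INODELOCK_UPDATE } };
 *      struct lustre_handle lockh;
 *      ldlm_mode_t mode;
 *
 *      mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid, LDLM_IBITS,
 *                            &policy, LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh);
 *      if (mode)
 *              ;       // a compatible granted lock exists; lockh now holds a
 *                      // reference to drop later with ldlm_lock_decref(&lockh, mode)
 */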
168
169 int mdc_cancel_unused(struct obd_export *exp,
170                       const struct lu_fid *fid,
171                       ldlm_policy_data_t *policy,
172                       ldlm_mode_t mode, int flags, void *opaque)
173 {
174         struct ldlm_res_id res_id;
175         struct obd_device *obd = class_exp2obd(exp);
176         int rc;
177
178         ENTRY;
179
180         fid_build_reg_res_name(fid, &res_id);
181         rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
182                                              policy, mode, flags, opaque);
183         RETURN(rc);
184 }
185
186 int mdc_change_cbdata(struct obd_export *exp,
187                       const struct lu_fid *fid,
188                       ldlm_iterator_t it, void *data)
189 {
190         struct ldlm_res_id res_id;
191         ENTRY;
192
193         fid_build_reg_res_name(fid, &res_id);
194         ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace,
195                               &res_id, it, data);
196
197         EXIT;
198         return 0;
199 }
200
201 static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
202 {
203         /* Don't hold error requests for replay. */
204         if (req->rq_replay) {
205                 spin_lock(&req->rq_lock);
206                 req->rq_replay = 0;
207                 spin_unlock(&req->rq_lock);
208         }
209         if (rc && req->rq_transno != 0) {
210                 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
211                 LBUG();
212         }
213 }
214
215 /* Save a large LOV EA into the request buffer so that it is available
216  * for replay.  We don't do this in the initial request because the
217  * original request doesn't need this buffer (at most it sends just the
218  * lov_mds_md) and it would be a waste of RAM/bandwidth to send an empty
219  * buffer; it may also be difficult to allocate and save a very large
220  * request buffer for each open. (bug 5707)
221  *
222  * OOM here may cause recovery failure if the lmm is needed (only for the
223  * original open, if the MDS crashed just as this client also OOM'd),
224  * but this is incredibly unlikely, and it is questionable whether the
225  * client could do MDS recovery under OOM anyway... */
226 static void mdc_realloc_openmsg(struct ptlrpc_request *req,
227                                 struct mdt_body *body)
228 {
229         int     rc;
230
231         /* FIXME: remove this explicit offset. */
232         rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
233                                         body->eadatasize);
234         if (rc) {
235                 CERROR("Can't enlarge segment %d size to %d\n",
236                        DLM_INTENT_REC_OFF + 4, body->eadatasize);
237                 body->valid &= ~OBD_MD_FLEASIZE;
238                 body->eadatasize = 0;
239         }
240 }
241
242 static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
243                                                    struct lookup_intent *it,
244                                                    struct md_op_data *op_data,
245                                                    void *lmm, int lmmsize,
246                                                    void *cb_data)
247 {
248         struct ptlrpc_request *req;
249         struct obd_device     *obddev = class_exp2obd(exp);
250         struct ldlm_intent    *lit;
251         int           joinfile = !!((it->it_create_mode & M_JOIN_FILE) &&
252                                     op_data->op_data);
253         CFS_LIST_HEAD(cancels);
254         int                    count = 0;
255         int                    mode;
256         int                    rc;
257         ENTRY;
258
259         it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
260
261         /* XXX: openlock is not cancelled for cross-refs. */
262         /* If inode is known, cancel conflicting OPEN locks. */
263         if (fid_is_sane(&op_data->op_fid2)) {
264                 if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
265                         mode = LCK_CW;
266 #ifdef FMODE_EXEC
267                 else if (it->it_flags & FMODE_EXEC)
268                         mode = LCK_PR;
269 #endif
270                 else
271                         mode = LCK_CR;
272                 count = mdc_resource_get_unused(exp, &op_data->op_fid2,
273                                                 &cancels, mode,
274                                                 MDS_INODELOCK_OPEN);
275         }
276
277         /* If CREATE or JOIN_FILE, cancel parent's UPDATE lock. */
278         if (it->it_op & IT_CREAT || joinfile)
279                 mode = LCK_EX;
280         else
281                 mode = LCK_CR;
282         count += mdc_resource_get_unused(exp, &op_data->op_fid1,
283                                          &cancels, mode,
284                                          MDS_INODELOCK_UPDATE);
285
286         req = ptlrpc_request_alloc(class_exp2cliimp(exp),
287                                    &RQF_LDLM_INTENT_OPEN);
288         if (req == NULL) {
289                 ldlm_lock_list_put(&cancels, l_bl_ast, count);
290                 RETURN(ERR_PTR(-ENOMEM));
291         }
292
293         /* parent capability */
294         mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
295         /* child capability: reserve the size according to the parent capa;
296          * it will be filled in after we get the reply */
297         mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa1);
298
299         req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
300                              op_data->op_namelen + 1);
301         req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
302                              max(lmmsize, obddev->u.cli.cl_default_mds_easize));
303         if (!joinfile) {
304                 req_capsule_set_size(&req->rq_pill, &RMF_REC_JOINFILE,
305                                      RCL_CLIENT, 0);
306         }
307
308         rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
309         if (rc) {
310                 ptlrpc_request_free(req);
311                 RETURN(ERR_PTR(rc));
312         }
313
314         if (joinfile) {
315                 __u64 head_size = *(__u64 *)op_data->op_data;
316                 mdc_join_pack(req, op_data, head_size);
317         }
318
319         spin_lock(&req->rq_lock);
320         req->rq_replay = req->rq_import->imp_replayable;
321         spin_unlock(&req->rq_lock);
322
323         /* pack the intent */
324         lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
325         lit->opc = (__u64)it->it_op;
326
327         /* pack the intended request */
328         mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
329                       lmmsize);
330
331         /* for remote client, fetch remote perm for current user */
332         if (client_is_remote(exp))
333                 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
334                                      sizeof(struct mdt_remote_perm));
335         ptlrpc_request_set_replen(req);
336         return req;
337 }
338
339 static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
340                                                      struct lookup_intent *it,
341                                                      struct md_op_data *op_data)
342 {
343         struct ptlrpc_request *req;
344         struct obd_device     *obddev = class_exp2obd(exp);
345         struct ldlm_intent    *lit;
346         int                    rc;
347         ENTRY;
348
349         req = ptlrpc_request_alloc(class_exp2cliimp(exp),
350                                    &RQF_LDLM_INTENT_UNLINK);
351         if (req == NULL)
352                 RETURN(ERR_PTR(-ENOMEM));
353
354         mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
355         req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
356                              op_data->op_namelen + 1);
357
358         rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
359         if (rc) {
360                 ptlrpc_request_free(req);
361                 RETURN(ERR_PTR(rc));
362         }
363
364         /* pack the intent */
365         lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
366         lit->opc = (__u64)it->it_op;
367
368         /* pack the intended request */
369         mdc_unlink_pack(req, op_data);
370
371         req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
372                              obddev->u.cli.cl_max_mds_easize);
373         req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
374                              obddev->u.cli.cl_max_mds_cookiesize);
375         ptlrpc_request_set_replen(req);
376         RETURN(req);
377 }
378
379 static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
380                                                       struct lookup_intent *it,
381                                                       struct md_op_data *op_data)
382 {
383         struct ptlrpc_request *req;
384         struct obd_device     *obddev = class_exp2obd(exp);
385         obd_valid              valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
386                                        OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
387                                        OBD_MD_FLMDSCAPA | OBD_MD_MEA |
388                                        (client_is_remote(exp) ?
389                                                OBD_MD_FLRMTPERM : OBD_MD_FLACL);
390         struct ldlm_intent    *lit;
391         int                    rc;
392         ENTRY;
393
394         req = ptlrpc_request_alloc(class_exp2cliimp(exp),
395                                    &RQF_LDLM_INTENT_GETATTR);
396         if (req == NULL)
397                 RETURN(ERR_PTR(-ENOMEM));
398
399         mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
400         req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
401                              op_data->op_namelen + 1);
402
403         rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
404         if (rc) {
405                 ptlrpc_request_free(req);
406                 RETURN(ERR_PTR(rc));
407         }
408
409         /* pack the intent */
410         lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
411         lit->opc = (__u64)it->it_op;
412
413         /* pack the intended request */
414         mdc_getattr_pack(req, valid, it->it_flags, op_data);
415
416         req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
417                              obddev->u.cli.cl_max_mds_easize);
418         if (client_is_remote(exp))
419                 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
420                                      sizeof(struct mdt_remote_perm));
421         ptlrpc_request_set_replen(req);
422         RETURN(req);
423 }
424
425 static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp)
426 {
427         struct ptlrpc_request *req;
428         int rc;
429         ENTRY;
430
431         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
432         if (req == NULL)
433                 RETURN(ERR_PTR(-ENOMEM));
434
435         rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
436         if (rc) {
437                 ptlrpc_request_free(req);
438                 RETURN(ERR_PTR(rc));
439         }
440
441         ptlrpc_request_set_replen(req);
442         RETURN(req);
443 }
444
445 static int mdc_finish_enqueue(struct obd_export *exp,
446                               struct ptlrpc_request *req,
447                               struct ldlm_enqueue_info *einfo,
448                               struct lookup_intent *it,
449                               struct lustre_handle *lockh,
450                               int rc)
451 {
452         struct req_capsule  *pill = &req->rq_pill;
453         struct ldlm_request *lockreq;
454         struct ldlm_reply   *lockrep;
455         ENTRY;
456
457         LASSERT(rc >= 0);
458         /* If we're going to replay this request, we don't want to actually
459          * get a lock; just perform the intent. */
460         if (req->rq_transno || req->rq_replay) {
461                 lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
462                 lockreq->lock_flags |= LDLM_FL_INTENT_ONLY;
463         }
464
465         if (rc == ELDLM_LOCK_ABORTED) {
466                 einfo->ei_mode = 0;
467                 memset(lockh, 0, sizeof(*lockh));
468                 rc = 0;
469         } else { /* rc = 0 */
470                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
471                 LASSERT(lock);
472
473                 /* If the server gave us back a different lock mode, we should
474                  * fix up our variables. */
475                 if (lock->l_req_mode != einfo->ei_mode) {
476                         ldlm_lock_addref(lockh, lock->l_req_mode);
477                         ldlm_lock_decref(lockh, einfo->ei_mode);
478                         einfo->ei_mode = lock->l_req_mode;
479                 }
480                 LDLM_LOCK_PUT(lock);
481         }
482
483         lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
484         LASSERT(lockrep != NULL);                 /* checked by ldlm_cli_enqueue() */
485
486         it->d.lustre.it_disposition = (int)lockrep->lock_policy_res1;
487         it->d.lustre.it_status = (int)lockrep->lock_policy_res2;
488         it->d.lustre.it_lock_mode = einfo->ei_mode;
489         it->d.lustre.it_lock_handle = lockh->cookie;
490         it->d.lustre.it_data = req;
491
492         if (it->d.lustre.it_status < 0 && req->rq_replay)
493                 mdc_clear_replay_flag(req, it->d.lustre.it_status);
494
495         /* If we're doing an IT_OPEN which did not result in an actual
496          * successful open, then we need to remove the bit which saves
497          * this request for unconditional replay.
498          *
499          * It's important that we do this first!  Otherwise we might exit the
500          * function without doing so, and try to replay a failed create
501          * (bug 3440) */
502         if (it->it_op & IT_OPEN && req->rq_replay &&
503             (!it_disposition(it, DISP_OPEN_OPEN) || it->d.lustre.it_status != 0))
504                 mdc_clear_replay_flag(req, it->d.lustre.it_status);
505
506         DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
507                   it->it_op,it->d.lustre.it_disposition,it->d.lustre.it_status);
508
509         /* We know what to expect, so we do any byte flipping required here */
510         if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
511                 struct mdt_body *body;
512
513                 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
514                 if (body == NULL) {
515                         CERROR ("Can't swab mdt_body\n");
516                         RETURN (-EPROTO);
517                 }
518
519                 if (it_disposition(it, DISP_OPEN_OPEN) &&
520                     !it_open_error(DISP_OPEN_OPEN, it)) {
521                         /*
522                          * If this is a successful OPEN request, we need to set
523                          * replay handler and data early, so that if replay
524                          * happens immediately after swabbing below, new reply
525                          * is swabbed by that handler correctly.
526                          */
527                         mdc_set_open_replay_data(NULL, NULL, req);
528                 }
529
530                 if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
531                         void *eadata;
532
533                         /*
534                          * The eadata is opaque; just check that it is there.
535                          * Eventually, obd_unpackmd() will check the contents.
536                          */
537                         eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
538                                                               body->eadatasize);
539                         if (eadata == NULL)
540                                 RETURN(-EPROTO);
541
542                         /*
543                          * We save the reply LOV EA in case we have to replay a
544                          * create for recovery.  If we didn't allocate a large
545                          * enough request buffer above we need to reallocate it
546                          * here to hold the actual LOV EA.
547                          *
548                          * Do not save the LOV EA if the request is not going
549                          * to be replayed (for example, on error).
550                          */
551                         if ((it->it_op & IT_OPEN) && req->rq_replay) {
552                                 void *lmm;
553                                 if (req_capsule_get_size(pill, &RMF_EADATA,
554                                                          RCL_CLIENT) <
555                                     body->eadatasize) {
556                                         mdc_realloc_openmsg(req, body);
557                                         req_capsule_set_size(pill, &RMF_EADATA,
558                                                              RCL_CLIENT,
559                                                              body->eadatasize);
560                                 }
561                                 lmm = req_capsule_client_get(pill, &RMF_EADATA);
562                                 if (lmm)
563                                         memcpy(lmm, eadata, body->eadatasize);
564                         }
565                 }
566
567                 if (body->valid & OBD_MD_FLRMTPERM) {
568                         struct mdt_remote_perm *perm;
569
570                         LASSERT(client_is_remote(exp));
571                         perm = req_capsule_server_swab_get(pill, &RMF_ACL,
572                                                 lustre_swab_mdt_remote_perm);
573                         if (perm == NULL)
574                                 RETURN(-EPROTO);
575                 }
576                 if (body->valid & OBD_MD_FLMDSCAPA) {
577                         struct lustre_capa *capa, *p;
578
579                         capa = req_capsule_server_get(pill, &RMF_CAPA1);
580                         if (capa == NULL)
581                                 RETURN(-EPROTO);
582
583                         if (it->it_op & IT_OPEN) {
584                                 /* client fid capa will be checked in replay */
585                                 p = req_capsule_client_get(pill, &RMF_CAPA2);
586                                 LASSERT(p);
587                                 *p = *capa;
588                         }
589                 }
590                 if (body->valid & OBD_MD_FLOSSCAPA) {
591                         struct lustre_capa *capa;
592
593                         capa = req_capsule_server_get(pill, &RMF_CAPA2);
594                         if (capa == NULL)
595                                 RETURN(-EPROTO);
596                 }
597         }
598
599         RETURN(rc);
600 }
601
602 /* We always reserve enough space in the reply packet for a stripe MD, because
603  * we don't know in advance the file type. */
604 int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
605                 struct lookup_intent *it, struct md_op_data *op_data,
606                 struct lustre_handle *lockh, void *lmm, int lmmsize,
607                 struct ptlrpc_request **reqp, int extra_lock_flags)
608 {
609         struct obd_device     *obddev = class_exp2obd(exp);
610         struct ptlrpc_request *req = NULL;
611         struct req_capsule    *pill;
612         int                    flags = extra_lock_flags;
613         int                    rc;
614         struct ldlm_res_id res_id;
615         ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
616         ENTRY;
617
618         LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
619                  einfo->ei_type);
620
621         fid_build_reg_res_name(&op_data->op_fid1, &res_id);
622
623         if (it)
624                 flags |= LDLM_FL_HAS_INTENT;
625         if (it && it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR))
626                 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
627
628         if (reqp)
629                 req = *reqp;
630
631         if (!it) {
632                 /* The only intent-less case right now is FLOCK; in this case
633                    the flock policy is hidden in lmm, but lmmsize is 0 */
634                 LASSERT(lmm && lmmsize == 0);
635                 LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n",
636                          einfo->ei_type);
637                 policy = *(ldlm_policy_data_t *)lmm;
638                 res_id.name[3] = LDLM_FLOCK;
639         } else if (it->it_op & IT_OPEN) {
640                 int joinfile = !!((it->it_create_mode & M_JOIN_FILE) &&
641                                               op_data->op_data);
642
643                 req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize,
644                                            einfo->ei_cbdata);
645                 if (!joinfile) {
646                         policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
647                         einfo->ei_cbdata = NULL;
648                         lmm = NULL;
649                 } else
650                         it->it_create_mode &= ~M_JOIN_FILE;
651         } else if (it->it_op & IT_UNLINK)
652                 req = mdc_intent_unlink_pack(exp, it, op_data);
653         else if (it->it_op & (IT_GETATTR | IT_LOOKUP))
654                 req = mdc_intent_getattr_pack(exp, it, op_data);
655         else if (it->it_op == IT_READDIR)
656                 req = ldlm_enqueue_pack(exp);
657         else {
658                 LBUG();
659                 RETURN(-EINVAL);
660         }
661
662         if (IS_ERR(req))
663                 RETURN(PTR_ERR(req));
664         pill = &req->rq_pill;
665
666         /* It is important to obtain rpc_lock first (if applicable), so that
667          * threads that are serialised with rpc_lock do not pollute our
668          * RPCs-in-flight counter. We do not do flock request limiting, though. */
669         if (it) {
670                 mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
671                 mdc_enter_request(&obddev->u.cli);
672         }
673         rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
674                               0, NULL, lockh, 0);
675         if (reqp)
676                 *reqp = req;
677
678         if (it) {
679                 mdc_exit_request(&obddev->u.cli);
680                 mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
681         }
682         if (!it) {
683                 /* For flock requests we return immediately, without further
684                    delay, and let the caller deal with the rest, since the
685                    metadata processing in the rest of this function makes no
686                    sense for flock requests anyway */
687                 RETURN(rc);
688         }
689
690         if (rc < 0) {
691                 CERROR("ldlm_cli_enqueue: %d\n", rc);
692                 mdc_clear_replay_flag(req, rc);
693                 ptlrpc_req_finished(req);
694                 RETURN(rc);
695         }
696         rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
697
698         RETURN(rc);
699 }
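
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * For the intent-less flock case handled above, the caller passes no
 * lookup_intent and smuggles the flock policy in through the lmm pointer
 * with lmmsize == 0; roughly (variable names are hypothetical, and einfo is
 * assumed to be set up with ei_type == LDLM_FLOCK and the flock callbacks):
 *
 *      ldlm_policy_data_t flock_policy;        // .l_flock filled in by caller
 *      struct lustre_handle lockh;
 *      int rc;
 *
 *      rc = mdc_enqueue(exp, einfo, NULL, op_data, &lockh,
 *                       &flock_policy, 0, NULL, 0);
 *      // rc comes straight from ldlm_cli_enqueue(); no reply unpacking
 *      // is done above for flock requests
 */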
700
701 static int mdc_finish_intent_lock(struct obd_export *exp,
702                                   struct ptlrpc_request *request,
703                                   struct md_op_data *op_data,
704                                   struct lookup_intent *it,
705                                   struct lustre_handle *lockh)
706 {
707         struct lustre_handle old_lock;
708         struct mdt_body *mdt_body;
709         struct ldlm_lock *lock;
710         int rc;
711
712
713         LASSERT(request != NULL);
714         LASSERT(request != LP_POISON);
715         LASSERT(request->rq_repmsg != LP_POISON);
716
717         if (!it_disposition(it, DISP_IT_EXECD)) {
718                 /* The server failed before it even started executing the
719                  * intent, e.g. because it couldn't unpack the request. */
720                 LASSERT(it->d.lustre.it_status != 0);
721                 RETURN(it->d.lustre.it_status);
722         }
723         rc = it_open_error(DISP_IT_EXECD, it);
724         if (rc)
725                 RETURN(rc);
726
727         mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
728         LASSERT(mdt_body != NULL);      /* mdc_enqueue checked */
729
730         /* If we were revalidating a fid/name pair, mark the intent in
731          * case we fail and get called again from lookup */
732         if (fid_is_sane(&op_data->op_fid2) &&
733             it->it_create_mode & M_CHECK_STALE &&
734             it->it_op != IT_GETATTR) {
735                 it_set_disposition(it, DISP_ENQ_COMPLETE);
736
737                 /* Also: did we find the same inode? */
738                 /* the server can return one of two fids:
739                  * op_fid2 - the newly allocated fid, if the file was created;
740                  * op_fid3 - the existing fid, if the file was only opened.
741                  * op_fid3 is saved in lmv_intent_open */
742                 if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) &&
743                     (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) {
744                         CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID
745                                "\n", PFID(&op_data->op_fid2),
746                                PFID(&op_data->op_fid3), PFID(&mdt_body->fid1));
747                         RETURN(-ESTALE);
748                 }
749         }
750
751         rc = it_open_error(DISP_LOOKUP_EXECD, it);
752         if (rc)
753                 RETURN(rc);
754
755         /* Keep the request around for the multiple phases of the call;
756          * this shows that the DISP_XX bits must guarantee we make it into the call.
757          */
758         if (!it_disposition(it, DISP_ENQ_CREATE_REF) &&
759             it_disposition(it, DISP_OPEN_CREATE) &&
760             !it_open_error(DISP_OPEN_CREATE, it)) {
761                 it_set_disposition(it, DISP_ENQ_CREATE_REF);
762                 ptlrpc_request_addref(request); /* balanced in ll_create_node */
763         }
764         if (!it_disposition(it, DISP_ENQ_OPEN_REF) &&
765             it_disposition(it, DISP_OPEN_OPEN) &&
766             !it_open_error(DISP_OPEN_OPEN, it)) {
767                 it_set_disposition(it, DISP_ENQ_OPEN_REF);
768                 ptlrpc_request_addref(request); /* balanced in ll_file_open */
769                 /* BUG 11546 - eviction in the middle of open rpc processing */
770                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
771         }
772
773         if (it->it_op & IT_CREAT) {
774                 /* XXX this belongs in ll_create_it */
775         } else if (it->it_op == IT_OPEN) {
776                 LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
777         } else {
778                 LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP));
779         }
780
781         /* If we already have a matching lock, then cancel the new
782          * one.  We have to set the data here instead of in
783          * mdc_enqueue, because we need to use the child's inode as
784          * the l_ast_data to match, and that's not available until
785                  * intent_finish has performed the iget(). */
786         lock = ldlm_handle2lock(lockh);
787         if (lock) {
788                 ldlm_policy_data_t policy = lock->l_policy_data;
789                 LDLM_DEBUG(lock, "matching against this");
790
791                 LASSERTF(fid_res_name_eq(&mdt_body->fid1,
792                                          &lock->l_resource->lr_name),
793                          "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
794                          (unsigned long)lock->l_resource->lr_name.name[0],
795                          (unsigned long)lock->l_resource->lr_name.name[1],
796                          (unsigned long)lock->l_resource->lr_name.name[2],
797                          (unsigned long)fid_seq(&mdt_body->fid1),
798                          (unsigned long)fid_oid(&mdt_body->fid1),
799                          (unsigned long)fid_ver(&mdt_body->fid1));
800                 LDLM_LOCK_PUT(lock);
801
802                 memcpy(&old_lock, lockh, sizeof(*lockh));
803                 if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
804                                     LDLM_IBITS, &policy, LCK_NL, &old_lock, 0)) {
805                         ldlm_lock_decref_and_cancel(lockh,
806                                                     it->d.lustre.it_lock_mode);
807                         memcpy(lockh, &old_lock, sizeof(old_lock));
808                         it->d.lustre.it_lock_handle = lockh->cookie;
809                 }
810         }
811         CDEBUG(D_DENTRY,"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
812                op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
813                it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
814         RETURN(rc);
815 }
816
817 /*
818  * This long block is all about fixing up the lock and request state
819  * so that it is correct as of the moment _before_ the operation was
820  * applied; that way, the VFS will think that everything is normal and
821  * call Lustre's regular VFS methods.
822  *
823  * If we're performing a creation, that means that unless the creation
824  * failed with EEXIST, we should fake up a negative dentry.
825  *
826  * For everything else, we want to lookup to succeed.
827  *
828  * One additional note: if CREATE or OPEN succeeded, we add an extra
829  * reference to the request because we need to keep it around until
830  * ll_create/ll_open gets called.
831  *
832  * The server will return to us, in it_disposition, an indication of
833  * exactly what d.lustre.it_status refers to.
834  *
835  * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
836  * otherwise if DISP_OPEN_CREATE is set, then it_status is the
837  * creation failure mode.  In either case, one of DISP_LOOKUP_NEG or
838  * DISP_LOOKUP_POS will be set, indicating whether the child lookup
839  * was successful.
840  *
841  * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
842  * child lookup.
843  */
844 int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
845                     void *lmm, int lmmsize, struct lookup_intent *it,
846                     int lookup_flags, struct ptlrpc_request **reqp,
847                     ldlm_blocking_callback cb_blocking,
848                     int extra_lock_flags)
849 {
850         struct lustre_handle lockh;
851         int rc = 0;
852         ENTRY;
853         LASSERT(it);
854
855         CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
856                ", intent: %s flags %#o\n", op_data->op_namelen,
857                op_data->op_name, PFID(&op_data->op_fid2),
858                PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
859                it->it_flags);
860
861         lockh.cookie = 0;
862         if (fid_is_sane(&op_data->op_fid2) &&
863             (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
864                 /* We could just return 1 immediately, but since we should only
865                  * be called in revalidate_it if we already have a lock, let's
866                  * verify that. */
867                 ldlm_policy_data_t policy;
868                 ldlm_mode_t mode;
869
870                 /* As not all attributes are kept under the update lock (e.g.
871                    owner/group/ACLs are under the lookup lock), we would need
872                    both ibits for GETATTR. */
873
874                 /* For CMD, the UPDATE lock and the LOOKUP lock cannot be
875                  * obtained together for a cross-object operation, so we cannot
876                  * match both locks at the same time.  FIXME: how to handle
877                  * the above situation? */
878                 policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
879                         MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
880
881                 mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED,
882                                       &op_data->op_fid2, LDLM_IBITS, &policy,
883                                       LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh);
884                 if (mode) {
885                         it->d.lustre.it_lock_handle = lockh.cookie;
886                         it->d.lustre.it_lock_mode = mode;
887                 }
888
889                 /* Only return failure if it was not GETATTR by cfid
890                    (from inode_revalidate) */
891                 if (mode || op_data->op_namelen != 0)
892                         RETURN(!!mode);
893         }
894
895         /* lookup_it may be called only after revalidate_it has run, because
896          * revalidate_it cannot return errors, only zero.  Returning zero causes
897          * this call to lookup, which *can* return an error.
898          *
899          * We only want to execute the request associated with the intent one
900          * time, however, so don't send the request again.  Instead, skip past
901          * this and use the request from revalidate.  In this case, revalidate
902          * never dropped its reference, so the refcounts are all OK */
903         if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
904                 struct ldlm_enqueue_info einfo =
905                         { LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
906                           ldlm_completion_ast, NULL, NULL, NULL };
907
908                 /* In case the upper layer did not allocate the fid, do it now. */
909                 if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
910                         rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
911                         if (rc < 0) {
912                                 CERROR("Can't alloc new fid, rc %d\n", rc);
913                                 RETURN(rc);
914                         }
915                 }
916                 rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
917                                  lmm, lmmsize, NULL, extra_lock_flags);
918                 if (rc < 0)
919                         RETURN(rc);
920         } else if (!fid_is_sane(&op_data->op_fid2) ||
921                    !(it->it_create_mode & M_CHECK_STALE)) {
922                 /* DISP_ENQ_COMPLETE means there is an extra reference on the
923                  * request referenced from this intent, saved for a subsequent
924                  * lookup.  This path is executed when we proceed to that
925                  * lookup, so we clear DISP_ENQ_COMPLETE. */
926                 it_clear_disposition(it, DISP_ENQ_COMPLETE);
927         }
928         *reqp = it->d.lustre.it_data;
929         rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
930         RETURN(rc);
931 }
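
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * Per the block comment above mdc_intent_lock(), the disposition bits tell
 * the caller which phase d.lustre.it_status refers to and whether the child
 * lookup was positive or negative; a caller would decode the result roughly
 * as follows (everything outside the DISP_* flags and the it fields is
 * hypothetical):
 *
 *      int status = it->d.lustre.it_status;
 *
 *      if (it_disposition(it, DISP_OPEN_OPEN))
 *              ;       // status is the result of the open() call
 *      else if (it_disposition(it, DISP_OPEN_CREATE))
 *              ;       // status is the creation failure mode
 *      else if (it_disposition(it, DISP_LOOKUP_EXECD))
 *              ;       // status is the rc of the child lookup
 *
 *      if (it_disposition(it, DISP_LOOKUP_NEG))
 *              ;       // child name not found: fake up a negative dentry
 */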
932
933 static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
934                                               struct ptlrpc_request *req,
935                                               void *unused, int rc)
936 {
937         struct obd_export        *exp = req->rq_async_args.pointer_arg[0];
938         struct md_enqueue_info   *minfo = req->rq_async_args.pointer_arg[1];
939         struct ldlm_enqueue_info *einfo = req->rq_async_args.pointer_arg[2];
940         struct lookup_intent     *it;
941         struct lustre_handle     *lockh;
942         struct obd_device        *obddev;
943         int                       flags = LDLM_FL_HAS_INTENT;
944         ENTRY;
945
946         it    = &minfo->mi_it;
947         lockh = &minfo->mi_lockh;
948
949         obddev = class_exp2obd(exp);
950
951         mdc_exit_request(&obddev->u.cli);
952         if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE))
953                 rc = -ETIMEDOUT;
954
955         rc = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, 1, einfo->ei_mode,
956                                    &flags, NULL, 0, NULL, lockh, rc);
957         if (rc < 0) {
958                 CERROR("ldlm_cli_enqueue_fini: %d\n", rc);
959                 mdc_clear_replay_flag(req, rc);
960                 GOTO(out, rc);
961         }
962
963         rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
964         if (rc)
965                 GOTO(out, rc);
966
967         rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh);
968         EXIT;
969
970 out:
971         OBD_FREE_PTR(einfo);
972         minfo->mi_cb(req, minfo, rc);
973         return 0;
974 }
975
976 int mdc_intent_getattr_async(struct obd_export *exp,
977                              struct md_enqueue_info *minfo,
978                              struct ldlm_enqueue_info *einfo)
979 {
980         struct md_op_data       *op_data = &minfo->mi_data;
981         struct lookup_intent    *it = &minfo->mi_it;
982         struct ptlrpc_request   *req;
983         struct obd_device       *obddev = class_exp2obd(exp);
984         struct ldlm_res_id       res_id;
985         ldlm_policy_data_t       policy = {
986                                         .l_inodebits = { MDS_INODELOCK_LOOKUP }
987                                  };
988         int                      rc;
989         int                      flags = LDLM_FL_HAS_INTENT;
990         ENTRY;
991
992         CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n",
993                op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
994                ldlm_it2str(it->it_op), it->it_flags);
995
996         fid_build_reg_res_name(&op_data->op_fid1, &res_id);
997         req = mdc_intent_getattr_pack(exp, it, op_data);
998         if (IS_ERR(req))
999                 RETURN(PTR_ERR(req));
1000
1001         mdc_enter_request(&obddev->u.cli);
1002         rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
1003                               0, NULL, &minfo->mi_lockh, 1);
1004         if (rc < 0) {
1005                 mdc_exit_request(&obddev->u.cli);
1006                 RETURN(rc);
1007         }
1008
1009         req->rq_async_args.pointer_arg[0] = exp;
1010         req->rq_async_args.pointer_arg[1] = minfo;
1011         req->rq_async_args.pointer_arg[2] = einfo;
1012         req->rq_interpret_reply = mdc_intent_getattr_async_interpret;
1013         ptlrpcd_add_req(req, PSCOPE_OTHER);
1014
1015         RETURN(0);
1016 }
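
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * The async getattr path is driven by a caller (in practice the llite
 * statahead code) that fills in a struct md_enqueue_info plus a matching
 * ldlm_enqueue_info and later receives the result through minfo->mi_cb,
 * which mdc_intent_getattr_async_interpret() above invokes as
 * minfo->mi_cb(req, minfo, rc).  Only fields used in this file are shown;
 * the callback name is hypothetical.  Note that einfo must be allocated
 * with OBD_ALLOC_PTR(), since the interpret callback frees it.
 *
 *      minfo->mi_it.it_op = IT_GETATTR;
 *      minfo->mi_data     = *op_data;          // parent fid + child name
 *      minfo->mi_cb       = my_getattr_done;
 *      einfo->ei_type     = LDLM_IBITS;
 *      einfo->ei_mode     = it_to_lock_mode(&minfo->mi_it);
 *
 *      rc = mdc_intent_getattr_async(exp, minfo, einfo);
 */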
1017
1018 int mdc_revalidate_lock(struct obd_export *exp,
1019                         struct lookup_intent *it,
1020                         struct lu_fid *fid)
1021 {
1022         /* We could just return 1 immediately, but since we should only
1023          * be called in revalidate_it if we already have a lock, let's
1024          * verify that. */
1025         struct ldlm_res_id res_id;
1026         struct lustre_handle lockh;
1027         ldlm_policy_data_t policy;
1028         ldlm_mode_t mode;
1029         ENTRY;
1030
1031         fid_build_reg_res_name(fid, &res_id);
1032         /* As not all attributes are kept under the update lock (e.g.
1033            owner/group/ACLs are under the lookup lock), we need both
1034            ibits for GETATTR. */
1035         policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
1036                 MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP :
1037                 MDS_INODELOCK_LOOKUP;
1038
1039         mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
1040                                LDLM_FL_BLOCK_GRANTED, &res_id, LDLM_IBITS,
1041                                &policy, LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0);
1042         if (mode) {
1043                 it->d.lustre.it_lock_handle = lockh.cookie;
1044                 it->d.lustre.it_lock_mode = mode;
1045         }
1046
1047         RETURN(!!mode);
1048 }