1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * OBD_{ALLOC,FREE}*()
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #include <lustre_acl.h>
69 #include <lustre_param.h>
70 #include <lustre_quota.h>
71 #include <lustre_linkea.h>
72
73 mdl_mode_t mdt_mdl_lock_modes[] = {
74         [LCK_MINMODE] = MDL_MINMODE,
75         [LCK_EX]      = MDL_EX,
76         [LCK_PW]      = MDL_PW,
77         [LCK_PR]      = MDL_PR,
78         [LCK_CW]      = MDL_CW,
79         [LCK_CR]      = MDL_CR,
80         [LCK_NL]      = MDL_NL,
81         [LCK_GROUP]   = MDL_GROUP
82 };
83
84 ldlm_mode_t mdt_dlm_lock_modes[] = {
85         [MDL_MINMODE] = LCK_MINMODE,
86         [MDL_EX]      = LCK_EX,
87         [MDL_PW]      = LCK_PW,
88         [MDL_PR]      = LCK_PR,
89         [MDL_CW]      = LCK_CW,
90         [MDL_CR]      = LCK_CR,
91         [MDL_NL]      = LCK_NL,
92         [MDL_GROUP]   = LCK_GROUP
93 };
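
/*
 * Illustrative sketch, compiled out: the two tables above give O(1)
 * conversion between LDLM and MDL lock modes by plain array indexing.
 * The example_* helpers are hypothetical; the real converters
 * (mdt_dlm_mode2mdl_mode()/mdt_mdl_mode2dlm_mode(), used further down
 * in this file) are assumed to follow the same pattern.
 */
#if 0
static inline mdl_mode_t example_dlm_mode2mdl_mode(ldlm_mode_t mode)
{
        /* the caller must pass a mode that has an entry in the table */
        return mdt_mdl_lock_modes[mode];
}

static inline ldlm_mode_t example_mdl_mode2dlm_mode(mdl_mode_t mode)
{
        return mdt_dlm_lock_modes[mode];
}
#endif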
94
95 static struct mdt_device *mdt_dev(struct lu_device *d);
96 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
97
98 static const struct lu_object_operations mdt_obj_ops;
99
100 /* Slab for MDT object allocation */
101 static cfs_mem_cache_t *mdt_object_kmem;
102
103 static struct lu_kmem_descr mdt_caches[] = {
104         {
105                 .ckd_cache = &mdt_object_kmem,
106                 .ckd_name  = "mdt_obj",
107                 .ckd_size  = sizeof(struct mdt_object)
108         },
109         {
110                 .ckd_cache = NULL
111         }
112 };
113
114 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
115 {
116         if (!rep)
117                 return 0;
118         return (rep->lock_policy_res1 & flag);
119 }
120
121 void mdt_clear_disposition(struct mdt_thread_info *info,
122                            struct ldlm_reply *rep, int flag)
123 {
124         if (info)
125                 info->mti_opdata &= ~flag;
126         if (rep)
127                 rep->lock_policy_res1 &= ~flag;
128 }
129
130 void mdt_set_disposition(struct mdt_thread_info *info,
131                          struct ldlm_reply *rep, int flag)
132 {
133         if (info)
134                 info->mti_opdata |= flag;
135         if (rep)
136                 rep->lock_policy_res1 |= flag;
137 }
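
/*
 * Illustrative sketch, compiled out: typical use of the disposition
 * helpers above, mirroring the pattern in mdt_getattr_name_lock()
 * further down. The example_* function is hypothetical.
 */
#if 0
static void example_record_lookup(struct mdt_thread_info *info,
                                  struct ldlm_reply *rep, int lookup_rc)
{
        /* record that the lookup was executed at all ... */
        mdt_set_disposition(info, rep, DISP_LOOKUP_EXECD);
        /* ... and whether it was positive or negative */
        if (lookup_rc == 0)
                mdt_set_disposition(info, rep, DISP_LOOKUP_POS);
        else if (lookup_rc == -ENOENT)
                mdt_set_disposition(info, rep, DISP_LOOKUP_NEG);

        if (mdt_get_disposition(rep, DISP_LOOKUP_NEG))
                CDEBUG(D_INODE, "lookup was negative\n");
}
#endif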
138
139 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
140 {
141         lh->mlh_pdo_hash = 0;
142         lh->mlh_reg_mode = lm;
143         lh->mlh_rreg_mode = lm;
144         lh->mlh_type = MDT_REG_LOCK;
145 }
146
147 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
148                        const char *name, int namelen)
149 {
150         lh->mlh_reg_mode = lm;
151         lh->mlh_rreg_mode = lm;
152         lh->mlh_type = MDT_PDO_LOCK;
153
154         if (name != NULL && (name[0] != '\0')) {
155                 LASSERT(namelen > 0);
156                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
157                 /* XXX Workaround for LU-2856
158                  * Zero is a valid return value of full_name_hash, but several
159                  * users of mlh_pdo_hash assume a non-zero hash value. We
160                  * therefore map zero onto an arbitrary, but consistent
161                  * value (1) to avoid problems further down the road. */
162                 if (unlikely(!lh->mlh_pdo_hash))
163                         lh->mlh_pdo_hash = 1;
164         } else {
165                 LASSERT(namelen == 0);
166                 lh->mlh_pdo_hash = 0ull;
167         }
168 }
169
170 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
171                               struct mdt_lock_handle *lh)
172 {
173         mdl_mode_t mode;
174         ENTRY;
175
176         /*
177          * Any dir access needs a couple of locks:
178          *
179          * 1) on the part of the dir we are going to look up or modify;
180          *
181          * 2) on the whole dir to protect it from concurrent splitting and/or
182          * to flush the client's cache for readdir().
183          *
184          * So, for a given mode and object this routine decides what lock mode
185          * to use for lock #2:
186          *
187          * 1) if the caller is going to look up in the dir, we only need to
188          * protect the dir from being split - LCK_CR
189          *
190          * 2) if the caller is going to modify the dir, we need to protect it
191          * from being split and to flush the cache - LCK_CW
192          *
193          * 3) if the caller is going to modify the dir and the dir seems ready
194          * for splitting, we need to protect it from any type of access
195          * (lookup/modify/split) - LCK_EX --bzzz
196          */
197
198         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
199         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
200
201         /*
202          * Ask the underlying layer for its opinion about the preferable PDO
203          * lock mode, with the access type passed as a regular lock mode:
204          *
205          * - MDL_MINMODE means that the lower layer does not want to specify
206          * a lock mode;
207          *
208          * - MDL_NL means that no PDO lock should be taken. This is used in
209          * some cases; say, for non-splittable directories there is no need
210          * to use PDO locks at all.
211          */
212         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
213                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
214
215         if (mode != MDL_MINMODE) {
216                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
217         } else {
218                 /*
219                  * The lower layer does not want to specify a locking mode. We
220                  * do it ourselves. No special protection is needed, just flush
221                  * the client's cache on modification and allow concurrent
222                  * modification.
223                  */
224                 switch (lh->mlh_reg_mode) {
225                 case LCK_EX:
226                         lh->mlh_pdo_mode = LCK_EX;
227                         break;
228                 case LCK_PR:
229                         lh->mlh_pdo_mode = LCK_CR;
230                         break;
231                 case LCK_PW:
232                         lh->mlh_pdo_mode = LCK_CW;
233                         break;
234                 default:
235                         CERROR("Unexpected lock type (0x%x)\n",
236                                (int)lh->mlh_reg_mode);
237                         LBUG();
238                 }
239         }
240
241         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
242         EXIT;
243 }
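
/*
 * Illustrative sketch, compiled out: how a PDO lock handle is typically
 * prepared and taken on a parent directory (compare step 1 of
 * mdt_getattr_name_lock() below); mdt_lock_pdo_mode() above then picks
 * the PDO mode for such handles according to the mapping described in
 * its comment. The example_* function is hypothetical.
 */
#if 0
static int example_lock_parent_pdo(struct mdt_thread_info *info,
                                   struct mdt_object *parent,
                                   const char *name, int namelen)
{
        struct mdt_lock_handle *lhp = &info->mti_lh[MDT_LH_PARENT];

        /* hash the name so that operations on different entries of the
         * same directory can run concurrently */
        mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
        return mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
                               MDT_LOCAL_LOCK);
}
#endif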
244
245 int mdt_getstatus(struct mdt_thread_info *info)
246 {
247         struct mdt_device       *mdt  = info->mti_mdt;
248         struct mdt_body         *repbody;
249         int                     rc;
250         ENTRY;
251
252         rc = mdt_check_ucred(info);
253         if (rc)
254                 RETURN(err_serious(rc));
255
256         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
257                 RETURN(err_serious(-ENOMEM));
258
259         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
260         repbody->fid1 = mdt->mdt_md_root_fid;
261         repbody->valid |= OBD_MD_FLID;
262
263         if (mdt->mdt_opts.mo_mds_capa &&
264             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
265                 struct mdt_object  *root;
266                 struct lustre_capa *capa;
267
268                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
269                 if (IS_ERR(root))
270                         RETURN(PTR_ERR(root));
271
272                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
273                 LASSERT(capa);
274                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
275                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
276                                  0);
277                 mdt_object_put(info->mti_env, root);
278                 if (rc == 0)
279                         repbody->valid |= OBD_MD_FLMDSCAPA;
280         }
281
282         RETURN(rc);
283 }
284
285 int mdt_statfs(struct mdt_thread_info *info)
286 {
287         struct ptlrpc_request           *req = mdt_info_req(info);
288         struct md_device                *next = info->mti_mdt->mdt_child;
289         struct ptlrpc_service_part      *svcpt;
290         struct obd_statfs               *osfs;
291         int                             rc;
292
293         ENTRY;
294
295         svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;
296
297         /* This will trigger a watchdog timeout */
298         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
299                          (MDT_SERVICE_WATCHDOG_FACTOR *
300                           at_get(&svcpt->scp_at_estimate)) + 1);
301
302         rc = mdt_check_ucred(info);
303         if (rc)
304                 RETURN(err_serious(rc));
305
306         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
307                 RETURN(err_serious(-ENOMEM));
308
309         osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
310         if (!osfs)
311                 RETURN(-EPROTO);
312
313         /** statfs information is cached in the mdt_device */
314         if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
315                                cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
316                 /** statfs data is too old, get an up-to-date copy */
317                 rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
318                 if (rc)
319                         RETURN(rc);
320                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
321                 info->mti_mdt->mdt_osfs = *osfs;
322                 info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
323                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
324         } else {
325                 /** use cached statfs data */
326                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
327                 *osfs = info->mti_mdt->mdt_osfs;
328                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
329         }
330
331         if (rc == 0)
332                 mdt_counter_incr(req, LPROC_MDT_STATFS);
333
334         RETURN(rc);
335 }
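
/*
 * Illustrative sketch, compiled out: the caching pattern used by
 * mdt_statfs() above. The potentially slow mdo_statfs() call runs
 * without holding the spinlock; only publishing the fresh result and
 * copying the cached one happen under mdt_osfs_lock. The example_*
 * function is hypothetical and simplified.
 */
#if 0
static int example_cached_statfs(struct mdt_thread_info *info,
                                 struct obd_statfs *osfs)
{
        struct mdt_device *mdt  = info->mti_mdt;
        struct md_device  *next = mdt->mdt_child;
        int                rc = 0;

        if (cfs_time_before_64(mdt->mdt_osfs_age,
                               cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
                /* cache is stale: refresh outside the lock, publish inside */
                rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
                if (rc == 0) {
                        spin_lock(&mdt->mdt_osfs_lock);
                        mdt->mdt_osfs = *osfs;
                        mdt->mdt_osfs_age = cfs_time_current_64();
                        spin_unlock(&mdt->mdt_osfs_lock);
                }
        } else {
                /* cache is fresh enough: serve the cached copy */
                spin_lock(&mdt->mdt_osfs_lock);
                *osfs = mdt->mdt_osfs;
                spin_unlock(&mdt->mdt_osfs_lock);
        }
        return rc;
}
#endif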
336
337 /**
338  * Pack SOM attributes into the reply.
339  * Call under a DLM UPDATE lock.
340  */
341 static void mdt_pack_size2body(struct mdt_thread_info *info,
342                                struct mdt_object *mo)
343 {
344         struct mdt_body *b;
345         struct md_attr *ma = &info->mti_attr;
346
347         LASSERT(ma->ma_attr.la_valid & LA_MODE);
348         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
349
350         /* Check if Size-on-MDS is supported, if this is a regular file,
351          * if SOM is enabled on the object and if the SOM cache exists and is
352          * valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
353         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
354             !S_ISREG(ma->ma_attr.la_mode) ||
355             !mdt_object_is_som_enabled(mo) ||
356             !(ma->ma_valid & MA_SOM))
357                 return;
358
359         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
360         b->size = ma->ma_som->msd_size;
361         b->blocks = ma->ma_som->msd_blocks;
362 }
363
364 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
365                         const struct lu_attr *attr, const struct lu_fid *fid)
366 {
367         struct md_attr *ma = &info->mti_attr;
368
369         LASSERT(ma->ma_valid & MA_INODE);
370
371         b->atime      = attr->la_atime;
372         b->mtime      = attr->la_mtime;
373         b->ctime      = attr->la_ctime;
374         b->mode       = attr->la_mode;
375         b->size       = attr->la_size;
376         b->blocks     = attr->la_blocks;
377         b->uid        = attr->la_uid;
378         b->gid        = attr->la_gid;
379         b->flags      = attr->la_flags;
380         b->nlink      = attr->la_nlink;
381         b->rdev       = attr->la_rdev;
382
383         /* XXX: should pack the reply body according to lu_valid */
384         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
385                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
386                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
387                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
388
389         if (!S_ISREG(attr->la_mode)) {
390                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
391         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
392                 /* this means no objects are allocated on OSTs */
393                 LASSERT(!(ma->ma_valid & MA_LOV));
394                 /* just ignore blocks occupied by extended attributes on the MDS */
395                 b->blocks = 0;
396                 /* if no objects are allocated on OSTs, the size on the MDS is valid. b=22272 */
397                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
398         }
399
400         if (fid) {
401                 b->fid1 = *fid;
402                 b->valid |= OBD_MD_FLID;
403                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
404                                 PFID(fid), b->nlink, b->mode, b->size);
405         }
406
407         if (info)
408                 mdt_body_reverse_idmap(info, b);
409
410         if (b->valid & OBD_MD_FLSIZE)
411                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
412                        PFID(fid), (unsigned long long)b->size);
413 }
414
415 static inline int mdt_body_has_lov(const struct lu_attr *la,
416                                    const struct mdt_body *body)
417 {
418         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
419                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
420 }
421
422 void mdt_client_compatibility(struct mdt_thread_info *info)
423 {
424         struct mdt_body       *body;
425         struct ptlrpc_request *req = mdt_info_req(info);
426         struct obd_export     *exp = req->rq_export;
427         struct md_attr        *ma = &info->mti_attr;
428         struct lu_attr        *la = &ma->ma_attr;
429         ENTRY;
430
431         if (exp_connect_layout(exp))
432                 /* the client can deal with 16-bit lmm_stripe_count */
433                 RETURN_EXIT;
434
435         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
436
437         if (!mdt_body_has_lov(la, body))
438                 RETURN_EXIT;
439
440         /* now we have a reply with a LOV for a client that is not compatible
441          * with the layout lock, so we have to clear the layout generation number */
442         if (S_ISREG(la->la_mode))
443                 ma->ma_lmm->lmm_layout_gen = 0;
444         EXIT;
445 }
446
447 static int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
448                              char *name)
449 {
450         const struct lu_env *env = info->mti_env;
451         int rc;
452         ENTRY;
453
454         LASSERT(info->mti_big_lmm_used == 0);
455         rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
456         if (rc < 0)
457                 RETURN(rc);
458
459         /* big_lmm may need to be grown */
460         if (info->mti_big_lmmsize < rc) {
461                 int size = size_roundup_power2(rc);
462
463                 if (info->mti_big_lmmsize > 0) {
464                         /* free old buffer */
465                         LASSERT(info->mti_big_lmm);
466                         OBD_FREE_LARGE(info->mti_big_lmm,
467                                        info->mti_big_lmmsize);
468                         info->mti_big_lmm = NULL;
469                         info->mti_big_lmmsize = 0;
470                 }
471
472                 OBD_ALLOC_LARGE(info->mti_big_lmm, size);
473                 if (info->mti_big_lmm == NULL)
474                         RETURN(-ENOMEM);
475                 info->mti_big_lmmsize = size;
476         }
477         LASSERT(info->mti_big_lmmsize >= rc);
478
479         info->mti_buf.lb_buf = info->mti_big_lmm;
480         info->mti_buf.lb_len = info->mti_big_lmmsize;
481         rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);
482
483         RETURN(rc);
484 }
485
486 int mdt_attr_get_lov(struct mdt_thread_info *info,
487                      struct mdt_object *o, struct md_attr *ma)
488 {
489         struct md_object *next = mdt_object_child(o);
490         struct lu_buf    *buf = &info->mti_buf;
491         int rc;
492
493         buf->lb_buf = ma->ma_lmm;
494         buf->lb_len = ma->ma_lmm_size;
495         rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
496         if (rc > 0) {
497                 ma->ma_lmm_size = rc;
498                 ma->ma_valid |= MA_LOV;
499                 rc = 0;
500         } else if (rc == -ENODATA) {
501                 /* no LOV EA */
502                 rc = 0;
503         } else if (rc == -ERANGE) {
504                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LOV);
505                 if (rc > 0) {
506                         info->mti_big_lmm_used = 1;
507                         ma->ma_valid |= MA_LOV;
508                         ma->ma_lmm = info->mti_big_lmm;
509                         ma->ma_lmm_size = rc;
510                         /* update mdt_max_mdsize so all clients
511                          * will be aware of it */
512                         if (info->mti_mdt->mdt_max_mdsize < rc)
513                                 info->mti_mdt->mdt_max_mdsize = rc;
514                         rc = 0;
515                 }
516         }
517
518         return rc;
519 }
520
521 int mdt_attr_get_pfid(struct mdt_thread_info *info,
522                       struct mdt_object *o, struct lu_fid *pfid)
523 {
524         struct lu_buf           *buf = &info->mti_buf;
525         struct link_ea_header   *leh;
526         struct link_ea_entry    *lee;
527         int                      rc;
528         ENTRY;
529
530         buf->lb_buf = info->mti_big_lmm;
531         buf->lb_len = info->mti_big_lmmsize;
532         rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
533                           buf, XATTR_NAME_LINK);
534         /* ignore errors, MA_PFID won't be set and it is
535          * up to the caller to treat this as an error */
536         if (rc == -ERANGE || buf->lb_len == 0) {
537                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
538                 buf->lb_buf = info->mti_big_lmm;
539                 buf->lb_len = info->mti_big_lmmsize;
540         }
541
542         if (rc < 0)
543                 RETURN(rc);
544         if (rc < sizeof(*leh)) {
545                 CERROR("short LinkEA on "DFID": rc = %d\n",
546                        PFID(mdt_object_fid(o)), rc);
547                 RETURN(-ENODATA);
548         }
549
550         leh = (struct link_ea_header *) buf->lb_buf;
551         lee = (struct link_ea_entry *)(leh + 1);
552         if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
553                 leh->leh_magic = LINK_EA_MAGIC;
554                 leh->leh_reccount = __swab32(leh->leh_reccount);
555                 leh->leh_len = __swab64(leh->leh_len);
556         }
557         if (leh->leh_magic != LINK_EA_MAGIC)
558                 RETURN(-EINVAL);
559         if (leh->leh_reccount == 0)
560                 RETURN(-ENODATA);
561
562         memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
563         fid_be_to_cpu(pfid, pfid);
564
565         RETURN(0);
566 }
567
568 int mdt_attr_get_complex(struct mdt_thread_info *info,
569                          struct mdt_object *o, struct md_attr *ma)
570 {
571         const struct lu_env *env = info->mti_env;
572         struct md_object    *next = mdt_object_child(o);
573         struct lu_buf       *buf = &info->mti_buf;
574         u32                  mode = lu_object_attr(&next->mo_lu);
575         int                  need = ma->ma_need;
576         int                  rc = 0, rc2;
577         ENTRY;
578
579         ma->ma_valid = 0;
580
581         if (need & MA_INODE) {
582                 ma->ma_need = MA_INODE;
583                 rc = mo_attr_get(env, next, ma);
584                 if (rc)
585                         GOTO(out, rc);
586                 ma->ma_valid |= MA_INODE;
587         }
588
589         if (need & MA_PFID) {
590                 rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
591                 if (rc == 0)
592                         ma->ma_valid |= MA_PFID;
593                 /* ignore this error, parent fid is not mandatory */
594                 rc = 0;
595         }
596
597         if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
598                 rc = mdt_attr_get_lov(info, o, ma);
599                 if (rc)
600                         GOTO(out, rc);
601         }
602
603         if (need & MA_LMV && S_ISDIR(mode)) {
604                 buf->lb_buf = ma->ma_lmv;
605                 buf->lb_len = ma->ma_lmv_size;
606                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
607                 if (rc2 > 0) {
608                         ma->ma_lmv_size = rc2;
609                         ma->ma_valid |= MA_LMV;
610                 } else if (rc2 == -ENODATA) {
611                         /* no LMV EA */
612                         ma->ma_lmv_size = 0;
613                 } else
614                         GOTO(out, rc = rc2);
615         }
616
617         if (need & MA_SOM && S_ISREG(mode)) {
618                 buf->lb_buf = info->mti_xattr_buf;
619                 buf->lb_len = sizeof(info->mti_xattr_buf);
620                 CLASSERT(sizeof(struct som_attrs) <=
621                          sizeof(info->mti_xattr_buf));
622                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_SOM);
623                 rc2 = lustre_buf2som(info->mti_xattr_buf, rc2, ma->ma_som);
624                 if (rc2 == 0)
625                         ma->ma_valid |= MA_SOM;
626                 else if (rc2 < 0 && rc2 != -ENODATA)
627                         GOTO(out, rc = rc2);
628         }
629
630         if (need & MA_HSM && S_ISREG(mode)) {
631                 buf->lb_buf = info->mti_xattr_buf;
632                 buf->lb_len = sizeof(info->mti_xattr_buf);
633                 CLASSERT(sizeof(struct hsm_attrs) <=
634                          sizeof(info->mti_xattr_buf));
635                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM);
636                 rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm);
637                 if (rc2 == 0)
638                         ma->ma_valid |= MA_HSM;
639                 else if (rc2 < 0 && rc2 != -ENODATA)
640                         GOTO(out, rc = rc2);
641         }
642
643 #ifdef CONFIG_FS_POSIX_ACL
644         if (need & MA_ACL_DEF && S_ISDIR(mode)) {
645                 buf->lb_buf = ma->ma_acl;
646                 buf->lb_len = ma->ma_acl_size;
647                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
648                 if (rc2 > 0) {
649                         ma->ma_acl_size = rc2;
650                         ma->ma_valid |= MA_ACL_DEF;
651                 } else if (rc2 == -ENODATA) {
652                         /* no ACLs */
653                         ma->ma_acl_size = 0;
654                 } else
655                         GOTO(out, rc = rc2);
656         }
657 #endif
658 out:
659         ma->ma_need = need;
660         CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
661                rc, ma->ma_valid, ma->ma_lmm);
662         RETURN(rc);
663 }
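
/*
 * Illustrative sketch, compiled out: how callers in this file drive
 * mdt_attr_get_complex() - request attribute groups through the MA_*
 * bits in ma_need, then test ma_valid to see which groups were actually
 * filled in (compare the MA_INODE-only call in mdt_getattr_name_lock()
 * below). MA_LOV/MA_LMV additionally require the ma_lmm/ma_lmv buffers
 * to be set up first, as mdt_getattr_internal() does. The example_*
 * function is hypothetical.
 */
#if 0
static int example_get_inode_attrs(struct mdt_thread_info *info,
                                   struct mdt_object *o)
{
        struct md_attr *ma = &info->mti_attr;
        int             rc;

        ma->ma_valid = 0;
        ma->ma_need  = MA_INODE;
        rc = mdt_attr_get_complex(info, o, ma);
        if (rc == 0 && !(ma->ma_valid & MA_INODE))
                rc = -EFAULT;
        return rc;
}
#endif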
664
665 static int mdt_getattr_internal(struct mdt_thread_info *info,
666                                 struct mdt_object *o, int ma_need)
667 {
668         struct md_object        *next = mdt_object_child(o);
669         const struct mdt_body   *reqbody = info->mti_body;
670         struct ptlrpc_request   *req = mdt_info_req(info);
671         struct md_attr          *ma = &info->mti_attr;
672         struct lu_attr          *la = &ma->ma_attr;
673         struct req_capsule      *pill = info->mti_pill;
674         const struct lu_env     *env = info->mti_env;
675         struct mdt_body         *repbody;
676         struct lu_buf           *buffer = &info->mti_buf;
677         int                     rc;
678         int                     is_root;
679         ENTRY;
680
681         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
682                 RETURN(err_serious(-ENOMEM));
683
684         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
685
686         ma->ma_valid = 0;
687
688         if (mdt_object_remote(o)) {
689                 /* This object is located on a remote node. */
690                 /* Return -EIO for old clients. */
691                 if (!mdt_is_dne_client(req->rq_export))
692                         GOTO(out, rc = -EIO);
693
694                 repbody->fid1 = *mdt_object_fid(o);
695                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
696                 GOTO(out, rc = 0);
697         }
698
699         buffer->lb_len = reqbody->eadatasize;
700         if (buffer->lb_len > 0)
701                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
702         else
703                 buffer->lb_buf = NULL;
704
705         /* If it is a dir object and the client requests the MEA, then get the MEA */
706         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
707             reqbody->valid & OBD_MD_MEA) {
708                 /* Assumption: MDT_MD size is enough for lmv size. */
709                 ma->ma_lmv = buffer->lb_buf;
710                 ma->ma_lmv_size = buffer->lb_len;
711                 ma->ma_need = MA_LMV | MA_INODE;
712         } else {
713                 ma->ma_lmm = buffer->lb_buf;
714                 ma->ma_lmm_size = buffer->lb_len;
715                 ma->ma_need = MA_LOV | MA_INODE | MA_HSM;
716         }
717
718         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
719             reqbody->valid & OBD_MD_FLDIREA  &&
720             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
721                 /* get default stripe info for this dir. */
722                 ma->ma_need |= MA_LOV_DEF;
723         }
724         ma->ma_need |= ma_need;
725         if (ma->ma_need & MA_SOM)
726                 ma->ma_som = &info->mti_u.som.data;
727
728         rc = mdt_attr_get_complex(info, o, ma);
729         if (unlikely(rc)) {
730                 CERROR("%s: getattr error for "DFID": rc = %d\n",
731                        mdt_obd_name(info->mti_mdt),
732                        PFID(mdt_object_fid(o)), rc);
733                 RETURN(rc);
734         }
735
736         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
737
738         /* the Lustre protocol expects default striping to be returned
739          * for the user-visible root if explicitly requested */
740         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
741             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
742                 struct lu_fid      rootfid;
743                 struct mdt_object *root;
744                 struct mdt_device *mdt = info->mti_mdt;
745
746                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
747                 if (rc)
748                         RETURN(rc);
749                 root = mdt_object_find(env, mdt, &rootfid);
750                 if (IS_ERR(root))
751                         RETURN(PTR_ERR(root));
752                 rc = mdt_attr_get_lov(info, root, ma);
753                 mdt_object_put(info->mti_env, root);
754                 if (unlikely(rc)) {
755                         CERROR("%s: getattr error for "DFID": rc = %d\n",
756                                mdt_obd_name(info->mti_mdt),
757                                PFID(mdt_object_fid(o)), rc);
758                         RETURN(rc);
759                 }
760         }
761
762         if (likely(ma->ma_valid & MA_INODE))
763                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
764         else
765                 RETURN(-EFAULT);
766
767         if (mdt_body_has_lov(la, reqbody)) {
768                 if (ma->ma_valid & MA_LOV) {
769                         LASSERT(ma->ma_lmm_size);
770                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
771                         repbody->eadatasize = ma->ma_lmm_size;
772                         if (S_ISDIR(la->la_mode))
773                                 repbody->valid |= OBD_MD_FLDIREA;
774                         else
775                                 repbody->valid |= OBD_MD_FLEASIZE;
776                 }
777                 if (ma->ma_valid & MA_LMV) {
778                         LASSERT(S_ISDIR(la->la_mode));
779                         repbody->eadatasize = ma->ma_lmv_size;
780                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
781                 }
782         } else if (S_ISLNK(la->la_mode) &&
783                    reqbody->valid & OBD_MD_LINKNAME) {
784                 buffer->lb_buf = ma->ma_lmm;
785                 /* eadatasize from client includes NULL-terminator, so
786                  * there is no need to read it */
787                 buffer->lb_len = reqbody->eadatasize - 1;
788                 rc = mo_readlink(env, next, buffer);
789                 if (unlikely(rc <= 0)) {
790                         CERROR("%s: readlink failed for "DFID": rc = %d\n",
791                                mdt_obd_name(info->mti_mdt),
792                                PFID(mdt_object_fid(o)), rc);
793                         rc = -EFAULT;
794                 } else {
795                         int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
796
797                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
798                                 rc -= 2;
799                         repbody->valid |= OBD_MD_LINKNAME;
800                         /* we need to report back size with NULL-terminator
801                          * because client expects that */
802                         repbody->eadatasize = rc + 1;
803                         if (repbody->eadatasize != reqbody->eadatasize)
804                                 CDEBUG(D_INODE, "%s: Read shorter symlink %d "
805                                        "on "DFID ", expected %d\n",
806                                        mdt_obd_name(info->mti_mdt),
807                                        rc, PFID(mdt_object_fid(o)),
808                                        reqbody->eadatasize - 1);
809                         /* NULL terminate */
810                         ((char *)ma->ma_lmm)[rc] = 0;
811
812                         /* If the total CDEBUG() size is larger than a page, it
813                          * will print a warning to the console, avoid this by
814                          * printing just the last part of the symlink. */
815                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
816                                print_limit < rc ? "..." : "", print_limit,
817                                (char *)ma->ma_lmm + rc - print_limit, rc);
818                         rc = 0;
819                 }
820         }
821
822         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
823                 repbody->max_cookiesize = 0;
824                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
825                 repbody->valid |= OBD_MD_FLMODEASIZE;
826                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
827                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
828                        repbody->max_cookiesize);
829         }
830
831         if (exp_connect_rmtclient(info->mti_exp) &&
832             reqbody->valid & OBD_MD_FLRMTPERM) {
833                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
834
835                 /* mdt_getattr_lock only */
836                 rc = mdt_pack_remote_perm(info, o, buf);
837                 if (rc) {
838                         repbody->valid &= ~OBD_MD_FLRMTPERM;
839                         repbody->aclsize = 0;
840                         RETURN(rc);
841                 } else {
842                         repbody->valid |= OBD_MD_FLRMTPERM;
843                         repbody->aclsize = sizeof(struct mdt_remote_perm);
844                 }
845         }
846 #ifdef CONFIG_FS_POSIX_ACL
847         else if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
848                  (reqbody->valid & OBD_MD_FLACL)) {
849                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
850                 buffer->lb_len = req_capsule_get_size(pill,
851                                                       &RMF_ACL, RCL_SERVER);
852                 if (buffer->lb_len > 0) {
853                         rc = mo_xattr_get(env, next, buffer,
854                                           XATTR_NAME_ACL_ACCESS);
855                         if (rc < 0) {
856                                 if (rc == -ENODATA) {
857                                         repbody->aclsize = 0;
858                                         repbody->valid |= OBD_MD_FLACL;
859                                         rc = 0;
860                                 } else if (rc == -EOPNOTSUPP) {
861                                         rc = 0;
862                                 } else {
863                                         CERROR("%s: unable to read "DFID
864                                                " ACL: rc = %d\n",
865                                                mdt_obd_name(info->mti_mdt),
866                                                PFID(mdt_object_fid(o)), rc);
867                                 }
868                         } else {
869                                 repbody->aclsize = rc;
870                                 repbody->valid |= OBD_MD_FLACL;
871                                 rc = 0;
872                         }
873                 }
874         }
875 #endif
876
877         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
878             info->mti_mdt->mdt_opts.mo_mds_capa &&
879             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
880                 struct lustre_capa *capa;
881
882                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
883                 LASSERT(capa);
884                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
885                 rc = mo_capa_get(env, next, capa, 0);
886                 if (rc)
887                         RETURN(rc);
888                 repbody->valid |= OBD_MD_FLMDSCAPA;
889         }
890
891 out:
892         if (rc == 0)
893                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
894
895         RETURN(rc);
896 }
897
898 static int mdt_renew_capa(struct mdt_thread_info *info)
899 {
900         struct mdt_object  *obj = info->mti_object;
901         struct mdt_body    *body;
902         struct lustre_capa *capa, *c;
903         int rc;
904         ENTRY;
905
906         /* If the object doesn't exist, or the server has disabled capabilities,
907          * return directly; the client will find the OBD_MD_FLOSSCAPA flag
908          * not set in body->valid.
909          */
910         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
911             !(exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA))
912                 RETURN(0);
913
914         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
915         LASSERT(body != NULL);
916
917         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
918         LASSERT(c);
919
920         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
921         LASSERT(capa);
922
923         *capa = *c;
924         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
925         if (rc == 0)
926                 body->valid |= OBD_MD_FLOSSCAPA;
927         RETURN(rc);
928 }
929
930 int mdt_getattr(struct mdt_thread_info *info)
931 {
932         struct mdt_object       *obj = info->mti_object;
933         struct req_capsule      *pill = info->mti_pill;
934         struct mdt_body         *reqbody;
935         struct mdt_body         *repbody;
936         mode_t                   mode;
937         int rc, rc2;
938         ENTRY;
939
940         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
941         LASSERT(reqbody);
942
943         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
944                 rc = req_capsule_server_pack(pill);
945                 if (unlikely(rc))
946                         RETURN(err_serious(rc));
947                 rc = mdt_renew_capa(info);
948                 GOTO(out_shrink, rc);
949         }
950
951         LASSERT(obj != NULL);
952         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
953
954         mode = lu_object_attr(&obj->mot_obj.mo_lu);
955
956         /* old clients may not report the needed easize, so use the max value */
957         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
958                              reqbody->eadatasize == 0 ?
959                              info->mti_mdt->mdt_max_mdsize :
960                              reqbody->eadatasize);
961
962         rc = req_capsule_server_pack(pill);
963         if (unlikely(rc != 0))
964                 RETURN(err_serious(rc));
965
966         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
967         LASSERT(repbody != NULL);
968         repbody->eadatasize = 0;
969         repbody->aclsize = 0;
970
971         if (reqbody->valid & OBD_MD_FLRMTPERM)
972                 rc = mdt_init_ucred(info, reqbody);
973         else
974                 rc = mdt_check_ucred(info);
975         if (unlikely(rc))
976                 GOTO(out_shrink, rc);
977
978         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
979
980         /*
981          * Don't check the capability at all, because rename might do a getattr
982          * on a remote object, and at that time no capability is available.
983          */
984         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
985         rc = mdt_getattr_internal(info, obj, 0);
986         if (reqbody->valid & OBD_MD_FLRMTPERM)
987                 mdt_exit_ucred(info);
988         EXIT;
989 out_shrink:
990         mdt_client_compatibility(info);
991         rc2 = mdt_fix_reply(info);
992         if (rc == 0)
993                 rc = rc2;
994         return rc;
995 }
996
997 int mdt_is_subdir(struct mdt_thread_info *info)
998 {
999         struct mdt_object     *o = info->mti_object;
1000         struct req_capsule    *pill = info->mti_pill;
1001         const struct mdt_body *body = info->mti_body;
1002         struct mdt_body       *repbody;
1003         int                    rc;
1004         ENTRY;
1005
1006         LASSERT(o != NULL);
1007
1008         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1009
1010         /*
1011          * We save the last checked parent fid in @repbody->fid1 for the
1012          * remote directory case.
1013          */
1014         LASSERT(fid_is_sane(&body->fid2));
1015         LASSERT(mdt_object_exists(o) && !mdt_object_remote(o));
1016         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1017                            &body->fid2, &repbody->fid1);
1018         if (rc == 0 || rc == -EREMOTE)
1019                 repbody->valid |= OBD_MD_FLID;
1020
1021         RETURN(rc);
1022 }
1023
1024 int mdt_swap_layouts(struct mdt_thread_info *info)
1025 {
1026         struct ptlrpc_request   *req = mdt_info_req(info);
1027         struct obd_export       *exp = req->rq_export;
1028         struct mdt_object       *o1, *o2, *o;
1029         struct mdt_lock_handle  *lh1, *lh2;
1030         struct mdc_swap_layouts *msl;
1031         int                      rc;
1032         ENTRY;
1033
1034         /* The client does not support the layout lock, so layout swapping
1035          * is disabled.
1036          * FIXME: there is a problem for old clients which don't support the
1037          * layout lock yet. If those clients have already opened the file,
1038          * they won't be notified at all, so the old layout may still be
1039          * used to do IO. This can be fixed after file release lands, by
1040          * doing an exclusive open and taking a full EX ibits lock. - Jinshan */
1041         if (!exp_connect_layout(exp))
1042                 RETURN(-EOPNOTSUPP);
1043
1044         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA1, RCL_CLIENT))
1045                 mdt_set_capainfo(info, 0, &info->mti_body->fid1,
1046                                  req_capsule_client_get(info->mti_pill,
1047                                                         &RMF_CAPA1));
1048
1049         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA2, RCL_CLIENT))
1050                 mdt_set_capainfo(info, 1, &info->mti_body->fid2,
1051                                  req_capsule_client_get(info->mti_pill,
1052                                                         &RMF_CAPA2));
1053
1054         o1 = info->mti_object;
1055         o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
1056                                 &info->mti_body->fid2);
1057         if (IS_ERR(o))
1058                 GOTO(out, rc = PTR_ERR(o));
1059
1060         if (mdt_object_remote(o) || !mdt_object_exists(o)) /* remote object */
1061                 GOTO(put, rc = -ENOENT);
1062
1063         rc = lu_fid_cmp(&info->mti_body->fid1, &info->mti_body->fid2);
1064         if (unlikely(rc == 0)) /* same file, are you kidding me? no-op. */
1065                 GOTO(put, rc);
1066
1067         if (rc < 0)
1068                 swap(o1, o2);
1069
1070         /* permission check: make sure the calling process has permission
1071          * to write both files. */
1072         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
1073                                 MAY_WRITE);
1074         if (rc < 0)
1075                 GOTO(put, rc);
1076
1077         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
1078                                 MAY_WRITE);
1079         if (rc < 0)
1080                 GOTO(put, rc);
1081
1082         msl = req_capsule_client_get(info->mti_pill, &RMF_SWAP_LAYOUTS);
1083         if (msl == NULL)
1084                 GOTO(put, rc = -EPROTO);
1085
1086         lh1 = &info->mti_lh[MDT_LH_NEW];
1087         mdt_lock_reg_init(lh1, LCK_EX);
1088         lh2 = &info->mti_lh[MDT_LH_OLD];
1089         mdt_lock_reg_init(lh2, LCK_EX);
1090
1091         rc = mdt_object_lock(info, o1, lh1, MDS_INODELOCK_LAYOUT,
1092                              MDT_LOCAL_LOCK);
1093         if (rc < 0)
1094                 GOTO(put, rc);
1095
1096         rc = mdt_object_lock(info, o2, lh2, MDS_INODELOCK_LAYOUT,
1097                              MDT_LOCAL_LOCK);
1098         if (rc < 0)
1099                 GOTO(unlock1, rc);
1100
1101         rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
1102                              mdt_object_child(o2), msl->msl_flags);
1103         GOTO(unlock2, rc);
1104 unlock2:
1105         mdt_object_unlock(info, o2, lh2, rc);
1106 unlock1:
1107         mdt_object_unlock(info, o1, lh1, rc);
1108 put:
1109         mdt_object_put(info->mti_env, o);
1110 out:
1111         RETURN(rc);
1112 }
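
/*
 * Illustrative sketch, compiled out: the lock-ordering idiom used by
 * mdt_swap_layouts() above. The two objects are always locked in the
 * same relative order (decided by lu_fid_cmp()), so two concurrent
 * swaps on the same pair of files cannot take the two EX locks in
 * opposite orders and deadlock. The example_* function is hypothetical.
 */
#if 0
static void example_order_objects(struct mdt_object **o1,
                                  struct mdt_object **o2,
                                  const struct lu_fid *fid1,
                                  const struct lu_fid *fid2)
{
        /* same convention as mdt_swap_layouts(): swap when fid1 sorts
         * before fid2, then lock *o1 before *o2 */
        if (lu_fid_cmp(fid1, fid2) < 0)
                swap(*o1, *o2);
}
#endif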
1113
1114 static int mdt_raw_lookup(struct mdt_thread_info *info,
1115                           struct mdt_object *parent,
1116                           const struct lu_name *lname,
1117                           struct ldlm_reply *ldlm_rep)
1118 {
1119         struct md_object *next = mdt_object_child(info->mti_object);
1120         const struct mdt_body *reqbody = info->mti_body;
1121         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1122         struct mdt_body *repbody;
1123         int rc;
1124         ENTRY;
1125
1126         if (reqbody->valid != OBD_MD_FLID)
1127                 RETURN(0);
1128
1129         LASSERT(!info->mti_cross_ref);
1130
1131         /* Only get the fid of this object by name */
1132         fid_zero(child_fid);
1133         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1134                         &info->mti_spec);
1135 #if 0
1136         /* XXX is raw_lookup possible as intent operation? */
1137         if (rc != 0) {
1138                 if (rc == -ENOENT)
1139                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1140                 RETURN(rc);
1141         } else
1142                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1143
1144         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1145 #endif
1146         if (rc == 0) {
1147                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1148                 repbody->fid1 = *child_fid;
1149                 repbody->valid = OBD_MD_FLID;
1150         }
1151         RETURN(1);
1152 }
1153
1154 /*
1155  * The UPDATE lock should be taken against the parent and released before
1156  * exit; the child_bits lock should be taken against the child and returned:
1157  *            (1) a normal request should release the child lock;
1158  *            (2) an intent request will grant the lock to the client.
1159  */
1160 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1161                                  struct mdt_lock_handle *lhc,
1162                                  __u64 child_bits,
1163                                  struct ldlm_reply *ldlm_rep)
1164 {
1165         struct ptlrpc_request  *req       = mdt_info_req(info);
1166         struct mdt_body        *reqbody   = NULL;
1167         struct mdt_object      *parent    = info->mti_object;
1168         struct mdt_object      *child;
1169         struct md_object       *next      = mdt_object_child(parent);
1170         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1171         struct lu_name         *lname     = NULL;
1172         const char             *name      = NULL;
1173         int                     namelen   = 0;
1174         struct mdt_lock_handle *lhp       = NULL;
1175         struct ldlm_lock       *lock;
1176         struct ldlm_res_id     *res_id;
1177         int                     is_resent;
1178         int                     ma_need = 0;
1179         int                     rc;
1180
1181         ENTRY;
1182
1183         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1184         LASSERT(ergo(is_resent,
1185                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1186
1187         LASSERT(parent != NULL);
1188         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1189         if (name == NULL)
1190                 RETURN(err_serious(-EFAULT));
1191
1192         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1193                                        RCL_CLIENT) - 1;
1194         if (!info->mti_cross_ref) {
1195                 /*
1196                  * XXX: The check for "namelen == 0" is for getattr by fid
1197                  * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty name,
1198                  * that is, the name must contain at least one character and
1199                  * the terminating '\0'.
1200                  */
1201                 if (namelen == 0) {
1202                         reqbody = req_capsule_client_get(info->mti_pill,
1203                                                          &RMF_MDT_BODY);
1204                         if (unlikely(reqbody == NULL))
1205                                 RETURN(err_serious(-EFAULT));
1206
1207                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1208                                 RETURN(err_serious(-EINVAL));
1209
1210                         name = NULL;
1211                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1212                                "ldlm_rep = %p\n",
1213                                PFID(mdt_object_fid(parent)),
1214                                PFID(&reqbody->fid2), ldlm_rep);
1215                 } else {
1216                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1217                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1218                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1219                                name, ldlm_rep);
1220                 }
1221         }
1222         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1223
1224         if (unlikely(!mdt_object_exists(parent))) {
1225                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1226                                 &parent->mot_obj.mo_lu,
1227                                 "Parent doesn't exist!\n");
1228                 RETURN(-ESTALE);
1229         } else if (!info->mti_cross_ref) {
1230                 LASSERTF(!mdt_object_remote(parent),
1231                          "Parent "DFID" is on remote server\n",
1232                          PFID(mdt_object_fid(parent)));
1233         }
1234         if (lname) {
1235                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1236                 if (rc != 0) {
1237                         if (rc > 0)
1238                                 rc = 0;
1239                         RETURN(rc);
1240                 }
1241         }
1242
1243         if (info->mti_cross_ref) {
1244                 /* Only getattr on the child. Parent is on another node. */
1245                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1246                 child = parent;
1247                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1248                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1249
1250                 if (is_resent) {
1251                         /* Do not take lock for resent case. */
1252                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1253                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1254                                  lhc->mlh_reg_lh.cookie);
1255                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1256                                                 &lock->l_resource->lr_name));
1257                         LDLM_LOCK_PUT(lock);
1258                         rc = 0;
1259                 } else {
1260                         mdt_lock_handle_init(lhc);
1261                         mdt_lock_reg_init(lhc, LCK_PR);
1262
1263                         /*
1264                          * The object's name is on another MDS; no LOOKUP lock
1265                          * is needed here, but an UPDATE lock is.
1266                          */
1267                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1268                         child_bits |= MDS_INODELOCK_PERM | MDS_INODELOCK_UPDATE;
1269
1270                         rc = mdt_object_lock(info, child, lhc, child_bits,
1271                                              MDT_LOCAL_LOCK);
1272                 }
1273                 if (rc == 0) {
1274                         /* Finally, we can get attr for child. */
1275                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1276                                          BYPASS_CAPA);
1277                         rc = mdt_getattr_internal(info, child, 0);
1278                         if (unlikely(rc != 0))
1279                                 mdt_object_unlock(info, child, lhc, 1);
1280                 }
1281                 RETURN(rc);
1282         }
1283
1284         if (lname) {
1285                 /* step 1: lock parent only if parent is a directory */
1286                 if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
1287                         lhp = &info->mti_lh[MDT_LH_PARENT];
1288                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1289                         rc = mdt_object_lock(info, parent, lhp,
1290                                              MDS_INODELOCK_UPDATE,
1291                                              MDT_LOCAL_LOCK);
1292                         if (unlikely(rc != 0))
1293                                 RETURN(rc);
1294                 }
1295
1296                 /* step 2: lookup child's fid by name */
1297                 fid_zero(child_fid);
1298                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1299                                 &info->mti_spec);
1300
1301                 if (rc != 0) {
1302                         if (rc == -ENOENT)
1303                                 mdt_set_disposition(info, ldlm_rep,
1304                                                     DISP_LOOKUP_NEG);
1305                         GOTO(out_parent, rc);
1306                 } else
1307                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1308         } else {
1309                 *child_fid = reqbody->fid2;
1310                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1311         }
1312
1313         /*
1314          * step 3: find the child object by fid and lock it,
1315          *         regardless of whether it is local or remote.
1316          */
1317         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1318
1319         if (unlikely(IS_ERR(child)))
1320                 GOTO(out_parent, rc = PTR_ERR(child));
1321         if (is_resent) {
1322                 /* Do not take lock for resent case. */
1323                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1324                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1325                          lhc->mlh_reg_lh.cookie);
1326
1327                 res_id = &lock->l_resource->lr_name;
1328                 if (!fid_res_name_eq(mdt_object_fid(child),
1329                                     &lock->l_resource->lr_name)) {
1330                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1331                                                  &lock->l_resource->lr_name),
1332                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1333                                  (unsigned long)res_id->name[0],
1334                                  (unsigned long)res_id->name[1],
1335                                  (unsigned long)res_id->name[2],
1336                                  PFID(mdt_object_fid(parent)));
1337                           CWARN("Although resent, the child lock was still "
1338                                 "not granted, parent: "DFID", child: "DFID"\n",
1339                                 PFID(mdt_object_fid(parent)),
1340                                 PFID(mdt_object_fid(child)));
1341                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1342                           LDLM_LOCK_PUT(lock);
1343                           GOTO(relock, 0);
1344                 }
1345                 LDLM_LOCK_PUT(lock);
1346                 rc = 0;
1347         } else {
1348                 bool try_layout = false;
1349
1350 relock:
1351                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1352                 mdt_lock_handle_init(lhc);
1353                 mdt_lock_reg_init(lhc, LCK_PR);
1354
1355                 if (!mdt_object_exists(child)) {
1356                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1357                                         &child->mot_obj.mo_lu,
1358                                         "Object doesn't exist!\n");
1359                         GOTO(out_child, rc = -ENOENT);
1360                 }
1361
1362                 if (!(child_bits & MDS_INODELOCK_UPDATE) &&
1363                       mdt_object_exists(child) && !mdt_object_remote(child)) {
1364                         struct md_attr *ma = &info->mti_attr;
1365
1366                         ma->ma_valid = 0;
1367                         ma->ma_need = MA_INODE;
1368                         rc = mdt_attr_get_complex(info, child, ma);
1369                         if (unlikely(rc != 0))
1370                                 GOTO(out_child, rc);
1371
1372                         /* If the file has not been changed for some time,
1373                          * return not only a LOOKUP lock but also an UPDATE
1374                          * lock, which may save an RPC on a later STAT. For
1375                          * directories it also lets negative dentries start
1376                          * working for this directory. */
1377                         if (ma->ma_valid & MA_INODE &&
1378                             ma->ma_attr.la_valid & LA_CTIME &&
1379                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1380                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1381                                 child_bits |= MDS_INODELOCK_UPDATE;
1382                 }
1383
1384                 /* layout lock must be granted in a best-effort way
1385                  * for IT operations */
1386                 LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT));
1387                 if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) &&
1388                     exp_connect_layout(info->mti_exp) &&
1389                     S_ISREG(lu_object_attr(&child->mot_obj.mo_lu)) &&
1390                     ldlm_rep != NULL) {
1391                         /* try to grant layout lock for regular file. */
1392                         try_layout = true;
1393                 }
1394
1395                 rc = 0;
1396                 if (try_layout) {
1397                         child_bits |= MDS_INODELOCK_LAYOUT;
1398                         /* try layout lock, it may fail to be granted due to
1399                          * contention at LOOKUP or UPDATE */
1400                         if (!mdt_object_lock_try(info, child, lhc, child_bits,
1401                                                  MDT_CROSS_LOCK)) {
1402                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1403                                 LASSERT(child_bits != 0);
1404                                 rc = mdt_object_lock(info, child, lhc,
1405                                                 child_bits, MDT_CROSS_LOCK);
1406                         } else {
1407                                 ma_need |= MA_LOV;
1408                         }
1409                 } else {
1410                         rc = mdt_object_lock(info, child, lhc, child_bits,
1411                                                 MDT_CROSS_LOCK);
1412                 }
1413                 if (unlikely(rc != 0))
1414                         GOTO(out_child, rc);
1415         }
1416
1417         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1418         /* Get MA_SOM attributes if update lock is given. */
1419         if (lock &&
1420             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1421             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1422                 ma_need |= MA_SOM;
1423
1424         /* finally, we can get attr for child. */
1425         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1426         rc = mdt_getattr_internal(info, child, ma_need);
1427         if (unlikely(rc != 0)) {
1428                 mdt_object_unlock(info, child, lhc, 1);
1429         } else if (lock) {
1430                 /* Debugging code. */
1431                 res_id = &lock->l_resource->lr_name;
1432                 LDLM_DEBUG(lock, "Returning lock to client");
1433                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1434                                          &lock->l_resource->lr_name),
1435                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1436                          (unsigned long)res_id->name[0],
1437                          (unsigned long)res_id->name[1],
1438                          (unsigned long)res_id->name[2],
1439                          PFID(mdt_object_fid(child)));
1440                 if (mdt_object_exists(child) && !mdt_object_remote(child))
1441                         mdt_pack_size2body(info, child);
1442         }
1443         if (lock)
1444                 LDLM_LOCK_PUT(lock);
1445
1446         EXIT;
1447 out_child:
1448         mdt_object_put(info->mti_env, child);
1449 out_parent:
1450         if (lhp)
1451                 mdt_object_unlock(info, parent, lhp, 1);
1452         return rc;
1453 }
1454
1455 /* normal handler: should release the child lock */
1456 int mdt_getattr_name(struct mdt_thread_info *info)
1457 {
1458         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1459         struct mdt_body        *reqbody;
1460         struct mdt_body        *repbody;
1461         int rc, rc2;
1462         ENTRY;
1463
1464         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1465         LASSERT(reqbody != NULL);
1466         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1467         LASSERT(repbody != NULL);
1468
1469         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1470         repbody->eadatasize = 0;
1471         repbody->aclsize = 0;
1472
1473         rc = mdt_init_ucred(info, reqbody);
1474         if (unlikely(rc))
1475                 GOTO(out_shrink, rc);
1476
1477         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1478         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1479                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1480                 lhc->mlh_reg_lh.cookie = 0;
1481         }
1482         mdt_exit_ucred(info);
1483         EXIT;
1484 out_shrink:
1485         mdt_client_compatibility(info);
1486         rc2 = mdt_fix_reply(info);
1487         if (rc == 0)
1488                 rc = rc2;
1489         return rc;
1490 }
1491
1492 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1493                          void *karg, void *uarg);
1494
1495 int mdt_set_info(struct mdt_thread_info *info)
1496 {
1497         struct ptlrpc_request *req = mdt_info_req(info);
1498         char *key;
1499         void *val;
1500         int keylen, vallen, rc = 0;
1501         ENTRY;
1502
1503         rc = req_capsule_server_pack(info->mti_pill);
1504         if (rc)
1505                 RETURN(rc);
1506
1507         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1508         if (key == NULL) {
1509                 DEBUG_REQ(D_HA, req, "no set_info key");
1510                 RETURN(-EFAULT);
1511         }
1512
1513         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1514                                       RCL_CLIENT);
1515
1516         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1517         if (val == NULL) {
1518                 DEBUG_REQ(D_HA, req, "no set_info val");
1519                 RETURN(-EFAULT);
1520         }
1521
1522         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1523                                       RCL_CLIENT);
1524
1525         /* Swab any part of val you need to here */
1526         if (KEY_IS(KEY_READ_ONLY)) {
1527                 req->rq_status = 0;
1528                 lustre_msg_set_status(req->rq_repmsg, 0);
1529
1530                 spin_lock(&req->rq_export->exp_lock);
1531                 if (*(__u32 *)val)
1532                         *exp_connect_flags_ptr(req->rq_export) |=
1533                                 OBD_CONNECT_RDONLY;
1534                 else
1535                         *exp_connect_flags_ptr(req->rq_export) &=
1536                                 ~OBD_CONNECT_RDONLY;
1537                 spin_unlock(&req->rq_export->exp_lock);
1538
1539         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1540                 struct changelog_setinfo *cs =
1541                         (struct changelog_setinfo *)val;
1542                 if (vallen != sizeof(*cs)) {
1543                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1544                         RETURN(-EINVAL);
1545                 }
1546                 if (ptlrpc_req_need_swab(req)) {
1547                         __swab64s(&cs->cs_recno);
1548                         __swab32s(&cs->cs_id);
1549                 }
1550
1551                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1552                                    vallen, val, NULL);
1553                 lustre_msg_set_status(req->rq_repmsg, rc);
1554
1555         } else {
1556                 RETURN(-EINVAL);
1557         }
1558         RETURN(0);
1559 }
1560
1561 /**
1562  * Top-level handler for MDT connection requests.
1563  */
1564 int mdt_connect(struct mdt_thread_info *info)
1565 {
1566         int rc;
1567         struct obd_connect_data *reply;
1568         struct obd_export *exp;
1569         struct ptlrpc_request *req = mdt_info_req(info);
1570
1571         rc = target_handle_connect(req);
1572         if (rc != 0)
1573                 return err_serious(rc);
1574
1575         LASSERT(req->rq_export != NULL);
1576         info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1577         rc = mdt_init_sec_level(info);
1578         if (rc != 0) {
1579                 obd_disconnect(class_export_get(req->rq_export));
1580                 return rc;
1581         }
1582
1583         /* To avoid exposing partially initialized connection flags, changes up
1584          * to this point have been staged in reply->ocd_connect_flags. Now that
1585          * connection handling has completed successfully, atomically update
1586          * the connect flags in the shared export data structure. LU-1623 */
1587         reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
1588         exp = req->rq_export;
1589         spin_lock(&exp->exp_lock);
1590         *exp_connect_flags_ptr(exp) = reply->ocd_connect_flags;
1591         spin_unlock(&exp->exp_lock);
1592
1593         rc = mdt_init_idmap(info);
1594         if (rc != 0)
1595                 obd_disconnect(class_export_get(req->rq_export));
1596
1597         return rc;
1598 }
1599
1600 int mdt_disconnect(struct mdt_thread_info *info)
1601 {
1602         int rc;
1603         ENTRY;
1604
1605         rc = target_handle_disconnect(mdt_info_req(info));
1606         if (rc)
1607                 rc = err_serious(rc);
1608         RETURN(rc);
1609 }
1610
1611 static int mdt_sendpage(struct mdt_thread_info *info,
1612                         struct lu_rdpg *rdpg, int nob)
1613 {
1614         struct ptlrpc_request   *req = mdt_info_req(info);
1615         struct obd_export       *exp = req->rq_export;
1616         struct ptlrpc_bulk_desc *desc;
1617         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1618         int                      tmpcount;
1619         int                      tmpsize;
1620         int                      i;
1621         int                      rc;
1622         ENTRY;
1623
1624         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
1625                                     MDS_BULK_PORTAL);
1626         if (desc == NULL)
1627                 RETURN(-ENOMEM);
1628
1629         if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
1630                 /* old client requires reply size in its PAGE_SIZE,
1631                  * which is rdpg->rp_count */
1632                 nob = rdpg->rp_count;
1633
1634         for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1635              i++, tmpcount -= tmpsize) {
1636                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1637                 ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
1638         }
1639
1640         LASSERT(desc->bd_nob == nob);
1641         rc = target_bulk_io(exp, desc, lwi);
1642         ptlrpc_free_bulk_pin(desc);
1643         RETURN(rc);
1644 }
1645
1646 int mdt_readpage(struct mdt_thread_info *info)
1647 {
1648         struct mdt_object *object = info->mti_object;
1649         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1650         struct mdt_body   *reqbody;
1651         struct mdt_body   *repbody;
1652         int                rc;
1653         int                i;
1654         ENTRY;
1655
1656         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1657                 RETURN(err_serious(-ENOMEM));
1658
1659         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1660         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1661         if (reqbody == NULL || repbody == NULL)
1662                 RETURN(err_serious(-EFAULT));
1663
1664         /*
1665          * Prepare @rdpg before calling lower layers and transfer itself.
1666          * Here reqbody->size contains the offset to start reading at and
1667          * reqbody->nlink contains the number of bytes to read.
1668          */
1669         rdpg->rp_hash = reqbody->size;
1670         if (rdpg->rp_hash != reqbody->size) {
1671                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1672                        rdpg->rp_hash, reqbody->size);
1673                 RETURN(-EFAULT);
1674         }
1675
1676         rdpg->rp_attrs = reqbody->mode;
1677         if (exp_connect_flags(info->mti_exp) & OBD_CONNECT_64BITHASH)
1678                 rdpg->rp_attrs |= LUDA_64BITHASH;
1679         rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
1680                                 exp_max_brw_size(info->mti_exp));
1681         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
1682                           CFS_PAGE_SHIFT;
1683         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1684         if (rdpg->rp_pages == NULL)
1685                 RETURN(-ENOMEM);
1686
1687         for (i = 0; i < rdpg->rp_npages; ++i) {
1688                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1689                 if (rdpg->rp_pages[i] == NULL)
1690                         GOTO(free_rdpg, rc = -ENOMEM);
1691         }
1692
1693         /* call lower layers to fill allocated pages with directory data */
1694         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1695         if (rc < 0)
1696                 GOTO(free_rdpg, rc);
1697
1698         /* send pages to client */
1699         rc = mdt_sendpage(info, rdpg, rc);
1700
1701         EXIT;
1702 free_rdpg:
1703
1704         for (i = 0; i < rdpg->rp_npages; i++)
1705                 if (rdpg->rp_pages[i] != NULL)
1706                         cfs_free_page(rdpg->rp_pages[i]);
1707         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1708
1709         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1710                 RETURN(0);
1711
1712         return rc;
1713 }
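
/*
 * Worked example of the readpage sizing above (illustrative only, the
 * concrete numbers are assumptions): with reqbody->nlink = 1 MiB requested,
 * exp_max_brw_size() = 1 MiB and CFS_PAGE_SIZE = 4096, rp_count becomes
 * 1048576 and rp_npages = (1048576 + 4095) >> 12 = 256 pages are allocated,
 * filled by mo_readpage() and transferred by mdt_sendpage().
 */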
1714
1715 static int mdt_reint_internal(struct mdt_thread_info *info,
1716                               struct mdt_lock_handle *lhc,
1717                               __u32 op)
1718 {
1719         struct req_capsule      *pill = info->mti_pill;
1720         struct mdt_body         *repbody;
1721         int                      rc = 0, rc2;
1722         ENTRY;
1723
1725         rc = mdt_reint_unpack(info, op);
1726         if (rc != 0) {
1727                 CERROR("Can't unpack reint, rc %d\n", rc);
1728                 RETURN(err_serious(rc));
1729         }
1730
1731         /* for replay (no_create) lmm is not needed, client has it already */
1732         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1733                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1734                                      info->mti_rr.rr_eadatalen);
1735
1736         /* llog cookies are always 0, the field is kept for compatibility */
1737         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1738                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);
1739
1740         rc = req_capsule_server_pack(pill);
1741         if (rc != 0) {
1742                 CERROR("Can't pack response, rc %d\n", rc);
1743                 RETURN(err_serious(rc));
1744         }
1745
1746         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1747                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1748                 LASSERT(repbody);
1749                 repbody->eadatasize = 0;
1750                 repbody->aclsize = 0;
1751         }
1752
1753         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1754
1755         /* for replay no cookie/lmm is needed, the client already has them */
1756         if (info->mti_spec.no_create)
1757                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1758                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1759
1760         rc = mdt_init_ucred_reint(info);
1761         if (rc)
1762                 GOTO(out_shrink, rc);
1763
1764         rc = mdt_fix_attr_ucred(info, op);
1765         if (rc != 0)
1766                 GOTO(out_ucred, rc = err_serious(rc));
1767
1768         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1769                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1770                 GOTO(out_ucred, rc);
1771         }
1772         rc = mdt_reint_rec(info, lhc);
1773         EXIT;
1774 out_ucred:
1775         mdt_exit_ucred(info);
1776 out_shrink:
1777         mdt_client_compatibility(info);
1778         rc2 = mdt_fix_reply(info);
1779         if (rc == 0)
1780                 rc = rc2;
1781         return rc;
1782 }
1783
1784 static long mdt_reint_opcode(struct mdt_thread_info *info,
1785                              const struct req_format **fmt)
1786 {
1787         struct mdt_rec_reint *rec;
1788         long opc;
1789
1790         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1791         if (rec != NULL) {
1792                 opc = rec->rr_opcode;
1793                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opt = %ld", opc);
1794                 if (opc < REINT_MAX && fmt[opc] != NULL)
1795                         req_capsule_extend(info->mti_pill, fmt[opc]);
1796                 else {
1797                         CERROR("%s: Unsupported opcode '%ld' from client '%s': "
1798                                "rc = %d\n", mdt_obd_name(info->mti_mdt), opc,
1799                                info->mti_mdt->mdt_ldlm_client->cli_name,
1800                                -EFAULT);
1801                         opc = err_serious(-EFAULT);
1802                 }
1803         } else {
1804                 opc = err_serious(-EFAULT);
1805         }
1806         return opc;
1807 }
1808
1809 int mdt_reint(struct mdt_thread_info *info)
1810 {
1811         long opc;
1812         int  rc;
1813
1814         static const struct req_format *reint_fmts[REINT_MAX] = {
1815                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1816                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1817                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1818                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1819                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1820                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1821                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR,
1822                 [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK
1823         };
1824
1825         ENTRY;
1826
1827         opc = mdt_reint_opcode(info, reint_fmts);
1828         if (opc >= 0) {
1829                 /*
1830                  * No lock possible here from client to pass it to reint code
1831                  * path.
1832                  */
1833                 rc = mdt_reint_internal(info, NULL, opc);
1834         } else {
1835                 rc = opc;
1836         }
1837
1838         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1839         RETURN(rc);
1840 }
1841
1842 /* this should sync the whole device */
1843 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1844 {
1845         struct dt_device *dt = mdt->mdt_bottom;
1846         int rc;
1847         ENTRY;
1848
1849         rc = dt->dd_ops->dt_sync(env, dt);
1850         RETURN(rc);
1851 }
1852
1853 /* this should sync this object */
1854 static int mdt_object_sync(struct mdt_thread_info *info)
1855 {
1856         struct md_object *next;
1857         int rc;
1858         ENTRY;
1859
1860         if (!mdt_object_exists(info->mti_object)) {
1861                 CWARN("Non-existent object "DFID"!\n",
1862                       PFID(mdt_object_fid(info->mti_object)));
1863                 RETURN(-ESTALE);
1864         }
1865         next = mdt_object_child(info->mti_object);
1866         rc = mo_object_sync(info->mti_env, next);
1867
1868         RETURN(rc);
1869 }
1870
1871 int mdt_sync(struct mdt_thread_info *info)
1872 {
1873         struct ptlrpc_request *req = mdt_info_req(info);
1874         struct req_capsule *pill = info->mti_pill;
1875         struct mdt_body *body;
1876         int rc;
1877         ENTRY;
1878
1879         /* The fid may be zero, so we req_capsule_set manually */
1880         req_capsule_set(pill, &RQF_MDS_SYNC);
1881
1882         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1883         if (body == NULL)
1884                 RETURN(err_serious(-EINVAL));
1885
1886         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1887                 RETURN(err_serious(-ENOMEM));
1888
1889         if (fid_seq(&body->fid1) == 0) {
1890                 /* sync the whole device */
1891                 rc = req_capsule_server_pack(pill);
1892                 if (rc == 0)
1893                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1894                 else
1895                         rc = err_serious(rc);
1896         } else {
1897                 /* sync an object */
1898                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1899                 if (rc == 0) {
1900                         rc = mdt_object_sync(info);
1901                         if (rc == 0) {
1902                                 const struct lu_fid *fid;
1903                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1904
1905                                 info->mti_attr.ma_need = MA_INODE;
1906                                 info->mti_attr.ma_valid = 0;
1907                                 rc = mdt_attr_get_complex(info, info->mti_object,
1908                                                           &info->mti_attr);
1909                                 if (rc == 0) {
1910                                         body = req_capsule_server_get(pill,
1911                                                                 &RMF_MDT_BODY);
1912                                         fid = mdt_object_fid(info->mti_object);
1913                                         mdt_pack_attr2body(info, body, la, fid);
1914                                 }
1915                         }
1916                 } else
1917                         rc = err_serious(rc);
1918         }
1919         if (rc == 0)
1920                 mdt_counter_incr(req, LPROC_MDT_SYNC);
1921
1922         RETURN(rc);
1923 }
1924
1925 /*
1926  * Quotacheck handler.
1927  * in-kernel quotacheck isn't supported any more.
1928  */
1929 int mdt_quotacheck(struct mdt_thread_info *info)
1930 {
1931         struct obd_quotactl     *oqctl;
1932         int                      rc;
1933         ENTRY;
1934
1935         oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
1936         if (oqctl == NULL)
1937                 RETURN(err_serious(-EPROTO));
1938
1939         rc = req_capsule_server_pack(info->mti_pill);
1940         if (rc)
1941                 RETURN(err_serious(rc));
1942
1943         /* deprecated, not used any more */
1944         RETURN(-EOPNOTSUPP);
1945 }
1946
1947 /*
1948  * Handle quota control requests to consult current usage/limit, but also
1949  * to configure quota enforcement
1950  */
1951 int mdt_quotactl(struct mdt_thread_info *info)
1952 {
1953         struct obd_export       *exp  = info->mti_exp;
1954         struct req_capsule      *pill = info->mti_pill;
1955         struct obd_quotactl     *oqctl, *repoqc;
1956         int                      id, rc;
1957         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
1958         ENTRY;
1959
1960         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1961         if (oqctl == NULL)
1962                 RETURN(err_serious(-EPROTO));
1963
1964         rc = req_capsule_server_pack(pill);
1965         if (rc)
1966                 RETURN(err_serious(rc));
1967
1968         switch (oqctl->qc_cmd) {
1969         case Q_QUOTACHECK:
1970         case LUSTRE_Q_INVALIDATE:
1971         case LUSTRE_Q_FINVALIDATE:
1972         case Q_QUOTAON:
1973         case Q_QUOTAOFF:
1974         case Q_INITQUOTA:
1975                 /* deprecated, not used any more */
1976                 RETURN(-EOPNOTSUPP);
1977                 /* master quotactl */
1978         case Q_GETINFO:
1979         case Q_SETINFO:
1980         case Q_SETQUOTA:
1981         case Q_GETQUOTA:
1982                 if (qmt == NULL)
1983                         RETURN(-EOPNOTSUPP);
1984                 /* slave quotactl */
1985         case Q_GETOINFO:
1986         case Q_GETOQUOTA:
1987                 break;
1988         default:
1989                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
1990                 RETURN(-EFAULT);
1991         }
1992
1993         /* map uid/gid for remote client */
1994         id = oqctl->qc_id;
1995         if (exp_connect_rmtclient(exp)) {
1996                 struct lustre_idmap_table *idmap;
1997
1998                 idmap = mdt_req2med(mdt_info_req(info))->med_idmap;
1999
2000                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
2001                              oqctl->qc_cmd != Q_GETINFO))
2002                         RETURN(-EPERM);
2003
2004                 if (oqctl->qc_type == USRQUOTA)
2005                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
2006                                                      oqctl->qc_id);
2007                 else if (oqctl->qc_type == GRPQUOTA)
2008                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
2009                                                      oqctl->qc_id);
2010                 else
2011                         RETURN(-EINVAL);
2012
2013                 if (id == CFS_IDMAP_NOTFOUND) {
2014                         CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
2015                         RETURN(-EACCES);
2016                 }
2017         }
2018
2019         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
2020         if (repoqc == NULL)
2021                 RETURN(err_serious(-EFAULT));
2022
2023         if (oqctl->qc_id != id)
2024                 swap(oqctl->qc_id, id);
2025
2026         switch (oqctl->qc_cmd) {
2027
2028         case Q_GETINFO:
2029         case Q_SETINFO:
2030         case Q_SETQUOTA:
2031         case Q_GETQUOTA:
2032                 /* forward quotactl request to QMT */
2033                 rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
2034                 break;
2035
2036         case Q_GETOINFO:
2037         case Q_GETOQUOTA:
2038                 /* slave quotactl */
2039                 rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
2040                                    oqctl);
2041                 break;
2042
2043         default:
2044                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
2045                 RETURN(-EFAULT);
2046         }
2047
2048         if (oqctl->qc_id != id)
2049                 swap(oqctl->qc_id, id);
2050
2051         *repoqc = *oqctl;
2052         RETURN(rc);
2053 }
2054
2055 /*
2056  * OBD PING and other handlers.
2057  */
2058 int mdt_obd_ping(struct mdt_thread_info *info)
2059 {
2060         int rc;
2061         ENTRY;
2062
2063         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2064
2065         rc = target_handle_ping(mdt_info_req(info));
2066         if (rc < 0)
2067                 rc = err_serious(rc);
2068         RETURN(rc);
2069 }
2070
2071 /*
2072  * OBD_IDX_READ handler
2073  */
2074 int mdt_obd_idx_read(struct mdt_thread_info *info)
2075 {
2076         struct mdt_device       *mdt = info->mti_mdt;
2077         struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
2078         struct idx_info         *req_ii, *rep_ii;
2079         int                      rc, i;
2080         ENTRY;
2081
2082         memset(rdpg, 0, sizeof(*rdpg));
2083         req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);
2084
2085         /* extract idx_info buffer from request & reply */
2086         req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
2087         if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
2088                 RETURN(err_serious(-EPROTO));
2089
2090         rc = req_capsule_server_pack(info->mti_pill);
2091         if (rc)
2092                 RETURN(err_serious(rc));
2093
2094         rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
2095         if (rep_ii == NULL)
2096                 RETURN(err_serious(-EFAULT));
2097         rep_ii->ii_magic = IDX_INFO_MAGIC;
2098
2099         /* extract hash to start with */
2100         rdpg->rp_hash = req_ii->ii_hash_start;
2101
2102         /* extract requested attributes */
2103         rdpg->rp_attrs = req_ii->ii_attrs;
2104
2105         /* check that fid packed in request is valid and supported */
2106         if (!fid_is_sane(&req_ii->ii_fid))
2107                 RETURN(-EINVAL);
2108         rep_ii->ii_fid = req_ii->ii_fid;
2109
2110         /* copy flags */
2111         rep_ii->ii_flags = req_ii->ii_flags;
2112
2113         /* compute number of pages to allocate, ii_count is the number of 4KB
2114          * containers */
2115         if (req_ii->ii_count <= 0)
2116                 GOTO(out, rc = -EFAULT);
2117         rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
2118                                exp_max_brw_size(info->mti_exp));
2119         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT;
2120
2121         /* allocate pages to store the containers */
2122         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2123         if (rdpg->rp_pages == NULL)
2124                 GOTO(out, rc = -ENOMEM);
2125         for (i = 0; i < rdpg->rp_npages; i++) {
2126                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
2127                 if (rdpg->rp_pages[i] == NULL)
2128                         GOTO(out, rc = -ENOMEM);
2129         }
2130
2131         /* populate pages with key/record pairs */
2132         rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
2133         if (rc < 0)
2134                 GOTO(out, rc);
2135
2136         LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
2137                  "asked %d > %d\n", rc, rdpg->rp_count);
2138
2139         /* send pages to client */
2140         rc = mdt_sendpage(info, rdpg, rc);
2141
2142         GOTO(out, rc);
2143 out:
2144         if (rdpg->rp_pages) {
2145                 for (i = 0; i < rdpg->rp_npages; i++)
2146                         if (rdpg->rp_pages[i])
2147                                 cfs_free_page(rdpg->rp_pages[i]);
2148                 OBD_FREE(rdpg->rp_pages,
2149                          rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2150         }
2151         return rc;
2152 }
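
/*
 * Note on the OBD_IDX_READ sizing above (illustrative arithmetic only,
 * LU_PAGE_SHIFT = 12 is assumed): ii_count is expressed in 4KB containers,
 * so req_ii->ii_count << LU_PAGE_SHIFT converts it to bytes, e.g.
 * ii_count = 16 yields 16 * 4096 = 65536 bytes, which is then capped by
 * exp_max_brw_size() and split into CFS_PAGE_SIZE pages as in mdt_readpage().
 */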
2153
2154 int mdt_obd_log_cancel(struct mdt_thread_info *info)
2155 {
2156         return err_serious(-EOPNOTSUPP);
2157 }
2158
2159 int mdt_obd_qc_callback(struct mdt_thread_info *info)
2160 {
2161         return err_serious(-EOPNOTSUPP);
2162 }
2163
2164 /*
2165  * LLOG handlers.
2166  */
2167
2168 /** clone llog ctxt from child (mdd)
2169  * This allows remote llog (replicator) access.
2170  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child
2171  * where the context was originally set up, or we can handle them directly.
2172  * We choose the latter, but that means any llog contexts set up by the
2173  * child must be accessible by the mdt, so we clone the context into our
2174  * context list here.
2175  */
2176 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2177                                int idx)
2178 {
2179         struct md_device  *next = mdt->mdt_child;
2180         struct llog_ctxt *ctxt;
2181         int rc;
2182
2183         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2184                 return 0;
2185
2186         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2187         if (rc || ctxt == NULL) {
2188                 return 0;
2189         }
2190
2191         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2192         if (rc)
2193                 CERROR("Can't set mdt ctxt %d\n", rc);
2194
2195         return rc;
2196 }
2197
2198 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2199                                  struct mdt_device *mdt, int idx)
2200 {
2201         struct llog_ctxt *ctxt;
2202
2203         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2204         if (ctxt == NULL)
2205                 return 0;
2206         /* Put once for the get we just did, and once for the clone */
2207         llog_ctxt_put(ctxt);
2208         llog_ctxt_put(ctxt);
2209         return 0;
2210 }
2211
2212 int mdt_llog_create(struct mdt_thread_info *info)
2213 {
2214         int rc;
2215
2216         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2217         rc = llog_origin_handle_open(mdt_info_req(info));
2218         return (rc < 0 ? err_serious(rc) : rc);
2219 }
2220
2221 int mdt_llog_destroy(struct mdt_thread_info *info)
2222 {
2223         int rc;
2224
2225         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2226         rc = llog_origin_handle_destroy(mdt_info_req(info));
2227         return (rc < 0 ? err_serious(rc) : rc);
2228 }
2229
2230 int mdt_llog_read_header(struct mdt_thread_info *info)
2231 {
2232         int rc;
2233
2234         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2235         rc = llog_origin_handle_read_header(mdt_info_req(info));
2236         return (rc < 0 ? err_serious(rc) : rc);
2237 }
2238
2239 int mdt_llog_next_block(struct mdt_thread_info *info)
2240 {
2241         int rc;
2242
2243         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2244         rc = llog_origin_handle_next_block(mdt_info_req(info));
2245         return (rc < 0 ? err_serious(rc) : rc);
2246 }
2247
2248 int mdt_llog_prev_block(struct mdt_thread_info *info)
2249 {
2250         int rc;
2251
2252         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2253         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2254         return (rc < 0 ? err_serious(rc) : rc);
2255 }
2256
2258 /*
2259  * DLM handlers.
2260  */
2261
2262 static struct ldlm_callback_suite cbs = {
2263         .lcs_completion = ldlm_server_completion_ast,
2264         .lcs_blocking   = ldlm_server_blocking_ast,
2265         .lcs_glimpse    = ldlm_server_glimpse_ast
2266 };
2267
2268 int mdt_enqueue(struct mdt_thread_info *info)
2269 {
2270         struct ptlrpc_request *req;
2271         int rc;
2272
2273         /*
2274          * info->mti_dlm_req already contains swapped and (if necessary)
2275          * converted dlm request.
2276          */
2277         LASSERT(info->mti_dlm_req != NULL);
2278
2279         req = mdt_info_req(info);
2280         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2281                                   req, info->mti_dlm_req, &cbs);
2282         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2283         return rc ? err_serious(rc) : req->rq_status;
2284 }
2285
2286 int mdt_convert(struct mdt_thread_info *info)
2287 {
2288         int rc;
2289         struct ptlrpc_request *req;
2290
2291         LASSERT(info->mti_dlm_req);
2292         req = mdt_info_req(info);
2293         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2294         return rc ? err_serious(rc) : req->rq_status;
2295 }
2296
2297 int mdt_bl_callback(struct mdt_thread_info *info)
2298 {
2299         CERROR("bl callbacks should not happen on MDS\n");
2300         LBUG();
2301         return err_serious(-EOPNOTSUPP);
2302 }
2303
2304 int mdt_cp_callback(struct mdt_thread_info *info)
2305 {
2306         CERROR("cp callbacks should not happen on MDS\n");
2307         LBUG();
2308         return err_serious(-EOPNOTSUPP);
2309 }
2310
2311 /*
2312  * sec context handlers
2313  */
2314 int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2315 {
2316         int rc;
2317
2318         rc = mdt_handle_idmap(info);
2319
2320         if (unlikely(rc)) {
2321                 struct ptlrpc_request *req = mdt_info_req(info);
2322                 __u32                  opc;
2323
2324                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2325                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2326                         sptlrpc_svc_ctx_invalidate(req);
2327         }
2328
2329         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2330
2331         return rc;
2332 }
2333
2334 /*
2335  * quota request handlers
2336  */
2337 int mdt_quota_dqacq(struct mdt_thread_info *info)
2338 {
2339         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2340         int                      rc;
2341         ENTRY;
2342
2343         if (qmt == NULL)
2344                 RETURN(err_serious(-EOPNOTSUPP));
2345
2346         rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2347         RETURN(rc);
2348 }
2349
2350 static struct mdt_object *mdt_obj(struct lu_object *o)
2351 {
2352         LASSERT(lu_device_is_mdt(o->lo_dev));
2353         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2354 }
2355
2356 struct mdt_object *mdt_object_new(const struct lu_env *env,
2357                                   struct mdt_device *d,
2358                                   const struct lu_fid *f)
2359 {
2360         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2361         struct lu_object *o;
2362         struct mdt_object *m;
2363         ENTRY;
2364
2365         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2366         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2367         if (unlikely(IS_ERR(o)))
2368                 m = (struct mdt_object *)o;
2369         else
2370                 m = mdt_obj(o);
2371         RETURN(m);
2372 }
2373
2374 struct mdt_object *mdt_object_find(const struct lu_env *env,
2375                                    struct mdt_device *d,
2376                                    const struct lu_fid *f)
2377 {
2378         struct lu_object *o;
2379         struct mdt_object *m;
2380         ENTRY;
2381
2382         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2383         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2384         if (unlikely(IS_ERR(o)))
2385                 m = (struct mdt_object *)o;
2386         else
2387                 m = mdt_obj(o);
2388         RETURN(m);
2389 }
2390
2391 /**
2392  * Asynchronous commit for the mdt device.
2393  *
2394  * Pass the asynchronous commit call down the MDS stack.
2395  *
2396  * \param env environment
2397  * \param mdt the mdt device
2398  */
2399 static void mdt_device_commit_async(const struct lu_env *env,
2400                                     struct mdt_device *mdt)
2401 {
2402         struct dt_device *dt = mdt->mdt_bottom;
2403         int rc;
2404
2405         rc = dt->dd_ops->dt_commit_async(env, dt);
2406         if (unlikely(rc != 0))
2407                 CWARN("async commit start failed with rc = %d\n", rc);
2408 }
2409
2410 /**
2411  * Mark the lock as "synchronous".
2412  *
2413  * Mark the lock to defer transaction commit to unlock time.
2414  *
2415  * \param lock the lock to mark as "synchronous"
2416  *
2417  * \see mdt_is_lock_sync
2418  * \see mdt_save_lock
2419  */
2420 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2421 {
2422         lock->l_ast_data = (void*)1;
2423 }
2424
2425 /**
2426  * Check whether the lock is "synchronous" or not.
2427  *
2428  * \param lock the lock to check
2429  * \retval 1 the lock is "synchronous"
2430  * \retval 0 the lock isn't "synchronous"
2431  *
2432  * \see mdt_set_lock_sync
2433  * \see mdt_save_lock
2434  */
2435 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2436 {
2437         return lock->l_ast_data != NULL;
2438 }
2439
2440 /**
2441  * Blocking AST for mdt locks.
2442  *
2443  * Starts a transaction commit in case of a COS lock conflict, or
2444  * defers such a commit to mdt_save_lock().
2445  *
2446  * \param lock the lock which blocks a request or a cancelling lock
2447  * \param desc unused
2448  * \param data unused
2449  * \param flag indicates whether this is a cancelling or blocking callback
2450  * \retval 0
2451  * \see ldlm_blocking_ast_nocheck
2452  */
2453 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2454                      void *data, int flag)
2455 {
2456         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2457         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2458         int rc;
2459         ENTRY;
2460
2461         if (flag == LDLM_CB_CANCELING)
2462                 RETURN(0);
2463         lock_res_and_lock(lock);
2464         if (lock->l_blocking_ast != mdt_blocking_ast) {
2465                 unlock_res_and_lock(lock);
2466                 RETURN(0);
2467         }
2468         if (mdt_cos_is_enabled(mdt) &&
2469             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2470             lock->l_blocking_lock != NULL &&
2471             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2472                 mdt_set_lock_sync(lock);
2473         }
2474         rc = ldlm_blocking_ast_nocheck(lock);
2475
2476         /* There is no lock conflict if l_blocking_lock == NULL,
2477          * it indicates a blocking ast sent from ldlm_lock_decref_internal
2478          * when the last reference to a local lock was released */
2479         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2480                 struct lu_env env;
2481
2482                 rc = lu_env_init(&env, LCT_LOCAL);
2483                 if (unlikely(rc != 0))
2484                         CWARN("lu_env initialization failed with rc = %d, "
2485                               "cannot start asynchronous commit\n", rc);
2486                 else
2487                         mdt_device_commit_async(&env, mdt);
2488                 lu_env_fini(&env);
2489         }
2490         RETURN(rc);
2491 }
2492
2493 int mdt_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2494                         void *data, int flag)
2495 {
2496         struct lustre_handle lockh;
2497         int               rc;
2498
2499         switch (flag) {
2500         case LDLM_CB_BLOCKING:
2501                 ldlm_lock2handle(lock, &lockh);
2502                 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
2503                 if (rc < 0) {
2504                         CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2505                         RETURN(rc);
2506                 }
2507                 break;
2508         case LDLM_CB_CANCELING:
2509                 LDLM_DEBUG(lock, "Revoke remote lock\n");
2510                 break;
2511         default:
2512                 LBUG();
2513         }
2514         RETURN(0);
2515 }
2516
2517 int mdt_remote_object_lock(struct mdt_thread_info *mti,
2518                            struct mdt_object *o, struct lustre_handle *lh,
2519                            ldlm_mode_t mode, __u64 ibits)
2520 {
2521         struct ldlm_enqueue_info *einfo = &mti->mti_einfo;
2522         ldlm_policy_data_t *policy = &mti->mti_policy;
2523         int rc = 0;
2524         ENTRY;
2525
2526         LASSERT(mdt_object_remote(o));
2527
2528         LASSERT((ibits & MDS_INODELOCK_UPDATE));
2529
2530         memset(einfo, 0, sizeof(*einfo));
2531         einfo->ei_type = LDLM_IBITS;
2532         einfo->ei_mode = mode;
2533         einfo->ei_cb_bl = mdt_md_blocking_ast;
2534         einfo->ei_cb_cp = ldlm_completion_ast;
2535
2536         memset(policy, 0, sizeof(*policy));
2537         policy->l_inodebits.bits = ibits;
2538
2539         rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo,
2540                             policy);
2541         RETURN(rc);
2542 }
2543
2544 static int mdt_object_lock0(struct mdt_thread_info *info, struct mdt_object *o,
2545                             struct mdt_lock_handle *lh, __u64 ibits,
2546                             bool nonblock, int locality)
2547 {
2548         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2549         ldlm_policy_data_t *policy = &info->mti_policy;
2550         struct ldlm_res_id *res_id = &info->mti_res_id;
2551         __u64 dlmflags;
2552         int rc;
2553         ENTRY;
2554
2555         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2556         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2557         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2558         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2559
2560         if (mdt_object_remote(o)) {
2561                 if (locality == MDT_CROSS_LOCK) {
2562                         ibits &= ~(MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM);
2563                         ibits |= MDS_INODELOCK_LOOKUP;
2564                 } else {
2565                         LASSERTF(!(ibits &
2566                                   (MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM)),
2567                                 "%s: wrong bit "LPX64" for remote obj "DFID"\n",
2568                                 mdt_obd_name(info->mti_mdt), ibits,
2569                                 PFID(mdt_object_fid(o)));
2570                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2571                 }
2572                 /* No PDO lock on remote object */
2573                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2574         }
2575
2576         if (lh->mlh_type == MDT_PDO_LOCK) {
2577                 /* check for exists after object is locked */
2578                 if (mdt_object_exists(o) == 0) {
2579                         /* Non-existent object shouldn't have PDO lock */
2580                         RETURN(-ESTALE);
2581                 } else {
2582                         /* Non-dir object shouldn't have PDO lock */
2583                         if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
2584                                 RETURN(-ENOTDIR);
2585                 }
2586         }
2587
2588         memset(policy, 0, sizeof(*policy));
2589         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2590
2591         dlmflags = LDLM_FL_ATOMIC_CB;
2592         if (nonblock)
2593                 dlmflags |= LDLM_FL_BLOCK_NOWAIT;
2594
2595         /*
2596          * Take PDO lock on whole directory and build correct @res_id for lock
2597          * on part of directory.
2598          */
2599         if (lh->mlh_pdo_hash != 0) {
2600                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2601                 mdt_lock_pdo_mode(info, o, lh);
2602                 if (lh->mlh_pdo_mode != LCK_NL) {
2603                         /*
2604                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2605                          * is never going to be sent to client and we do not
2606                          * want it slowed down due to possible cancels.
2607                          */
2608                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2609                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2610                                           policy, res_id, dlmflags,
2611                                           &info->mti_exp->exp_handle.h_cookie);
2612                         if (unlikely(rc))
2613                                 RETURN(rc);
2614                 }
2615
2616                 /*
2617                  * Finish initializing res_id with the name hash, marking
2618                  * the part of the directory which is being modified.
2619                  */
2620                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2621         }
2622
2623         policy->l_inodebits.bits = ibits;
2624
2625         /*
2626          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2627          * going to be sent to the client. If it is, the mdt_intent_policy()
2628          * path will fix it up and turn the FL_LOCAL flag off.
2629          */
2630         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2631                           res_id, LDLM_FL_LOCAL_ONLY | dlmflags,
2632                           &info->mti_exp->exp_handle.h_cookie);
2633         if (rc)
2634                 mdt_object_unlock(info, o, lh, 1);
2635         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2636                  lh->mlh_pdo_hash != 0 &&
2637                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2638                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2639         }
2640
2641         RETURN(rc);
2642 }
2643
2644 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2645                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2646 {
2647         return mdt_object_lock0(info, o, lh, ibits, false, locality);
2648 }
2649
2650 int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *o,
2651                         struct mdt_lock_handle *lh, __u64 ibits, int locality)
2652 {
2653         struct mdt_lock_handle tmp = *lh;
2654         int rc;
2655
2656         rc = mdt_object_lock0(info, o, &tmp, ibits, true, locality);
2657         if (rc == 0)
2658                 *lh = tmp;
2659
2660         return rc == 0;
2661 }
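
/*
 * Minimal sketch (not compiled) of how mdt_object_lock_try() is meant to be
 * used: opportunistically request an optional bit (here MDS_INODELOCK_LAYOUT,
 * as in mdt_getattr_name_lock() above) and fall back to a blocking
 * mdt_object_lock() without it when the non-blocking attempt fails. The
 * local variable names below are assumptions.
 */
#if 0
        __u64 bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_LAYOUT;

        if (!mdt_object_lock_try(info, child, lhc, bits, MDT_CROSS_LOCK)) {
                /* the optional LAYOUT bit could not be granted without
                 * blocking, retry with the mandatory bits only */
                bits &= ~MDS_INODELOCK_LAYOUT;
                rc = mdt_object_lock(info, child, lhc, bits, MDT_CROSS_LOCK);
        }
#endif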
2662
2663 /**
2664  * Save a lock within request object.
2665  *
2666  * Either keep the lock referenced until a client ACK or transaction
2667  * commit happens, or release the lock immediately, depending on the
2668  * input parameters. If COS is ON, a write lock is converted to a COS
2669  * lock before saving.
2670  *
2671  * \param info thread info object
2672  * \param h lock handle
2673  * \param mode lock mode
2674  * \param decref force immediate lock releasing
2675  */
2676 static
2677 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2678                    ldlm_mode_t mode, int decref)
2679 {
2680         ENTRY;
2681
2682         if (lustre_handle_is_used(h)) {
2683                 if (decref || !info->mti_has_trans ||
2684                     !(mode & (LCK_PW | LCK_EX))){
2685                         mdt_fid_unlock(h, mode);
2686                 } else {
2687                         struct mdt_device *mdt = info->mti_mdt;
2688                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2689                         struct ptlrpc_request *req = mdt_info_req(info);
2690                         int no_ack = 0;
2691
2692                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2693                                  h->cookie);
2694                         CDEBUG(D_HA, "request = %p reply state = %p"
2695                                " transno = "LPD64"\n",
2696                                req, req->rq_reply_state, req->rq_transno);
2697                         if (mdt_cos_is_enabled(mdt)) {
2698                                 no_ack = 1;
2699                                 ldlm_lock_downgrade(lock, LCK_COS);
2700                                 mode = LCK_COS;
2701                         }
2702                         ptlrpc_save_lock(req, h, mode, no_ack);
2703                         if (mdt_is_lock_sync(lock)) {
2704                                 CDEBUG(D_HA, "found sync-lock,"
2705                                        " async commit started\n");
2706                                 mdt_device_commit_async(info->mti_env,
2707                                                         mdt);
2708                         }
2709                         LDLM_LOCK_PUT(lock);
2710                 }
2711                 h->cookie = 0ull;
2712         }
2713
2714         EXIT;
2715 }
2716
2717 /**
2718  * Unlock mdt object.
2719  *
2720  * Immediately release the regular lock and the PDO lock, or save the
2721  * locks in the request and keep them referenced until client ACK or
2722  * transaction commit.
2723  *
2724  * \param info thread info object
2725  * \param o mdt object
2726  * \param lh mdt lock handle referencing regular and PDO locks
2727  * \param decref force immediate lock releasing
2728  */
2729 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2730                        struct mdt_lock_handle *lh, int decref)
2731 {
2732         ENTRY;
2733
2734         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2735         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2736
2737         if (lustre_handle_is_used(&lh->mlh_rreg_lh))
2738                 ldlm_lock_decref(&lh->mlh_rreg_lh, lh->mlh_rreg_mode);
2739
2740         EXIT;
2741 }
2742
2743 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2744                                         const struct lu_fid *f,
2745                                         struct mdt_lock_handle *lh,
2746                                         __u64 ibits)
2747 {
2748         struct mdt_object *o;
2749
2750         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2751         if (!IS_ERR(o)) {
2752                 int rc;
2753
2754                 rc = mdt_object_lock(info, o, lh, ibits,
2755                                      MDT_LOCAL_LOCK);
2756                 if (rc != 0) {
2757                         mdt_object_put(info->mti_env, o);
2758                         o = ERR_PTR(rc);
2759                 }
2760         }
2761         return o;
2762 }
2763
2764 void mdt_object_unlock_put(struct mdt_thread_info * info,
2765                            struct mdt_object * o,
2766                            struct mdt_lock_handle *lh,
2767                            int decref)
2768 {
2769         mdt_object_unlock(info, o, lh, decref);
2770         mdt_object_put(info->mti_env, o);
2771 }
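
/*
 * Minimal usage sketch (not compiled) pairing the two helpers above:
 * mdt_object_find_lock() returns a referenced, locked object (or an
 * ERR_PTR), and mdt_object_unlock_put() drops both the lock and the
 * reference in one call. The fid and lock handle names are assumptions.
 */
#if 0
        struct mdt_object *o;

        o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
        if (IS_ERR(o))
                RETURN(PTR_ERR(o));
        /* ... operate on the locked object ... */
        mdt_object_unlock_put(info, o, lh, 1);
#endif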
2772
2773 struct mdt_handler *mdt_handler_find(__u32 opc, struct mdt_opc_slice *supported)
2774 {
2775         struct mdt_opc_slice *s;
2776         struct mdt_handler   *h;
2777
2778         h = NULL;
2779         for (s = supported; s->mos_hs != NULL; s++) {
2780                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2781                         h = s->mos_hs + (opc - s->mos_opc_start);
2782                         if (likely(h->mh_opc != 0))
2783                                 LASSERTF(h->mh_opc == opc,
2784                                          "opcode mismatch %d != %d\n",
2785                                          h->mh_opc, opc);
2786                         else
2787                                 h = NULL; /* unsupported opc */
2788                         break;
2789                 }
2790         }
2791         return h;
2792 }
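
/*
 * Illustrative (not compiled) lookup against an opcode slice table. The
 * table name mdt_regular_handlers and the request variable req are
 * assumptions for the example; only the mdt_handler_find() signature above
 * is relied upon.
 */
#if 0
        struct mdt_handler *h;

        h = mdt_handler_find(lustre_msg_get_opc(req->rq_reqmsg),
                             mdt_regular_handlers);
        if (h == NULL)
                /* opcode not supported by this service */
                rc = err_serious(-EOPNOTSUPP);
#endif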
2793
2794 static int mdt_lock_resname_compat(struct mdt_device *m,
2795                                    struct ldlm_request *req)
2796 {
2797         /* XXX something... later. */
2798         return 0;
2799 }
2800
2801 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2802 {
2803         /* XXX something... later. */
2804         return 0;
2805 }
2806
2807 /*
2808  * Generic code handling requests that have struct mdt_body passed in:
2809  *
2810  *  - extract mdt_body from request and save it in @info, if present;
2811  *
2812  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2813  *  @info;
2814  *
2815  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2816  *  the object actually exists on storage (lu_object_exists()).
2817  *
2818  */
2819 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2820 {
2821         const struct mdt_body    *body;
2822         struct mdt_object        *obj;
2823         const struct lu_env      *env;
2824         struct req_capsule       *pill;
2825         int                       rc;
2826         ENTRY;
2827
2828         env = info->mti_env;
2829         pill = info->mti_pill;
2830
2831         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2832         if (body == NULL)
2833                 RETURN(-EFAULT);
2834
2835         if (!(body->valid & OBD_MD_FLID))
2836                 RETURN(0);
2837
2838         if (!fid_is_sane(&body->fid1)) {
2839                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2840                 RETURN(-EINVAL);
2841         }
2842
2843         /*
2844          * Do not get the size or any capa fields before checking that the
2845          * request actually contains a capability. Some requests do not,
2846          * for instance MDS_IS_SUBDIR.
2847          */
2848         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2849             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2850                 mdt_set_capainfo(info, 0, &body->fid1,
2851                                  req_capsule_client_get(pill, &RMF_CAPA1));
2852
2853         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2854         if (!IS_ERR(obj)) {
2855                 if ((flags & HABEO_CORPUS) &&
2856                     !mdt_object_exists(obj)) {
2857                         mdt_object_put(env, obj);
2858                         /* for capability renewal, ENOENT will be handled
2859                          * in mdt_renew_capa */
2860                         if (body->valid & OBD_MD_FLOSSCAPA)
2861                                 rc = 0;
2862                         else
2863                                 rc = -ENOENT;
2864                 } else {
2865                         info->mti_object = obj;
2866                         rc = 0;
2867                 }
2868         } else
2869                 rc = PTR_ERR(obj);
2870
2871         RETURN(rc);
2872 }
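/*
 * What a handler can rely on after a successful unpack (an illustrative
 * sketch, not taken from any particular handler): with HABEO_CORPUS set,
 * both the body and the instantiated object are normally available in
 * @info:
 *
 *        const struct mdt_body *body = info->mti_body;
 *        struct mdt_object *obj = info->mti_object;
 *
 *        LASSERT(body != NULL);
 *        LASSERT(obj != NULL && mdt_object_exists(obj));
 *        ... operate on obj, identified by body->fid1 ...
 *
 * Without OBD_MD_FLID in body->valid the unpack is a no-op and
 * info->mti_object stays NULL.
 */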
2873
2874 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2875 {
2876         struct req_capsule *pill = info->mti_pill;
2877         int rc;
2878         ENTRY;
2879
2880         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2881                 rc = mdt_body_unpack(info, flags);
2882         else
2883                 rc = 0;
2884
2885         if (rc == 0 && (flags & HABEO_REFERO)) {
2886                 /* Pack reply. */
2887                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2888                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2889                                              info->mti_body->eadatasize);
2890                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2891                         req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2892                                              RCL_SERVER, 0);
2893
2894                 rc = req_capsule_server_pack(pill);
2895         }
2896         RETURN(rc);
2897 }
2898
2899 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2900 {
2901         struct md_device *next = m->mdt_child;
2902
2903         return next->md_ops->mdo_init_capa_ctxt(env, next,
2904                                                 m->mdt_opts.mo_mds_capa,
2905                                                 m->mdt_capa_timeout,
2906                                                 m->mdt_capa_alg,
2907                                                 m->mdt_capa_keys);
2908 }
2909
2910 /*
2911  * Invoke handler for this request opc. Also do necessary preprocessing
2912  * (according to handler ->mh_flags), and post-processing (setting of
2913  * ->last_{xid,committed}).
2914  */
2915 static int mdt_req_handle(struct mdt_thread_info *info,
2916                           struct mdt_handler *h, struct ptlrpc_request *req)
2917 {
2918         int   rc, serious = 0;
2919         __u32 flags;
2920
2921         ENTRY;
2922
2923         LASSERT(h->mh_act != NULL);
2924         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2925         LASSERT(current->journal_info == NULL);
2926
2927         /*
2928          * Check for the various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2929          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2930          * etc., without talking to the mdt authors first. Checking the same
2931          * thing there again is useless, and returning 0 without packing a
2932          * reply is buggy! Handlers either pack a reply or return an error.
2933          *
2934          * We return 0 here and do not send any reply in order to emulate a
2935          * network failure. Do not send any reply if any NET-related fail_id
2936          * has occurred.
2937          */
2938         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2939                 RETURN(0);
2940
2941         rc = 0;
2942         flags = h->mh_flags;
2943         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2944
2945         if (h->mh_fmt != NULL) {
2946                 req_capsule_set(info->mti_pill, h->mh_fmt);
2947                 rc = mdt_unpack_req_pack_rep(info, flags);
2948         }
2949
2950         if (rc == 0 && flags & MUTABOR &&
2951             exp_connect_flags(req->rq_export) & OBD_CONNECT_RDONLY)
2952                 /* should it be rq_status? */
2953                 rc = -EROFS;
2954
2955         if (rc == 0 && flags & HABEO_CLAVIS) {
2956                 struct ldlm_request *dlm_req;
2957
2958                 LASSERT(h->mh_fmt != NULL);
2959
2960                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2961                 if (dlm_req != NULL) {
2962                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2963                                         LDLM_IBITS &&
2964                                      dlm_req->lock_desc.l_policy_data.\
2965                                         l_inodebits.bits == 0)) {
2966                                 /*
2967                                  * A lock without inodebits makes no sense
2968                                  * and will oops later in ldlm. If the client
2969                                  * failed to set such bits, do not assert.
2970                                  *
2971                                  * For liblustre flock, the bits may be zero.
2972                                  */
2973                                 rc = -EPROTO;
2974                         } else {
2975                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2976                                         rc = mdt_lock_resname_compat(
2977                                                                 info->mti_mdt,
2978                                                                 dlm_req);
2979                                 info->mti_dlm_req = dlm_req;
2980                         }
2981                 } else {
2982                         rc = -EFAULT;
2983                 }
2984         }
2985
2986         /* capability settings changed via /proc, reinitialize the ctxt */
2987         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2988                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2989                 info->mti_mdt->mdt_capa_conf = 0;
2990         }
2991
2992         if (likely(rc == 0)) {
2993                 /*
2994                  * Process the request; there can be two kinds of rc:
2995                  * 1) errors from msg unpack/pack and other failures outside
2996                  * the operation itself, which count as serious errors;
2997                  * 2) errors from the fs operation itself, which should be
2998                  * placed in rq_status only.
2999                  */
3000                 rc = h->mh_act(info);
3001                 if (rc == 0 &&
3002                     !req->rq_no_reply && req->rq_reply_state == NULL) {
3003                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
3004                                   "pack reply and returned 0 error\n",
3005                                   h->mh_name);
3006                         LBUG();
3007                 }
3008                 serious = is_serious(rc);
3009                 rc = clear_serious(rc);
3010         } else
3011                 serious = 1;
3012
3013         req->rq_status = rc;
3014
3015         /*
3016          * ELDLM_* codes greater than 0, as well as all non-serious errors,
3017          * belong in rq_status only.
3018          */
3019         if (rc > 0 || !serious)
3020                 rc = 0;
3021
3022         LASSERT(current->journal_info == NULL);
3023
3024         if (rc == 0 && (flags & HABEO_CLAVIS) &&
3025             info->mti_mdt->mdt_opts.mo_compat_resname) {
3026                 struct ldlm_reply *dlmrep;
3027
3028                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3029                 if (dlmrep != NULL)
3030                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3031         }
3032
3033         /* If we're DISCONNECTing, the mdt_export_data is already freed */
3034         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3035                 target_committed_to_req(req);
3036
3037         if (unlikely(req_is_replay(req) &&
3038                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3039                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3040                 LBUG();
3041         }
3042
3043         target_send_reply(req, rc, info->mti_fail_id);
3044         RETURN(0);
3045 }
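/*
 * How the two error channels above are used in practice (a hedged sketch;
 * err_serious() is assumed to be the mdt_internal.h helper paired with
 * is_serious()/clear_serious()):
 *
 *        static int example_handler(struct mdt_thread_info *info)
 *        {
 *                if (info->mti_body == NULL)
 *                        return err_serious(-EFAULT);
 *                if (!mdt_object_exists(info->mti_object))
 *                        return -ENOENT;
 *                return 0;
 *        }
 *
 * The err_serious() value is treated as an RPC-level failure and becomes
 * the return code of the RPC itself, while the plain -ENOENT is folded
 * into req->rq_status and the RPC still completes with rc == 0, provided
 * (as stressed above) that the handler packed a reply.
 */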
3046
3047 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3048 {
3049         lh->mlh_type = MDT_NUL_LOCK;
3050         lh->mlh_reg_lh.cookie = 0ull;
3051         lh->mlh_reg_mode = LCK_MINMODE;
3052         lh->mlh_pdo_lh.cookie = 0ull;
3053         lh->mlh_pdo_mode = LCK_MINMODE;
3054         lh->mlh_rreg_lh.cookie = 0ull;
3055         lh->mlh_rreg_mode = LCK_MINMODE;
3056 }
3057
3058 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3059 {
3060         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3061         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3062 }
3063
3064 /*
3065  * Initialize fields of struct mdt_thread_info. Other fields are left
3066  * uninitialized, because it is too expensive to zero out the whole
3067  * mdt_thread_info (> 1K) on each request arrival.
3068  */
3069 static void mdt_thread_info_init(struct ptlrpc_request *req,
3070                                  struct mdt_thread_info *info)
3071 {
3072         int i;
3073
3074         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3075         info->mti_pill = &req->rq_pill;
3076
3077         /* lock handle */
3078         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3079                 mdt_lock_handle_init(&info->mti_lh[i]);
3080
3081         /* mdt device: it can be NULL during CONNECT */
3082         if (req->rq_export) {
3083                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3084                 info->mti_exp = req->rq_export;
3085         } else
3086                 info->mti_mdt = NULL;
3087         info->mti_env = req->rq_svc_thread->t_env;
3088         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3089         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3090         info->mti_mos = NULL;
3091
3092         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3093         info->mti_big_buf = LU_BUF_NULL;
3094         info->mti_body = NULL;
3095         info->mti_object = NULL;
3096         info->mti_dlm_req = NULL;
3097         info->mti_has_trans = 0;
3098         info->mti_cross_ref = 0;
3099         info->mti_opdata = 0;
3100         info->mti_big_lmm_used = 0;
3101
3102         /* Do not check for split by default. */
3103         info->mti_spec.no_create = 0;
3104         info->mti_spec.sp_rm_entry = 0;
3105 }
3106
3107 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3108 {
3109         int i;
3110
3111         req_capsule_fini(info->mti_pill);
3112         if (info->mti_object != NULL) {
3113                 mdt_object_put(info->mti_env, info->mti_object);
3114                 info->mti_object = NULL;
3115         }
3116
3117         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3118                 mdt_lock_handle_fini(&info->mti_lh[i]);
3119         info->mti_env = NULL;
3120
3121         if (unlikely(info->mti_big_buf.lb_buf != NULL))
3122                 lu_buf_free(&info->mti_big_buf);
3123 }
3124
3125 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3126                                        struct obd_device *obd, int *process)
3127 {
3128         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3129         case MDS_CONNECT: /* This will never get here, but for completeness. */
3130         case OST_CONNECT: /* This will never get here, but for completeness. */
3131         case MDS_DISCONNECT:
3132         case OST_DISCONNECT:
3133         case OBD_IDX_READ:
3134                *process = 1;
3135                RETURN(0);
3136
3137         case MDS_CLOSE:
3138         case MDS_DONE_WRITING:
3139         case MDS_SYNC: /* used in unmounting */
3140         case OBD_PING:
3141         case MDS_REINT:
3142         case UPDATE_OBJ:
3143         case SEQ_QUERY:
3144         case FLD_QUERY:
3145         case LDLM_ENQUEUE:
3146                 *process = target_queue_recovery_request(req, obd);
3147                 RETURN(0);
3148
3149         default:
3150                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3151                 *process = -EAGAIN;
3152                 RETURN(0);
3153         }
3154 }
3155
3156 /*
3157  * Handle recovery. Return:
3158  *        +1: continue request processing;
3159  *       -ve: abort immediately with the given error code;
3160  *         0: send reply with error code in req->rq_status;
3161  */
3162 static int mdt_recovery(struct mdt_thread_info *info)
3163 {
3164         struct ptlrpc_request *req = mdt_info_req(info);
3165         struct obd_device *obd;
3166
3167         ENTRY;
3168
3169         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3170         case MDS_CONNECT:
3171         case SEC_CTX_INIT:
3172         case SEC_CTX_INIT_CONT:
3173         case SEC_CTX_FINI:
3174                 {
3175 #if 0
3176                         int rc;
3177
3178                         rc = mdt_handle_idmap(info);
3179                         if (rc)
3180                                 RETURN(rc);
3181                         else
3182 #endif
3183                                 RETURN(+1);
3184                 }
3185         }
3186
3187         if (unlikely(!class_connected_export(req->rq_export))) {
3188                 CDEBUG(D_HA, "operation %d on unconnected MDS from %s\n",
3189                        lustre_msg_get_opc(req->rq_reqmsg),
3190                        libcfs_id2str(req->rq_peer));
3191                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
3192                  * mds_A will get -ENOTCONN (especially ping requests),
3193                  * which triggers the mds_A deactivation timeout; when mds_A
3194                  * later cleans up, the cleanup process will be suspended
3195                  * since the deactivation timeout is not zero.
3196                  */
3197                 req->rq_status = -ENOTCONN;
3198                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3199                 RETURN(0);
3200         }
3201
3202         /* sanity check: if the xid matches, the request must be marked as
3203          * resent or replayed */
3204         if (req_xid_is_last(req)) {
3205                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3206                       (MSG_RESENT | MSG_REPLAY))) {
3207                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3208                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3209                                   lustre_msg_get_flags(req->rq_reqmsg));
3210                         LBUG();
3211                         req->rq_status = -ENOTCONN;
3212                         RETURN(-ENOTCONN);
3213                 }
3214         }
3215
3216         /* else: note the opposite is not always true; a RESENT req after a
3217          * failover will usually not match the last_xid, since it was likely
3218          * never committed. A REPLAYed request will almost never match the
3219          * last xid, however it could for a committed, but still retained,
3220          * open. */
3221
3222         obd = req->rq_export->exp_obd;
3223
3224         /* Check for aborted recovery... */
3225         if (unlikely(obd->obd_recovering)) {
3226                 int rc;
3227                 int should_process;
3228                 DEBUG_REQ(D_INFO, req, "Got new replay");
3229                 rc = mdt_filter_recovery_request(req, obd, &should_process);
3230                 if (rc != 0 || !should_process)
3231                         RETURN(rc);
3232                 else if (should_process < 0) {
3233                         req->rq_status = should_process;
3234                         rc = ptlrpc_error(req);
3235                         RETURN(rc);
3236                 }
3237         }
3238         RETURN(+1);
3239 }
3240
3241 static int mdt_msg_check_version(struct lustre_msg *msg)
3242 {
3243         int rc;
3244
3245         switch (lustre_msg_get_opc(msg)) {
3246         case MDS_CONNECT:
3247         case MDS_DISCONNECT:
3248         case OBD_PING:
3249         case SEC_CTX_INIT:
3250         case SEC_CTX_INIT_CONT:
3251         case SEC_CTX_FINI:
3252         case OBD_IDX_READ:
3253                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
3254                 if (rc)
3255                         CERROR("bad opc %u version %08x, expecting %08x\n",
3256                                lustre_msg_get_opc(msg),
3257                                lustre_msg_get_version(msg),
3258                                LUSTRE_OBD_VERSION);
3259                 break;
3260         case MDS_GETSTATUS:
3261         case MDS_GETATTR:
3262         case MDS_GETATTR_NAME:
3263         case MDS_STATFS:
3264         case MDS_READPAGE:
3265         case MDS_WRITEPAGE:
3266         case MDS_IS_SUBDIR:
3267         case MDS_REINT:
3268         case MDS_CLOSE:
3269         case MDS_DONE_WRITING:
3270         case MDS_PIN:
3271         case MDS_SYNC:
3272         case MDS_GETXATTR:
3273         case MDS_SETXATTR:
3274         case MDS_SET_INFO:
3275         case MDS_GET_INFO:
3276         case MDS_HSM_PROGRESS:
3277         case MDS_HSM_REQUEST:
3278         case MDS_HSM_CT_REGISTER:
3279         case MDS_HSM_CT_UNREGISTER:
3280         case MDS_HSM_STATE_GET:
3281         case MDS_HSM_STATE_SET:
3282         case MDS_HSM_ACTION:
3283         case MDS_QUOTACHECK:
3284         case MDS_QUOTACTL:
3285         case UPDATE_OBJ:
3286         case MDS_SWAP_LAYOUTS:
3287         case QUOTA_DQACQ:
3288         case QUOTA_DQREL:
3289         case SEQ_QUERY:
3290         case FLD_QUERY:
3291                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
3292                 if (rc)
3293                         CERROR("bad opc %u version %08x, expecting %08x\n",
3294                                lustre_msg_get_opc(msg),
3295                                lustre_msg_get_version(msg),
3296                                LUSTRE_MDS_VERSION);
3297                 break;
3298         case LDLM_ENQUEUE:
3299         case LDLM_CONVERT:
3300         case LDLM_BL_CALLBACK:
3301         case LDLM_CP_CALLBACK:
3302                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
3303                 if (rc)
3304                         CERROR("bad opc %u version %08x, expecting %08x\n",
3305                                lustre_msg_get_opc(msg),
3306                                lustre_msg_get_version(msg),
3307                                LUSTRE_DLM_VERSION);
3308                 break;
3309         case OBD_LOG_CANCEL:
3310         case LLOG_ORIGIN_HANDLE_CREATE:
3311         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
3312         case LLOG_ORIGIN_HANDLE_READ_HEADER:
3313         case LLOG_ORIGIN_HANDLE_CLOSE:
3314         case LLOG_ORIGIN_HANDLE_DESTROY:
3315         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
3316         case LLOG_CATINFO:
3317                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
3318                 if (rc)
3319                         CERROR("bad opc %u version %08x, expecting %08x\n",
3320                                lustre_msg_get_opc(msg),
3321                                lustre_msg_get_version(msg),
3322                                LUSTRE_LOG_VERSION);
3323                 break;
3324         default:
3325                 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
3326                 rc = -ENOTSUPP;
3327         }
3328         return rc;
3329 }
3330
3331 static int mdt_handle0(struct ptlrpc_request *req,
3332                        struct mdt_thread_info *info,
3333                        struct mdt_opc_slice *supported)
3334 {
3335         struct mdt_handler *h;
3336         struct lustre_msg  *msg;
3337         int                 rc;
3338
3339         ENTRY;
3340
3341         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
3342                 RETURN(0);
3343
3344         LASSERT(current->journal_info == NULL);
3345
3346         msg = req->rq_reqmsg;
3347         rc = mdt_msg_check_version(msg);
3348         if (likely(rc == 0)) {
3349                 rc = mdt_recovery(info);
3350                 if (likely(rc == +1)) {
3351                         h = mdt_handler_find(lustre_msg_get_opc(msg),
3352                                              supported);
3353                         if (likely(h != NULL)) {
3354                                 rc = mdt_req_handle(info, h, req);
3355                         } else {
3356                                 CERROR("Unsupported opc: 0x%x\n",
3357                                        lustre_msg_get_opc(msg));
3358                                 req->rq_status = -ENOTSUPP;
3359                                 rc = ptlrpc_error(req);
3360                                 RETURN(rc);
3361                         }
3362                 }
3363         } else
3364                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
3365         RETURN(rc);
3366 }
3367
3368 /*
3369  * MDT handler function called by ptlrpc service thread when request comes.
3370  *
3371  * XXX common "target" functionality should be factored into separate module
3372  * shared by mdt, ost and stand-alone services like fld.
3373  */
3374 int mdt_handle_common(struct ptlrpc_request *req,
3375                       struct mdt_opc_slice *supported)
3376 {
3377         struct lu_env          *env;
3378         struct mdt_thread_info *info;
3379         int                     rc;
3380         ENTRY;
3381
3382         env = req->rq_svc_thread->t_env;
3383         /* Refill (initialize) the context (mdt_thread_info), in case it is
3384          * not initialized yet. This usually happens during startup: after
3385          * the MDS (ptlrpc threads) starts up, it gets the first CONNECT
3386          * request before mdt_thread_info is initialized. */
3387         lu_env_refill(env);
3388         LASSERT(env != NULL);
3389         LASSERT(env->le_ses != NULL);
3390         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
3391         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3392         LASSERT(info != NULL);
3393
3394         mdt_thread_info_init(req, info);
3395
3396         rc = mdt_handle0(req, info, supported);
3397
3398         mdt_thread_info_fini(info);
3399         RETURN(rc);
3400 }
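/*
 * The per-portal entry points are expected to be thin wrappers around this
 * common handler (an illustrative sketch; the real wrappers live outside
 * this excerpt):
 *
 *        static int example_regular_handle(struct ptlrpc_request *req)
 *        {
 *                return mdt_handle_common(req, mdt_regular_handlers);
 *        }
 */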
3401
3402 /*
3403  * This is called from recovery code as handler of _all_ RPC types, FLD and SEQ
3404  * as well.
3405  */
3406 int mdt_recovery_handle(struct ptlrpc_request *req)
3407 {
3408         int rc;
3409         ENTRY;
3410
3411         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3412         case FLD_QUERY:
3413                 rc = mdt_handle_common(req, mdt_fld_handlers);
3414                 break;
3415         case SEQ_QUERY:
3416                 rc = mdt_handle_common(req, mdt_seq_handlers);
3417                 break;
3418         default:
3419                 rc = mdt_handle_common(req, mdt_regular_handlers);
3420                 break;
3421         }
3422
3423         RETURN(rc);
3424 }
3425
3426 enum mdt_it_code {
3427         MDT_IT_OPEN,
3428         MDT_IT_OCREAT,
3429         MDT_IT_CREATE,
3430         MDT_IT_GETATTR,
3431         MDT_IT_READDIR,
3432         MDT_IT_LOOKUP,
3433         MDT_IT_UNLINK,
3434         MDT_IT_TRUNC,
3435         MDT_IT_GETXATTR,
3436         MDT_IT_LAYOUT,
3437         MDT_IT_QUOTA,
3438         MDT_IT_NR
3439 };
3440
3441 static int mdt_intent_getattr(enum mdt_it_code opcode,
3442                               struct mdt_thread_info *info,
3443                               struct ldlm_lock **,
3444                               __u64);
3445 static int mdt_intent_layout(enum mdt_it_code opcode,
3446                              struct mdt_thread_info *info,
3447                              struct ldlm_lock **,
3448                              __u64);
3449 static int mdt_intent_reint(enum mdt_it_code opcode,
3450                             struct mdt_thread_info *info,
3451                             struct ldlm_lock **,
3452                             __u64);
3453
3454 static struct mdt_it_flavor {
3455         const struct req_format *it_fmt;
3456         __u32                    it_flags;
3457         int                    (*it_act)(enum mdt_it_code ,
3458                                          struct mdt_thread_info *,
3459                                          struct ldlm_lock **,
3460                                          __u64);
3461         long                     it_reint;
3462 } mdt_it_flavor[] = {
3463         [MDT_IT_OPEN]     = {
3464                 .it_fmt   = &RQF_LDLM_INTENT,
3465                 /*.it_flags = HABEO_REFERO,*/
3466                 .it_flags = 0,
3467                 .it_act   = mdt_intent_reint,
3468                 .it_reint = REINT_OPEN
3469         },
3470         [MDT_IT_OCREAT]   = {
3471                 .it_fmt   = &RQF_LDLM_INTENT,
3472                 .it_flags = MUTABOR,
3473                 .it_act   = mdt_intent_reint,
3474                 .it_reint = REINT_OPEN
3475         },
3476         [MDT_IT_CREATE]   = {
3477                 .it_fmt   = &RQF_LDLM_INTENT,
3478                 .it_flags = MUTABOR,
3479                 .it_act   = mdt_intent_reint,
3480                 .it_reint = REINT_CREATE
3481         },
3482         [MDT_IT_GETATTR]  = {
3483                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3484                 .it_flags = HABEO_REFERO,
3485                 .it_act   = mdt_intent_getattr
3486         },
3487         [MDT_IT_READDIR]  = {
3488                 .it_fmt   = NULL,
3489                 .it_flags = 0,
3490                 .it_act   = NULL
3491         },
3492         [MDT_IT_LOOKUP]   = {
3493                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3494                 .it_flags = HABEO_REFERO,
3495                 .it_act   = mdt_intent_getattr
3496         },
3497         [MDT_IT_UNLINK]   = {
3498                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3499                 .it_flags = MUTABOR,
3500                 .it_act   = NULL,
3501                 .it_reint = REINT_UNLINK
3502         },
3503         [MDT_IT_TRUNC]    = {
3504                 .it_fmt   = NULL,
3505                 .it_flags = MUTABOR,
3506                 .it_act   = NULL
3507         },
3508         [MDT_IT_GETXATTR] = {
3509                 .it_fmt   = NULL,
3510                 .it_flags = 0,
3511                 .it_act   = NULL
3512         },
3513         [MDT_IT_LAYOUT] = {
3514                 .it_fmt   = &RQF_LDLM_INTENT_LAYOUT,
3515                 .it_flags = 0,
3516                 .it_act   = mdt_intent_layout
3517         }
3518 };
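/*
 * How this table is meant to be consumed (an illustrative sketch; the
 * dispatching code, e.g. mdt_intent_opc(), is outside this excerpt, so the
 * exact sequence below is an assumption):
 *
 *        const struct mdt_it_flavor *flv = &mdt_it_flavor[opc];
 *
 *        if (flv->it_fmt != NULL)
 *                req_capsule_extend(info->mti_pill, flv->it_fmt);
 *        rc = mdt_unpack_req_pack_rep(info, flv->it_flags);
 *        if (rc == 0 && flv->it_act != NULL)
 *                rc = flv->it_act(opc, info, lockp, flags);
 *
 * Entries with a NULL .it_act are placeholders with no intent action
 * wired up here.
 */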
3519
3520 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3521                             struct ldlm_lock **lockp,
3522                             struct ldlm_lock *new_lock,
3523                             struct mdt_lock_handle *lh,
3524                             __u64 flags)
3525 {
3526         struct ptlrpc_request  *req = mdt_info_req(info);
3527         struct ldlm_lock       *lock = *lockp;
3528
3529         /*
3530          * Get a new lock only when a possible resent request did not find
3531          * an existing lock.
3532          */
3533         if (new_lock == NULL)
3534                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3535
3536         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3537                 lh->mlh_reg_lh.cookie = 0;
3538                 RETURN(0);
3539         }
3540
3541         LASSERTF(new_lock != NULL,
3542                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3543
3544         /*
3545          * If we've already given this lock to a client once, then we should
3546          * have no readers or writers.  Otherwise, we should have one reader
3547          * _or_ writer ref (which will be zeroed below) before returning the
3548          * lock to a client.
3549          */
3550         if (new_lock->l_export == req->rq_export) {
3551                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3552         } else {
3553                 LASSERT(new_lock->l_export == NULL);
3554                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3555         }
3556
3557         *lockp = new_lock;
3558
3559         if (new_lock->l_export == req->rq_export) {
3560                 /*
3561                  * Already gave this to the client, which means that we
3562                  * reconstructed a reply.
3563                  */
3564                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3565                         MSG_RESENT);
3566                 lh->mlh_reg_lh.cookie = 0;
3567                 RETURN(ELDLM_LOCK_REPLACED);
3568         }
3569
3570         /*
3571          * Fixup the lock to be given to the client.
3572          */
3573         lock_res_and_lock(new_lock);
3574         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3575          * possible blocking AST. */
3576         while (new_lock->l_readers > 0) {
3577                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3578                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3579                 new_lock->l_readers--;
3580         }
3581         while (new_lock->l_writers > 0) {
3582                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3583                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3584                 new_lock->l_writers--;
3585         }
3586
3587         new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3588         new_lock->l_blocking_ast = lock->l_blocking_ast;
3589         new_lock->l_completion_ast = lock->l_completion_ast;
3590         new_lock->l_remote_handle = lock->l_remote_handle;
3591         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3592
3593         unlock_res_and_lock(new_lock);
3594
3595         cfs_hash_add(new_lock->l_export->exp_lock_hash,
3596                      &new_lock->l_remote_handle,
3597                      &new_lock->l_exp_hash);
3598
3599         LDLM_LOCK_RELEASE(new_lock);
3600         lh->mlh_reg_lh.cookie = 0;
3601
3602         RETURN(ELDLM_LOCK_REPLACED);
3603 }
3604
3605 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3606                                     struct ldlm_lock *new_lock,
3607                                     struct ldlm_lock **old_lock,
3608                                     struct mdt_lock_handle *lh)
3609 {
3610         struct ptlrpc_request  *req = mdt_info_req(info);
3611         struct obd_export      *exp = req->rq_export;
3612         struct lustre_handle    remote_hdl;
3613         struct ldlm_request    *dlmreq;
3614         struct ldlm_lock       *lock;
3615
3616         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3617                 return;
3618
3619         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3620         remote_hdl = dlmreq->lock_handle[0];
3621
3622         /* In the function below, .hs_keycmp resolves to
3623          * ldlm_export_lock_keycmp() */
3624         /* coverity[overrun-buffer-val] */
3625         lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3626         if (lock) {
3627                 if (lock != new_lock) {
3628                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3629                         lh->mlh_reg_mode = lock->l_granted_mode;
3630
3631                         LDLM_DEBUG(lock, "Restoring lock cookie");
3632                         DEBUG_REQ(D_DLMTRACE, req,
3633                                   "restoring lock cookie "LPX64,
3634                                   lh->mlh_reg_lh.cookie);
3635                         if (old_lock)
3636                                 *old_lock = LDLM_LOCK_GET(lock);
3637                         cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3638                         return;
3639                 }
3640
3641                 cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3642         }
3643
3644         /*
3645          * If the xid matches, then we know this is a resent request and
3646          * allow it. (It's probably an OPEN, for which we don't send a lock.)
3647          */
3648         if (req_xid_is_last(req))
3649                 return;
3650
3651         /*
3652          * This remote handle isn't enqueued, so we never received or processed
3653          * this request.  Clear MSG_RESENT, because it can be handled like any
3654          * normal request now.
3655          */
3656         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3657
3658         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3659                   remote_hdl.cookie);
3660 }
3661
3662 static int mdt_intent_getattr(enum mdt_it_code opcode,
3663                               struct mdt_thread_info *info,
3664                               struct ldlm_lock **lockp,
3665                               __u64 flags)
3666 {
3667         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3668         struct ldlm_lock       *new_lock = NULL;
3669         __u64                   child_bits;
3670         struct ldlm_reply      *ldlm_rep;
3671         struct ptlrpc_request  *req;
3672         struct mdt_body        *reqbody;
3673         struct mdt_body        *repbody;
3674         int                     rc, rc2;
3675         ENTRY;
3676
3677         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
3678         LASSERT(reqbody);
3679
3680         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
3681         LASSERT(repbody);
3682
3683         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
3684         repbody->eadatasize = 0;
3685         repbody->aclsize = 0;
3686
3687         switch (opcode) {
3688         case MDT_IT_LOOKUP:
3689                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM;
3690                 break;
3691         case MDT_IT_GETATTR:
3692                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
3693                              MDS_INODELOCK_PERM;
3694                 break;
3695         default:
3696                 CERROR("Unsupported intent (%d)\n", opcode);
3697                 GOTO(out_shrink, rc = -EINVAL);
3698         }
3699
3700         rc = mdt_init_ucred(info, reqbody);
3701         if (rc)
3702                 GOTO(out_shrink, rc);
3703
3704         req = info->mti_pill->rc_req;
3705         ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3706         mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD);
3707
3708         /* Get lock from request for possible resent case. */
3709         mdt_intent_fixup_resent(info, *lockp, &new_lock, lhc);
3710
3711         rc = mdt_getattr_name_lock(info, lhc, child_bits, ldlm_rep);
3712         ldlm_rep->lock_policy_res2 = clear_serious(rc);
3713
3714         if (mdt_get_disposition(ldlm_rep, DISP_LOOKUP_NEG))
3715                 ldlm_rep->lock_policy_res2 = 0;
3716         if (!mdt_get_disposition(ldlm_rep, DISP_LOOKUP_POS) ||
3717             ldlm_rep->lock_policy_res2) {
3718                 lhc->mlh_reg_lh.cookie = 0ull;
3719                 GOTO(out_ucred, rc = ELDLM_LOCK_ABORTED);
3720         }
3721
3722         rc = mdt_intent_lock_replace(info, lockp, new_lock, lhc, flags);
3723         EXIT;
3724 out_ucred:
3725         mdt_exit_ucred(info);
3726 out_shrink:
3727         mdt_client_compatibility(info);
3728