1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * struct OBD_{ALLOC,FREE}*()
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #include <lustre_acl.h>
69 #include <lustre_param.h>
70 #include <lustre_quota.h>
71 #include <lustre_linkea.h>
72
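/*
 * Translation tables between the LDLM lock modes (LCK_*) used on the wire
 * and the metadata lock modes (MDL_*) used by the MD layer; the two arrays
 * below are inverses of each other.
 */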
73 mdl_mode_t mdt_mdl_lock_modes[] = {
74         [LCK_MINMODE] = MDL_MINMODE,
75         [LCK_EX]      = MDL_EX,
76         [LCK_PW]      = MDL_PW,
77         [LCK_PR]      = MDL_PR,
78         [LCK_CW]      = MDL_CW,
79         [LCK_CR]      = MDL_CR,
80         [LCK_NL]      = MDL_NL,
81         [LCK_GROUP]   = MDL_GROUP
82 };
83
84 ldlm_mode_t mdt_dlm_lock_modes[] = {
85         [MDL_MINMODE] = LCK_MINMODE,
86         [MDL_EX]      = LCK_EX,
87         [MDL_PW]      = LCK_PW,
88         [MDL_PR]      = LCK_PR,
89         [MDL_CW]      = LCK_CW,
90         [MDL_CR]      = LCK_CR,
91         [MDL_NL]      = LCK_NL,
92         [MDL_GROUP]   = LCK_GROUP
93 };
94
95 static struct mdt_device *mdt_dev(struct lu_device *d);
96 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
97
98 static const struct lu_object_operations mdt_obj_ops;
99
100 /* Slab for MDT object allocation */
101 static cfs_mem_cache_t *mdt_object_kmem;
102
103 static struct lu_kmem_descr mdt_caches[] = {
104         {
105                 .ckd_cache = &mdt_object_kmem,
106                 .ckd_name  = "mdt_obj",
107                 .ckd_size  = sizeof(struct mdt_object)
108         },
109         {
110                 .ckd_cache = NULL
111         }
112 };
113
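/*
 * Helpers to query and update the intent disposition flags, kept in the
 * LDLM reply (lock_policy_res1) and mirrored in info->mti_opdata.
 */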
114 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
115 {
116         if (!rep)
117                 return 0;
118         return (rep->lock_policy_res1 & flag);
119 }
120
121 void mdt_clear_disposition(struct mdt_thread_info *info,
122                            struct ldlm_reply *rep, int flag)
123 {
124         if (info)
125                 info->mti_opdata &= ~flag;
126         if (rep)
127                 rep->lock_policy_res1 &= ~flag;
128 }
129
130 void mdt_set_disposition(struct mdt_thread_info *info,
131                          struct ldlm_reply *rep, int flag)
132 {
133         if (info)
134                 info->mti_opdata |= flag;
135         if (rep)
136                 rep->lock_policy_res1 |= flag;
137 }
138
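/* Initialize @lh as a regular (whole-object) lock request of mode @lm. */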
139 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
140 {
141         lh->mlh_pdo_hash = 0;
142         lh->mlh_reg_mode = lm;
143         lh->mlh_rreg_mode = lm;
144         lh->mlh_type = MDT_REG_LOCK;
145 }
146
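/*
 * Initialize @lh as a parallel-directory (PDO) lock request of mode @lm;
 * a non-empty @name selects the directory hash bucket to lock, while an
 * empty name means the whole directory.
 */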
147 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
148                        const char *name, int namelen)
149 {
150         lh->mlh_reg_mode = lm;
151         lh->mlh_rreg_mode = lm;
152         lh->mlh_type = MDT_PDO_LOCK;
153
154         if (name != NULL && (name[0] != '\0')) {
155                 LASSERT(namelen > 0);
156                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
157         } else {
158                 LASSERT(namelen == 0);
159                 lh->mlh_pdo_hash = 0ull;
160         }
161 }
162
163 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
164                               struct mdt_lock_handle *lh)
165 {
166         mdl_mode_t mode;
167         ENTRY;
168
169         /*
170          * Any directory access needs a couple of locks:
171          *
172          * 1) on the part of the directory we are going to look up or modify;
173          *
174          * 2) on the whole directory to protect it from concurrent splitting
175          * and/or to flush the client's cache for readdir().
176          *
177          * So, for a given mode and object this routine decides what lock mode
178          * to use for lock #2:
179          *
180          * 1) if the caller is going to look up in the directory, we only need
181          * to protect it from being split - LCK_CR;
182          *
183          * 2) if the caller is going to modify the directory, we need to
184          * protect it from being split and to flush the cache - LCK_CW;
185          *
186          * 3) if the caller is going to modify the directory and it seems ready
187          * for splitting, we need to protect it from any type of access
188          * (lookup/modify/split) - LCK_EX --bzzz
189          */
190
191         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
192         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
193
194         /*
195          * Ask the underlying layer for its opinion about the preferable PDO
196          * lock mode, passing the access type as the regular lock mode:
197          *
198          * - MDL_MINMODE means that the lower layer does not want to specify a
199          * lock mode;
200          *
201          * - MDL_NL means that no PDO lock should be taken. For example,
202          * non-splittable directories do not need to use PDO locks
203          * at all.
204          */
205         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
206                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
207
208         if (mode != MDL_MINMODE) {
209                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
210         } else {
211                 /*
212                  * The lower layer does not want to specify a locking mode, so
213                  * we do it ourselves. No special protection is needed; just
214                  * flush the client's cache on modification and allow
215                  * concurrent modification.
216                  */
217                 switch (lh->mlh_reg_mode) {
218                 case LCK_EX:
219                         lh->mlh_pdo_mode = LCK_EX;
220                         break;
221                 case LCK_PR:
222                         lh->mlh_pdo_mode = LCK_CR;
223                         break;
224                 case LCK_PW:
225                         lh->mlh_pdo_mode = LCK_CW;
226                         break;
227                 default:
228                         CERROR("Unexpected lock type (0x%x)\n",
229                                (int)lh->mlh_reg_mode);
230                         LBUG();
231                 }
232         }
233
234         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
235         EXIT;
236 }
237
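/*
 * MDS_GETSTATUS handler: return the FID of the filesystem root in the reply
 * body, plus a root capability when MDS capabilities are enabled and the
 * client supports them.
 */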
238 int mdt_getstatus(struct mdt_thread_info *info)
239 {
240         struct mdt_device       *mdt  = info->mti_mdt;
241         struct mdt_body         *repbody;
242         int                     rc;
243         ENTRY;
244
245         rc = mdt_check_ucred(info);
246         if (rc)
247                 RETURN(err_serious(rc));
248
249         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
250                 RETURN(err_serious(-ENOMEM));
251
252         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
253         repbody->fid1 = mdt->mdt_md_root_fid;
254         repbody->valid |= OBD_MD_FLID;
255
256         if (mdt->mdt_opts.mo_mds_capa &&
257             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
258                 struct mdt_object  *root;
259                 struct lustre_capa *capa;
260
261                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
262                 if (IS_ERR(root))
263                         RETURN(PTR_ERR(root));
264
265                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
266                 LASSERT(capa);
267                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
268                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
269                                  0);
270                 mdt_object_put(info->mti_env, root);
271                 if (rc == 0)
272                         repbody->valid |= OBD_MD_FLMDSCAPA;
273         }
274
275         RETURN(rc);
276 }
277
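/*
 * MDS_STATFS handler: serve statfs data from the per-MDT cache when it is
 * younger than OBD_STATFS_CACHE_SECONDS, otherwise refresh the cache from
 * the underlying device first.
 */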
278 int mdt_statfs(struct mdt_thread_info *info)
279 {
280         struct ptlrpc_request           *req = mdt_info_req(info);
281         struct md_device                *next = info->mti_mdt->mdt_child;
282         struct ptlrpc_service_part      *svcpt;
283         struct obd_statfs               *osfs;
284         int                             rc;
285
286         ENTRY;
287
288         svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;
289
290         /* This will trigger a watchdog timeout */
291         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
292                          (MDT_SERVICE_WATCHDOG_FACTOR *
293                           at_get(&svcpt->scp_at_estimate)) + 1);
294
295         rc = mdt_check_ucred(info);
296         if (rc)
297                 RETURN(err_serious(rc));
298
299         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
300                 RETURN(err_serious(-ENOMEM));
301
302         osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
303         if (!osfs)
304                 RETURN(-EPROTO);
305
306         /** statfs information is cached in the mdt_device */
307         if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
308                                cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
309                 /** statfs data is too old; fetch up-to-date data */
310                 rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
311                 if (rc)
312                         RETURN(rc);
313                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
314                 info->mti_mdt->mdt_osfs = *osfs;
315                 info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
316                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
317         } else {
318                 /** use cached statfs data */
319                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
320                 *osfs = info->mti_mdt->mdt_osfs;
321                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
322         }
323
324         if (rc == 0)
325                 mdt_counter_incr(req, LPROC_MDT_STATFS);
326
327         RETURN(rc);
328 }
329
330 /**
331  * Pack SOM attributes into the reply.
332  * Call under a DLM UPDATE lock.
333  */
334 static void mdt_pack_size2body(struct mdt_thread_info *info,
335                                struct mdt_object *mo)
336 {
337         struct mdt_body *b;
338         struct md_attr *ma = &info->mti_attr;
339
340         LASSERT(ma->ma_attr.la_valid & LA_MODE);
341         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
342
343         /* Check whether Size-on-MDS is supported, this is a regular file,
344          * SOM is enabled on the object, and the SOM cache exists and is valid.
345          * Otherwise do not pack Size-on-MDS attributes into the reply. */
346         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
347             !S_ISREG(ma->ma_attr.la_mode) ||
348             !mdt_object_is_som_enabled(mo) ||
349             !(ma->ma_valid & MA_SOM))
350                 return;
351
352         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
353         b->size = ma->ma_som->msd_size;
354         b->blocks = ma->ma_som->msd_blocks;
355 }
356
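/*
 * Copy the inode attributes from @attr into the reply body @b, set the
 * matching OBD_MD_* valid bits and run the result through
 * mdt_body_reverse_idmap().
 */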
357 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
358                         const struct lu_attr *attr, const struct lu_fid *fid)
359 {
360         struct md_attr *ma = &info->mti_attr;
361
362         LASSERT(ma->ma_valid & MA_INODE);
363
364         b->atime      = attr->la_atime;
365         b->mtime      = attr->la_mtime;
366         b->ctime      = attr->la_ctime;
367         b->mode       = attr->la_mode;
368         b->size       = attr->la_size;
369         b->blocks     = attr->la_blocks;
370         b->uid        = attr->la_uid;
371         b->gid        = attr->la_gid;
372         b->flags      = attr->la_flags;
373         b->nlink      = attr->la_nlink;
374         b->rdev       = attr->la_rdev;
375
376         /*XXX should pack the reply body according to lu_valid*/
377         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
378                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
379                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
380                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
381
382         if (!S_ISREG(attr->la_mode)) {
383                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
384         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
385                 /* this means no objects are allocated on OSTs */
386                 LASSERT(!(ma->ma_valid & MA_LOV));
387                 /* just ignore blocks occupied by extended attributes on MDS */
388                 b->blocks = 0;
389                 /* if no object is allocated on OSTs, the size on the MDS is valid. b=22272 */
390                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
391         }
392
393         if (fid) {
394                 b->fid1 = *fid;
395                 b->valid |= OBD_MD_FLID;
396                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
397                                 PFID(fid), b->nlink, b->mode, b->size);
398         }
399
400         if (info)
401                 mdt_body_reverse_idmap(info, b);
402
403         if (b->valid & OBD_MD_FLSIZE)
404                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
405                        PFID(fid), (unsigned long long)b->size);
406 }
407
408 static inline int mdt_body_has_lov(const struct lu_attr *la,
409                                    const struct mdt_body *body)
410 {
411         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
412                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
413 }
414
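/*
 * Adjust the reply for clients that do not support the layout lock:
 * clear the LOV layout generation so such clients are not confused by it.
 */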
415 void mdt_client_compatibility(struct mdt_thread_info *info)
416 {
417         struct mdt_body       *body;
418         struct ptlrpc_request *req = mdt_info_req(info);
419         struct obd_export     *exp = req->rq_export;
420         struct md_attr        *ma = &info->mti_attr;
421         struct lu_attr        *la = &ma->ma_attr;
422         ENTRY;
423
424         if (exp_connect_layout(exp))
425                 /* the client can deal with 16-bit lmm_stripe_count */
426                 RETURN_EXIT;
427
428         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
429
430         if (!mdt_body_has_lov(la, body))
431                 RETURN_EXIT;
432
433         /* now we have a reply with a LOV for a client not compatible with
434          * the layout lock, so we have to clear the layout generation number */
435         if (S_ISREG(la->la_mode))
436                 ma->ma_lmm->lmm_layout_gen = 0;
437         EXIT;
438 }
439
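/*
 * Read the xattr @name of @o into the per-thread mti_big_lmm buffer,
 * probing the size first and growing the buffer as needed.
 */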
440 static int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
441                              char *name)
442 {
443         const struct lu_env *env = info->mti_env;
444         int rc;
445         ENTRY;
446
447         LASSERT(info->mti_big_lmm_used == 0);
448         rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
449         if (rc < 0)
450                 RETURN(rc);
451
452         /* big_lmm may need to be grown */
453         if (info->mti_big_lmmsize < rc) {
454                 int size = size_roundup_power2(rc);
455
456                 if (info->mti_big_lmmsize > 0) {
457                         /* free old buffer */
458                         LASSERT(info->mti_big_lmm);
459                         OBD_FREE_LARGE(info->mti_big_lmm,
460                                        info->mti_big_lmmsize);
461                         info->mti_big_lmm = NULL;
462                         info->mti_big_lmmsize = 0;
463                 }
464
465                 OBD_ALLOC_LARGE(info->mti_big_lmm, size);
466                 if (info->mti_big_lmm == NULL)
467                         RETURN(-ENOMEM);
468                 info->mti_big_lmmsize = size;
469         }
470         LASSERT(info->mti_big_lmmsize >= rc);
471
472         info->mti_buf.lb_buf = info->mti_big_lmm;
473         info->mti_buf.lb_len = info->mti_big_lmmsize;
474         rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);
475
476         RETURN(rc);
477 }
478
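/*
 * Read the LOV EA of @o into ma->ma_lmm; on -ERANGE fall back to the big
 * buffer via mdt_big_xattr_get() and raise mdt_max_mdsize accordingly.
 */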
479 int mdt_attr_get_lov(struct mdt_thread_info *info,
480                      struct mdt_object *o, struct md_attr *ma)
481 {
482         struct md_object *next = mdt_object_child(o);
483         struct lu_buf    *buf = &info->mti_buf;
484         int rc;
485
486         buf->lb_buf = ma->ma_lmm;
487         buf->lb_len = ma->ma_lmm_size;
488         rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
489         if (rc > 0) {
490                 ma->ma_lmm_size = rc;
491                 ma->ma_valid |= MA_LOV;
492                 rc = 0;
493         } else if (rc == -ENODATA) {
494                 /* no LOV EA */
495                 rc = 0;
496         } else if (rc == -ERANGE) {
497                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LOV);
498                 if (rc > 0) {
499                         info->mti_big_lmm_used = 1;
500                         ma->ma_valid |= MA_LOV;
501                         ma->ma_lmm = info->mti_big_lmm;
502                         ma->ma_lmm_size = rc;
503                         /* update mdt_max_mdsize so all clients
504                          * will be aware of it */
505                         if (info->mti_mdt->mdt_max_mdsize < rc)
506                                 info->mti_mdt->mdt_max_mdsize = rc;
507                         rc = 0;
508                 }
509         }
510
511         return rc;
512 }
513
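/*
 * Extract the parent FID of @o from the first link EA entry, swabbing the
 * header if it was written with the opposite endianness.
 */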
514 int mdt_attr_get_pfid(struct mdt_thread_info *info,
515                       struct mdt_object *o, struct lu_fid *pfid)
516 {
517         struct lu_buf           *buf = &info->mti_buf;
518         struct link_ea_header   *leh;
519         struct link_ea_entry    *lee;
520         int                      rc;
521         ENTRY;
522
523         buf->lb_buf = info->mti_big_lmm;
524         buf->lb_len = info->mti_big_lmmsize;
525         rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
526                           buf, XATTR_NAME_LINK);
527         /* ignore errors, MA_PFID won't be set and it is
528          * up to the caller to treat this as an error */
529         if (rc == -ERANGE || buf->lb_len == 0) {
530                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
531                 buf->lb_buf = info->mti_big_lmm;
532                 buf->lb_len = info->mti_big_lmmsize;
533         }
534
535         if (rc < 0)
536                 RETURN(rc);
537         if (rc < sizeof(*leh)) {
538                 CERROR("short LinkEA on "DFID": rc = %d\n",
539                        PFID(mdt_object_fid(o)), rc);
540                 RETURN(-ENODATA);
541         }
542
543         leh = (struct link_ea_header *) buf->lb_buf;
544         lee = (struct link_ea_entry *)(leh + 1);
545         if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
546                 leh->leh_magic = LINK_EA_MAGIC;
547                 leh->leh_reccount = __swab32(leh->leh_reccount);
548                 leh->leh_len = __swab64(leh->leh_len);
549         }
550         if (leh->leh_magic != LINK_EA_MAGIC)
551                 RETURN(-EINVAL);
552         if (leh->leh_reccount == 0)
553                 RETURN(-ENODATA);
554
555         memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
556         fid_be_to_cpu(pfid, pfid);
557
558         RETURN(0);
559 }
560
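/*
 * Fetch the attributes requested in ma->ma_need (inode, parent FID,
 * LOV/LMV/SOM/HSM EAs, default ACL) and record what was found in
 * ma->ma_valid.
 */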
561 int mdt_attr_get_complex(struct mdt_thread_info *info,
562                          struct mdt_object *o, struct md_attr *ma)
563 {
564         const struct lu_env *env = info->mti_env;
565         struct md_object    *next = mdt_object_child(o);
566         struct lu_buf       *buf = &info->mti_buf;
567         u32                  mode = lu_object_attr(&next->mo_lu);
568         int                  need = ma->ma_need;
569         int                  rc = 0, rc2;
570         ENTRY;
571
572         ma->ma_valid = 0;
573
574         if (need & MA_INODE) {
575                 ma->ma_need = MA_INODE;
576                 rc = mo_attr_get(env, next, ma);
577                 if (rc)
578                         GOTO(out, rc);
579                 ma->ma_valid |= MA_INODE;
580         }
581
582         if (need & MA_PFID) {
583                 rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
584                 if (rc == 0)
585                         ma->ma_valid |= MA_PFID;
586                 /* ignore this error, parent fid is not mandatory */
587                 rc = 0;
588         }
589
590         if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
591                 rc = mdt_attr_get_lov(info, o, ma);
592                 if (rc)
593                         GOTO(out, rc);
594         }
595
596         if (need & MA_LMV && S_ISDIR(mode)) {
597                 buf->lb_buf = ma->ma_lmv;
598                 buf->lb_len = ma->ma_lmv_size;
599                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
600                 if (rc2 > 0) {
601                         ma->ma_lmv_size = rc2;
602                         ma->ma_valid |= MA_LMV;
603                 } else if (rc2 == -ENODATA) {
604                         /* no LMV EA */
605                         ma->ma_lmv_size = 0;
606                 } else
607                         GOTO(out, rc = rc2);
608         }
609
610         if (need & MA_SOM && S_ISREG(mode)) {
611                 buf->lb_buf = info->mti_xattr_buf;
612                 buf->lb_len = sizeof(info->mti_xattr_buf);
613                 CLASSERT(sizeof(struct som_attrs) <=
614                          sizeof(info->mti_xattr_buf));
615                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_SOM);
616                 rc2 = lustre_buf2som(info->mti_xattr_buf, rc2, ma->ma_som);
617                 if (rc2 == 0)
618                         ma->ma_valid |= MA_SOM;
619                 else if (rc2 < 0 && rc2 != -ENODATA)
620                         GOTO(out, rc = rc2);
621         }
622
623         if (need & MA_HSM && S_ISREG(mode)) {
624                 buf->lb_buf = info->mti_xattr_buf;
625                 buf->lb_len = sizeof(info->mti_xattr_buf);
626                 CLASSERT(sizeof(struct hsm_attrs) <=
627                          sizeof(info->mti_xattr_buf));
628                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM);
629                 rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm);
630                 if (rc2 == 0)
631                         ma->ma_valid |= MA_HSM;
632                 else if (rc2 < 0 && rc2 != -ENODATA)
633                         GOTO(out, rc = rc2);
634         }
635
636 #ifdef CONFIG_FS_POSIX_ACL
637         if (need & MA_ACL_DEF && S_ISDIR(mode)) {
638                 buf->lb_buf = ma->ma_acl;
639                 buf->lb_len = ma->ma_acl_size;
640                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
641                 if (rc2 > 0) {
642                         ma->ma_acl_size = rc2;
643                         ma->ma_valid |= MA_ACL_DEF;
644                 } else if (rc2 == -ENODATA) {
645                         /* no ACLs */
646                         ma->ma_acl_size = 0;
647                 } else
648                         GOTO(out, rc = rc2);
649         }
650 #endif
651 out:
652         ma->ma_need = need;
653         CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
654                rc, ma->ma_valid, ma->ma_lmm);
655         RETURN(rc);
656 }
657
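/*
 * Fill the getattr reply for @o: pack the attributes plus, as requested by
 * the client, striping or symlink target, ACLs, remote permissions and an
 * MDS capability.
 */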
658 static int mdt_getattr_internal(struct mdt_thread_info *info,
659                                 struct mdt_object *o, int ma_need)
660 {
661         struct md_object        *next = mdt_object_child(o);
662         const struct mdt_body   *reqbody = info->mti_body;
663         struct ptlrpc_request   *req = mdt_info_req(info);
664         struct md_attr          *ma = &info->mti_attr;
665         struct lu_attr          *la = &ma->ma_attr;
666         struct req_capsule      *pill = info->mti_pill;
667         const struct lu_env     *env = info->mti_env;
668         struct mdt_body         *repbody;
669         struct lu_buf           *buffer = &info->mti_buf;
670         int                     rc;
671         int                     is_root;
672         ENTRY;
673
674         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
675                 RETURN(err_serious(-ENOMEM));
676
677         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
678
679         ma->ma_valid = 0;
680
681         if (mdt_object_remote(o)) {
682                 /* This object is located on a remote node. */
683                 /* Return -EIO for old clients. */
684                 if (!mdt_is_dne_client(req->rq_export))
685                         GOTO(out, rc = -EIO);
686
687                 repbody->fid1 = *mdt_object_fid(o);
688                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
689                 GOTO(out, rc = 0);
690         }
691
692         buffer->lb_len = reqbody->eadatasize;
693         if (buffer->lb_len > 0)
694                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
695         else
696                 buffer->lb_buf = NULL;
697
698         /* If it is a directory object and the client requested the MEA, return it */
699         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
700             reqbody->valid & OBD_MD_MEA) {
701                 /* Assumption: MDT_MD size is enough for lmv size. */
702                 ma->ma_lmv = buffer->lb_buf;
703                 ma->ma_lmv_size = buffer->lb_len;
704                 ma->ma_need = MA_LMV | MA_INODE;
705         } else {
706                 ma->ma_lmm = buffer->lb_buf;
707                 ma->ma_lmm_size = buffer->lb_len;
708                 ma->ma_need = MA_LOV | MA_INODE | MA_HSM;
709         }
710
711         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
712             reqbody->valid & OBD_MD_FLDIREA  &&
713             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
714                 /* get default stripe info for this dir. */
715                 ma->ma_need |= MA_LOV_DEF;
716         }
717         ma->ma_need |= ma_need;
718         if (ma->ma_need & MA_SOM)
719                 ma->ma_som = &info->mti_u.som.data;
720
721         rc = mdt_attr_get_complex(info, o, ma);
722         if (unlikely(rc)) {
723                 CERROR("getattr error for "DFID": %d\n",
724                         PFID(mdt_object_fid(o)), rc);
725                 RETURN(rc);
726         }
727
728         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
729
730         /* the Lustre protocol is supposed to return the default striping
731          * on the user-visible root if explicitly requested */
732         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
733             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
734                 struct lu_fid      rootfid;
735                 struct mdt_object *root;
736                 struct mdt_device *mdt = info->mti_mdt;
737
738                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
739                 if (rc)
740                         RETURN(rc);
741                 root = mdt_object_find(env, mdt, &rootfid);
742                 if (IS_ERR(root))
743                         RETURN(PTR_ERR(root));
744                 rc = mdt_attr_get_lov(info, root, ma);
745                 mdt_object_put(info->mti_env, root);
746                 if (unlikely(rc)) {
747                         CERROR("getattr error for "DFID": %d\n",
748                                         PFID(mdt_object_fid(o)), rc);
749                         RETURN(rc);
750                 }
751         }
752
753         if (likely(ma->ma_valid & MA_INODE))
754                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
755         else
756                 RETURN(-EFAULT);
757
758         if (mdt_body_has_lov(la, reqbody)) {
759                 if (ma->ma_valid & MA_LOV) {
760                         LASSERT(ma->ma_lmm_size);
761                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
762                         repbody->eadatasize = ma->ma_lmm_size;
763                         if (S_ISDIR(la->la_mode))
764                                 repbody->valid |= OBD_MD_FLDIREA;
765                         else
766                                 repbody->valid |= OBD_MD_FLEASIZE;
767                 }
768                 if (ma->ma_valid & MA_LMV) {
769                         LASSERT(S_ISDIR(la->la_mode));
770                         repbody->eadatasize = ma->ma_lmv_size;
771                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
772                 }
773         } else if (S_ISLNK(la->la_mode) &&
774                    reqbody->valid & OBD_MD_LINKNAME) {
775                 buffer->lb_buf = ma->ma_lmm;
776                 /* eadatasize from the client includes the NULL terminator,
777                  * so there is no need to read it */
778                 buffer->lb_len = reqbody->eadatasize - 1;
779                 rc = mo_readlink(env, next, buffer);
780                 if (unlikely(rc <= 0)) {
781                         CERROR("readlink failed: %d\n", rc);
782                         rc = -EFAULT;
783                 } else {
784                         int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
785
786                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
787                                 rc -= 2;
788                         repbody->valid |= OBD_MD_LINKNAME;
789                         /* we need to report the size back including the
790                          * NULL terminator because the client expects that */
791                         repbody->eadatasize = rc + 1;
792                         if (repbody->eadatasize != reqbody->eadatasize)
793                                 CERROR("Read shorter symlink %d, expected %d\n",
794                                        rc, reqbody->eadatasize - 1);
795                         /* NULL terminate */
796                         ((char *)ma->ma_lmm)[rc] = 0;
797
798                         /* If the total CDEBUG() size is larger than a page, it
799                          * will print a warning to the console, avoid this by
800                          * printing just the last part of the symlink. */
801                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
802                                print_limit < rc ? "..." : "", print_limit,
803                                (char *)ma->ma_lmm + rc - print_limit, rc);
804                         rc = 0;
805                 }
806         }
807
808         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
809                 repbody->max_cookiesize = 0;
810                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
811                 repbody->valid |= OBD_MD_FLMODEASIZE;
812                 CDEBUG(D_INODE, "changing max_mdsize/max_cookiesize "
813                        "to %d:%d\n", repbody->max_mdsize,
814                        repbody->max_cookiesize);
815         }
816
817         if (exp_connect_rmtclient(info->mti_exp) &&
818             reqbody->valid & OBD_MD_FLRMTPERM) {
819                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
820
821                 /* mdt_getattr_lock only */
822                 rc = mdt_pack_remote_perm(info, o, buf);
823                 if (rc) {
824                         repbody->valid &= ~OBD_MD_FLRMTPERM;
825                         repbody->aclsize = 0;
826                         RETURN(rc);
827                 } else {
828                         repbody->valid |= OBD_MD_FLRMTPERM;
829                         repbody->aclsize = sizeof(struct mdt_remote_perm);
830                 }
831         }
832 #ifdef CONFIG_FS_POSIX_ACL
833         else if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
834                  (reqbody->valid & OBD_MD_FLACL)) {
835                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
836                 buffer->lb_len = req_capsule_get_size(pill,
837                                                       &RMF_ACL, RCL_SERVER);
838                 if (buffer->lb_len > 0) {
839                         rc = mo_xattr_get(env, next, buffer,
840                                           XATTR_NAME_ACL_ACCESS);
841                         if (rc < 0) {
842                                 if (rc == -ENODATA) {
843                                         repbody->aclsize = 0;
844                                         repbody->valid |= OBD_MD_FLACL;
845                                         rc = 0;
846                                 } else if (rc == -EOPNOTSUPP) {
847                                         rc = 0;
848                                 } else {
849                                         CERROR("got acl size: %d\n", rc);
850                                 }
851                         } else {
852                                 repbody->aclsize = rc;
853                                 repbody->valid |= OBD_MD_FLACL;
854                                 rc = 0;
855                         }
856                 }
857         }
858 #endif
859
860         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
861             info->mti_mdt->mdt_opts.mo_mds_capa &&
862             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
863                 struct lustre_capa *capa;
864
865                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
866                 LASSERT(capa);
867                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
868                 rc = mo_capa_get(env, next, capa, 0);
869                 if (rc)
870                         RETURN(rc);
871                 repbody->valid |= OBD_MD_FLMDSCAPA;
872         }
873
874 out:
875         if (rc == 0)
876                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
877
878         RETURN(rc);
879 }
880
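/*
 * Renew the OSS capability sent by the client, provided OSS capabilities
 * are enabled on this MDT and supported by the export.
 */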
881 static int mdt_renew_capa(struct mdt_thread_info *info)
882 {
883         struct mdt_object  *obj = info->mti_object;
884         struct mdt_body    *body;
885         struct lustre_capa *capa, *c;
886         int rc;
887         ENTRY;
888
889         /* if the object doesn't exist, or the server has disabled capabilities,
890          * return directly; the client will find the OBD_MD_FLOSSCAPA flag
891          * not set in body->valid.
892          */
893         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
894             !(exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA))
895                 RETURN(0);
896
897         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
898         LASSERT(body != NULL);
899
900         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
901         LASSERT(c);
902
903         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
904         LASSERT(capa);
905
906         *capa = *c;
907         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
908         if (rc == 0)
909                 body->valid |= OBD_MD_FLOSSCAPA;
910         RETURN(rc);
911 }
912
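/*
 * MDS_GETATTR handler: size the reply MD buffer (falling back to
 * mdt_max_mdsize for old clients), set up credentials and fill the reply
 * via mdt_getattr_internal(); OSS capability renewal is handled here too.
 */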
913 int mdt_getattr(struct mdt_thread_info *info)
914 {
915         struct mdt_object       *obj = info->mti_object;
916         struct req_capsule      *pill = info->mti_pill;
917         struct mdt_body         *reqbody;
918         struct mdt_body         *repbody;
919         mode_t                   mode;
920         int rc, rc2;
921         ENTRY;
922
923         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
924         LASSERT(reqbody);
925
926         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
927                 rc = req_capsule_server_pack(pill);
928                 if (unlikely(rc))
929                         RETURN(err_serious(rc));
930                 rc = mdt_renew_capa(info);
931                 GOTO(out_shrink, rc);
932         }
933
934         LASSERT(obj != NULL);
935         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
936
937         mode = lu_object_attr(&obj->mot_obj.mo_lu);
938
939         /* old clients may not report the needed easize; use the max value then */
940         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
941                              reqbody->eadatasize == 0 ?
942                              info->mti_mdt->mdt_max_mdsize :
943                              reqbody->eadatasize);
944
945         rc = req_capsule_server_pack(pill);
946         if (unlikely(rc != 0))
947                 RETURN(err_serious(rc));
948
949         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
950         LASSERT(repbody != NULL);
951         repbody->eadatasize = 0;
952         repbody->aclsize = 0;
953
954         if (reqbody->valid & OBD_MD_FLRMTPERM)
955                 rc = mdt_init_ucred(info, reqbody);
956         else
957                 rc = mdt_check_ucred(info);
958         if (unlikely(rc))
959                 GOTO(out_shrink, rc);
960
961         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
962
963         /*
964          * Don't check the capability at all, because rename might do a getattr
965          * on a remote object, and at that time no capability is available.
966          */
967         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
968         rc = mdt_getattr_internal(info, obj, 0);
969         if (reqbody->valid & OBD_MD_FLRMTPERM)
970                 mdt_exit_ucred(info);
971         EXIT;
972 out_shrink:
973         mdt_client_compatibility(info);
974         rc2 = mdt_fix_reply(info);
975         if (rc == 0)
976                 rc = rc2;
977         return rc;
978 }
979
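/*
 * MDS_IS_SUBDIR handler: pass the check down to mdo_is_subdir() and return
 * the last checked parent FID in repbody->fid1 so the client can continue
 * the walk on a remote MDT (-EREMOTE case).
 */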
980 int mdt_is_subdir(struct mdt_thread_info *info)
981 {
982         struct mdt_object     *o = info->mti_object;
983         struct req_capsule    *pill = info->mti_pill;
984         const struct mdt_body *body = info->mti_body;
985         struct mdt_body       *repbody;
986         int                    rc;
987         ENTRY;
988
989         LASSERT(o != NULL);
990
991         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
992
993         /*
994          * We save the last checked parent fid in @repbody->fid1 for the
995          * remote directory case.
996          */
997         LASSERT(fid_is_sane(&body->fid2));
998         LASSERT(mdt_object_exists(o) && !mdt_object_remote(o));
999         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1000                            &body->fid2, &repbody->fid1);
1001         if (rc == 0 || rc == -EREMOTE)
1002                 repbody->valid |= OBD_MD_FLID;
1003
1004         RETURN(rc);
1005 }
1006
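/*
 * MDS_SWAP_LAYOUTS handler: after permission checks, take EX layout locks
 * on both objects in FID order and ask the MD layer to swap their layouts.
 */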
1007 int mdt_swap_layouts(struct mdt_thread_info *info)
1008 {
1009         struct ptlrpc_request   *req = mdt_info_req(info);
1010         struct obd_export       *exp = req->rq_export;
1011         struct mdt_object       *o1, *o2, *o;
1012         struct mdt_lock_handle  *lh1, *lh2;
1013         struct mdc_swap_layouts *msl;
1014         int                      rc;
1015         ENTRY;
1016
1017         /* the client does not support the layout lock, so layout swapping
1018          * is disabled.
1019          * FIXME: there is a problem for old clients which don't support
1020          * the layout lock yet. If those clients have already opened the file,
1021          * they won't be notified at all, so the old layout may still be
1022          * used to do IO. This can be fixed after file release lands, by
1023          * doing an exclusive open and taking a full EX ibits lock. - Jinshan */
1024         if (!exp_connect_layout(exp))
1025                 RETURN(-EOPNOTSUPP);
1026
1027         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA1, RCL_CLIENT))
1028                 mdt_set_capainfo(info, 0, &info->mti_body->fid1,
1029                                  req_capsule_client_get(info->mti_pill,
1030                                                         &RMF_CAPA1));
1031
1032         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA2, RCL_CLIENT))
1033                 mdt_set_capainfo(info, 1, &info->mti_body->fid2,
1034                                  req_capsule_client_get(info->mti_pill,
1035                                                         &RMF_CAPA2));
1036
1037         o1 = info->mti_object;
1038         o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
1039                                 &info->mti_body->fid2);
1040         if (IS_ERR(o))
1041                 GOTO(out, rc = PTR_ERR(o));
1042
1043         if (mdt_object_remote(o) || !mdt_object_exists(o)) /* remote object */
1044                 GOTO(put, rc = -ENOENT);
1045
1046         rc = lu_fid_cmp(&info->mti_body->fid1, &info->mti_body->fid2);
1047         if (unlikely(rc == 0)) /* same file, you kidding me? no-op. */
1048                 GOTO(put, rc);
1049
1050         if (rc < 0)
1051                 swap(o1, o2);
1052
1053         /* permission check: make sure the calling process has permission
1054          * to write both files. */
1055         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
1056                                 MAY_WRITE);
1057         if (rc < 0)
1058                 GOTO(put, rc);
1059
1060         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
1061                                 MAY_WRITE);
1062         if (rc < 0)
1063                 GOTO(put, rc);
1064
1065         msl = req_capsule_client_get(info->mti_pill, &RMF_SWAP_LAYOUTS);
1066         if (msl == NULL)
1067                 GOTO(put, rc = -EPROTO);
1068
1069         lh1 = &info->mti_lh[MDT_LH_NEW];
1070         mdt_lock_reg_init(lh1, LCK_EX);
1071         lh2 = &info->mti_lh[MDT_LH_OLD];
1072         mdt_lock_reg_init(lh2, LCK_EX);
1073
1074         rc = mdt_object_lock(info, o1, lh1, MDS_INODELOCK_LAYOUT,
1075                              MDT_LOCAL_LOCK);
1076         if (rc < 0)
1077                 GOTO(put, rc);
1078
1079         rc = mdt_object_lock(info, o2, lh2, MDS_INODELOCK_LAYOUT,
1080                              MDT_LOCAL_LOCK);
1081         if (rc < 0)
1082                 GOTO(unlock1, rc);
1083
1084         rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
1085                              mdt_object_child(o2), msl->msl_flags);
1086         GOTO(unlock2, rc);
1087 unlock2:
1088         mdt_object_unlock(info, o2, lh2, rc);
1089 unlock1:
1090         mdt_object_unlock(info, o1, lh1, rc);
1091 put:
1092         mdt_object_put(info->mti_env, o);
1093 out:
1094         RETURN(rc);
1095 }
1096
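/*
 * If the client asked only for the FID (reqbody->valid == OBD_MD_FLID),
 * look @lname up in @parent, pack the FID on success and return 1 so the
 * caller can skip the full getattr path; return 0 otherwise.
 */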
1097 static int mdt_raw_lookup(struct mdt_thread_info *info,
1098                           struct mdt_object *parent,
1099                           const struct lu_name *lname,
1100                           struct ldlm_reply *ldlm_rep)
1101 {
1102         struct md_object *next = mdt_object_child(info->mti_object);
1103         const struct mdt_body *reqbody = info->mti_body;
1104         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1105         struct mdt_body *repbody;
1106         int rc;
1107         ENTRY;
1108
1109         if (reqbody->valid != OBD_MD_FLID)
1110                 RETURN(0);
1111
1112         LASSERT(!info->mti_cross_ref);
1113
1114         /* Only get the fid of this object by name */
1115         fid_zero(child_fid);
1116         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1117                         &info->mti_spec);
1118 #if 0
1119         /* XXX is raw_lookup possible as intent operation? */
1120         if (rc != 0) {
1121                 if (rc == -ENOENT)
1122                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1123                 RETURN(rc);
1124         } else
1125                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1126
1127         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1128 #endif
1129         if (rc == 0) {
1130                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1131                 repbody->fid1 = *child_fid;
1132                 repbody->valid = OBD_MD_FLID;
1133         }
1134         RETURN(1);
1135 }
1136
1137 /*
1138  * The UPDATE lock should be taken against the parent and released before exit;
1139  * the child_bits lock should be taken against the child and returned back:
1140  *            (1) a normal request should release the child lock;
1141  *            (2) an intent request will grant the lock to the client.
1142  */
1143 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1144                                  struct mdt_lock_handle *lhc,
1145                                  __u64 child_bits,
1146                                  struct ldlm_reply *ldlm_rep)
1147 {
1148         struct ptlrpc_request  *req       = mdt_info_req(info);
1149         struct mdt_body        *reqbody   = NULL;
1150         struct mdt_object      *parent    = info->mti_object;
1151         struct mdt_object      *child;
1152         struct md_object       *next      = mdt_object_child(parent);
1153         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1154         struct lu_name         *lname     = NULL;
1155         const char             *name      = NULL;
1156         int                     namelen   = 0;
1157         struct mdt_lock_handle *lhp       = NULL;
1158         struct ldlm_lock       *lock;
1159         struct ldlm_res_id     *res_id;
1160         int                     is_resent;
1161         int                     ma_need = 0;
1162         int                     rc;
1163
1164         ENTRY;
1165
1166         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1167         LASSERT(ergo(is_resent,
1168                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1169
1170         LASSERT(parent != NULL);
1171         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1172         if (name == NULL)
1173                 RETURN(err_serious(-EFAULT));
1174
1175         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1176                                        RCL_CLIENT) - 1;
1177         if (!info->mti_cross_ref) {
1178                 /*
1179                  * XXX: the check for "namelen == 0" is for getattr by fid
1180                  * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty name,
1181                  * that is, the name must contain at least one character and
1182                  * the terminating '\0'.
1183                  */
1184                 if (namelen == 0) {
1185                         reqbody = req_capsule_client_get(info->mti_pill,
1186                                                          &RMF_MDT_BODY);
1187                         if (unlikely(reqbody == NULL))
1188                                 RETURN(err_serious(-EFAULT));
1189
1190                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1191                                 RETURN(err_serious(-EINVAL));
1192
1193                         name = NULL;
1194                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1195                                "ldlm_rep = %p\n",
1196                                PFID(mdt_object_fid(parent)),
1197                                PFID(&reqbody->fid2), ldlm_rep);
1198                 } else {
1199                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1200                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1201                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1202                                name, ldlm_rep);
1203                 }
1204         }
1205         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1206
1207         if (unlikely(!mdt_object_exists(parent))) {
1208                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1209                                 &parent->mot_obj.mo_lu,
1210                                 "Parent doesn't exist!\n");
1211                 RETURN(-ESTALE);
1212         } else if (!info->mti_cross_ref) {
1213                 LASSERTF(!mdt_object_remote(parent),
1214                          "Parent "DFID" is on remote server\n",
1215                          PFID(mdt_object_fid(parent)));
1216         }
1217         if (lname) {
1218                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1219                 if (rc != 0) {
1220                         if (rc > 0)
1221                                 rc = 0;
1222                         RETURN(rc);
1223                 }
1224         }
1225
1226         if (info->mti_cross_ref) {
1227                 /* Only getattr on the child. Parent is on another node. */
1228                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1229                 child = parent;
1230                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1231                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1232
1233                 if (is_resent) {
1234                         /* Do not take lock for resent case. */
1235                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1236                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1237                                  lhc->mlh_reg_lh.cookie);
1238                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1239                                                 &lock->l_resource->lr_name));
1240                         LDLM_LOCK_PUT(lock);
1241                         rc = 0;
1242                 } else {
1243                         mdt_lock_handle_init(lhc);
1244                         mdt_lock_reg_init(lhc, LCK_PR);
1245
1246                         /*
1247                          * The object's name is on another MDS; no lookup lock
1248                          * is needed here, but an update lock is.
1249                          */
1250                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1251                         child_bits |= MDS_INODELOCK_PERM | MDS_INODELOCK_UPDATE;
1252
1253                         rc = mdt_object_lock(info, child, lhc, child_bits,
1254                                              MDT_LOCAL_LOCK);
1255                 }
1256                 if (rc == 0) {
1257                         /* Finally, we can get attr for child. */
1258                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1259                                          BYPASS_CAPA);
1260                         rc = mdt_getattr_internal(info, child, 0);
1261                         if (unlikely(rc != 0))
1262                                 mdt_object_unlock(info, child, lhc, 1);
1263                 }
1264                 RETURN(rc);
1265         }
1266
1267         if (lname) {
1268                 /* step 1: lock parent only if parent is a directory */
1269                 if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
1270                         lhp = &info->mti_lh[MDT_LH_PARENT];
1271                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1272                         rc = mdt_object_lock(info, parent, lhp,
1273                                              MDS_INODELOCK_UPDATE,
1274                                              MDT_LOCAL_LOCK);
1275                         if (unlikely(rc != 0))
1276                                 RETURN(rc);
1277                 }
1278
1279                 /* step 2: lookup child's fid by name */
1280                 fid_zero(child_fid);
1281                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1282                                 &info->mti_spec);
1283
1284                 if (rc != 0) {
1285                         if (rc == -ENOENT)
1286                                 mdt_set_disposition(info, ldlm_rep,
1287                                                     DISP_LOOKUP_NEG);
1288                         GOTO(out_parent, rc);
1289                 } else
1290                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1291         } else {
1292                 *child_fid = reqbody->fid2;
1293                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1294         }
1295
1296         /*
1297          * step 3: find the child object by fid and lock it,
1298          *         regardless of whether it is local or remote.
1299          */
1300         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1301
1302         if (unlikely(IS_ERR(child)))
1303                 GOTO(out_parent, rc = PTR_ERR(child));
1304         if (is_resent) {
1305                 /* Do not take lock for resent case. */
1306                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1307                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1308                          lhc->mlh_reg_lh.cookie);
1309
1310                 res_id = &lock->l_resource->lr_name;
1311                 if (!fid_res_name_eq(mdt_object_fid(child),
1312                                     &lock->l_resource->lr_name)) {
1313                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1314                                                  &lock->l_resource->lr_name),
1315                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1316                                  (unsigned long)res_id->name[0],
1317                                  (unsigned long)res_id->name[1],
1318                                  (unsigned long)res_id->name[2],
1319                                  PFID(mdt_object_fid(parent)));
1320                           CWARN("Resent, but still did not get the child lock, "
1321                                 "parent: "DFID" child: "DFID"\n",
1322                                 PFID(mdt_object_fid(parent)),
1323                                 PFID(mdt_object_fid(child)));
1324                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1325                           LDLM_LOCK_PUT(lock);
1326                           GOTO(relock, 0);
1327                 }
1328                 LDLM_LOCK_PUT(lock);
1329                 rc = 0;
1330         } else {
1331                 bool try_layout = false;
1332
1333 relock:
1334                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1335                 mdt_lock_handle_init(lhc);
1336                 mdt_lock_reg_init(lhc, LCK_PR);
1337
1338                 if (!mdt_object_exists(child)) {
1339                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1340                                         &child->mot_obj.mo_lu,
1341                                         "Object doesn't exist!\n");
1342                         GOTO(out_child, rc = -ENOENT);
1343                 }
1344
1345                 if (!(child_bits & MDS_INODELOCK_UPDATE) &&
1346                       mdt_object_exists(child) && !mdt_object_remote(child)) {
1347                         struct md_attr *ma = &info->mti_attr;
1348
1349                         ma->ma_valid = 0;
1350                         ma->ma_need = MA_INODE;
1351                         rc = mdt_attr_get_complex(info, child, ma);
1352                         if (unlikely(rc != 0))
1353                                 GOTO(out_child, rc);
1354
1355                         /* If the file has not been changed for some time,
1356                          * we return not only a LOOKUP lock but also an
1357                          * UPDATE lock, which might save us an RPC on a later
1358                          * STAT. For directories, it also lets negative
1359                          * dentry caching start working for this dir. */
1360                         if (ma->ma_valid & MA_INODE &&
1361                             ma->ma_attr.la_valid & LA_CTIME &&
1362                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1363                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1364                                 child_bits |= MDS_INODELOCK_UPDATE;
1365                 }
1366
1367                 /* layout lock must be granted in a best-effort way
1368                  * for IT operations */
1369                 LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT));
1370                 if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) &&
1371                     exp_connect_layout(info->mti_exp) &&
1372                     S_ISREG(lu_object_attr(&child->mot_obj.mo_lu)) &&
1373                     ldlm_rep != NULL) {
1374                         /* try to grant layout lock for regular file. */
1375                         try_layout = true;
1376                 }
1377
1378                 rc = 0;
1379                 if (try_layout) {
1380                         child_bits |= MDS_INODELOCK_LAYOUT;
1381                         /* try layout lock, it may fail to be granted due to
1382                          * contention at LOOKUP or UPDATE */
1383                         if (!mdt_object_lock_try(info, child, lhc, child_bits,
1384                                                  MDT_CROSS_LOCK)) {
1385                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1386                                 LASSERT(child_bits != 0);
1387                                 rc = mdt_object_lock(info, child, lhc,
1388                                                 child_bits, MDT_CROSS_LOCK);
1389                         } else {
1390                                 ma_need |= MA_LOV;
1391                         }
1392                 } else {
1393                         rc = mdt_object_lock(info, child, lhc, child_bits,
1394                                                 MDT_CROSS_LOCK);
1395                 }
1396                 if (unlikely(rc != 0))
1397                         GOTO(out_child, rc);
1398         }
1399
1400         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1401         /* Get MA_SOM attributes if update lock is given. */
1402         if (lock &&
1403             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1404             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1405                 ma_need |= MA_SOM;
1406
1407         /* finally, we can get attr for child. */
1408         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1409         rc = mdt_getattr_internal(info, child, ma_need);
1410         if (unlikely(rc != 0)) {
1411                 mdt_object_unlock(info, child, lhc, 1);
1412         } else if (lock) {
1413                 /* Debugging code. */
1414                 res_id = &lock->l_resource->lr_name;
1415                 LDLM_DEBUG(lock, "Returning lock to client");
1416                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1417                                          &lock->l_resource->lr_name),
1418                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1419                          (unsigned long)res_id->name[0],
1420                          (unsigned long)res_id->name[1],
1421                          (unsigned long)res_id->name[2],
1422                          PFID(mdt_object_fid(child)));
1423                 if (mdt_object_exists(child) && !mdt_object_remote(child))
1424                         mdt_pack_size2body(info, child);
1425         }
1426         if (lock)
1427                 LDLM_LOCK_PUT(lock);
1428
1429         EXIT;
1430 out_child:
1431         mdt_object_put(info->mti_env, child);
1432 out_parent:
1433         if (lhp)
1434                 mdt_object_unlock(info, parent, lhp, 1);
1435         return rc;
1436 }
1437
1438 /* normal handler: should release the child lock */
1439 int mdt_getattr_name(struct mdt_thread_info *info)
1440 {
1441         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1442         struct mdt_body        *reqbody;
1443         struct mdt_body        *repbody;
1444         int rc, rc2;
1445         ENTRY;
1446
1447         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1448         LASSERT(reqbody != NULL);
1449         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1450         LASSERT(repbody != NULL);
1451
1452         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1453         repbody->eadatasize = 0;
1454         repbody->aclsize = 0;
1455
1456         rc = mdt_init_ucred(info, reqbody);
1457         if (unlikely(rc))
1458                 GOTO(out_shrink, rc);
1459
1460         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1461         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1462                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1463                 lhc->mlh_reg_lh.cookie = 0;
1464         }
1465         mdt_exit_ucred(info);
1466         EXIT;
1467 out_shrink:
1468         mdt_client_compatibility(info);
1469         rc2 = mdt_fix_reply(info);
1470         if (rc == 0)
1471                 rc = rc2;
1472         return rc;
1473 }
1474
1475 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1476                          void *karg, void *uarg);
1477
1478 int mdt_set_info(struct mdt_thread_info *info)
1479 {
1480         struct ptlrpc_request *req = mdt_info_req(info);
1481         char *key;
1482         void *val;
1483         int keylen, vallen, rc = 0;
1484         ENTRY;
1485
1486         rc = req_capsule_server_pack(info->mti_pill);
1487         if (rc)
1488                 RETURN(rc);
1489
1490         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1491         if (key == NULL) {
1492                 DEBUG_REQ(D_HA, req, "no set_info key");
1493                 RETURN(-EFAULT);
1494         }
1495
1496         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1497                                       RCL_CLIENT);
1498
1499         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1500         if (val == NULL) {
1501                 DEBUG_REQ(D_HA, req, "no set_info val");
1502                 RETURN(-EFAULT);
1503         }
1504
1505         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1506                                       RCL_CLIENT);
1507
1508         /* Swab any part of val you need to here */
1509         if (KEY_IS(KEY_READ_ONLY)) {
1510                 req->rq_status = 0;
1511                 lustre_msg_set_status(req->rq_repmsg, 0);
1512
1513                 spin_lock(&req->rq_export->exp_lock);
1514                 if (*(__u32 *)val)
1515                         *exp_connect_flags_ptr(req->rq_export) |=
1516                                 OBD_CONNECT_RDONLY;
1517                 else
1518                         *exp_connect_flags_ptr(req->rq_export) &=
1519                                 ~OBD_CONNECT_RDONLY;
1520                 spin_unlock(&req->rq_export->exp_lock);
1521
1522         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1523                 struct changelog_setinfo *cs =
1524                         (struct changelog_setinfo *)val;
1525                 if (vallen != sizeof(*cs)) {
1526                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1527                         RETURN(-EINVAL);
1528                 }
1529                 if (ptlrpc_req_need_swab(req)) {
1530                         __swab64s(&cs->cs_recno);
1531                         __swab32s(&cs->cs_id);
1532                 }
1533
1534                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1535                                    vallen, val, NULL);
1536                 lustre_msg_set_status(req->rq_repmsg, rc);
1537
1538         } else {
1539                 RETURN(-EINVAL);
1540         }
1541         RETURN(0);
1542 }
1543
1544 /**
1545  * Top-level handler for MDT connection requests.
1546  */
1547 int mdt_connect(struct mdt_thread_info *info)
1548 {
1549         int rc;
1550         struct obd_connect_data *reply;
1551         struct obd_export *exp;
1552         struct ptlrpc_request *req = mdt_info_req(info);
1553
1554         rc = target_handle_connect(req);
1555         if (rc != 0)
1556                 return err_serious(rc);
1557
1558         LASSERT(req->rq_export != NULL);
1559         info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1560         rc = mdt_init_sec_level(info);
1561         if (rc != 0) {
1562                 obd_disconnect(class_export_get(req->rq_export));
1563                 return rc;
1564         }
1565
1566         /* To avoid exposing partially initialized connection flags, changes up
1567          * to this point have been staged in reply->ocd_connect_flags. Now that
1568          * connection handling has completed successfully, atomically update
1569          * the connect flags in the shared export data structure. LU-1623 */
1570         reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
1571         exp = req->rq_export;
1572         spin_lock(&exp->exp_lock);
1573         *exp_connect_flags_ptr(exp) = reply->ocd_connect_flags;
1574         spin_unlock(&exp->exp_lock);
1575
1576         rc = mdt_init_idmap(info);
1577         if (rc != 0)
1578                 obd_disconnect(class_export_get(req->rq_export));
1579
1580         return rc;
1581 }
1582
1583 int mdt_disconnect(struct mdt_thread_info *info)
1584 {
1585         int rc;
1586         ENTRY;
1587
1588         rc = target_handle_disconnect(mdt_info_req(info));
1589         if (rc)
1590                 rc = err_serious(rc);
1591         RETURN(rc);
1592 }
1593
1594 static int mdt_sendpage(struct mdt_thread_info *info,
1595                         struct lu_rdpg *rdpg, int nob)
1596 {
1597         struct ptlrpc_request   *req = mdt_info_req(info);
1598         struct obd_export       *exp = req->rq_export;
1599         struct ptlrpc_bulk_desc *desc;
1600         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1601         int                      tmpcount;
1602         int                      tmpsize;
1603         int                      i;
1604         int                      rc;
1605         ENTRY;
1606
1607         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
1608                                     MDS_BULK_PORTAL);
1609         if (desc == NULL)
1610                 RETURN(-ENOMEM);
1611
1612         if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
1613                 /* old client requires reply size in units of its
1614                  * PAGE_SIZE, which is rdpg->rp_count */
1615                 nob = rdpg->rp_count;
1616
1617         for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1618              i++, tmpcount -= tmpsize) {
1619                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1620                 ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
1621         }
1622
1623         LASSERT(desc->bd_nob == nob);
1624         rc = target_bulk_io(exp, desc, lwi);
1625         ptlrpc_free_bulk_pin(desc);
1626         RETURN(rc);
1627 }
1628
1629 int mdt_readpage(struct mdt_thread_info *info)
1630 {
1631         struct mdt_object *object = info->mti_object;
1632         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1633         struct mdt_body   *reqbody;
1634         struct mdt_body   *repbody;
1635         int                rc;
1636         int                i;
1637         ENTRY;
1638
1639         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1640                 RETURN(err_serious(-ENOMEM));
1641
1642         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1643         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1644         if (reqbody == NULL || repbody == NULL)
1645                 RETURN(err_serious(-EFAULT));
1646
1647         /*
1648          * Prepare @rdpg before calling the lower layers and doing the
1649          * transfer itself. Here reqbody->size contains the offset at which
1650          * to start reading and reqbody->nlink the number of bytes to read.
1651          */
1652         rdpg->rp_hash = reqbody->size;
1653         if (rdpg->rp_hash != reqbody->size) {
1654                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1655                        rdpg->rp_hash, reqbody->size);
1656                 RETURN(-EFAULT);
1657         }
1658
1659         rdpg->rp_attrs = reqbody->mode;
1660         if (exp_connect_flags(info->mti_exp) & OBD_CONNECT_64BITHASH)
1661                 rdpg->rp_attrs |= LUDA_64BITHASH;
1662         rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
1663                                 exp_max_brw_size(info->mti_exp));
1664         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
1665                           CFS_PAGE_SHIFT;
1666         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1667         if (rdpg->rp_pages == NULL)
1668                 RETURN(-ENOMEM);
1669
1670         for (i = 0; i < rdpg->rp_npages; ++i) {
1671                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1672                 if (rdpg->rp_pages[i] == NULL)
1673                         GOTO(free_rdpg, rc = -ENOMEM);
1674         }
1675
1676         /* call lower layers to fill allocated pages with directory data */
1677         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1678         if (rc < 0)
1679                 GOTO(free_rdpg, rc);
1680
1681         /* send pages to client */
1682         rc = mdt_sendpage(info, rdpg, rc);
1683
1684         EXIT;
1685 free_rdpg:
1686
1687         for (i = 0; i < rdpg->rp_npages; i++)
1688                 if (rdpg->rp_pages[i] != NULL)
1689                         cfs_free_page(rdpg->rp_pages[i]);
1690         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1691
1692         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1693                 RETURN(0);
1694
1695         return rc;
1696 }
1697
1698 static int mdt_reint_internal(struct mdt_thread_info *info,
1699                               struct mdt_lock_handle *lhc,
1700                               __u32 op)
1701 {
1702         struct req_capsule      *pill = info->mti_pill;
1703         struct mdt_body         *repbody;
1704         int                      rc = 0, rc2;
1705         ENTRY;
1706
1707
1708         rc = mdt_reint_unpack(info, op);
1709         if (rc != 0) {
1710                 CERROR("Can't unpack reint, rc %d\n", rc);
1711                 RETURN(err_serious(rc));
1712         }
1713
1714         /* for replay (no_create) lmm is not needed, client has it already */
1715         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1716                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1717                                      info->mti_rr.rr_eadatalen);
1718
1719         /* llog cookies are always 0, the field is kept for compatibility */
1720         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1721                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);
1722
1723         rc = req_capsule_server_pack(pill);
1724         if (rc != 0) {
1725                 CERROR("Can't pack response, rc %d\n", rc);
1726                 RETURN(err_serious(rc));
1727         }
1728
1729         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1730                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1731                 LASSERT(repbody);
1732                 repbody->eadatasize = 0;
1733                 repbody->aclsize = 0;
1734         }
1735
1736         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1737
1738         /* for replay no cookie / lmm needed, because the client has them already */
1739         if (info->mti_spec.no_create)
1740                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1741                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1742
1743         rc = mdt_init_ucred_reint(info);
1744         if (rc)
1745                 GOTO(out_shrink, rc);
1746
1747         rc = mdt_fix_attr_ucred(info, op);
1748         if (rc != 0)
1749                 GOTO(out_ucred, rc = err_serious(rc));
1750
1751         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1752                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1753                 GOTO(out_ucred, rc);
1754         }
1755         rc = mdt_reint_rec(info, lhc);
1756         EXIT;
1757 out_ucred:
1758         mdt_exit_ucred(info);
1759 out_shrink:
1760         mdt_client_compatibility(info);
1761         rc2 = mdt_fix_reply(info);
1762         if (rc == 0)
1763                 rc = rc2;
1764         return rc;
1765 }
1766
1767 static long mdt_reint_opcode(struct mdt_thread_info *info,
1768                              const struct req_format **fmt)
1769 {
1770         struct mdt_rec_reint *rec;
1771         long opc;
1772
1773         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1774         if (rec != NULL) {
1775                 opc = rec->rr_opcode;
1776                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opt = %ld", opc);
1777                 if (opc < REINT_MAX && fmt[opc] != NULL)
1778                         req_capsule_extend(info->mti_pill, fmt[opc]);
1779                 else {
1780                         CERROR("%s: Unsupported opcode '%ld' from client '%s': "
1781                                "rc = %d\n", mdt_obd_name(info->mti_mdt), opc,
1782                                info->mti_mdt->mdt_ldlm_client->cli_name,
1783                                -EFAULT);
1784                         opc = err_serious(-EFAULT);
1785                 }
1786         } else {
1787                 opc = err_serious(-EFAULT);
1788         }
1789         return opc;
1790 }
1791
1792 int mdt_reint(struct mdt_thread_info *info)
1793 {
1794         long opc;
1795         int  rc;
1796
1797         static const struct req_format *reint_fmts[REINT_MAX] = {
1798                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1799                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1800                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1801                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1802                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1803                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1804                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR,
1805                 [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK
1806         };
1807
1808         ENTRY;
1809
1810         opc = mdt_reint_opcode(info, reint_fmts);
1811         if (opc >= 0) {
1812                 /*
1813                  * No lock possible here from client to pass it to reint code
1814                  * path.
1815                  */
1816                 rc = mdt_reint_internal(info, NULL, opc);
1817         } else {
1818                 rc = opc;
1819         }
1820
1821         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1822         RETURN(rc);
1823 }
1824
1825 /* this should sync the whole device */
1826 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1827 {
1828         struct dt_device *dt = mdt->mdt_bottom;
1829         int rc;
1830         ENTRY;
1831
1832         rc = dt->dd_ops->dt_sync(env, dt);
1833         RETURN(rc);
1834 }
1835
1836 /* this should sync this object */
1837 static int mdt_object_sync(struct mdt_thread_info *info)
1838 {
1839         struct md_object *next;
1840         int rc;
1841         ENTRY;
1842
1843         if (!mdt_object_exists(info->mti_object)) {
1844                 CWARN("Non existing object  "DFID"!\n",
1845                       PFID(mdt_object_fid(info->mti_object)));
1846                 RETURN(-ESTALE);
1847         }
1848         next = mdt_object_child(info->mti_object);
1849         rc = mo_object_sync(info->mti_env, next);
1850
1851         RETURN(rc);
1852 }
1853
1854 int mdt_sync(struct mdt_thread_info *info)
1855 {
1856         struct ptlrpc_request *req = mdt_info_req(info);
1857         struct req_capsule *pill = info->mti_pill;
1858         struct mdt_body *body;
1859         int rc;
1860         ENTRY;
1861
1862         /* The fid may be zero, so we call req_capsule_set() manually */
1863         req_capsule_set(pill, &RQF_MDS_SYNC);
1864
1865         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1866         if (body == NULL)
1867                 RETURN(err_serious(-EINVAL));
1868
1869         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1870                 RETURN(err_serious(-ENOMEM));
1871
1872         if (fid_seq(&body->fid1) == 0) {
1873                 /* sync the whole device */
1874                 rc = req_capsule_server_pack(pill);
1875                 if (rc == 0)
1876                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1877                 else
1878                         rc = err_serious(rc);
1879         } else {
1880                 /* sync an object */
1881                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1882                 if (rc == 0) {
1883                         rc = mdt_object_sync(info);
1884                         if (rc == 0) {
1885                                 const struct lu_fid *fid;
1886                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1887
1888                                 info->mti_attr.ma_need = MA_INODE;
1889                                 info->mti_attr.ma_valid = 0;
1890                                 rc = mdt_attr_get_complex(info, info->mti_object,
1891                                                           &info->mti_attr);
1892                                 if (rc == 0) {
1893                                         body = req_capsule_server_get(pill,
1894                                                                 &RMF_MDT_BODY);
1895                                         fid = mdt_object_fid(info->mti_object);
1896                                         mdt_pack_attr2body(info, body, la, fid);
1897                                 }
1898                         }
1899                 } else
1900                         rc = err_serious(rc);
1901         }
1902         if (rc == 0)
1903                 mdt_counter_incr(req, LPROC_MDT_SYNC);
1904
1905         RETURN(rc);
1906 }
1907
1908 /*
1909  * Quotacheck handler.
1910  * in-kernel quotacheck isn't supported any more.
1911  */
1912 int mdt_quotacheck(struct mdt_thread_info *info)
1913 {
1914         struct obd_quotactl     *oqctl;
1915         int                      rc;
1916         ENTRY;
1917
1918         oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
1919         if (oqctl == NULL)
1920                 RETURN(err_serious(-EPROTO));
1921
1922         rc = req_capsule_server_pack(info->mti_pill);
1923         if (rc)
1924                 RETURN(err_serious(rc));
1925
1926         /* deprecated, not used any more */
1927         RETURN(-EOPNOTSUPP);
1928 }
1929
1930 /*
1931  * Handle quota control requests to consult current usage/limit, but also
1932  * to configure quota enforcement
1933  */
1934 int mdt_quotactl(struct mdt_thread_info *info)
1935 {
1936         struct obd_export       *exp  = info->mti_exp;
1937         struct req_capsule      *pill = info->mti_pill;
1938         struct obd_quotactl     *oqctl, *repoqc;
1939         int                      id, rc;
1940         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
1941         ENTRY;
1942
1943         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1944         if (oqctl == NULL)
1945                 RETURN(err_serious(-EPROTO));
1946
1947         rc = req_capsule_server_pack(pill);
1948         if (rc)
1949                 RETURN(err_serious(rc));
1950
1951         switch (oqctl->qc_cmd) {
1952         case Q_QUOTACHECK:
1953         case LUSTRE_Q_INVALIDATE:
1954         case LUSTRE_Q_FINVALIDATE:
1955         case Q_QUOTAON:
1956         case Q_QUOTAOFF:
1957         case Q_INITQUOTA:
1958                 /* deprecated, not used any more */
1959                 RETURN(-EOPNOTSUPP);
1960                 /* master quotactl */
1961         case Q_GETINFO:
1962         case Q_SETINFO:
1963         case Q_SETQUOTA:
1964         case Q_GETQUOTA:
1965                 if (qmt == NULL)
1966                         RETURN(-EOPNOTSUPP);
1967                 /* slave quotactl */
1968         case Q_GETOINFO:
1969         case Q_GETOQUOTA:
1970                 break;
1971         default:
1972                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
1973                 RETURN(-EFAULT);
1974         }
1975
1976         /* map uid/gid for remote client */
1977         id = oqctl->qc_id;
1978         if (exp_connect_rmtclient(exp)) {
1979                 struct lustre_idmap_table *idmap;
1980
1981                 idmap = mdt_req2med(mdt_info_req(info))->med_idmap;
1982
1983                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1984                              oqctl->qc_cmd != Q_GETINFO))
1985                         RETURN(-EPERM);
1986
1987                 if (oqctl->qc_type == USRQUOTA)
1988                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1989                                                      oqctl->qc_id);
1990                 else if (oqctl->qc_type == GRPQUOTA)
1991                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1992                                                      oqctl->qc_id);
1993                 else
1994                         RETURN(-EINVAL);
1995
1996                 if (id == CFS_IDMAP_NOTFOUND) {
1997                         CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
1998                         RETURN(-EACCES);
1999                 }
2000         }
2001
2002         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
2003         if (repoqc == NULL)
2004                 RETURN(err_serious(-EFAULT));
2005
2006         if (oqctl->qc_id != id)
2007                 swap(oqctl->qc_id, id);
2008
2009         switch (oqctl->qc_cmd) {
2010
2011         case Q_GETINFO:
2012         case Q_SETINFO:
2013         case Q_SETQUOTA:
2014         case Q_GETQUOTA:
2015                 /* forward quotactl request to QMT */
2016                 rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
2017                 break;
2018
2019         case Q_GETOINFO:
2020         case Q_GETOQUOTA:
2021                 /* slave quotactl */
2022                 rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
2023                                    oqctl);
2024                 break;
2025
2026         default:
2027                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
2028                 RETURN(-EFAULT);
2029         }
2030
2031         if (oqctl->qc_id != id)
2032                 swap(oqctl->qc_id, id);
2033
2034         *repoqc = *oqctl;
2035         RETURN(rc);
2036 }
2037
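/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the remote-client path above maps the requested uid/gid to a local id,
 * swaps the mapped id into the request before calling the quota layer, and
 * swaps it back before copying the result into the reply, so the client
 * always sees the id it originally asked about.  In isolation the idiom
 * looks like this ("mapped" and "do_quota_op" are hypothetical names):
 *
 *      int mapped = map_id(oqctl->qc_id);
 *
 *      if (oqctl->qc_id != mapped)
 *              swap(oqctl->qc_id, mapped);
 *      rc = do_quota_op(oqctl);
 *      if (oqctl->qc_id != mapped)
 *              swap(oqctl->qc_id, mapped);
 *      *repoqc = *oqctl;
 */
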
2038 /*
2039  * OBD PING and other handlers.
2040  */
2041 int mdt_obd_ping(struct mdt_thread_info *info)
2042 {
2043         int rc;
2044         ENTRY;
2045
2046         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2047
2048         rc = target_handle_ping(mdt_info_req(info));
2049         if (rc < 0)
2050                 rc = err_serious(rc);
2051         RETURN(rc);
2052 }
2053
2054 /*
2055  * OBD_IDX_READ handler
2056  */
2057 int mdt_obd_idx_read(struct mdt_thread_info *info)
2058 {
2059         struct mdt_device       *mdt = info->mti_mdt;
2060         struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
2061         struct idx_info         *req_ii, *rep_ii;
2062         int                      rc, i;
2063         ENTRY;
2064
2065         memset(rdpg, 0, sizeof(*rdpg));
2066         req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);
2067
2068         /* extract idx_info buffer from request & reply */
2069         req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
2070         if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
2071                 RETURN(err_serious(-EPROTO));
2072
2073         rc = req_capsule_server_pack(info->mti_pill);
2074         if (rc)
2075                 RETURN(err_serious(rc));
2076
2077         rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
2078         if (rep_ii == NULL)
2079                 RETURN(err_serious(-EFAULT));
2080         rep_ii->ii_magic = IDX_INFO_MAGIC;
2081
2082         /* extract hash to start with */
2083         rdpg->rp_hash = req_ii->ii_hash_start;
2084
2085         /* extract requested attributes */
2086         rdpg->rp_attrs = req_ii->ii_attrs;
2087
2088         /* check that fid packed in request is valid and supported */
2089         if (!fid_is_sane(&req_ii->ii_fid))
2090                 RETURN(-EINVAL);
2091         rep_ii->ii_fid = req_ii->ii_fid;
2092
2093         /* copy flags */
2094         rep_ii->ii_flags = req_ii->ii_flags;
2095
2096         /* compute number of pages to allocate, ii_count is the number of 4KB
2097          * containers */
2098         if (req_ii->ii_count <= 0)
2099                 GOTO(out, rc = -EFAULT);
2100         rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
2101                                exp_max_brw_size(info->mti_exp));
2102         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
2103
2104         /* allocate pages to store the containers */
2105         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2106         if (rdpg->rp_pages == NULL)
2107                 GOTO(out, rc = -ENOMEM);
2108         for (i = 0; i < rdpg->rp_npages; i++) {
2109                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
2110                 if (rdpg->rp_pages[i] == NULL)
2111                         GOTO(out, rc = -ENOMEM);
2112         }
2113
2114         /* populate pages with key/record pairs */
2115         rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
2116         if (rc < 0)
2117                 GOTO(out, rc);
2118
2119         LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
2120                  "asked %d > %d\n", rc, rdpg->rp_count);
2121
2122         /* send pages to client */
2123         rc = mdt_sendpage(info, rdpg, rc);
2124
2125         GOTO(out, rc);
2126 out:
2127         if (rdpg->rp_pages) {
2128                 for (i = 0; i < rdpg->rp_npages; i++)
2129                         if (rdpg->rp_pages[i])
2130                                 cfs_free_page(rdpg->rp_pages[i]);
2131                 OBD_FREE(rdpg->rp_pages,
2132                          rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2133         }
2134         return rc;
2135 }
2136
2137 int mdt_obd_log_cancel(struct mdt_thread_info *info)
2138 {
2139         return err_serious(-EOPNOTSUPP);
2140 }
2141
2142 int mdt_obd_qc_callback(struct mdt_thread_info *info)
2143 {
2144         return err_serious(-EOPNOTSUPP);
2145 }
2146
2147 /*
2148  * LLOG handlers.
2149  */
2150
2151 /** Clone llog ctxt from the child (mdd).
2152  * This allows remote llog (replicator) access.
2153  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child,
2154  * where the context was originally set up, or we can handle them directly.
2155  * We choose the latter, but that means any llog contexts set up by the
2156  * child must be accessible by the mdt.  So we clone the context into our
2157  * context list here.
2158  */
2159 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2160                                int idx)
2161 {
2162         struct md_device  *next = mdt->mdt_child;
2163         struct llog_ctxt *ctxt;
2164         int rc;
2165
2166         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2167                 return 0;
2168
2169         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2170         if (rc || ctxt == NULL) {
2171                 return 0;
2172         }
2173
2174         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2175         if (rc)
2176                 CERROR("Can't set mdt ctxt %d\n", rc);
2177
2178         return rc;
2179 }
2180
2181 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2182                                  struct mdt_device *mdt, int idx)
2183 {
2184         struct llog_ctxt *ctxt;
2185
2186         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2187         if (ctxt == NULL)
2188                 return 0;
2189         /* Put once for the get we just did, and once for the clone */
2190         llog_ctxt_put(ctxt);
2191         llog_ctxt_put(ctxt);
2192         return 0;
2193 }
2194
2195 int mdt_llog_create(struct mdt_thread_info *info)
2196 {
2197         int rc;
2198
2199         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2200         rc = llog_origin_handle_open(mdt_info_req(info));
2201         return (rc < 0 ? err_serious(rc) : rc);
2202 }
2203
2204 int mdt_llog_destroy(struct mdt_thread_info *info)
2205 {
2206         int rc;
2207
2208         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2209         rc = llog_origin_handle_destroy(mdt_info_req(info));
2210         return (rc < 0 ? err_serious(rc) : rc);
2211 }
2212
2213 int mdt_llog_read_header(struct mdt_thread_info *info)
2214 {
2215         int rc;
2216
2217         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2218         rc = llog_origin_handle_read_header(mdt_info_req(info));
2219         return (rc < 0 ? err_serious(rc) : rc);
2220 }
2221
2222 int mdt_llog_next_block(struct mdt_thread_info *info)
2223 {
2224         int rc;
2225
2226         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2227         rc = llog_origin_handle_next_block(mdt_info_req(info));
2228         return (rc < 0 ? err_serious(rc) : rc);
2229 }
2230
2231 int mdt_llog_prev_block(struct mdt_thread_info *info)
2232 {
2233         int rc;
2234
2235         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2236         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2237         return (rc < 0 ? err_serious(rc) : rc);
2238 }
2239
2240
2241 /*
2242  * DLM handlers.
2243  */
2244
2245 static struct ldlm_callback_suite cbs = {
2246         .lcs_completion = ldlm_server_completion_ast,
2247         .lcs_blocking   = ldlm_server_blocking_ast,
2248         .lcs_glimpse    = ldlm_server_glimpse_ast
2249 };
2250
2251 int mdt_enqueue(struct mdt_thread_info *info)
2252 {
2253         struct ptlrpc_request *req;
2254         int rc;
2255
2256         /*
2257          * info->mti_dlm_req already contains swapped and (if necessary)
2258          * converted dlm request.
2259          */
2260         LASSERT(info->mti_dlm_req != NULL);
2261
2262         req = mdt_info_req(info);
2263         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2264                                   req, info->mti_dlm_req, &cbs);
2265         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2266         return rc ? err_serious(rc) : req->rq_status;
2267 }
2268
2269 int mdt_convert(struct mdt_thread_info *info)
2270 {
2271         int rc;
2272         struct ptlrpc_request *req;
2273
2274         LASSERT(info->mti_dlm_req);
2275         req = mdt_info_req(info);
2276         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2277         return rc ? err_serious(rc) : req->rq_status;
2278 }
2279
2280 int mdt_bl_callback(struct mdt_thread_info *info)
2281 {
2282         CERROR("bl callbacks should not happen on MDS\n");
2283         LBUG();
2284         return err_serious(-EOPNOTSUPP);
2285 }
2286
2287 int mdt_cp_callback(struct mdt_thread_info *info)
2288 {
2289         CERROR("cp callbacks should not happen on MDS\n");
2290         LBUG();
2291         return err_serious(-EOPNOTSUPP);
2292 }
2293
2294 /*
2295  * sec context handlers
2296  */
2297 int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2298 {
2299         int rc;
2300
2301         rc = mdt_handle_idmap(info);
2302
2303         if (unlikely(rc)) {
2304                 struct ptlrpc_request *req = mdt_info_req(info);
2305                 __u32                  opc;
2306
2307                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2308                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2309                         sptlrpc_svc_ctx_invalidate(req);
2310         }
2311
2312         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2313
2314         return rc;
2315 }
2316
2317 /*
2318  * quota request handlers
2319  */
2320 int mdt_quota_dqacq(struct mdt_thread_info *info)
2321 {
2322         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2323         int                      rc;
2324         ENTRY;
2325
2326         if (qmt == NULL)
2327                 RETURN(err_serious(-EOPNOTSUPP));
2328
2329         rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2330         RETURN(rc);
2331 }
2332
2333 static struct mdt_object *mdt_obj(struct lu_object *o)
2334 {
2335         LASSERT(lu_device_is_mdt(o->lo_dev));
2336         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2337 }
2338
2339 struct mdt_object *mdt_object_new(const struct lu_env *env,
2340                                   struct mdt_device *d,
2341                                   const struct lu_fid *f)
2342 {
2343         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2344         struct lu_object *o;
2345         struct mdt_object *m;
2346         ENTRY;
2347
2348         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2349         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2350         if (unlikely(IS_ERR(o)))
2351                 m = (struct mdt_object *)o;
2352         else
2353                 m = mdt_obj(o);
2354         RETURN(m);
2355 }
2356
2357 struct mdt_object *mdt_object_find(const struct lu_env *env,
2358                                    struct mdt_device *d,
2359                                    const struct lu_fid *f)
2360 {
2361         struct lu_object *o;
2362         struct mdt_object *m;
2363         ENTRY;
2364
2365         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2366         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2367         if (unlikely(IS_ERR(o)))
2368                 m = (struct mdt_object *)o;
2369         else
2370                 m = mdt_obj(o);
2371         RETURN(m);
2372 }
2373
2374 /**
2375  * Asynchronous commit for mdt device.
2376  *
2377  * Pass the asynchronous commit call down the MDS stack.
2378  *
2379  * \param env environment
2380  * \param mdt the mdt device
2381  */
2382 static void mdt_device_commit_async(const struct lu_env *env,
2383                                     struct mdt_device *mdt)
2384 {
2385         struct dt_device *dt = mdt->mdt_bottom;
2386         int rc;
2387
2388         rc = dt->dd_ops->dt_commit_async(env, dt);
2389         if (unlikely(rc != 0))
2390                 CWARN("async commit start failed with rc = %d", rc);
2391 }
2392
2393 /**
2394  * Mark the lock as "synchronous".
2395  *
2396  * Mark the lock to defer the transaction commit to the unlock time.
2397  *
2398  * \param lock the lock to mark as "synchronous"
2399  *
2400  * \see mdt_is_lock_sync
2401  * \see mdt_save_lock
2402  */
2403 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2404 {
2405         lock->l_ast_data = (void*)1;
2406 }
2407
2408 /**
2409  * Check whether the lock is "synchronous" or not.
2410  *
2411  * \param lock the lock to check
2412  * \retval 1 the lock is "synchronous"
2413  * \retval 0 the lock isn't "synchronous"
2414  *
2415  * \see mdt_set_lock_sync
2416  * \see mdt_save_lock
2417  */
2418 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2419 {
2420         return lock->l_ast_data != NULL;
2421 }
2422
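/*
 * Usage sketch (editorial addition, not part of the original source): a
 * server-side lock is marked "synchronous" when a COS conflict is detected
 * in the blocking AST, and the flag is consumed when the lock is saved, at
 * which point an asynchronous commit is started:
 *
 *      In mdt_blocking_ast(), on a COS conflict:
 *              mdt_set_lock_sync(lock);
 *
 *      In mdt_save_lock(), before the lock reference is dropped:
 *              if (mdt_is_lock_sync(lock))
 *                      mdt_device_commit_async(info->mti_env, mdt);
 */
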
2423 /**
2424  * Blocking AST for mdt locks.
2425  *
2426  * Starts transaction commit if in case of COS lock conflict or
2427  * deffers such a commit to the mdt_save_lock.
2428  *
2429  * \param lock the lock which blocks a request or cancelling lock
2430  * \param desc unused
2431  * \param data unused
2432  * \param flag indicates whether this cancelling or blocking callback
2433  * \retval 0
2434  * \see ldlm_blocking_ast_nocheck
2435  */
2436 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2437                      void *data, int flag)
2438 {
2439         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2440         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2441         int rc;
2442         ENTRY;
2443
2444         if (flag == LDLM_CB_CANCELING)
2445                 RETURN(0);
2446         lock_res_and_lock(lock);
2447         if (lock->l_blocking_ast != mdt_blocking_ast) {
2448                 unlock_res_and_lock(lock);
2449                 RETURN(0);
2450         }
2451         if (mdt_cos_is_enabled(mdt) &&
2452             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2453             lock->l_blocking_lock != NULL &&
2454             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2455                 mdt_set_lock_sync(lock);
2456         }
2457         rc = ldlm_blocking_ast_nocheck(lock);
2458
2459         /* There is no lock conflict if l_blocking_lock == NULL; it
2460          * indicates a blocking ast sent from ldlm_lock_decref_internal
2461          * when the last reference to a local lock was released */
2462         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2463                 struct lu_env env;
2464
2465                 rc = lu_env_init(&env, LCT_LOCAL);
2466                 if (unlikely(rc != 0))
2467                         CWARN("lu_env initialization failed with rc = %d,"
2468                               "cannot start asynchronous commit\n", rc);
2469                 else
2470                         mdt_device_commit_async(&env, mdt);
2471                 lu_env_fini(&env);
2472         }
2473         RETURN(rc);
2474 }
2475
2476 int mdt_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2477                         void *data, int flag)
2478 {
2479         struct lustre_handle lockh;
2480         int               rc;
2481
2482         switch (flag) {
2483         case LDLM_CB_BLOCKING:
2484                 ldlm_lock2handle(lock, &lockh);
2485                 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
2486                 if (rc < 0) {
2487                         CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2488                         RETURN(rc);
2489                 }
2490                 break;
2491         case LDLM_CB_CANCELING:
2492                 LDLM_DEBUG(lock, "Revoke remote lock\n");
2493                 break;
2494         default:
2495                 LBUG();
2496         }
2497         RETURN(0);
2498 }
2499
2500 int mdt_remote_object_lock(struct mdt_thread_info *mti,
2501                            struct mdt_object *o, struct lustre_handle *lh,
2502                            ldlm_mode_t mode, __u64 ibits)
2503 {
2504         struct ldlm_enqueue_info *einfo = &mti->mti_einfo;
2505         ldlm_policy_data_t *policy = &mti->mti_policy;
2506         int rc = 0;
2507         ENTRY;
2508
2509         LASSERT(mdt_object_remote(o));
2510
2511         LASSERT((ibits & MDS_INODELOCK_UPDATE));
2512
2513         memset(einfo, 0, sizeof(*einfo));
2514         einfo->ei_type = LDLM_IBITS;
2515         einfo->ei_mode = mode;
2516         einfo->ei_cb_bl = mdt_md_blocking_ast;
2517         einfo->ei_cb_cp = ldlm_completion_ast;
2518
2519         memset(policy, 0, sizeof(*policy));
2520         policy->l_inodebits.bits = ibits;
2521
2522         rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo,
2523                             policy);
2524         RETURN(rc);
2525 }
2526
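/*
 * Usage sketch (editorial addition, not part of the original source):
 * callers needing an UPDATE lock on a remote object typically store the
 * returned handle in the mlh_rreg_lh slot of an mdt_lock_handle so that
 * mdt_object_unlock() below drops it together with the local locks
 * (assuming lh->mlh_rreg_mode is set to the same mode):
 *
 *      lh->mlh_rreg_mode = LCK_EX;
 *      rc = mdt_remote_object_lock(info, o, &lh->mlh_rreg_lh,
 *                                  lh->mlh_rreg_mode, MDS_INODELOCK_UPDATE);
 *      if (rc == 0) {
 *              ... operate on the remote object ...
 *              mdt_object_unlock(info, o, lh, 1);
 *      }
 */
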
2527 static int mdt_object_lock0(struct mdt_thread_info *info, struct mdt_object *o,
2528                             struct mdt_lock_handle *lh, __u64 ibits,
2529                             bool nonblock, int locality)
2530 {
2531         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2532         ldlm_policy_data_t *policy = &info->mti_policy;
2533         struct ldlm_res_id *res_id = &info->mti_res_id;
2534         __u64 dlmflags;
2535         int rc;
2536         ENTRY;
2537
2538         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2539         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2540         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2541         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2542
2543         if (mdt_object_remote(o)) {
2544                 if (locality == MDT_CROSS_LOCK) {
2545                         ibits &= ~(MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM);
2546                         ibits |= MDS_INODELOCK_LOOKUP;
2547                 } else {
2548                         LASSERTF(!(ibits &
2549                                   (MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM)),
2550                                 "%s: wrong bit "LPX64" for remote obj "DFID"\n",
2551                                 mdt_obd_name(info->mti_mdt), ibits,
2552                                 PFID(mdt_object_fid(o)));
2553                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2554                 }
2555                 /* No PDO lock on remote object */
2556                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2557         }
2558
2559         if (lh->mlh_type == MDT_PDO_LOCK) {
2560                 /* check existence after the object is locked */
2561                 if (mdt_object_exists(o) == 0) {
2562                         /* Non-existent object shouldn't have PDO lock */
2563                         RETURN(-ESTALE);
2564                 } else {
2565                         /* Non-dir object shouldn't have PDO lock */
2566                         if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
2567                                 RETURN(-ENOTDIR);
2568                 }
2569         }
2570
2571         memset(policy, 0, sizeof(*policy));
2572         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2573
2574         dlmflags = LDLM_FL_ATOMIC_CB;
2575         if (nonblock)
2576                 dlmflags |= LDLM_FL_BLOCK_NOWAIT;
2577
2578         /*
2579          * Take PDO lock on whole directory and build correct @res_id for lock
2580          * on part of directory.
2581          */
2582         if (lh->mlh_pdo_hash != 0) {
2583                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2584                 mdt_lock_pdo_mode(info, o, lh);
2585                 if (lh->mlh_pdo_mode != LCK_NL) {
2586                         /*
2587                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2588                          * is never going to be sent to client and we do not
2589                          * want it slowed down due to possible cancels.
2590                          */
2591                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2592                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2593                                           policy, res_id, dlmflags,
2594                                           &info->mti_exp->exp_handle.h_cookie);
2595                         if (unlikely(rc))
2596                                 RETURN(rc);
2597                 }
2598
2599                 /*
2600                  * Finish initializing res_id with the name hash that marks
2601                  * the part of the directory which is being modified.
2602                  */
2603                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2604         }
2605
2606         policy->l_inodebits.bits = ibits;
2607
2608         /*
2609          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2610          * going to be sent to the client. If it is, the mdt_intent_policy()
2611          * path will fix it up and turn the FL_LOCAL flag off.
2612          */
2613         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2614                           res_id, LDLM_FL_LOCAL_ONLY | dlmflags,
2615                           &info->mti_exp->exp_handle.h_cookie);
2616         if (rc)
2617                 mdt_object_unlock(info, o, lh, 1);
2618         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2619                  lh->mlh_pdo_hash != 0 &&
2620                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2621                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2622         }
2623
2624         RETURN(rc);
2625 }
2626
2627 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2628                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2629 {
2630         return mdt_object_lock0(info, o, lh, ibits, false, locality);
2631 }
2632
2633 int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *o,
2634                         struct mdt_lock_handle *lh, __u64 ibits, int locality)
2635 {
2636         struct mdt_lock_handle tmp = *lh;
2637         int rc;
2638
2639         rc = mdt_object_lock0(info, o, &tmp, ibits, true, locality);
2640         if (rc == 0)
2641                 *lh = tmp;
2642
2643         return rc == 0;
2644 }
2645
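/*
 * Usage sketch (editorial addition, not part of the original source): the
 * try-lock variant is meant for optional, best-effort bits.  A caller first
 * attempts the full bit set without blocking and, if it cannot be granted
 * immediately, falls back to a blocking enqueue without the optional bit,
 * as the getattr path above does for MDS_INODELOCK_LAYOUT:
 *
 *      child_bits |= MDS_INODELOCK_LAYOUT;
 *      if (!mdt_object_lock_try(info, child, lhc, child_bits,
 *                               MDT_CROSS_LOCK)) {
 *              child_bits &= ~MDS_INODELOCK_LAYOUT;
 *              rc = mdt_object_lock(info, child, lhc, child_bits,
 *                                   MDT_CROSS_LOCK);
 *      }
 */
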
2646 /**
2647  * Save a lock within request object.
2648  *
2649  * Keep the lock referenced until either a client ACK or a transaction
2650  * commit happens, or release the lock immediately, depending on the input
2651  * parameters. If COS is ON, a write lock is converted to a COS lock
2652  * before saving.
2653  *
2654  * \param info thread info object
2655  * \param h lock handle
2656  * \param mode lock mode
2657  * \param decref force immediate lock releasing
2658  */
2659 static
2660 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2661                    ldlm_mode_t mode, int decref)
2662 {
2663         ENTRY;
2664
2665         if (lustre_handle_is_used(h)) {
2666                 if (decref || !info->mti_has_trans ||
2667                     !(mode & (LCK_PW | LCK_EX))){
2668                         mdt_fid_unlock(h, mode);
2669                 } else {
2670                         struct mdt_device *mdt = info->mti_mdt;
2671                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2672                         struct ptlrpc_request *req = mdt_info_req(info);
2673                         int no_ack = 0;
2674
2675                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2676                                  h->cookie);
2677                         CDEBUG(D_HA, "request = %p reply state = %p"
2678                                " transno = "LPD64"\n",
2679                                req, req->rq_reply_state, req->rq_transno);
2680                         if (mdt_cos_is_enabled(mdt)) {
2681                                 no_ack = 1;
2682                                 ldlm_lock_downgrade(lock, LCK_COS);
2683                                 mode = LCK_COS;
2684                         }
2685                         ptlrpc_save_lock(req, h, mode, no_ack);
2686                         if (mdt_is_lock_sync(lock)) {
2687                                 CDEBUG(D_HA, "found sync-lock,"
2688                                        " async commit started\n");
2689                                 mdt_device_commit_async(info->mti_env,
2690                                                         mdt);
2691                         }
2692                         LDLM_LOCK_PUT(lock);
2693                 }
2694                 h->cookie = 0ull;
2695         }
2696
2697         EXIT;
2698 }
2699
2700 /**
2701  * Unlock mdt object.
2702  *
2703  * Immediately release the regular lock and the PDO lock, or save the
2704  * locks in the request and keep them referenced until client ACK or
2705  * transaction commit.
2706  *
2707  * \param info thread info object
2708  * \param o mdt object
2709  * \param lh mdt lock handle referencing regular and PDO locks
2710  * \param decref force immediate lock releasing
2711  */
2712 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2713                        struct mdt_lock_handle *lh, int decref)
2714 {
2715         ENTRY;
2716
2717         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2718         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2719
2720         if (lustre_handle_is_used(&lh->mlh_rreg_lh))
2721                 ldlm_lock_decref(&lh->mlh_rreg_lh, lh->mlh_rreg_mode);
2722
2723         EXIT;
2724 }
2725
2726 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2727                                         const struct lu_fid *f,
2728                                         struct mdt_lock_handle *lh,
2729                                         __u64 ibits)
2730 {
2731         struct mdt_object *o;
2732
2733         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2734         if (!IS_ERR(o)) {
2735                 int rc;
2736
2737                 rc = mdt_object_lock(info, o, lh, ibits,
2738                                      MDT_LOCAL_LOCK);
2739                 if (rc != 0) {
2740                         mdt_object_put(info->mti_env, o);
2741                         o = ERR_PTR(rc);
2742                 }
2743         }
2744         return o;
2745 }
2746
2747 void mdt_object_unlock_put(struct mdt_thread_info * info,
2748                            struct mdt_object * o,
2749                            struct mdt_lock_handle *lh,
2750                            int decref)
2751 {
2752         mdt_object_unlock(info, o, lh, decref);
2753         mdt_object_put(info->mti_env, o);
2754 }
2755
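/*
 * Usage sketch (editorial addition, not part of the original source): the
 * usual pattern pairs mdt_object_find_lock() with mdt_object_unlock_put(),
 * so that both the lock and the object reference are released on the way
 * out ("fid" here stands for whatever FID the caller has resolved):
 *
 *      struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_CHILD];
 *      struct mdt_object *o;
 *
 *      mdt_lock_reg_init(lh, LCK_PR);
 *      o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_LOOKUP);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      ... operate on o ...
 *      mdt_object_unlock_put(info, o, lh, rc != 0);
 */
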
2756 struct mdt_handler *mdt_handler_find(__u32 opc, struct mdt_opc_slice *supported)
2757 {
2758         struct mdt_opc_slice *s;
2759         struct mdt_handler   *h;
2760
2761         h = NULL;
2762         for (s = supported; s->mos_hs != NULL; s++) {
2763                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2764                         h = s->mos_hs + (opc - s->mos_opc_start);
2765                         if (likely(h->mh_opc != 0))
2766                                 LASSERTF(h->mh_opc == opc,
2767                                          "opcode mismatch %d != %d\n",
2768                                          h->mh_opc, opc);
2769                         else
2770                                 h = NULL; /* unsupported opc */
2771                         break;
2772                 }
2773         }
2774         return h;
2775 }
2776
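/*
 * Usage sketch (editorial addition, not part of the original source):
 * dispatch code looks up the handler for an incoming opcode in a slice
 * table and rejects unsupported opcodes ("supported" is a placeholder for
 * whatever slice array the caller owns):
 *
 *      struct mdt_handler *h;
 *
 *      h = mdt_handler_find(lustre_msg_get_opc(req->rq_reqmsg), supported);
 *      if (h == NULL)
 *              rc = -ENOTSUPP;
 *      else
 *              rc = mdt_req_handle(info, h, req);
 */
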
2777 static int mdt_lock_resname_compat(struct mdt_device *m,
2778                                    struct ldlm_request *req)
2779 {
2780         /* XXX something... later. */
2781         return 0;
2782 }
2783
2784 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2785 {
2786         /* XXX something... later. */
2787         return 0;
2788 }
2789
2790 /*
2791  * Generic code handling requests that have struct mdt_body passed in:
2792  *
2793  *  - extract mdt_body from request and save it in @info, if present;
2794  *
2795  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2796  *  @info;
2797  *
2798  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2799  *  the object actually exists on storage (lu_object_exists()).
2800  *
2801  */
2802 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2803 {
2804         const struct mdt_body    *body;
2805         struct mdt_object        *obj;
2806         const struct lu_env      *env;
2807         struct req_capsule       *pill;
2808         int                       rc;
2809         ENTRY;
2810
2811         env = info->mti_env;
2812         pill = info->mti_pill;
2813
2814         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2815         if (body == NULL)
2816                 RETURN(-EFAULT);
2817
2818         if (!(body->valid & OBD_MD_FLID))
2819                 RETURN(0);
2820
2821         if (!fid_is_sane(&body->fid1)) {
2822                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2823                 RETURN(-EINVAL);
2824         }
2825
2826         /*
2827          * Do not read the size or any capa fields before checking that the
2828          * request actually contains a capa. Some requests do not, for
2829          * instance MDS_IS_SUBDIR.
2830          */
2831         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2832             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2833                 mdt_set_capainfo(info, 0, &body->fid1,
2834                                  req_capsule_client_get(pill, &RMF_CAPA1));
2835
2836         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2837         if (!IS_ERR(obj)) {
2838                 if ((flags & HABEO_CORPUS) &&
2839                     !mdt_object_exists(obj)) {
2840                         mdt_object_put(env, obj);
2841                         /* for capability renewal, ENOENT will be handled
2842                          * in mdt_renew_capa() */
2843                         if (body->valid & OBD_MD_FLOSSCAPA)
2844                                 rc = 0;
2845                         else
2846                                 rc = -ENOENT;
2847                 } else {
2848                         info->mti_object = obj;
2849                         rc = 0;
2850                 }
2851         } else
2852                 rc = PTR_ERR(obj);
2853
2854         RETURN(rc);
2855 }
2856
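     /*
      * Unpack the mdt_body from the request, if the format carries one, and
      * for HABEO_REFERO handlers set the reply buffer sizes and pack the
      * reply.
      */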
2857 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2858 {
2859         struct req_capsule *pill = info->mti_pill;
2860         int rc;
2861         ENTRY;
2862
2863         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2864                 rc = mdt_body_unpack(info, flags);
2865         else
2866                 rc = 0;
2867
2868         if (rc == 0 && (flags & HABEO_REFERO)) {
2869                 /* Pack reply. */
2870                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2871                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2872                                              info->mti_body->eadatasize);
2873                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2874                         req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2875                                              RCL_SERVER, 0);
2876
2877                 rc = req_capsule_server_pack(pill);
2878         }
2879         RETURN(rc);
2880 }
2881
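     /* Push the current capability settings down to the child md device. */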
2882 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2883 {
2884         struct md_device *next = m->mdt_child;
2885
2886         return next->md_ops->mdo_init_capa_ctxt(env, next,
2887                                                 m->mdt_opts.mo_mds_capa,
2888                                                 m->mdt_capa_timeout,
2889                                                 m->mdt_capa_alg,
2890                                                 m->mdt_capa_keys);
2891 }
2892
2893 /*
2894  * Invoke the handler for this request opcode. Also do the necessary
2895  * preprocessing (according to handler ->mh_flags) and post-processing
2896  * (setting ->last_{xid,committed}).
2897  */
2898 static int mdt_req_handle(struct mdt_thread_info *info,
2899                           struct mdt_handler *h, struct ptlrpc_request *req)
2900 {
2901         int   rc, serious = 0;
2902         __u32 flags;
2903
2904         ENTRY;
2905
2906         LASSERT(h->mh_act != NULL);
2907         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2908         LASSERT(current->journal_info == NULL);
2909
2910         /*
2911          * Check for the various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2912          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2913          * etc., without talking to the mdt authors first. Checking the same
2914          * thing there again is useless, and returning a 0 error without
2915          * packing a reply is buggy! Handlers must either pack a reply or
2916          * return an error.
2917          *
2918          * We return 0 here and send no reply in order to emulate a network
2919          * failure whenever any NET-related fail_id has occurred.
2920          */
2921         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2922                 RETURN(0);
2923
2924         rc = 0;
2925         flags = h->mh_flags;
2926         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2927
2928         if (h->mh_fmt != NULL) {
2929                 req_capsule_set(info->mti_pill, h->mh_fmt);
2930                 rc = mdt_unpack_req_pack_rep(info, flags);
2931         }
2932
2933         if (rc == 0 && flags & MUTABOR &&
2934             exp_connect_flags(req->rq_export) & OBD_CONNECT_RDONLY)
2935                 /* should it be rq_status? */
2936                 rc = -EROFS;
2937
2938         if (rc == 0 && flags & HABEO_CLAVIS) {
2939                 struct ldlm_request *dlm_req;
2940
2941                 LASSERT(h->mh_fmt != NULL);
2942
2943                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2944                 if (dlm_req != NULL) {
2945                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2946                                         LDLM_IBITS &&
2947                                      dlm_req->lock_desc.l_policy_data.\
2948                                         l_inodebits.bits == 0)) {
2949                                 /*
2950                                  * A lock without inodebits makes no sense
2951                                  * and will oops later in ldlm. If the client
2952                                  * failed to set such bits, return -EPROTO
2953                                  * instead of triggering an assertion.
2954                                  * For liblustre flock, the bits may be zero.
2955                                  */
2956                                 rc = -EPROTO;
2957                         } else {
2958                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2959                                         rc = mdt_lock_resname_compat(
2960                                                                 info->mti_mdt,
2961                                                                 dlm_req);
2962                                 info->mti_dlm_req = dlm_req;
2963                         }
2964                 } else {
2965                         rc = -EFAULT;
2966                 }
2967         }
2968
2969         /* capability settings changed via /proc; reinitialize the ctxt */
2970         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2971                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2972                 info->mti_mdt->mdt_capa_conf = 0;
2973         }
2974
2975         if (likely(rc == 0)) {
2976                 /*
2977                  * Process the request; there can be two types of rc:
2978                  * 1) errors with msg unpack/pack, or other failures outside
2979                  * the operation itself; these count as serious errors;
2980                  * 2) errors during the fs operation, which should be placed
2981                  * in rq_status only.
2982                  */
2983                 rc = h->mh_act(info);
2984                 if (rc == 0 &&
2985                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2986                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2987                                   "pack reply and returned 0 error\n",
2988                                   h->mh_name);
2989                         LBUG();
2990                 }
2991                 serious = is_serious(rc);
2992                 rc = clear_serious(rc);
2993         } else
2994                 serious = 1;
2995
2996         req->rq_status = rc;
2997
2998         /*
2999          * ELDLM_* codes greater than 0, as well as all non-serious errors,
3000          * should be reported in rq_status only.
3001          */
3002         if (rc > 0 || !serious)
3003                 rc = 0;
3004
3005         LASSERT(current->journal_info == NULL);
3006
3007         if (rc == 0 && (flags & HABEO_CLAVIS) &&
3008             info->mti_mdt->mdt_opts.mo_compat_resname) {
3009                 struct ldlm_reply *dlmrep;
3010
3011                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3012                 if (dlmrep != NULL)
3013                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3014         }
3015
3016         /* If we're DISCONNECTing, the mdt_export_data is already freed */
3017         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3018                 target_committed_to_req(req);
3019
3020         if (unlikely(req_is_replay(req) &&
3021                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3022                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3023                 LBUG();
3024         }
3025
3026         target_send_reply(req, rc, info->mti_fail_id);
3027         RETURN(0);
3028 }
3029
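     /*
      * Reset a lock handle to its unused state: NUL lock type, zeroed cookies
      * and minimal modes for the regular, PDO and remote regular handles.
      */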
3030 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3031 {
3032         lh->mlh_type = MDT_NUL_LOCK;
3033         lh->mlh_reg_lh.cookie = 0ull;
3034         lh->mlh_reg_mode = LCK_MINMODE;
3035         lh->mlh_pdo_lh.cookie = 0ull;
3036         lh->mlh_pdo_mode = LCK_MINMODE;
3037         lh->mlh_rreg_lh.cookie = 0ull;
3038         lh->mlh_rreg_mode = LCK_MINMODE;
3039 }
3040
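     /* Verify that the regular and PDO lock handles have been released. */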
3041 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3042 {
3043         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3044         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3045 }
3046
3047 /*
3048  * Initialize fields of struct mdt_thread_info. Other fields are left
3049  * uninitialized because it is too expensive to zero out the whole
3050  * mdt_thread_info (> 1K) on each request arrival.
3051  */
3052 static void mdt_thread_info_init(struct ptlrpc_request *req,
3053                                  struct mdt_thread_info *info)
3054 {
3055         int i;
3056
3057         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3058         info->mti_pill = &req->rq_pill;
3059
3060         /* lock handle */
3061         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3062                 mdt_lock_handle_init(&info->mti_lh[i]);
3063
3064         /* mdt device: it can be NULL during CONNECT */
3065         if (req->rq_export) {
3066                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3067                 info->mti_exp = req->rq_export;
3068         } else
3069                 info->mti_mdt = NULL;
3070         info->mti_env = req->rq_svc_thread->t_env;
3071         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3072         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3073         info->mti_mos = NULL;
3074
3075         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3076         info->mti_big_buf = LU_BUF_NULL;
3077         info->mti_body = NULL;
3078         info->mti_object = NULL;
3079         info->mti_dlm_req = NULL;
3080         info->mti_has_trans = 0;
3081         info->mti_cross_ref = 0;
3082         info->mti_opdata = 0;
3083         info->mti_big_lmm_used = 0;
3084
3085         /* Do not check for split by default. */
3086         info->mti_spec.no_create = 0;
3087         info->mti_spec.sp_rm_entry = 0;
3088 }
3089
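     /*
      * Release per-request state: the request capsule, the main object
      * reference, the lock handles and, if it was allocated, the big buffer.
      */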
3090 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3091 {
3092         int i;
3093
3094         req_capsule_fini(info->mti_pill);
3095         if (info->mti_object != NULL) {
3096                 mdt_object_put(info->mti_env, info->mti_object);
3097                 info->mti_object = NULL;
3098         }
3099
3100         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3101                 mdt_lock_handle_fini(&info->mti_lh[i]);
3102         info->mti_env = NULL;
3103
3104         if (unlikely(info->mti_big_buf.lb_buf != NULL))
3105                 lu_buf_free(&info->mti_big_buf);
3106 }
3107
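     /*
      * Decide whether a request arriving during recovery may be processed
      * immediately, must be queued for recovery, or is not permitted:
      * *process is set to 1, to the result of target_queue_recovery_request()
      * or to -EAGAIN respectively.
      */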
3108 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3109                                        struct obd_device *obd, int *process)
3110 {
3111         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3112         case MDS_CONNECT: /* Should never get here; kept for completeness. */
3113         case OST_CONNECT: /* Should never get here; kept for completeness. */
3114         case MDS_DISCONNECT:
3115         case OST_DISCONNECT:
3116         case OBD_IDX_READ:
3117                *process = 1;
3118                RETURN(0);
3119
3120         case MDS_CLOSE:
3121         case MDS_DONE_WRITING:
3122         case MDS_SYNC: /* used in unmounting */
3123         case OBD_PING:
3124         case MDS_REINT:
3125         case UPDATE_OBJ:
3126         case SEQ_QUERY:
3127         case FLD_QUERY:
3128         case LDLM_ENQUEUE:
3129                 *process = target_queue_recovery_request(req, obd);
3130                 RETURN(0);
3131
3132         default:
3133                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3134                 *process = -EAGAIN;
3135                 RETURN(0);
3136         }
3137 }
3138
3139 /*
3140  * Handle recovery. Return:
3141  *        +1: continue request processing;
3142  *       -ve: abort immediately with the given error code;
3143  *         0: send reply with error code in req->rq_status;
3144  */
3145 static int mdt_recovery(struct mdt_thread_info *info)
3146 {
3147         struct ptlrpc_request *req = mdt_info_req(info);
3148         struct obd_device *obd;
3149
3150         ENTRY;
3151
3152         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3153         case MDS_CONNECT:
3154         case SEC_CTX_INIT:
3155         case SEC_CTX_INIT_CONT:
3156         case SEC_CTX_FINI:
3157                 {
3158 #if 0
3159                         int rc;
3160
3161                         rc = mdt_handle_idmap(info);
3162                         if (rc)
3163                                 RETURN(rc);
3164                         else
3165 #endif
3166                                 RETURN(+1);
3167                 }
3168         }
3169
3170         if (unlikely(!class_connected_export(req->rq_export))) {
3171                 CDEBUG(D_HA, "operation %d on unconnected MDS from %s\n",
3172                        lustre_msg_get_opc(req->rq_reqmsg),
3173                        libcfs_id2str(req->rq_peer));
3174                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
3175                  * mds_A will get -ENOTCONN (especially ping requests),
3176                  * which makes mds_A hit its deactivate timeout; then, when
3177                  * mds_A cleans up, the cleanup process is suspended because
3178                  * the deactivate timeout is not zero.
3179                  */
3180                 req->rq_status = -ENOTCONN;
3181                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3182                 RETURN(0);
3183         }
3184
3185         /* sanity check: if the xid matches, the request must be marked as
3186          * resent or replayed */
3187         if (req_xid_is_last(req)) {
3188                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3189                       (MSG_RESENT | MSG_REPLAY))) {
3190                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3191                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3192                                   lustre_msg_get_flags(req->rq_reqmsg));
3193                         LBUG();
3194                         req->rq_status = -ENOTCONN;
3195                         RETURN(-ENOTCONN);
3196                 }
3197         }
3198
3199         /* else: note the opposite is not always true; a RESENT req after a
3200          * failover will usually not match the last_xid, since it was likely
3201          * never committed. A REPLAYed request will almost never match the
3202          * last_xid; however, it could for a committed, but still retained,
3203          * open. */
3204
3205         obd = req->rq_export->exp_obd;
3206
3207         /* Check whether recovery is still in progress... */
3208         if (unlikely(obd->obd_recovering)) {
3209                 int rc;
3210                 int should_process;
3211                 DEBUG_REQ(D_INFO, req, "Got new replay");
3212                 rc = mdt_filter_recovery_request(req, obd, &should_process);
3213                 if (rc != 0 || !should_process)
3214                         RETURN(rc);
3215                 else if (should_process < 0) {
3216                         req->rq_status = should_process;
3217                         rc = ptlrpc_error(req);
3218                         RETURN(rc);
3219                 }
3220         }
3221         RETURN(+1);
3222 }
3223
3224 static int mdt_msg_check_version(struct lustre_msg *msg)