1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * struct OBD_{ALLOC,FREE}*()
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #include <lustre_acl.h>
69 #include <lustre_param.h>
70 #include <lustre_quota.h>
71
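/*
 * Translation tables between LDLM lock modes (LCK_*) and metadata-layer
 * lock modes (MDL_*), one for each direction; see the
 * mdt_dlm_mode2mdl_mode()/mdt_mdl_mode2dlm_mode() conversions used in
 * mdt_lock_pdo_mode() below.
 */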
72 mdl_mode_t mdt_mdl_lock_modes[] = {
73         [LCK_MINMODE] = MDL_MINMODE,
74         [LCK_EX]      = MDL_EX,
75         [LCK_PW]      = MDL_PW,
76         [LCK_PR]      = MDL_PR,
77         [LCK_CW]      = MDL_CW,
78         [LCK_CR]      = MDL_CR,
79         [LCK_NL]      = MDL_NL,
80         [LCK_GROUP]   = MDL_GROUP
81 };
82
83 ldlm_mode_t mdt_dlm_lock_modes[] = {
84         [MDL_MINMODE] = LCK_MINMODE,
85         [MDL_EX]      = LCK_EX,
86         [MDL_PW]      = LCK_PW,
87         [MDL_PR]      = LCK_PR,
88         [MDL_CW]      = LCK_CW,
89         [MDL_CR]      = LCK_CR,
90         [MDL_NL]      = LCK_NL,
91         [MDL_GROUP]   = LCK_GROUP
92 };
93
94 /*
95  * Initialized in mdt_mod_init().
96  */
97 static unsigned long mdt_num_threads;
98 CFS_MODULE_PARM(mdt_num_threads, "ul", ulong, 0444,
99                 "number of MDS service threads to start "
100                 "(deprecated in favor of mds_num_threads)");
101
102 static unsigned long mds_num_threads;
103 CFS_MODULE_PARM(mds_num_threads, "ul", ulong, 0444,
104                 "number of MDS service threads to start");
105
106 static char *mds_num_cpts;
107 CFS_MODULE_PARM(mds_num_cpts, "c", charp, 0444,
108                 "CPU partitions MDS threads should run on");
109
110 static unsigned long mds_rdpg_num_threads;
111 CFS_MODULE_PARM(mds_rdpg_num_threads, "ul", ulong, 0444,
112                 "number of MDS readpage service threads to start");
113
114 static char *mds_rdpg_num_cpts;
115 CFS_MODULE_PARM(mds_rdpg_num_cpts, "c", charp, 0444,
116                 "CPU partitions MDS readpage threads should run on");
117
118 /* NB: these two should be removed along with setattr service in the future */
119 static unsigned long mds_attr_num_threads;
120 CFS_MODULE_PARM(mds_attr_num_threads, "ul", ulong, 0444,
121                 "number of MDS setattr service threads to start");
122
123 static char *mds_attr_num_cpts;
124 CFS_MODULE_PARM(mds_attr_num_cpts, "c", charp, 0444,
125                 "CPU partitions MDS setattr threads should run on");
126
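/*
 * A minimal usage sketch (assuming the standard modprobe configuration
 * mechanism; the file name and values below are illustrative only):
 *
 *      # /etc/modprobe.d/lustre.conf
 *      options mdt mds_num_threads=512 mds_num_cpts="[0,1]"
 *
 * These parameters are read once at module load time and cannot be changed
 * at runtime (permissions are 0444).
 */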
127 /* ptlrpc request handler for MDT. All handlers are
128  * grouped into several slices - struct mdt_opc_slice,
129  * and stored in an array - mdt_handlers[].
130  */
131 struct mdt_handler {
132         /* The name of this handler. */
133         const char *mh_name;
134         /* Fail id for this handler, checked at the start of the handler */
135         int         mh_fail_id;
136         /* Operation code for this handler */
137         __u32       mh_opc;
138         /* flags are listed in enum mdt_handler_flags below. */
139         __u32       mh_flags;
140         /* The actual handler function to execute. */
141         int (*mh_act)(struct mdt_thread_info *info);
142         /* Request format for this handler. */
143         const struct req_format *mh_fmt;
144 };
145
146 enum mdt_handler_flags {
147         /*
148          * struct mdt_body is passed in the incoming message, and object
149          * identified by this fid exists on disk.
150          *
151          * "habeo corpus" == "I have a body"
152          */
153         HABEO_CORPUS = (1 << 0),
154         /*
155          * struct ldlm_request is passed in the incoming message.
156          *
157          * "habeo clavis" == "I have a key"
158          */
159         HABEO_CLAVIS = (1 << 1),
160         /*
161          * this request has fixed reply format, so that reply message can be
162          * packed by generic code.
163          *
164          * "habeo refero" == "I have a reply"
165          */
166         HABEO_REFERO = (1 << 2),
167         /*
168          * this request will modify something, so check whether the filesystem
169          * is readonly or not, then return -EROFS to client asap if necessary.
170          *
171          * "mutabor" == "I shall modify"
172          */
173         MUTABOR      = (1 << 3)
174 };
175
176 struct mdt_opc_slice {
177         __u32               mos_opc_start;
178         int                 mos_opc_end;
179         struct mdt_handler *mos_hs;
180 };
181
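/*
 * For illustration only: an entry in one of the handler tables declared
 * below might look roughly like the sketch here.  The field names come from
 * struct mdt_handler above; the specific opcode, format name and flag
 * combination are assumptions, and the real tables (defined later in this
 * file) may be built with helper macros instead:
 *
 *      static struct mdt_handler mdt_mds_ops[] = {
 *              [MDS_GETATTR - MDS_FIRST_OPC] = {
 *                      .mh_name  = "mds_getattr",
 *                      .mh_opc   = MDS_GETATTR,
 *                      .mh_flags = HABEO_CORPUS | HABEO_REFERO,
 *                      .mh_act   = mdt_getattr,
 *                      .mh_fmt   = &RQF_MDS_GETATTR
 *              },
 *              ...
 *      };
 *
 * and the matching slice entry would then be
 *
 *      { .mos_opc_start = MDS_FIRST_OPC,
 *        .mos_opc_end   = MDS_LAST_OPC,
 *        .mos_hs        = mdt_mds_ops },
 */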
182 static struct mdt_opc_slice mdt_regular_handlers[];
183 static struct mdt_opc_slice mdt_readpage_handlers[];
184 static struct mdt_opc_slice mdt_xmds_handlers[];
185 static struct mdt_opc_slice mdt_seq_handlers[];
186 static struct mdt_opc_slice mdt_fld_handlers[];
187
188 static struct mdt_device *mdt_dev(struct lu_device *d);
189 static int mdt_regular_handle(struct ptlrpc_request *req);
190 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
191 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
192                         struct getinfo_fid2path *fp);
193
194 static const struct lu_object_operations mdt_obj_ops;
195
196 /* Slab for MDT object allocation */
197 static cfs_mem_cache_t *mdt_object_kmem;
198
199 static struct lu_kmem_descr mdt_caches[] = {
200         {
201                 .ckd_cache = &mdt_object_kmem,
202                 .ckd_name  = "mdt_obj",
203                 .ckd_size  = sizeof(struct mdt_object)
204         },
205         {
206                 .ckd_cache = NULL
207         }
208 };
209
210 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
211 {
212         if (!rep)
213                 return 0;
214         return (rep->lock_policy_res1 & flag);
215 }
216
217 void mdt_clear_disposition(struct mdt_thread_info *info,
218                            struct ldlm_reply *rep, int flag)
219 {
220         if (info)
221                 info->mti_opdata &= ~flag;
222         if (rep)
223                 rep->lock_policy_res1 &= ~flag;
224 }
225
226 void mdt_set_disposition(struct mdt_thread_info *info,
227                          struct ldlm_reply *rep, int flag)
228 {
229         if (info)
230                 info->mti_opdata |= flag;
231         if (rep)
232                 rep->lock_policy_res1 |= flag;
233 }
234
235 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
236 {
237         lh->mlh_pdo_hash = 0;
238         lh->mlh_reg_mode = lm;
239         lh->mlh_type = MDT_REG_LOCK;
240 }
241
242 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
243                        const char *name, int namelen)
244 {
245         lh->mlh_reg_mode = lm;
246         lh->mlh_type = MDT_PDO_LOCK;
247
248         if (name != NULL && (name[0] != '\0')) {
249                 LASSERT(namelen > 0);
250                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
251         } else {
252                 LASSERT(namelen == 0);
253                 lh->mlh_pdo_hash = 0ull;
254         }
255 }
256
257 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
258                               struct mdt_lock_handle *lh)
259 {
260         mdl_mode_t mode;
261         ENTRY;
262
263          * Any dir access needs a couple of locks:
264          *
265          * 1) on the part of the dir we are going to look up or modify;
266          *
267          * 2) on the whole dir to protect it from concurrent splitting and/or
268          * to flush the client's cache for readdir().
269          *
270          * So, for a given mode and object this routine decides what lock mode
271          * to use for lock #2:
272          *
273          * 1) if the caller is going to look up in the dir, we only need to
274          * protect the dir from being split - LCK_CR
275          *
276          * 2) if the caller is going to modify the dir, we need to protect it
277          * from being split and to flush the cache - LCK_CW
278          *
279          * 3) if the caller is going to modify the dir and the dir seems ready
280          * for splitting, we need to protect it from any type of access
281          * (lookup/modify/split) - LCK_EX --bzzz
282          */
283          */
284
285         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
286         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
287
288         /*
289          * Ask the underlying layer its opinion about the preferable PDO lock
290          * mode, with the access type passed as the regular lock mode:
291          *
292          * - MDL_MINMODE means that lower layer does not want to specify lock
293          * mode;
294          *
295          * - MDL_NL means that no PDO lock should be taken. This is used in
296          * some cases; for example, non-splittable directories do not need
297          * PDO locks at all.
298          */
299         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
300                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
301
302         if (mode != MDL_MINMODE) {
303                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
304         } else {
305                 /*
306                  * Lower layer does not want to specify a locking mode. We do
307                  * it ourselves. No special protection is needed, just flush
308                  * the client's cache on modification and allow concurrent
309                  * modification.
310                  */
311                 switch (lh->mlh_reg_mode) {
312                 case LCK_EX:
313                         lh->mlh_pdo_mode = LCK_EX;
314                         break;
315                 case LCK_PR:
316                         lh->mlh_pdo_mode = LCK_CR;
317                         break;
318                 case LCK_PW:
319                         lh->mlh_pdo_mode = LCK_CW;
320                         break;
321                 default:
322                         CERROR("Unexpected lock type (0x%x)\n",
323                                (int)lh->mlh_reg_mode);
324                         LBUG();
325                 }
326         }
327
328         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
329         EXIT;
330 }
331
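/*
 * Return the FID of the filesystem root in the reply body and, when both
 * the server (mo_mds_capa) and the client (OBD_CONNECT_MDS_CAPA) have MDS
 * capabilities enabled, a default capability for it.
 */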
332 static int mdt_getstatus(struct mdt_thread_info *info)
333 {
334         struct mdt_device *mdt  = info->mti_mdt;
335         struct md_device  *next = mdt->mdt_child;
336         struct mdt_body   *repbody;
337         int                rc;
338
339         ENTRY;
340
341         rc = mdt_check_ucred(info);
342         if (rc)
343                 RETURN(err_serious(rc));
344
345         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
346                 RETURN(err_serious(-ENOMEM));
347
348         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
349         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
350         if (rc != 0)
351                 RETURN(rc);
352
353         repbody->valid |= OBD_MD_FLID;
354
355         if (mdt->mdt_opts.mo_mds_capa &&
356             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
357                 struct mdt_object  *root;
358                 struct lustre_capa *capa;
359
360                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
361                 if (IS_ERR(root))
362                         RETURN(PTR_ERR(root));
363
364                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
365                 LASSERT(capa);
366                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
367                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
368                                  0);
369                 mdt_object_put(info->mti_env, root);
370                 if (rc == 0)
371                         repbody->valid |= OBD_MD_FLMDSCAPA;
372         }
373
374         RETURN(rc);
375 }
376
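/*
 * Return filesystem usage statistics.  The statfs result is cached in the
 * mdt_device and only refreshed when the cached copy is older than
 * OBD_STATFS_CACHE_SECONDS, so frequent client statfs requests do not hit
 * the backend every time.
 */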
377 static int mdt_statfs(struct mdt_thread_info *info)
378 {
379         struct ptlrpc_request           *req = mdt_info_req(info);
380         struct md_device                *next = info->mti_mdt->mdt_child;
381         struct ptlrpc_service_part      *svcpt;
382         struct obd_statfs               *osfs;
383         int                             rc;
384
385         ENTRY;
386
387         svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;
388
389         /* This will trigger a watchdog timeout */
390         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
391                          (MDT_SERVICE_WATCHDOG_FACTOR *
392                           at_get(&svcpt->scp_at_estimate)) + 1);
393
394         rc = mdt_check_ucred(info);
395         if (rc)
396                 RETURN(err_serious(rc));
397
398         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
399                 RETURN(err_serious(-ENOMEM));
400
401         osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
402         if (!osfs)
403                 RETURN(-EPROTO);
404
405         /** statfs information is cached in the mdt_device */
406         if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
407                                cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
408                 /** statfs data is too old, get an up-to-date copy */
409                 rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
410                 if (rc)
411                         RETURN(rc);
412                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
413                 info->mti_mdt->mdt_osfs = *osfs;
414                 info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
415                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
416         } else {
417                 /** use cached statfs data */
418                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
419                 *osfs = info->mti_mdt->mdt_osfs;
420                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
421         }
422
423         if (rc == 0)
424                 mdt_counter_incr(req, LPROC_MDT_STATFS);
425
426         RETURN(rc);
427 }
428
429 /**
430  * Pack SOM attributes into the reply.
431  * Call under a DLM UPDATE lock.
432  */
433 static void mdt_pack_size2body(struct mdt_thread_info *info,
434                                struct mdt_object *mo)
435 {
436         struct mdt_body *b;
437         struct md_attr *ma = &info->mti_attr;
438
439         LASSERT(ma->ma_attr.la_valid & LA_MODE);
440         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
441
442         /* Check if Size-on-MDS is supported, if this is a regular file,
443          * if SOM is enabled on the object and if the SOM cache exists and is
444          * valid.  Otherwise do not pack Size-on-MDS attributes into the reply. */
445         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
446             !S_ISREG(ma->ma_attr.la_mode) ||
447             !mdt_object_is_som_enabled(mo) ||
448             !(ma->ma_valid & MA_SOM))
449                 return;
450
451         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
452         b->size = ma->ma_som->msd_size;
453         b->blocks = ma->ma_som->msd_blocks;
454 }
455
456 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
457                         const struct lu_attr *attr, const struct lu_fid *fid)
458 {
459         struct md_attr *ma = &info->mti_attr;
460
461         LASSERT(ma->ma_valid & MA_INODE);
462
463         b->atime      = attr->la_atime;
464         b->mtime      = attr->la_mtime;
465         b->ctime      = attr->la_ctime;
466         b->mode       = attr->la_mode;
467         b->size       = attr->la_size;
468         b->blocks     = attr->la_blocks;
469         b->uid        = attr->la_uid;
470         b->gid        = attr->la_gid;
471         b->flags      = attr->la_flags;
472         b->nlink      = attr->la_nlink;
473         b->rdev       = attr->la_rdev;
474
475         /*XXX should pack the reply body according to lu_valid*/
476         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
477                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
478                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
479                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
480
481         if (!S_ISREG(attr->la_mode)) {
482                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
483         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
484                 /* means no objects are allocated on osts. */
485                 LASSERT(!(ma->ma_valid & MA_LOV));
486                 /* just ignore blocks occupied by extended attributes on MDS */
487                 b->blocks = 0;
488                 /* if no object is allocated on osts, the size on mds is valid. b=22272 */
489                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
490         }
491
492         if (fid) {
493                 b->fid1 = *fid;
494                 b->valid |= OBD_MD_FLID;
495
496                 /* FIXME: these should be fixed when the new igif is ready. */
497                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
498                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
499                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
500
501                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
502                                 PFID(fid), b->nlink, b->mode, b->size);
503         }
504
505         if (info)
506                 mdt_body_reverse_idmap(info, b);
507
508         if (b->valid & OBD_MD_FLSIZE)
509                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
510                        PFID(fid), (unsigned long long)b->size);
511 }
512
513 static inline int mdt_body_has_lov(const struct lu_attr *la,
514                                    const struct mdt_body *body)
515 {
516         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
517                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
518 }
519
520 void mdt_client_compatibility(struct mdt_thread_info *info)
521 {
522         struct mdt_body       *body;
523         struct ptlrpc_request *req = mdt_info_req(info);
524         struct obd_export     *exp = req->rq_export;
525         struct md_attr        *ma = &info->mti_attr;
526         struct lu_attr        *la = &ma->ma_attr;
527         ENTRY;
528
529         if (exp->exp_connect_flags & OBD_CONNECT_LAYOUTLOCK)
530                 /* the client can deal with 16-bit lmm_stripe_count */
531                 RETURN_EXIT;
532
533         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
534
535         if (!mdt_body_has_lov(la, body))
536                 RETURN_EXIT;
537
538         /* now we have a reply with a lov for a client not compatible with the
539          * layout lock, so we have to clear the layout generation number */
540         if (S_ISREG(la->la_mode))
541                 ma->ma_lmm->lmm_layout_gen = 0;
542         EXIT;
543 }
544
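/*
 * Read the xattr @name of @o into the preallocated info->mti_big_lmm
 * buffer.  The size is probed first with LU_BUF_NULL and the buffer is
 * grown (rounded up to a power of two) when it is too small.  Returns the
 * xattr size on success or a negative errno.
 */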
545 static int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
546                              char *name)
547 {
548         const struct lu_env *env = info->mti_env;
549         int rc;
550         ENTRY;
551
552         LASSERT(info->mti_big_lmm_used == 0);
553         rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
554         if (rc < 0)
555                 RETURN(rc);
556
557         /* big_lmm may need to be grown */
558         if (info->mti_big_lmmsize < rc) {
559                 int size = size_roundup_power2(rc);
560
561                 if (info->mti_big_lmmsize > 0) {
562                         /* free old buffer */
563                         LASSERT(info->mti_big_lmm);
564                         OBD_FREE_LARGE(info->mti_big_lmm,
565                                        info->mti_big_lmmsize);
566                         info->mti_big_lmm = NULL;
567                         info->mti_big_lmmsize = 0;
568                 }
569
570                 OBD_ALLOC_LARGE(info->mti_big_lmm, size);
571                 if (info->mti_big_lmm == NULL)
572                         RETURN(-ENOMEM);
573                 info->mti_big_lmmsize = size;
574         }
575         LASSERT(info->mti_big_lmmsize >= rc);
576
577         info->mti_buf.lb_buf = info->mti_big_lmm;
578         info->mti_buf.lb_len = info->mti_big_lmmsize;
579         rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);
580
581         RETURN(rc);
582 }
583
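/*
 * Fetch the LOV EA of @o into ma->ma_lmm and set MA_LOV in ma->ma_valid.
 * A missing EA (-ENODATA) is not an error; on -ERANGE the EA is re-read
 * through mdt_big_xattr_get() and mdt_max_mdsize is raised so that clients
 * learn about the larger EA size.
 */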
584 int mdt_attr_get_lov(struct mdt_thread_info *info,
585                      struct mdt_object *o, struct md_attr *ma)
586 {
587         struct md_object *next = mdt_object_child(o);
588         struct lu_buf    *buf = &info->mti_buf;
589         int rc;
590
591         buf->lb_buf = ma->ma_lmm;
592         buf->lb_len = ma->ma_lmm_size;
593         rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
594         if (rc > 0) {
595                 ma->ma_lmm_size = rc;
596                 ma->ma_valid |= MA_LOV;
597                 rc = 0;
598         } else if (rc == -ENODATA) {
599                 /* no LOV EA */
600                 rc = 0;
601         } else if (rc == -ERANGE) {
602                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LOV);
603                 if (rc > 0) {
604                         info->mti_big_lmm_used = 1;
605                         ma->ma_valid |= MA_LOV;
606                         ma->ma_lmm = info->mti_big_lmm;
607                         ma->ma_lmm_size = rc;
608                         /* update mdt_max_mdsize so all clients
609                          * will be aware of it */
610                         if (info->mti_mdt->mdt_max_mdsize < rc)
611                                 info->mti_mdt->mdt_max_mdsize = rc;
612                         rc = 0;
613                 }
614         }
615
616         return rc;
617 }
618
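/*
 * Extract the parent FID of @o from the first linkEA entry.  The linkEA
 * header is byte-swapped when it was written with the opposite endianness.
 * Returns 0 and fills @pfid on success, -ENODATA if the EA is missing or
 * empty, -EINVAL on a bad magic, or another negative errno.
 */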
619 int mdt_attr_get_pfid(struct mdt_thread_info *info,
620                       struct mdt_object *o, struct lu_fid *pfid)
621 {
622         struct lu_buf           *buf = &info->mti_buf;
623         struct link_ea_header   *leh;
624         struct link_ea_entry    *lee;
625         int                      rc;
626         ENTRY;
627
628         buf->lb_buf = info->mti_big_lmm;
629         buf->lb_len = info->mti_big_lmmsize;
630         rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
631                           buf, XATTR_NAME_LINK);
632         /* ignore errors, MA_PFID won't be set and it is
633          * up to the caller to treat this as an error */
634         if (rc == -ERANGE || buf->lb_len == 0) {
635                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
636                 buf->lb_buf = info->mti_big_lmm;
637                 buf->lb_len = info->mti_big_lmmsize;
638         }
639
640         if (rc < 0)
641                 RETURN(rc);
642         if (rc < sizeof(*leh)) {
643                 CERROR("short LinkEA on "DFID": rc = %d\n",
644                        PFID(mdt_object_fid(o)), rc);
645                 RETURN(-ENODATA);
646         }
647
648         leh = (struct link_ea_header *) buf->lb_buf;
649         lee = (struct link_ea_entry *)(leh + 1);
650         if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
651                 leh->leh_magic = LINK_EA_MAGIC;
652                 leh->leh_reccount = __swab32(leh->leh_reccount);
653                 leh->leh_len = __swab64(leh->leh_len);
654         }
655         if (leh->leh_magic != LINK_EA_MAGIC)
656                 RETURN(-EINVAL);
657         if (leh->leh_reccount == 0)
658                 RETURN(-ENODATA);
659
660         memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
661         fid_be_to_cpu(pfid, pfid);
662
663         RETURN(0);
664 }
665
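/*
 * Gather the attribute groups requested in ma->ma_need (MA_INODE, MA_PFID,
 * MA_LOV, MA_LMV, MA_HSM, MA_SOM, MA_ACL_DEF) for @o in a single call,
 * setting the corresponding bits in ma->ma_valid for everything found.
 * A failure to read the parent FID is ignored; other errors abort.
 */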
666 int mdt_attr_get_complex(struct mdt_thread_info *info,
667                          struct mdt_object *o, struct md_attr *ma)
668 {
669         const struct lu_env *env = info->mti_env;
670         struct md_object    *next = mdt_object_child(o);
671         struct lu_buf       *buf = &info->mti_buf;
672         u32                  mode = lu_object_attr(&next->mo_lu);
673         int                  need = ma->ma_need;
674         int                  rc = 0, rc2;
675         ENTRY;
676
677         ma->ma_valid = 0;
678
679         if (need & MA_INODE) {
680                 ma->ma_need = MA_INODE;
681                 rc = mo_attr_get(env, next, ma);
682                 if (rc)
683                         GOTO(out, rc);
684                 ma->ma_valid |= MA_INODE;
685         }
686
687         if (need & MA_PFID) {
688                 rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
689                 if (rc == 0)
690                         ma->ma_valid |= MA_PFID;
691                 /* ignore this error, parent fid is not mandatory */
692                 rc = 0;
693         }
694
695         if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
696                 rc = mdt_attr_get_lov(info, o, ma);
697                 if (rc)
698                         GOTO(out, rc);
699         }
700
701         if (need & MA_LMV && S_ISDIR(mode)) {
702                 buf->lb_buf = ma->ma_lmv;
703                 buf->lb_len = ma->ma_lmv_size;
704                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
705                 if (rc2 > 0) {
706                         ma->ma_lmv_size = rc2;
707                         ma->ma_valid |= MA_LMV;
708                 } else if (rc2 == -ENODATA) {
709                         /* no LMV EA */
710                         ma->ma_lmv_size = 0;
711                 } else
712                         GOTO(out, rc = rc2);
713         }
714
715
716         if (rc == 0 && S_ISREG(mode) && (need & (MA_HSM | MA_SOM))) {
717                 struct lustre_mdt_attrs *lma;
718
719                 lma = (struct lustre_mdt_attrs *)info->mti_xattr_buf;
720                 CLASSERT(sizeof(*lma) <= sizeof(info->mti_xattr_buf));
721
722                 buf->lb_buf = lma;
723                 buf->lb_len = sizeof(info->mti_xattr_buf);
724                 rc = mo_xattr_get(env, next, buf, XATTR_NAME_LMA);
725                 if (rc > 0) {
726                         lustre_lma_swab(lma);
727                         /* Swab and copy LMA */
728                         if (need & MA_HSM) {
729                                 if (lma->lma_compat & LMAC_HSM)
730                                         ma->ma_hsm.mh_flags =
731                                                 lma->lma_flags & HSM_FLAGS_MASK;
732                                 else
733                                         ma->ma_hsm.mh_flags = 0;
734                                 ma->ma_valid |= MA_HSM;
735                         }
736                         /* Copy SOM */
737                         if (need & MA_SOM && lma->lma_compat & LMAC_SOM) {
738                                 LASSERT(ma->ma_som != NULL);
739                                 ma->ma_som->msd_ioepoch = lma->lma_ioepoch;
740                                 ma->ma_som->msd_size    = lma->lma_som_size;
741                                 ma->ma_som->msd_blocks  = lma->lma_som_blocks;
742                                 ma->ma_som->msd_mountid = lma->lma_som_mountid;
743                                 ma->ma_valid |= MA_SOM;
744                         }
745                         rc = 0;
746                 } else if (rc == -ENODATA) {
747                         rc = 0;
748                 }
749         }
750
751 #ifdef CONFIG_FS_POSIX_ACL
752         if (need & MA_ACL_DEF && S_ISDIR(mode)) {
753                 buf->lb_buf = ma->ma_acl;
754                 buf->lb_len = ma->ma_acl_size;
755                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
756                 if (rc2 > 0) {
757                         ma->ma_acl_size = rc2;
758                         ma->ma_valid |= MA_ACL_DEF;
759                 } else if (rc2 == -ENODATA) {
760                         /* no ACLs */
761                         ma->ma_acl_size = 0;
762                 } else
763                         GOTO(out, rc = rc2);
764         }
765 #endif
766 out:
767         ma->ma_need = need;
768         CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
769                rc, ma->ma_valid, ma->ma_lmm);
770         RETURN(rc);
771 }
772
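/*
 * Fill the reply mdt_body (and the EA/ACL/capability buffers) for @o.  For
 * objects located on a remote MDT only the FID and OBD_MD_MDS are packed;
 * otherwise LOV/LMV EAs, the symlink target, ACLs, remote permissions and
 * a capability are packed as requested by the bits in the request body.
 */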
773 static int mdt_getattr_internal(struct mdt_thread_info *info,
774                                 struct mdt_object *o, int ma_need)
775 {
776         struct md_object        *next = mdt_object_child(o);
777         const struct mdt_body   *reqbody = info->mti_body;
778         struct ptlrpc_request   *req = mdt_info_req(info);
779         struct md_attr          *ma = &info->mti_attr;
780         struct lu_attr          *la = &ma->ma_attr;
781         struct req_capsule      *pill = info->mti_pill;
782         const struct lu_env     *env = info->mti_env;
783         struct mdt_body         *repbody;
784         struct lu_buf           *buffer = &info->mti_buf;
785         int                     rc;
786         int                     is_root;
787         ENTRY;
788
789         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
790                 RETURN(err_serious(-ENOMEM));
791
792         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
793
794         ma->ma_valid = 0;
795
796         rc = mdt_object_exists(o);
797         if (rc < 0) {
798                 /* This object is located on remote node.*/
799                 repbody->fid1 = *mdt_object_fid(o);
800                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
801                 GOTO(out, rc = 0);
802         }
803
804         buffer->lb_len = reqbody->eadatasize;
805         if (buffer->lb_len > 0)
806                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
807         else
808                 buffer->lb_buf = NULL;
809
810         /* If it is a dir object and the client requests MEA, then get the MEA */
811         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
812             reqbody->valid & OBD_MD_MEA) {
813                 /* Assumption: MDT_MD size is enough for lmv size. */
814                 ma->ma_lmv = buffer->lb_buf;
815                 ma->ma_lmv_size = buffer->lb_len;
816                 ma->ma_need = MA_LMV | MA_INODE;
817         } else {
818                 ma->ma_lmm = buffer->lb_buf;
819                 ma->ma_lmm_size = buffer->lb_len;
820                 ma->ma_need = MA_LOV | MA_INODE;
821         }
822
823         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
824             reqbody->valid & OBD_MD_FLDIREA  &&
825             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
826                 /* get default stripe info for this dir. */
827                 ma->ma_need |= MA_LOV_DEF;
828         }
829         ma->ma_need |= ma_need;
830         if (ma->ma_need & MA_SOM)
831                 ma->ma_som = &info->mti_u.som.data;
832
833         rc = mdt_attr_get_complex(info, o, ma);
834         if (unlikely(rc)) {
835                 CERROR("getattr error for "DFID": %d\n",
836                         PFID(mdt_object_fid(o)), rc);
837                 RETURN(rc);
838         }
839
840         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
841
842         /* the Lustre protocol is supposed to return the default striping
843          * on the user-visible root if explicitly requested */
844         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
845             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
846                 struct lu_fid      rootfid;
847                 struct mdt_object *root;
848                 struct mdt_device *mdt = info->mti_mdt;
849
850                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
851                 if (rc)
852                         RETURN(rc);
853                 root = mdt_object_find(env, mdt, &rootfid);
854                 if (IS_ERR(root))
855                         RETURN(PTR_ERR(root));
856                 rc = mdt_attr_get_lov(info, root, ma);
857                 mdt_object_put(info->mti_env, root);
858                 if (unlikely(rc)) {
859                         CERROR("getattr error for "DFID": %d\n",
860                                         PFID(mdt_object_fid(o)), rc);
861                         RETURN(rc);
862                 }
863         }
864
865         if (likely(ma->ma_valid & MA_INODE))
866                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
867         else
868                 RETURN(-EFAULT);
869
870         if (mdt_body_has_lov(la, reqbody)) {
871                 if (ma->ma_valid & MA_LOV) {
872                         LASSERT(ma->ma_lmm_size);
873                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
874                         repbody->eadatasize = ma->ma_lmm_size;
875                         if (S_ISDIR(la->la_mode))
876                                 repbody->valid |= OBD_MD_FLDIREA;
877                         else
878                                 repbody->valid |= OBD_MD_FLEASIZE;
879                 }
880                 if (ma->ma_valid & MA_LMV) {
881                         LASSERT(S_ISDIR(la->la_mode));
882                         repbody->eadatasize = ma->ma_lmv_size;
883                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
884                 }
885         } else if (S_ISLNK(la->la_mode) &&
886                    reqbody->valid & OBD_MD_LINKNAME) {
887                 buffer->lb_buf = ma->ma_lmm;
888                 /* eadatasize from client includes NULL-terminator, so
889                  * there is no need to read it */
890                 buffer->lb_len = reqbody->eadatasize - 1;
891                 rc = mo_readlink(env, next, buffer);
892                 if (unlikely(rc <= 0)) {
893                         CERROR("readlink failed: %d\n", rc);
894                         rc = -EFAULT;
895                 } else {
896                         int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
897
898                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
899                                 rc -= 2;
900                         repbody->valid |= OBD_MD_LINKNAME;
901                         /* we need to report back size with NULL-terminator
902                          * because client expects that */
903                         repbody->eadatasize = rc + 1;
904                         if (repbody->eadatasize != reqbody->eadatasize)
905                                 CERROR("Read shorter symlink %d, expected %d\n",
906                                        rc, reqbody->eadatasize - 1);
907                         /* NULL terminate */
908                         ((char *)ma->ma_lmm)[rc] = 0;
909
910                         /* If the total CDEBUG() size is larger than a page, it
911                          * will print a warning to the console, avoid this by
912                          * printing just the last part of the symlink. */
913                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
914                                print_limit < rc ? "..." : "", print_limit,
915                                (char *)ma->ma_lmm + rc - print_limit, rc);
916                         rc = 0;
917                 }
918         }
919
920         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
921                 repbody->max_cookiesize = 0;
922                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
923                 repbody->valid |= OBD_MD_FLMODEASIZE;
924                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
925                        "MAX_COOKIE to: %d:%d\n", repbody->max_mdsize,
926                        repbody->max_cookiesize);
927         }
928
929         if (exp_connect_rmtclient(info->mti_exp) &&
930             reqbody->valid & OBD_MD_FLRMTPERM) {
931                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
932
933                 /* mdt_getattr_lock only */
934                 rc = mdt_pack_remote_perm(info, o, buf);
935                 if (rc) {
936                         repbody->valid &= ~OBD_MD_FLRMTPERM;
937                         repbody->aclsize = 0;
938                         RETURN(rc);
939                 } else {
940                         repbody->valid |= OBD_MD_FLRMTPERM;
941                         repbody->aclsize = sizeof(struct mdt_remote_perm);
942                 }
943         }
944 #ifdef CONFIG_FS_POSIX_ACL
945         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
946                  (reqbody->valid & OBD_MD_FLACL)) {
947                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
948                 buffer->lb_len = req_capsule_get_size(pill,
949                                                       &RMF_ACL, RCL_SERVER);
950                 if (buffer->lb_len > 0) {
951                         rc = mo_xattr_get(env, next, buffer,
952                                           XATTR_NAME_ACL_ACCESS);
953                         if (rc < 0) {
954                                 if (rc == -ENODATA) {
955                                         repbody->aclsize = 0;
956                                         repbody->valid |= OBD_MD_FLACL;
957                                         rc = 0;
958                                 } else if (rc == -EOPNOTSUPP) {
959                                         rc = 0;
960                                 } else {
961                                         CERROR("got acl size: %d\n", rc);
962                                 }
963                         } else {
964                                 repbody->aclsize = rc;
965                                 repbody->valid |= OBD_MD_FLACL;
966                                 rc = 0;
967                         }
968                 }
969         }
970 #endif
971
972         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
973             info->mti_mdt->mdt_opts.mo_mds_capa &&
974             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
975                 struct lustre_capa *capa;
976
977                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
978                 LASSERT(capa);
979                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
980                 rc = mo_capa_get(env, next, capa, 0);
981                 if (rc)
982                         RETURN(rc);
983                 repbody->valid |= OBD_MD_FLMDSCAPA;
984         }
985
986 out:
987         if (rc == 0)
988                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
989
990         RETURN(rc);
991 }
992
993 static int mdt_renew_capa(struct mdt_thread_info *info)
994 {
995         struct mdt_object  *obj = info->mti_object;
996         struct mdt_body    *body;
997         struct lustre_capa *capa, *c;
998         int rc;
999         ENTRY;
1000
1001         /* if the object doesn't exist, or the server has disabled capability,
1002          * return directly; the client will find the OBD_MD_FLOSSCAPA flag
1003          * not set in body->valid.
1004          */
1005         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
1006             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
1007                 RETURN(0);
1008
1009         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1010         LASSERT(body != NULL);
1011
1012         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
1013         LASSERT(c);
1014
1015         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
1016         LASSERT(capa);
1017
1018         *capa = *c;
1019         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
1020         if (rc == 0)
1021                 body->valid |= OBD_MD_FLOSSCAPA;
1022         RETURN(rc);
1023 }
1024
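/*
 * Getattr on the object referenced by the request body: size the reply MD
 * buffer from the client-supplied easize (or the server maximum for old
 * clients that report zero) and fill the reply via mdt_getattr_internal().
 */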
1025 static int mdt_getattr(struct mdt_thread_info *info)
1026 {
1027         struct mdt_object       *obj = info->mti_object;
1028         struct req_capsule      *pill = info->mti_pill;
1029         struct mdt_body         *reqbody;
1030         struct mdt_body         *repbody;
1031         mode_t                   mode;
1032         int rc, rc2;
1033         ENTRY;
1034
1035         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
1036         LASSERT(reqbody);
1037
1038         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
1039                 rc = req_capsule_server_pack(pill);
1040                 if (unlikely(rc))
1041                         RETURN(err_serious(rc));
1042                 rc = mdt_renew_capa(info);
1043                 GOTO(out_shrink, rc);
1044         }
1045
1046         LASSERT(obj != NULL);
1047         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
1048
1049         mode = lu_object_attr(&obj->mot_obj.mo_lu);
1050
1051         /* old clients may not report the needed easize, so use the max value */
1052         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1053                              reqbody->eadatasize == 0 ?
1054                              info->mti_mdt->mdt_max_mdsize :
1055                              reqbody->eadatasize);
1056
1057         rc = req_capsule_server_pack(pill);
1058         if (unlikely(rc != 0))
1059                 RETURN(err_serious(rc));
1060
1061         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1062         LASSERT(repbody != NULL);
1063         repbody->eadatasize = 0;
1064         repbody->aclsize = 0;
1065
1066         if (reqbody->valid & OBD_MD_FLRMTPERM)
1067                 rc = mdt_init_ucred(info, reqbody);
1068         else
1069                 rc = mdt_check_ucred(info);
1070         if (unlikely(rc))
1071                 GOTO(out_shrink, rc);
1072
1073         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1074
1075         /*
1076          * Don't check capability at all, because rename might getattr for
1077          * remote obj, and at that time no capability is available.
1078          */
1079         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
1080         rc = mdt_getattr_internal(info, obj, 0);
1081         if (reqbody->valid & OBD_MD_FLRMTPERM)
1082                 mdt_exit_ucred(info);
1083         EXIT;
1084 out_shrink:
1085         mdt_client_compatibility(info);
1086         rc2 = mdt_fix_reply(info);
1087         if (rc == 0)
1088                 rc = rc2;
1089         return rc;
1090 }
1091
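/*
 * Handler for the is-subdir check (assumed to serve the MDS_IS_SUBDIR
 * opcode): ask the MD layer whether one directory is a subdirectory of the
 * other; -EREMOTE means the walk must continue on another MDT, with the
 * last checked parent FID returned in repbody->fid1.
 */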
1092 static int mdt_is_subdir(struct mdt_thread_info *info)
1093 {
1094         struct mdt_object     *o = info->mti_object;
1095         struct req_capsule    *pill = info->mti_pill;
1096         const struct mdt_body *body = info->mti_body;
1097         struct mdt_body       *repbody;
1098         int                    rc;
1099         ENTRY;
1100
1101         LASSERT(o != NULL);
1102
1103         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1104
1105         /*
1106          * We save the last checked parent fid in @repbody->fid1 for the
1107          * remote directory case.
1108          */
1109         LASSERT(fid_is_sane(&body->fid2));
1110         LASSERT(mdt_object_exists(o) > 0);
1111         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1112                            &body->fid2, &repbody->fid1);
1113         if (rc == 0 || rc == -EREMOTE)
1114                 repbody->valid |= OBD_MD_FLID;
1115
1116         RETURN(rc);
1117 }
1118
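/*
 * Fast path for a "raw" lookup, used when the client asked for the FID
 * only (reqbody->valid == OBD_MD_FLID).  Returns 0 when this is not such a
 * request and normal getattr-by-name processing should continue; returns 1
 * once the lookup has been attempted, with the reply body filled with the
 * child FID on success.
 */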
1119 static int mdt_raw_lookup(struct mdt_thread_info *info,
1120                           struct mdt_object *parent,
1121                           const struct lu_name *lname,
1122                           struct ldlm_reply *ldlm_rep)
1123 {
1124         struct md_object *next = mdt_object_child(info->mti_object);
1125         const struct mdt_body *reqbody = info->mti_body;
1126         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1127         struct mdt_body *repbody;
1128         int rc;
1129         ENTRY;
1130
1131         if (reqbody->valid != OBD_MD_FLID)
1132                 RETURN(0);
1133
1134         LASSERT(!info->mti_cross_ref);
1135
1136         /* Only get the fid of this obj by name */
1137         fid_zero(child_fid);
1138         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1139                         &info->mti_spec);
1140 #if 0
1141         /* XXX is raw_lookup possible as intent operation? */
1142         if (rc != 0) {
1143                 if (rc == -ENOENT)
1144                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1145                 RETURN(rc);
1146         } else
1147                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1148
1149         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1150 #endif
1151         if (rc == 0) {
1152                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1153                 repbody->fid1 = *child_fid;
1154                 repbody->valid = OBD_MD_FLID;
1155         }
1156         RETURN(1);
1157 }
1158
1159 /*
1160  * An UPDATE lock should be taken against the parent and released before
1161  * exit; a child_bits lock should be taken against the child and returned:
1162  *            (1) a normal request should release the child lock;
1163  *            (2) an intent request will grant the lock to the client.
1164  */
1165 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1166                                  struct mdt_lock_handle *lhc,
1167                                  __u64 child_bits,
1168                                  struct ldlm_reply *ldlm_rep)
1169 {
1170         struct ptlrpc_request  *req       = mdt_info_req(info);
1171         struct mdt_body        *reqbody   = NULL;
1172         struct mdt_object      *parent    = info->mti_object;
1173         struct mdt_object      *child;
1174         struct md_object       *next      = mdt_object_child(parent);
1175         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1176         struct lu_name         *lname     = NULL;
1177         const char             *name      = NULL;
1178         int                     namelen   = 0;
1179         struct mdt_lock_handle *lhp       = NULL;
1180         struct ldlm_lock       *lock;
1181         struct ldlm_res_id     *res_id;
1182         int                     is_resent;
1183         int                     ma_need = 0;
1184         int                     rc;
1185
1186         ENTRY;
1187
1188         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1189         LASSERT(ergo(is_resent,
1190                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1191
1192         LASSERT(parent != NULL);
1193         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1194         if (name == NULL)
1195                 RETURN(err_serious(-EFAULT));
1196
1197         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1198                                        RCL_CLIENT) - 1;
1199         if (!info->mti_cross_ref) {
1200                 /*
1201                  * XXX: The check for "namelen == 0" is for getattr by fid
1202                  * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty
1203                  * name, that is, the name must contain at least one
1204                  * character and the terminating '\0'
1205                  */
1206                 if (namelen == 0) {
1207                         reqbody = req_capsule_client_get(info->mti_pill,
1208                                                          &RMF_MDT_BODY);
1209                         if (unlikely(reqbody == NULL))
1210                                 RETURN(err_serious(-EFAULT));
1211
1212                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1213                                 RETURN(err_serious(-EINVAL));
1214
1215                         name = NULL;
1216                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1217                                "ldlm_rep = %p\n",
1218                                PFID(mdt_object_fid(parent)),
1219                                PFID(&reqbody->fid2), ldlm_rep);
1220                 } else {
1221                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1222                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1223                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1224                                name, ldlm_rep);
1225                 }
1226         }
1227         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1228
1229         rc = mdt_object_exists(parent);
1230         if (unlikely(rc == 0)) {
1231                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1232                                 &parent->mot_obj.mo_lu,
1233                                 "Parent doesn't exist!\n");
1234                 RETURN(-ESTALE);
1235         } else if (!info->mti_cross_ref) {
1236                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
1237                          PFID(mdt_object_fid(parent)));
1238         }
1239         if (lname) {
1240                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1241                 if (rc != 0) {
1242                         if (rc > 0)
1243                                 rc = 0;
1244                         RETURN(rc);
1245                 }
1246         }
1247
1248         if (info->mti_cross_ref) {
1249                 /* Only getattr on the child. Parent is on another node. */
1250                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1251                 child = parent;
1252                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1253                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1254
1255                 if (is_resent) {
1256                         /* Do not take lock for resent case. */
1257                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1258                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1259                                  lhc->mlh_reg_lh.cookie);
1260                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1261                                                 &lock->l_resource->lr_name));
1262                         LDLM_LOCK_PUT(lock);
1263                         rc = 0;
1264                 } else {
1265                         mdt_lock_handle_init(lhc);
1266                         mdt_lock_reg_init(lhc, LCK_PR);
1267
1268                         /*
1269                          * Object's name is on another MDS, no lookup lock is
1270                          * needed here but update is.
1271                          */
1272                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1273                         child_bits |= MDS_INODELOCK_UPDATE;
1274
1275                         rc = mdt_object_lock(info, child, lhc, child_bits,
1276                                              MDT_LOCAL_LOCK);
1277                 }
1278                 if (rc == 0) {
1279                         /* Finally, we can get attr for child. */
1280                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1281                                          BYPASS_CAPA);
1282                         rc = mdt_getattr_internal(info, child, 0);
1283                         if (unlikely(rc != 0))
1284                                 mdt_object_unlock(info, child, lhc, 1);
1285                 }
1286                 RETURN(rc);
1287         }
1288
1289         if (lname) {
1290                 /* step 1: lock parent only if parent is a directory */
1291                 if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
1292                         lhp = &info->mti_lh[MDT_LH_PARENT];
1293                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1294                         rc = mdt_object_lock(info, parent, lhp,
1295                                              MDS_INODELOCK_UPDATE,
1296                                              MDT_LOCAL_LOCK);
1297                         if (unlikely(rc != 0))
1298                                 RETURN(rc);
1299                 }
1300
1301                 /* step 2: lookup child's fid by name */
1302                 fid_zero(child_fid);
1303                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1304                                 &info->mti_spec);
1305
1306                 if (rc != 0) {
1307                         if (rc == -ENOENT)
1308                                 mdt_set_disposition(info, ldlm_rep,
1309                                                     DISP_LOOKUP_NEG);
1310                         GOTO(out_parent, rc);
1311                 } else
1312                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1313         } else {
1314                 *child_fid = reqbody->fid2;
1315                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1316         }
1317
1318         /*
1319          * step 3: find the child object by fid & lock it,
1320          *         regardless of whether it is local or remote.
1321          */
1322         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1323
1324         if (unlikely(IS_ERR(child)))
1325                 GOTO(out_parent, rc = PTR_ERR(child));
1326         if (is_resent) {
1327                 /* Do not take lock for resent case. */
1328                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1329                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1330                          lhc->mlh_reg_lh.cookie);
1331
1332                 res_id = &lock->l_resource->lr_name;
1333                 if (!fid_res_name_eq(mdt_object_fid(child),
1334                                     &lock->l_resource->lr_name)) {
1335                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1336                                                  &lock->l_resource->lr_name),
1337                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1338                                  (unsigned long)res_id->name[0],
1339                                  (unsigned long)res_id->name[1],
1340                                  (unsigned long)res_id->name[2],
1341                                  PFID(mdt_object_fid(parent)));
1342                           CWARN("Although resent, still did not get child lock, "
1343                                 "parent:"DFID" child:"DFID"\n",
1344                                 PFID(mdt_object_fid(parent)),
1345                                 PFID(mdt_object_fid(child)));
1346                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1347                           LDLM_LOCK_PUT(lock);
1348                           GOTO(relock, 0);
1349                 }
1350                 LDLM_LOCK_PUT(lock);
1351                 rc = 0;
1352         } else {
1353 relock:
1354                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1355                 mdt_lock_handle_init(lhc);
1356                 if (child_bits == MDS_INODELOCK_LAYOUT)
1357                         mdt_lock_reg_init(lhc, LCK_CR);
1358                 else
1359                         mdt_lock_reg_init(lhc, LCK_PR);
1360
1361                 if (mdt_object_exists(child) == 0) {
1362                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1363                                         &child->mot_obj.mo_lu,
1364                                         "Object doesn't exist!\n");
1365                         GOTO(out_child, rc = -ENOENT);
1366                 }
1367
1368                 if (!(child_bits & MDS_INODELOCK_UPDATE)) {
1369                         struct md_attr *ma = &info->mti_attr;
1370
1371                         ma->ma_valid = 0;
1372                         ma->ma_need = MA_INODE;
1373                         rc = mdt_attr_get_complex(info, child, ma);
1374                         if (unlikely(rc != 0))
1375                                 GOTO(out_child, rc);
1376
1377                         /* layout lock is used only on regular files */
1378                         if ((ma->ma_valid & MA_INODE) &&
1379                             (ma->ma_attr.la_valid & LA_MODE) &&
1380                             !S_ISREG(ma->ma_attr.la_mode))
1381                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1382
1383                         /* If the file has not been changed for some time, we
1384                          * return not only a LOOKUP lock, but also an UPDATE
1385                          * lock, and this might save us an RPC on a later STAT.
1386                          * For directories, it also lets negative dentries
1387                          * start working for this dir. */
1388                         if (ma->ma_valid & MA_INODE &&
1389                             ma->ma_attr.la_valid & LA_CTIME &&
1390                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1391                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1392                                 child_bits |= MDS_INODELOCK_UPDATE;
1393                 }
1394
1395                 rc = mdt_object_lock(info, child, lhc, child_bits,
1396                                      MDT_CROSS_LOCK);
1397
1398                 if (unlikely(rc != 0))
1399                         GOTO(out_child, rc);
1400         }
1401
1402         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1403         /* Get MA_SOM attributes if update lock is given. */
1404         if (lock &&
1405             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1406             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1407                 ma_need = MA_SOM;
1408
1409         /* finally, we can get attr for child. */
1410         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1411         rc = mdt_getattr_internal(info, child, ma_need);
1412         if (unlikely(rc != 0)) {
1413                 mdt_object_unlock(info, child, lhc, 1);
1414         } else if (lock) {
1415                 /* Debugging code. */
1416                 res_id = &lock->l_resource->lr_name;
1417                 LDLM_DEBUG(lock, "Returning lock to client");
1418                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1419                                          &lock->l_resource->lr_name),
1420                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1421                          (unsigned long)res_id->name[0],
1422                          (unsigned long)res_id->name[1],
1423                          (unsigned long)res_id->name[2],
1424                          PFID(mdt_object_fid(child)));
1425                 mdt_pack_size2body(info, child);
1426         }
1427         if (lock)
1428                 LDLM_LOCK_PUT(lock);
1429
1430         EXIT;
1431 out_child:
1432         mdt_object_put(info->mti_env, child);
1433 out_parent:
1434         if (lhp)
1435                 mdt_object_unlock(info, parent, lhp, 1);
1436         return rc;
1437 }
1438
1439 /* normal handler: should release the child lock */
1440 static int mdt_getattr_name(struct mdt_thread_info *info)
1441 {
1442         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1443         struct mdt_body        *reqbody;
1444         struct mdt_body        *repbody;
1445         int rc, rc2;
1446         ENTRY;
1447
1448         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1449         LASSERT(reqbody != NULL);
1450         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1451         LASSERT(repbody != NULL);
1452
1453         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1454         repbody->eadatasize = 0;
1455         repbody->aclsize = 0;
1456
1457         rc = mdt_init_ucred(info, reqbody);
1458         if (unlikely(rc))
1459                 GOTO(out_shrink, rc);
1460
1461         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1462         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1463                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1464                 lhc->mlh_reg_lh.cookie = 0;
1465         }
1466         mdt_exit_ucred(info);
1467         EXIT;
1468 out_shrink:
1469         mdt_client_compatibility(info);
1470         rc2 = mdt_fix_reply(info);
1471         if (rc == 0)
1472                 rc = rc2;
1473         return rc;
1474 }
1475
1476 static const struct lu_device_operations mdt_lu_ops;
1477
1478 static int lu_device_is_mdt(struct lu_device *d)
1479 {
1480         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1481 }
1482
1483 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1484                          void *karg, void *uarg);
1485
1486 static int mdt_set_info(struct mdt_thread_info *info)
1487 {
1488         struct ptlrpc_request *req = mdt_info_req(info);
1489         char *key;
1490         void *val;
1491         int keylen, vallen, rc = 0;
1492         ENTRY;
1493
1494         rc = req_capsule_server_pack(info->mti_pill);
1495         if (rc)
1496                 RETURN(rc);
1497
1498         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1499         if (key == NULL) {
1500                 DEBUG_REQ(D_HA, req, "no set_info key");
1501                 RETURN(-EFAULT);
1502         }
1503
1504         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1505                                       RCL_CLIENT);
1506
1507         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1508         if (val == NULL) {
1509                 DEBUG_REQ(D_HA, req, "no set_info val");
1510                 RETURN(-EFAULT);
1511         }
1512
1513         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1514                                       RCL_CLIENT);
1515
1516         /* Swab any part of val that needs swabbing here */
1517         if (KEY_IS(KEY_READ_ONLY)) {
1518                 req->rq_status = 0;
1519                 lustre_msg_set_status(req->rq_repmsg, 0);
1520
1521                 spin_lock(&req->rq_export->exp_lock);
1522                 if (*(__u32 *)val)
1523                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1524                 else
1525                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1526                 spin_unlock(&req->rq_export->exp_lock);
1527
1528         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1529                 struct changelog_setinfo *cs =
1530                         (struct changelog_setinfo *)val;
1531                 if (vallen != sizeof(*cs)) {
1532                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1533                         RETURN(-EINVAL);
1534                 }
1535                 if (ptlrpc_req_need_swab(req)) {
1536                         __swab64s(&cs->cs_recno);
1537                         __swab32s(&cs->cs_id);
1538                 }
1539
1540                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1541                                    vallen, val, NULL);
1542                 lustre_msg_set_status(req->rq_repmsg, rc);
1543
1544         } else {
1545                 RETURN(-EINVAL);
1546         }
1547         RETURN(0);
1548 }
1549
1550 /**
1551  * Top-level handler for MDT connection requests.
1552  */
1553 static int mdt_connect(struct mdt_thread_info *info)
1554 {
1555         int rc;
1556         struct obd_connect_data *reply;
1557         struct obd_export *exp;
1558         struct ptlrpc_request *req = mdt_info_req(info);
1559
1560         rc = target_handle_connect(req);
1561         if (rc != 0)
1562                 return err_serious(rc);
1563
1564         LASSERT(req->rq_export != NULL);
1565         info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1566         rc = mdt_init_sec_level(info);
1567         if (rc != 0) {
1568                 obd_disconnect(class_export_get(req->rq_export));
1569                 return rc;
1570         }
1571
1572         /* To avoid exposing partially initialized connection flags, changes up
1573          * to this point have been staged in reply->ocd_connect_flags. Now that
1574          * connection handling has completed successfully, atomically update
1575          * the connect flags in the shared export data structure. LU-1623 */
1576         reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
1577         exp = req->rq_export;
1578         spin_lock(&exp->exp_lock);
1579         exp->exp_connect_flags = reply->ocd_connect_flags;
1580         spin_unlock(&exp->exp_lock);
1581
1582         rc = mdt_init_idmap(info);
1583         if (rc != 0)
1584                 obd_disconnect(class_export_get(req->rq_export));
1585
1586         return rc;
1587 }
1588
1589 static int mdt_disconnect(struct mdt_thread_info *info)
1590 {
1591         int rc;
1592         ENTRY;
1593
1594         rc = target_handle_disconnect(mdt_info_req(info));
1595         if (rc)
1596                 rc = err_serious(rc);
1597         RETURN(rc);
1598 }
1599
1600 static int mdt_sendpage(struct mdt_thread_info *info,
1601                         struct lu_rdpg *rdpg, int nob)
1602 {
1603         struct ptlrpc_request   *req = mdt_info_req(info);
1604         struct obd_export       *exp = req->rq_export;
1605         struct ptlrpc_bulk_desc *desc;
1606         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1607         int                      tmpcount;
1608         int                      tmpsize;
1609         int                      i;
1610         int                      rc;
1611         ENTRY;
1612
1613         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1614                                     MDS_BULK_PORTAL);
1615         if (desc == NULL)
1616                 RETURN(-ENOMEM);
1617
1618         if (!(exp->exp_connect_flags & OBD_CONNECT_BRW_SIZE))
1619                 /* old client requires reply size in its PAGE_SIZE,
1620                  * which is rdpg->rp_count */
1621                 nob = rdpg->rp_count;
1622
1623         for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1624              i++, tmpcount -= tmpsize) {
1625                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1626                 ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
1627         }
1628
1629         LASSERT(desc->bd_nob == nob);
1630         rc = target_bulk_io(exp, desc, lwi);
1631         ptlrpc_free_bulk_pin(desc);
1632         RETURN(rc);
1633 }
1634
1635 static int mdt_readpage(struct mdt_thread_info *info)
1636 {
1637         struct mdt_object *object = info->mti_object;
1638         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1639         struct mdt_body   *reqbody;
1640         struct mdt_body   *repbody;
1641         int                rc;
1642         int                i;
1643         ENTRY;
1644
1645         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1646                 RETURN(err_serious(-ENOMEM));
1647
1648         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1649         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1650         if (reqbody == NULL || repbody == NULL)
1651                 RETURN(err_serious(-EFAULT));
1652
1653         /*
1654          * Prepare @rdpg before calling the lower layers and doing the transfer
1655          * itself. Here reqbody->size contains the offset at which to start
1656          * reading and reqbody->nlink contains the number of bytes to read.
1657          */
1658         rdpg->rp_hash = reqbody->size;
1659         if (rdpg->rp_hash != reqbody->size) {
1660                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1661                        rdpg->rp_hash, reqbody->size);
1662                 RETURN(-EFAULT);
1663         }
1664
1665         rdpg->rp_attrs = reqbody->mode;
1666         if (info->mti_exp->exp_connect_flags & OBD_CONNECT_64BITHASH)
1667                 rdpg->rp_attrs |= LUDA_64BITHASH;
1668         rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
1669                                 PTLRPC_MAX_BRW_SIZE);
1670         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
1671                           CFS_PAGE_SHIFT;
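        /*
         * A quick illustrative example (values are hypothetical): with 4KB
         * pages, a request of rp_count = 10000 bytes rounds up to
         * rp_npages = (10000 + 4095) >> 12 = 3 pages.
         */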
1672         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1673         if (rdpg->rp_pages == NULL)
1674                 RETURN(-ENOMEM);
1675
1676         for (i = 0; i < rdpg->rp_npages; ++i) {
1677                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1678                 if (rdpg->rp_pages[i] == NULL)
1679                         GOTO(free_rdpg, rc = -ENOMEM);
1680         }
1681
1682         /* call lower layers to fill allocated pages with directory data */
1683         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1684         if (rc < 0)
1685                 GOTO(free_rdpg, rc);
1686
1687         /* send pages to client */
1688         rc = mdt_sendpage(info, rdpg, rc);
1689
1690         EXIT;
1691 free_rdpg:
1692
1693         for (i = 0; i < rdpg->rp_npages; i++)
1694                 if (rdpg->rp_pages[i] != NULL)
1695                         cfs_free_page(rdpg->rp_pages[i]);
1696         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1697
1698         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1699                 RETURN(0);
1700
1701         return rc;
1702 }
1703
1704 static int mdt_reint_internal(struct mdt_thread_info *info,
1705                               struct mdt_lock_handle *lhc,
1706                               __u32 op)
1707 {
1708         struct req_capsule      *pill = info->mti_pill;
1709         struct mdt_body         *repbody;
1710         int                      rc = 0, rc2;
1711         ENTRY;
1712
1713
1714         rc = mdt_reint_unpack(info, op);
1715         if (rc != 0) {
1716                 CERROR("Can't unpack reint, rc %d\n", rc);
1717                 RETURN(err_serious(rc));
1718         }
1719
1720         /* for replay (no_create) lmm is not needed, client has it already */
1721         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1722                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1723                                      info->mti_rr.rr_eadatalen);
1724
1725         /* llog cookies are always 0, the field is kept for compatibility */
1726         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1727                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);
1728
1729         rc = req_capsule_server_pack(pill);
1730         if (rc != 0) {
1731                 CERROR("Can't pack response, rc %d\n", rc);
1732                 RETURN(err_serious(rc));
1733         }
1734
1735         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1736                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1737                 LASSERT(repbody);
1738                 repbody->eadatasize = 0;
1739                 repbody->aclsize = 0;
1740         }
1741
1742         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1743
1744         /* for replay no cookie / lmm is needed, because the client already has them */
1745         if (info->mti_spec.no_create)
1746                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1747                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1748
1749         rc = mdt_init_ucred_reint(info);
1750         if (rc)
1751                 GOTO(out_shrink, rc);
1752
1753         rc = mdt_fix_attr_ucred(info, op);
1754         if (rc != 0)
1755                 GOTO(out_ucred, rc = err_serious(rc));
1756
1757         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1758                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1759                 GOTO(out_ucred, rc);
1760         }
1761         rc = mdt_reint_rec(info, lhc);
1762         EXIT;
1763 out_ucred:
1764         mdt_exit_ucred(info);
1765 out_shrink:
1766         mdt_client_compatibility(info);
1767         rc2 = mdt_fix_reply(info);
1768         if (rc == 0)
1769                 rc = rc2;
1770         return rc;
1771 }
1772
1773 static long mdt_reint_opcode(struct mdt_thread_info *info,
1774                              const struct req_format **fmt)
1775 {
1776         struct mdt_rec_reint *rec;
1777         long opc;
1778
1779         opc = err_serious(-EFAULT);
1780         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1781         if (rec != NULL) {
1782                 opc = rec->rr_opcode;
1783                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1784                 if (opc < REINT_MAX && fmt[opc] != NULL)
1785                         req_capsule_extend(info->mti_pill, fmt[opc]);
1786                 else {
1787                         CERROR("Unsupported opc: %ld\n", opc);
1788                         opc = err_serious(opc);
1789                 }
1790         }
1791         return opc;
1792 }
1793
1794 static int mdt_reint(struct mdt_thread_info *info)
1795 {
1796         long opc;
1797         int  rc;
1798
1799         static const struct req_format *reint_fmts[REINT_MAX] = {
1800                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1801                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1802                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1803                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1804                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1805                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1806                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1807         };
1808
1809         ENTRY;
1810
1811         opc = mdt_reint_opcode(info, reint_fmts);
1812         if (opc >= 0) {
1813                 /*
1814                  * No lock is possible here for the client to pass to the
1815                  * reint code path.
1816                  */
1817                 rc = mdt_reint_internal(info, NULL, opc);
1818         } else {
1819                 rc = opc;
1820         }
1821
1822         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1823         RETURN(rc);
1824 }
1825
1826 /* this should sync the whole device */
1827 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1828 {
1829         struct dt_device *dt = mdt->mdt_bottom;
1830         int rc;
1831         ENTRY;
1832
1833         rc = dt->dd_ops->dt_sync(env, dt);
1834         RETURN(rc);
1835 }
1836
1837 /* this should sync this object */
1838 static int mdt_object_sync(struct mdt_thread_info *info)
1839 {
1840         struct md_object *next;
1841         int rc;
1842         ENTRY;
1843
1844         if (!mdt_object_exists(info->mti_object)) {
1845                 CWARN("Non existing object  "DFID"!\n",
1846                       PFID(mdt_object_fid(info->mti_object)));
1847                 RETURN(-ESTALE);
1848         }
1849         next = mdt_object_child(info->mti_object);
1850         rc = mo_object_sync(info->mti_env, next);
1851
1852         RETURN(rc);
1853 }
1854
1855 static int mdt_sync(struct mdt_thread_info *info)
1856 {
1857         struct ptlrpc_request *req = mdt_info_req(info);
1858         struct req_capsule *pill = info->mti_pill;
1859         struct mdt_body *body;
1860         int rc;
1861         ENTRY;
1862
1863         /* The fid may be zero, so we call req_capsule_set() manually */
1864         req_capsule_set(pill, &RQF_MDS_SYNC);
1865
1866         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1867         if (body == NULL)
1868                 RETURN(err_serious(-EINVAL));
1869
1870         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1871                 RETURN(err_serious(-ENOMEM));
1872
1873         if (fid_seq(&body->fid1) == 0) {
1874                 /* sync the whole device */
1875                 rc = req_capsule_server_pack(pill);
1876                 if (rc == 0)
1877                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1878                 else
1879                         rc = err_serious(rc);
1880         } else {
1881                 /* sync an object */
1882                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1883                 if (rc == 0) {
1884                         rc = mdt_object_sync(info);
1885                         if (rc == 0) {
1886                                 const struct lu_fid *fid;
1887                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1888
1889                                 info->mti_attr.ma_need = MA_INODE;
1890                                 info->mti_attr.ma_valid = 0;
1891                                 rc = mdt_attr_get_complex(info, info->mti_object,
1892                                                           &info->mti_attr);
1893                                 if (rc == 0) {
1894                                         body = req_capsule_server_get(pill,
1895                                                                 &RMF_MDT_BODY);
1896                                         fid = mdt_object_fid(info->mti_object);
1897                                         mdt_pack_attr2body(info, body, la, fid);
1898                                 }
1899                         }
1900                 } else
1901                         rc = err_serious(rc);
1902         }
1903         if (rc == 0)
1904                 mdt_counter_incr(req, LPROC_MDT_SYNC);
1905
1906         RETURN(rc);
1907 }
1908
1909 /*
1910  * Quotacheck handler.
1911  * In-kernel quotacheck isn't supported any more.
1912  */
1913 static int mdt_quotacheck(struct mdt_thread_info *info)
1914 {
1915         struct obd_quotactl     *oqctl;
1916         int                      rc;
1917         ENTRY;
1918
1919         oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
1920         if (oqctl == NULL)
1921                 RETURN(err_serious(-EPROTO));
1922
1923         rc = req_capsule_server_pack(info->mti_pill);
1924         if (rc)
1925                 RETURN(err_serious(rc));
1926
1927         /* deprecated, not used any more */
1928         RETURN(-EOPNOTSUPP);
1929 }
1930
1931 /*
1932  * Handle quota control requests to consult current usage/limits, but also
1933  * to configure quota enforcement.
1934  */
1935 static int mdt_quotactl(struct mdt_thread_info *info)
1936 {
1937         struct obd_export       *exp  = info->mti_exp;
1938         struct req_capsule      *pill = info->mti_pill;
1939         struct obd_quotactl     *oqctl, *repoqc;
1940         int                      id, rc;
1941         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
1942         ENTRY;
1943
1944         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1945         if (oqctl == NULL)
1946                 RETURN(err_serious(-EPROTO));
1947
1948         rc = req_capsule_server_pack(pill);
1949         if (rc)
1950                 RETURN(err_serious(rc));
1951
1952         switch (oqctl->qc_cmd) {
1953         case Q_QUOTACHECK:
1954         case LUSTRE_Q_INVALIDATE:
1955         case LUSTRE_Q_FINVALIDATE:
1956         case Q_QUOTAON:
1957         case Q_QUOTAOFF:
1958         case Q_INITQUOTA:
1959                 /* deprecated, not used any more */
1960                 RETURN(-EOPNOTSUPP);
1961                 /* master quotactl */
1962         case Q_GETINFO:
1963         case Q_SETINFO:
1964         case Q_SETQUOTA:
1965         case Q_GETQUOTA:
1966                 if (qmt == NULL)
1967                         RETURN(-EOPNOTSUPP);
1968                 /* slave quotactl */
1969         case Q_GETOINFO:
1970         case Q_GETOQUOTA:
1971                 break;
1972         default:
1973                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
1974                 RETURN(-EFAULT);
1975         }
1976
1977         /* map uid/gid for remote client */
1978         id = oqctl->qc_id;
1979         if (exp_connect_rmtclient(exp)) {
1980                 struct lustre_idmap_table *idmap;
1981
1982                 idmap = mdt_req2med(mdt_info_req(info))->med_idmap;
1983
1984                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1985                              oqctl->qc_cmd != Q_GETINFO))
1986                         RETURN(-EPERM);
1987
1988                 if (oqctl->qc_type == USRQUOTA)
1989                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1990                                                      oqctl->qc_id);
1991                 else if (oqctl->qc_type == GRPQUOTA)
1992                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1993                                                      oqctl->qc_id);
1994                 else
1995                         RETURN(-EINVAL);
1996
1997                 if (id == CFS_IDMAP_NOTFOUND) {
1998                         CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
1999                         RETURN(-EACCES);
2000                 }
2001         }
2002
2003         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
2004         if (repoqc == NULL)
2005                 RETURN(err_serious(-EFAULT));
2006
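        /* Temporarily substitute the mapped id for the duration of the
         * quotactl call; it is swapped back further below so that the reply
         * carries the id originally sent by the client. */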
2007         if (oqctl->qc_id != id)
2008                 swap(oqctl->qc_id, id);
2009
2010         switch (oqctl->qc_cmd) {
2011
2012         case Q_GETINFO:
2013         case Q_SETINFO:
2014         case Q_SETQUOTA:
2015         case Q_GETQUOTA:
2016                 /* forward quotactl request to QMT */
2017                 rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
2018                 break;
2019
2020         case Q_GETOINFO:
2021         case Q_GETOQUOTA:
2022                 /* slave quotactl */
2023                 rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
2024                                    oqctl);
2025                 break;
2026
2027         default:
2028                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
2029                 RETURN(-EFAULT);
2030         }
2031
2032         if (oqctl->qc_id != id)
2033                 swap(oqctl->qc_id, id);
2034
2035         *repoqc = *oqctl;
2036         RETURN(rc);
2037 }
2038
2039 /*
2040  * OBD PING and other handlers.
2041  */
2042 static int mdt_obd_ping(struct mdt_thread_info *info)
2043 {
2044         int rc;
2045         ENTRY;
2046
2047         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2048
2049         rc = target_handle_ping(mdt_info_req(info));
2050         if (rc < 0)
2051                 rc = err_serious(rc);
2052         RETURN(rc);
2053 }
2054
2055 /*
2056  * OBD_IDX_READ handler
2057  */
2058 static int mdt_obd_idx_read(struct mdt_thread_info *info)
2059 {
2060         struct mdt_device       *mdt = info->mti_mdt;
2061         struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
2062         struct idx_info         *req_ii, *rep_ii;
2063         int                      rc, i;
2064         ENTRY;
2065
2066         memset(rdpg, 0, sizeof(*rdpg));
2067         req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);
2068
2069         /* extract idx_info buffer from request & reply */
2070         req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
2071         if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
2072                 RETURN(err_serious(-EPROTO));
2073
2074         rc = req_capsule_server_pack(info->mti_pill);
2075         if (rc)
2076                 RETURN(err_serious(rc));
2077
2078         rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
2079         if (rep_ii == NULL)
2080                 RETURN(err_serious(-EFAULT));
2081         rep_ii->ii_magic = IDX_INFO_MAGIC;
2082
2083         /* extract hash to start with */
2084         rdpg->rp_hash = req_ii->ii_hash_start;
2085
2086         /* extract requested attributes */
2087         rdpg->rp_attrs = req_ii->ii_attrs;
2088
2089         /* check that fid packed in request is valid and supported */
2090         if (!fid_is_sane(&req_ii->ii_fid))
2091                 RETURN(-EINVAL);
2092         rep_ii->ii_fid = req_ii->ii_fid;
2093
2094         /* copy flags */
2095         rep_ii->ii_flags = req_ii->ii_flags;
2096
2097         /* compute number of pages to allocate, ii_count is the number of 4KB
2098          * containers */
2099         if (req_ii->ii_count <= 0)
2100                 GOTO(out, rc = -EFAULT);
2101         rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
2102                                PTLRPC_MAX_BRW_SIZE);
2103         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT;
2104
2105         /* allocate pages to store the containers */
2106         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2107         if (rdpg->rp_pages == NULL)
2108                 GOTO(out, rc = -ENOMEM);
2109         for (i = 0; i < rdpg->rp_npages; i++) {
2110                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
2111                 if (rdpg->rp_pages[i] == NULL)
2112                         GOTO(out, rc = -ENOMEM);
2113         }
2114
2115         /* populate pages with key/record pairs */
2116         rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
2117         if (rc < 0)
2118                 GOTO(out, rc);
2119
2120         LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
2121                  "asked %d > %d\n", rc, rdpg->rp_count);
2122
2123         /* send pages to client */
2124         rc = mdt_sendpage(info, rdpg, rc);
2125
2126         GOTO(out, rc);
2127 out:
2128         if (rdpg->rp_pages) {
2129                 for (i = 0; i < rdpg->rp_npages; i++)
2130                         if (rdpg->rp_pages[i])
2131                                 cfs_free_page(rdpg->rp_pages[i]);
2132                 OBD_FREE(rdpg->rp_pages,
2133                          rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2134         }
2135         return rc;
2136 }
2137
2138 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
2139 {
2140         return err_serious(-EOPNOTSUPP);
2141 }
2142
2143 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
2144 {
2145         return err_serious(-EOPNOTSUPP);
2146 }
2147
2148
2149 /*
2150  * LLOG handlers.
2151  */
2152
2153 /** clone llog ctxt from child (mdd)
2154  * This allows remote llog (replicator) access.
2155  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child
2156  * where the context was originally set up, or we can handle them directly.
2157  * I choose the latter, but that means any llog
2158  * contexts set up by the child must be accessible by the mdt.  So we clone the
2159  * context into our context list here.
2160  */
2161 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2162                                int idx)
2163 {
2164         struct md_device  *next = mdt->mdt_child;
2165         struct llog_ctxt *ctxt;
2166         int rc;
2167
2168         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2169                 return 0;
2170
2171         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2172         if (rc || ctxt == NULL) {
2173                 return 0;
2174         }
2175
2176         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2177         if (rc)
2178                 CERROR("Can't set mdt ctxt %d\n", rc);
2179
2180         return rc;
2181 }
2182
2183 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2184                                  struct mdt_device *mdt, int idx)
2185 {
2186         struct llog_ctxt *ctxt;
2187
2188         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2189         if (ctxt == NULL)
2190                 return 0;
2191         /* Put once for the get we just did, and once for the clone */
2192         llog_ctxt_put(ctxt);
2193         llog_ctxt_put(ctxt);
2194         return 0;
2195 }
2196
2197 static int mdt_llog_create(struct mdt_thread_info *info)
2198 {
2199         int rc;
2200
2201         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2202         rc = llog_origin_handle_open(mdt_info_req(info));
2203         return (rc < 0 ? err_serious(rc) : rc);
2204 }
2205
2206 static int mdt_llog_destroy(struct mdt_thread_info *info)
2207 {
2208         int rc;
2209
2210         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2211         rc = llog_origin_handle_destroy(mdt_info_req(info));
2212         return (rc < 0 ? err_serious(rc) : rc);
2213 }
2214
2215 static int mdt_llog_read_header(struct mdt_thread_info *info)
2216 {
2217         int rc;
2218
2219         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2220         rc = llog_origin_handle_read_header(mdt_info_req(info));
2221         return (rc < 0 ? err_serious(rc) : rc);
2222 }
2223
2224 static int mdt_llog_next_block(struct mdt_thread_info *info)
2225 {
2226         int rc;
2227
2228         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2229         rc = llog_origin_handle_next_block(mdt_info_req(info));
2230         return (rc < 0 ? err_serious(rc) : rc);
2231 }
2232
2233 static int mdt_llog_prev_block(struct mdt_thread_info *info)
2234 {
2235         int rc;
2236
2237         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2238         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2239         return (rc < 0 ? err_serious(rc) : rc);
2240 }
2241
2242
2243 /*
2244  * DLM handlers.
2245  */
2246 static struct ldlm_callback_suite cbs = {
2247         .lcs_completion = ldlm_server_completion_ast,
2248         .lcs_blocking   = ldlm_server_blocking_ast,
2249         .lcs_glimpse    = ldlm_server_glimpse_ast
2250 };
2251
2252 static int mdt_enqueue(struct mdt_thread_info *info)
2253 {
2254         struct ptlrpc_request *req;
2255         int rc;
2256
2257         /*
2258          * info->mti_dlm_req already contains swapped and (if necessary)
2259          * converted dlm request.
2260          */
2261         LASSERT(info->mti_dlm_req != NULL);
2262
2263         req = mdt_info_req(info);
2264         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2265                                   req, info->mti_dlm_req, &cbs);
2266         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2267         return rc ? err_serious(rc) : req->rq_status;
2268 }
2269
2270 static int mdt_convert(struct mdt_thread_info *info)
2271 {
2272         int rc;
2273         struct ptlrpc_request *req;
2274
2275         LASSERT(info->mti_dlm_req);
2276         req = mdt_info_req(info);
2277         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2278         return rc ? err_serious(rc) : req->rq_status;
2279 }
2280
2281 static int mdt_bl_callback(struct mdt_thread_info *info)
2282 {
2283         CERROR("bl callbacks should not happen on MDS\n");
2284         LBUG();
2285         return err_serious(-EOPNOTSUPP);
2286 }
2287
2288 static int mdt_cp_callback(struct mdt_thread_info *info)
2289 {
2290         CERROR("cp callbacks should not happen on MDS\n");
2291         LBUG();
2292         return err_serious(-EOPNOTSUPP);
2293 }
2294
2295 /*
2296  * sec context handlers
2297  */
2298 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2299 {
2300         int rc;
2301
2302         rc = mdt_handle_idmap(info);
2303
2304         if (unlikely(rc)) {
2305                 struct ptlrpc_request *req = mdt_info_req(info);
2306                 __u32                  opc;
2307
2308                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2309                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2310                         sptlrpc_svc_ctx_invalidate(req);
2311         }
2312
2313         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2314
2315         return rc;
2316 }
2317
2318 /*
2319  * quota request handlers
2320  */
2321 static int mdt_quota_dqacq(struct mdt_thread_info *info)
2322 {
2323         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2324         int                      rc;
2325         ENTRY;
2326
2327         if (qmt == NULL)
2328                 RETURN(err_serious(-EOPNOTSUPP));
2329
2330         rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2331         RETURN(rc);
2332 }
2333
2334 static struct mdt_object *mdt_obj(struct lu_object *o)
2335 {
2336         LASSERT(lu_device_is_mdt(o->lo_dev));
2337         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2338 }
2339
2340 struct mdt_object *mdt_object_new(const struct lu_env *env,
2341                                   struct mdt_device *d,
2342                                   const struct lu_fid *f)
2343 {
2344         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2345         struct lu_object *o;
2346         struct mdt_object *m;
2347         ENTRY;
2348
2349         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2350         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2351         if (unlikely(IS_ERR(o)))
2352                 m = (struct mdt_object *)o;
2353         else
2354                 m = mdt_obj(o);
2355         RETURN(m);
2356 }
2357
2358 struct mdt_object *mdt_object_find(const struct lu_env *env,
2359                                    struct mdt_device *d,
2360                                    const struct lu_fid *f)
2361 {
2362         struct lu_object *o;
2363         struct mdt_object *m;
2364         ENTRY;
2365
2366         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2367         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2368         if (unlikely(IS_ERR(o)))
2369                 m = (struct mdt_object *)o;
2370         else
2371                 m = mdt_obj(o);
2372         RETURN(m);
2373 }
2374
2375 /**
2376  * Asynchronous commit for mdt device.
2377  *
2378  * Pass the asynchronous commit call down the MDS stack.
2379  *
2380  * \param env environment
2381  * \param mdt the mdt device
2382  */
2383 static void mdt_device_commit_async(const struct lu_env *env,
2384                                     struct mdt_device *mdt)
2385 {
2386         struct dt_device *dt = mdt->mdt_bottom;
2387         int rc;
2388
2389         rc = dt->dd_ops->dt_commit_async(env, dt);
2390         if (unlikely(rc != 0))
2391                 CWARN("async commit start failed with rc = %d", rc);
2392 }
2393
2394 /**
2395  * Mark the lock as "synchronous".
2396  *
2397  * Mark the lock to defer the transaction commit to unlock time.
2398  *
2399  * \param lock the lock to mark as "synchronous"
2400  *
2401  * \see mdt_is_lock_sync
2402  * \see mdt_save_lock
2403  */
2404 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2405 {
2406         lock->l_ast_data = (void*)1;
2407 }
2408
2409 /**
2410  * Check whether the lock is "synchronous" or not.
2411  *
2412  * \param lock the lock to check
2413  * \retval 1 the lock is "synchronous"
2414  * \retval 0 the lock isn't "synchronous"
2415  *
2416  * \see mdt_set_lock_sync
2417  * \see mdt_save_lock
2418  */
2419 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2420 {
2421         return lock->l_ast_data != NULL;
2422 }
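/*
 * Note: l_ast_data is used here as a simple boolean flag; mdt_set_lock_sync()
 * stores a non-NULL sentinel and mdt_is_lock_sync() merely tests for it.
 */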
2423
2424 /**
2425  * Blocking AST for mdt locks.
2426  *
2427  * Starts a transaction commit in case of a COS lock conflict, or
2428  * defers such a commit to mdt_save_lock().
2429  *
2430  * \param lock the lock which blocks a request or a cancelling lock
2431  * \param desc unused
2432  * \param data unused
2433  * \param flag indicates whether this is a cancelling or a blocking callback
2434  * \retval 0
2435  * \see ldlm_blocking_ast_nocheck
2436  */
2437 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2438                      void *data, int flag)
2439 {
2440         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2441         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2442         int rc;
2443         ENTRY;
2444
2445         if (flag == LDLM_CB_CANCELING)
2446                 RETURN(0);
2447         lock_res_and_lock(lock);
2448         if (lock->l_blocking_ast != mdt_blocking_ast) {
2449                 unlock_res_and_lock(lock);
2450                 RETURN(0);
2451         }
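        /* Commit-on-Sharing: a PW/EX lock that is blocked by a request from a
         * different client (the client cookies differ, cf. the export cookie
         * passed to mdt_fid_lock() in mdt_object_lock()) is marked "sync" so
         * that the transaction is committed at unlock time, see
         * mdt_save_lock(). */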
2452         if (mdt_cos_is_enabled(mdt) &&
2453             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2454             lock->l_blocking_lock != NULL &&
2455             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2456                 mdt_set_lock_sync(lock);
2457         }
2458         rc = ldlm_blocking_ast_nocheck(lock);
2459
2460         /* There is no lock conflict if l_blocking_lock == NULL,
2461          * it indicates a blocking ast sent from ldlm_lock_decref_internal
2462          * when the last reference to a local lock was released */
2463         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2464                 struct lu_env env;
2465
2466                 rc = lu_env_init(&env, LCT_LOCAL);
2467                 if (unlikely(rc != 0))
2468                         CWARN("lu_env initialization failed with rc = %d,"
2469                               "cannot start asynchronous commit\n", rc);
2470                 else
2471                         mdt_device_commit_async(&env, mdt);
2472                 lu_env_fini(&env);
2473         }
2474         RETURN(rc);
2475 }
2476
2477 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2478                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2479 {
2480         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2481         ldlm_policy_data_t *policy = &info->mti_policy;
2482         struct ldlm_res_id *res_id = &info->mti_res_id;
2483         int rc;
2484         ENTRY;
2485
2486         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2487         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2488         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2489         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2490
2491         if (mdt_object_exists(o) < 0) {
2492                 if (locality == MDT_CROSS_LOCK) {
2493                         /* cross-ref object fix */
2494                         ibits &= ~MDS_INODELOCK_UPDATE;
2495                         ibits |= MDS_INODELOCK_LOOKUP;
2496                 } else {
2497                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2498                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2499                 }
2500                 /* No PDO lock on remote object */
2501                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2502         }
2503
2504         if (lh->mlh_type == MDT_PDO_LOCK) {
2505                 /* check for existence after the object is locked */
2506                 if (mdt_object_exists(o) == 0) {
2507                         /* Non-existent object shouldn't have PDO lock */
2508                         RETURN(-ESTALE);
2509                 } else {
2510                         /* Non-dir object shouldn't have PDO lock */
2511                         if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
2512                                 RETURN(-ENOTDIR);
2513                 }
2514         }
2515
2516         memset(policy, 0, sizeof(*policy));
2517         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2518
2519         /*
2520          * Take PDO lock on whole directory and build correct @res_id for lock
2521          * on part of directory.
2522          */
2523         if (lh->mlh_pdo_hash != 0) {
2524                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2525                 mdt_lock_pdo_mode(info, o, lh);
2526                 if (lh->mlh_pdo_mode != LCK_NL) {
2527                         /*
2528                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2529                          * is never going to be sent to client and we do not
2530                          * want it slowed down due to possible cancels.
2531                          */
2532                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2533                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2534                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2535                                           &info->mti_exp->exp_handle.h_cookie);
2536                         if (unlikely(rc))
2537                                 RETURN(rc);
2538                 }
2539
2540                 /*
2541                  * Finish initializing res_id with the name hash, marking the
2542                  * part of the directory which is being modified.
2543                  */
2544                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2545         }
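        /*
         * At this point, for a pdirops-protected name operation the resource
         * name built by fid_build_reg_res_name() has additionally been tagged
         * with the name hash at LUSTRE_RES_ID_HSH_OFF, so the regular lock
         * taken below conflicts only with operations on the same name hash,
         * while the PDO lock above covers the directory as a whole.
         */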
2546
2547         policy->l_inodebits.bits = ibits;
2548
2549         /*
2550          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2551          * going to be sent to client. If it is - mdt_intent_policy() path will
2552          * fix it up and turn FL_LOCAL flag off.
2553          */
2554         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2555                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2556                           &info->mti_exp->exp_handle.h_cookie);
2557         if (rc)
2558                 mdt_object_unlock(info, o, lh, 1);
2559         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2560                  lh->mlh_pdo_hash != 0 &&
2561                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2562                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2563         }
2564
2565         RETURN(rc);
2566 }
2567
2568 /**
2569  * Save a lock within request object.
2570  *
2571  * Keep the lock referenced until either client ACK or transaction
2572  * commit happens, or release the lock immediately, depending on the input
2573  * parameters. If COS is ON, a write lock is converted to a COS lock
2574  * before saving.
2575  *
2576  * \param info thread info object
2577  * \param h lock handle
2578  * \param mode lock mode
2579  * \param decref force immediate lock releasing
2580  */
2581 static
2582 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2583                    ldlm_mode_t mode, int decref)
2584 {
2585         ENTRY;
2586
2587         if (lustre_handle_is_used(h)) {
2588                 if (decref || !info->mti_has_trans ||
2589                     !(mode & (LCK_PW | LCK_EX))){
2590                         mdt_fid_unlock(h, mode);
2591                 } else {
2592                         struct mdt_device *mdt = info->mti_mdt;
2593                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2594                         struct ptlrpc_request *req = mdt_info_req(info);
2595                         int no_ack = 0;
2596
2597                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2598                                  h->cookie);
2599                         CDEBUG(D_HA, "request = %p reply state = %p"
2600                                " transno = "LPD64"\n",
2601                                req, req->rq_reply_state, req->rq_transno);
2602                         if (mdt_cos_is_enabled(mdt)) {
2603                                 no_ack = 1;
2604                                 ldlm_lock_downgrade(lock, LCK_COS);
2605                                 mode = LCK_COS;
2606                         }
2607                         ptlrpc_save_lock(req, h, mode, no_ack);
2608                         if (mdt_is_lock_sync(lock)) {
2609                                 CDEBUG(D_HA, "found sync-lock,"
2610                                        " async commit started\n");
2611                                 mdt_device_commit_async(info->mti_env,
2612                                                         mdt);
2613                         }
2614                         LDLM_LOCK_PUT(lock);
2615                 }
2616                 h->cookie = 0ull;
2617         }
2618
2619         EXIT;
2620 }
2621
2622 /**
2623  * Unlock mdt object.
2624  *
2625  * Immediately release the regular lock and the PDO lock, or save the
2626  * locks in the request and keep them referenced until client ACK or
2627  * transaction commit.
2628  *
2629  * \param info thread info object
2630  * \param o mdt object
2631  * \param lh mdt lock handle referencing regular and PDO locks
2632  * \param decref force immediate lock releasing
2633  */
2634 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2635                        struct mdt_lock_handle *lh, int decref)
2636 {
2637         ENTRY;
2638
2639         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2640         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2641
2642         EXIT;
2643 }
2644
2645 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2646                                         const struct lu_fid *f,
2647                                         struct mdt_lock_handle *lh,
2648                                         __u64 ibits)
2649 {
2650         struct mdt_object *o;
2651
2652         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2653         if (!IS_ERR(o)) {
2654                 int rc;
2655
2656                 rc = mdt_object_lock(info, o, lh, ibits,
2657                                      MDT_LOCAL_LOCK);
2658                 if (rc != 0) {
2659                         mdt_object_put(info->mti_env, o);
2660                         o = ERR_PTR(rc);
2661                 }
2662         }
2663         return o;
2664 }
2665
2666 void mdt_object_unlock_put(struct mdt_thread_info * info,
2667                            struct mdt_object * o,
2668                            struct mdt_lock_handle *lh,
2669                            int decref)
2670 {
2671         mdt_object_unlock(info, o, lh, decref);
2672         mdt_object_put(info->mti_env, o);
2673 }
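/*
 * Typical usage sketch (illustrative only; the surrounding handler, "fid" and
 * "rc" are hypothetical, error handling abbreviated):
 *
 *	struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
 *	struct mdt_object *o;
 *
 *	mdt_lock_reg_init(lh, LCK_PW);
 *	o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
 *	if (IS_ERR(o))
 *		return PTR_ERR(o);
 *	... modify the object, setting rc ...
 *	mdt_object_unlock_put(info, o, lh, rc != 0);
 */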
2674
2675 static struct mdt_handler *mdt_handler_find(__u32 opc,
2676                                             struct mdt_opc_slice *supported)
2677 {
2678         struct mdt_opc_slice *s;
2679         struct mdt_handler   *h;
2680
2681         h = NULL;
2682         for (s = supported; s->mos_hs != NULL; s++) {
2683                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2684                         h = s->mos_hs + (opc - s->mos_opc_start);
2685                         if (likely(h->mh_opc != 0))
2686                                 LASSERTF(h->mh_opc == opc,
2687                                          "opcode mismatch %d != %d\n",
2688                                          h->mh_opc, opc);
2689                         else
2690                                 h = NULL; /* unsupported opc */
2691                         break;
2692                 }
2693         }
2694         return h;
2695 }
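/*
 * For example (illustrative only): an opcode opc falls into the slice whose
 * half-open range [mos_opc_start, mos_opc_end) contains it, and its handler
 * is then located by pointer arithmetic, s->mos_hs + (opc - s->mos_opc_start).
 */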
2696
2697 static int mdt_lock_resname_compat(struct mdt_device *m,
2698                                    struct ldlm_request *req)
2699 {
2700         /* XXX something... later. */
2701         return 0;
2702 }
2703
2704 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2705 {
2706         /* XXX something... later. */
2707         return 0;
2708 }
2709
2710 /*
2711  * Generic code handling requests that have struct mdt_body passed in:
2712  *
2713  *  - extract mdt_body from request and save it in @info, if present;
2714  *
2715  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2716  *  @info;
2717  *
2718  *  - if HABEO_CORPUS flag is set for this request type check whether object
2719  *  actually exists on storage (lu_object_exists()).
2720  *
2721  */
2722 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2723 {
2724         const struct mdt_body    *body;
2725         struct mdt_object        *obj;
2726         const struct lu_env      *env;
2727         struct req_capsule       *pill;
2728         int                       rc;
2729         ENTRY;
2730
2731         env = info->mti_env;
2732         pill = info->mti_pill;
2733
2734         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2735         if (body == NULL)
2736                 RETURN(-EFAULT);
2737
2738         if (!(body->valid & OBD_MD_FLID))
2739                 RETURN(0);
2740
2741         if (!fid_is_sane(&body->fid1)) {
2742                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2743                 RETURN(-EINVAL);
2744         }
2745
2746         /*
2747          * Do not get size or any capa fields before we check that the request
2748          * actually contains a capa. There are some requests which do not, for
2749          * instance MDS_IS_SUBDIR.
2750          */
2751         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2752             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2753                 mdt_set_capainfo(info, 0, &body->fid1,
2754                                  req_capsule_client_get(pill, &RMF_CAPA1));
2755
2756         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2757         if (!IS_ERR(obj)) {
2758                 if ((flags & HABEO_CORPUS) &&
2759                     !mdt_object_exists(obj)) {
2760                         mdt_object_put(env, obj);
2761                         /* for capability renew ENOENT will be handled in
2762                          * mdt_renew_capa */
2763                         if (body->valid & OBD_MD_FLOSSCAPA)
2764                                 rc = 0;
2765                         else
2766                                 rc = -ENOENT;
2767                 } else {
2768                         info->mti_object = obj;
2769                         rc = 0;
2770                 }
2771         } else
2772                 rc = PTR_ERR(obj);
2773
2774         RETURN(rc);
2775 }
2776
2777 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2778 {
2779         struct req_capsule *pill = info->mti_pill;
2780         int rc;
2781         ENTRY;
2782
2783         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2784                 rc = mdt_body_unpack(info, flags);
2785         else
2786                 rc = 0;
2787
2788         if (rc == 0 && (flags & HABEO_REFERO)) {
2789                 /* Pack reply. */
2790                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2791                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2792                                              info->mti_body->eadatasize);
2793                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2794                         req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2795                                              RCL_SERVER, 0);
2796
2797                 rc = req_capsule_server_pack(pill);
2798         }
2799         RETURN(rc);
2800 }
2801
2802 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2803 {
2804         struct md_device *next = m->mdt_child;
2805
2806         return next->md_ops->mdo_init_capa_ctxt(env, next,
2807                                                 m->mdt_opts.mo_mds_capa,
2808                                                 m->mdt_capa_timeout,
2809                                                 m->mdt_capa_alg,
2810                                                 m->mdt_capa_keys);
2811 }
2812
2813 /*
2814  * Invoke handler for this request opc. Also do necessary preprocessing
2815  * (according to handler ->mh_flags), and post-processing (setting of
2816  * ->last_{xid,committed}).
2817  */
2818 static int mdt_req_handle(struct mdt_thread_info *info,
2819                           struct mdt_handler *h, struct ptlrpc_request *req)
2820 {
2821         int   rc, serious = 0;
2822         __u32 flags;
2823
2824         ENTRY;
2825
2826         LASSERT(h->mh_act != NULL);
2827         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2828         LASSERT(current->journal_info == NULL);
2829
2830         /*
2831          * Check for the various OBD_FAIL_$PREF_$OPC_NET codes. Do _not_ add
2832          * the same checks to handlers like mdt_close(), mdt_reint(), etc.
2833          * without talking to the mdt authors first: checking the same thing
2834          * there again is useless, and returning 0 without packing a reply
2835          * is buggy. Handlers must either pack a reply or return an error.
2836          *
2837          * We return 0 here and send no reply in order to emulate a network
2838          * failure. No reply is sent when any of the NET-related fail_ids
2839          * has occurred.
2840          */
2841         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2842                 RETURN(0);
2843
2844         rc = 0;
2845         flags = h->mh_flags;
2846         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2847
2848         if (h->mh_fmt != NULL) {
2849                 req_capsule_set(info->mti_pill, h->mh_fmt);
2850                 rc = mdt_unpack_req_pack_rep(info, flags);
2851         }
2852
2853         if (rc == 0 && flags & MUTABOR &&
2854             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2855                 /* should it be rq_status? */
2856                 rc = -EROFS;
2857
2858         if (rc == 0 && flags & HABEO_CLAVIS) {
2859                 struct ldlm_request *dlm_req;
2860
2861                 LASSERT(h->mh_fmt != NULL);
2862
2863                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2864                 if (dlm_req != NULL) {
2865                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2866                                         LDLM_IBITS &&
2867                                      dlm_req->lock_desc.l_policy_data.\
2868                                         l_inodebits.bits == 0)) {
2869                                 /*
2870                                  * A lock without inodebits makes no sense
2871                                  * and will oops later in ldlm. If the client
2872                                  * failed to set the bits, return -EPROTO
2873                                  * rather than triggering an assertion.
2874                                  * For the liblustre flock case the bits
2875                                  * may be zero.
2876                                  */
2876                                 rc = -EPROTO;
2877                         } else {
2878                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2879                                         rc = mdt_lock_resname_compat(
2880                                                                 info->mti_mdt,
2881                                                                 dlm_req);
2882                                 info->mti_dlm_req = dlm_req;
2883                         }
2884                 } else {
2885                         rc = -EFAULT;
2886                 }
2887         }
2888
2889         /* Capability settings changed via /proc; reinitialize the context. */
2890         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2891                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2892                 info->mti_mdt->mdt_capa_conf = 0;
2893         }
2894
2895         if (likely(rc == 0)) {
2896                 /*
2897                  * Process the request. There are two kinds of rc:
2898                  * 1) errors from message unpack/pack or other failures
2899                  * outside the operation itself; these count as serious;
2900                  * 2) errors from the fs operation itself, which belong in
2901                  * rq_status only.
2902                  */
2903                 rc = h->mh_act(info);
2904                 if (rc == 0 &&
2905                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2906                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2907                                   "pack reply and returned 0 error\n",
2908                                   h->mh_name);
2909                         LBUG();
2910                 }
2911                 serious = is_serious(rc);
2912                 rc = clear_serious(rc);
2913         } else
2914                 serious = 1;
2915
2916         req->rq_status = rc;
2917
2918         /*
2919          * ELDLM_* codes greater than 0, like all non-serious errors,
2920          * belong in rq_status only.
2921          */
2922         if (rc > 0 || !serious)
2923                 rc = 0;
2924
2925         LASSERT(current->journal_info == NULL);
2926
2927         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2928             info->mti_mdt->mdt_opts.mo_compat_resname) {
2929                 struct ldlm_reply *dlmrep;
2930
2931                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2932                 if (dlmrep != NULL)
2933                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2934         }
2935
2936         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2937         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
2938                 target_committed_to_req(req);
2939
2940         if (unlikely(req_is_replay(req) &&
2941                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2942                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2943                 LBUG();
2944         }
2945
2946         target_send_reply(req, rc, info->mti_fail_id);
2947         RETURN(0);
2948 }
2949
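/*
 * Reset a lock handle slot to the "no lock held" state. The matching
 * mdt_lock_handle_fini() below asserts that both the regular and the PDO
 * handles have been released before the slot goes away, e.g. (as done for
 * each slot in mdt_thread_info_init()/mdt_thread_info_fini()):
 *
 *      mdt_lock_handle_init(lh);
 *      ... take and drop locks through lh ...
 *      mdt_lock_handle_fini(lh);
 */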
2950 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2951 {
2952         lh->mlh_type = MDT_NUL_LOCK;
2953         lh->mlh_reg_lh.cookie = 0ull;
2954         lh->mlh_reg_mode = LCK_MINMODE;
2955         lh->mlh_pdo_lh.cookie = 0ull;
2956         lh->mlh_pdo_mode = LCK_MINMODE;
2957 }
2958
2959 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2960 {
2961         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2962         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2963 }
2964
2965 /*
2966  * Initialize fields of struct mdt_thread_info. Other fields are left
2967  * uninitialized because it is too expensive to zero out the whole
2968  * mdt_thread_info (> 1K) on each request arrival.
2969  */
2970 static void mdt_thread_info_init(struct ptlrpc_request *req,
2971                                  struct mdt_thread_info *info)
2972 {
2973         int i;
2974         struct md_capainfo *ci;
2975
2976         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2977         info->mti_pill = &req->rq_pill;
2978
2979         /* lock handle */
2980         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2981                 mdt_lock_handle_init(&info->mti_lh[i]);
2982
2983         /* The mdt device; it can be NULL during CONNECT. */
2984         if (req->rq_export) {
2985                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2986                 info->mti_exp = req->rq_export;
2987         } else
2988                 info->mti_mdt = NULL;
2989         info->mti_env = req->rq_svc_thread->t_env;
2990         ci = md_capainfo(info->mti_env);
2991         memset(ci, 0, sizeof *ci);
2992         if (req->rq_export) {
2993                 if (exp_connect_rmtclient(req->rq_export))
2994                         ci->mc_auth = LC_ID_CONVERT;
2995                 else if (req->rq_export->exp_connect_flags &
2996                          OBD_CONNECT_MDS_CAPA)
2997                         ci->mc_auth = LC_ID_PLAIN;
2998                 else
2999                         ci->mc_auth = LC_ID_NONE;
3000         }
3001
3002         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3003         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3004         info->mti_mos = NULL;
3005
3006         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3007         info->mti_body = NULL;
3008         info->mti_object = NULL;
3009         info->mti_dlm_req = NULL;
3010         info->mti_has_trans = 0;
3011         info->mti_cross_ref = 0;
3012         info->mti_opdata = 0;
3013         info->mti_big_lmm_used = 0;
3014
3015         /* Do not check for split by default. */
3016         info->mti_spec.no_create = 0;
3017 }
3018
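/*
 * Undo mdt_thread_info_init(): drop the object reference taken while
 * unpacking the body, verify that every lock handle slot was released and
 * finalize the request capsule.
 */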
3019 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3020 {
3021         int i;
3022
3023         req_capsule_fini(info->mti_pill);
3024         if (info->mti_object != NULL) {
3025                 mdt_object_put(info->mti_env, info->mti_object);
3026                 info->mti_object = NULL;
3027         }
3028         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3029                 mdt_lock_handle_fini(&info->mti_lh[i]);
3030         info->mti_env = NULL;
3031 }
3032
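/*
 * Decide what to do with a request that arrives while the target is still
 * recovering: connect/disconnect-style requests are processed right away,
 * a small set of opcodes is queued through target_queue_recovery_request(),
 * and everything else is rejected with -EAGAIN.
 */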
3033 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3034                                        struct obd_device *obd, int *process)
3035 {
3036         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3037         case MDS_CONNECT: /* Should never get here; listed for completeness. */
3038         case OST_CONNECT: /* Should never get here; listed for completeness. */
3039         case MDS_DISCONNECT:
3040         case OST_DISCONNECT:
3041         case OBD_IDX_READ:
3042                 *process = 1;
3043                 RETURN(0);
3044
3045         case MDS_CLOSE:
3046         case MDS_DONE_WRITING:
3047         case MDS_SYNC: /* used in unmounting */
3048         case OBD_PING:
3049         case MDS_REINT:
3050         case SEQ_QUERY:
3051         case FLD_QUERY:
3052         case LDLM_ENQUEUE:
3053                 *process = target_queue_recovery_request(req, obd);
3054                 RETURN(0);
3055
3056         default:
3057                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3058                 *process = -EAGAIN;
3059                 RETURN(0);
3060         }
3061 }
3062
3063 /*
3064  * Handle recovery. Return:
3065  *        +1: continue request processing;
3066  *       -ve: abort immediately with the given error code;
3067  *         0: send reply with error code in req->rq_status;
3068  */
3069 static int mdt_recovery(struct mdt_thread_info *info)
3070 {
3071         struct ptlrpc_request *req = mdt_info_req(info);
3072         struct obd_device *obd;
3073
3074         ENTRY;
3075
3076         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3077         case MDS_CONNECT:
3078         case SEC_CTX_INIT:
3079         case SEC_CTX_INIT_CONT:
3080         case SEC_CTX_FINI:
3081                 {
3082 #if 0
3083                         int rc;
3084
3085                         rc = mdt_handle_idmap(info);
3086                         if (rc)
3087                                 RETURN(rc);
3088                         else
3089 #endif
3090                                 RETURN(+1);
3091                 }
3092         }
3093
3094         if (unlikely(!class_connected_export(req->rq_export))) {
3095                 CERROR("operation %d on unconnected MDS from %s\n",
3096                        lustre_msg_get_opc(req->rq_reqmsg),
3097                        libcfs_id2str(req->rq_peer));
3098                 /* FIXME: for CMD cleanup, when mds_B stops, requests from
3099                  * mds_A get -ENOTCONN (especially ping requests), which
3100                  * starts the deactivate timeout on mds_A; when mds_A later
3101                  * cleans up, its cleanup is suspended because the deactivate
3102                  * timeout is non-zero.
3103                  */
3104                 req->rq_status = -ENOTCONN;
3105                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3106                 RETURN(0);
3107         }
3108
3109         /* sanity check: if the xid matches, the request must be marked as a
3110          * resent or replayed */
3111         if (req_xid_is_last(req)) {
3112                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3113                       (MSG_RESENT | MSG_REPLAY))) {
3114                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3115                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3116                                   lustre_msg_get_flags(req->rq_reqmsg));
3117                         LBUG();
3118                         req->rq_status = -ENOTCONN;
3119                         RETURN(-ENOTCONN);
3120                 }
3121         }
3122
3123         /* else: note the opposite is not always true; a RESENT req after a
3124          * failover will usually not match the last_xid, since it was likely
3125          * never committed. A REPLAYed request will almost never match the
3126          * last xid, however it could for a committed, but still retained,
3127          * open. */
3128
3129         obd = req->rq_export->exp_obd;
3130
3131         /* If recovery is still in progress, filter or queue the request... */
3132         if (unlikely(obd->obd_recovering)) {
3133                 int rc;
3134                 int should_process;
3135                 DEBUG_REQ(D_INFO, req, "Got new replay");
3136                 rc = mdt_filter_recovery_request(req, obd, &should_process);
3137                 if (rc != 0 || !should_process)
3138                         RETURN(rc);
3139                 else if (should_process < 0) {
3140                         req->rq_status = should_process;
3141                         rc = ptlrpc_error(req);
3142                         RETURN(rc);
3143                 }
3144         }
3145         RETURN(+1);
3146 }
3147
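/*
 * Verify that the client used the RPC version expected for this opcode:
 * LUSTRE_OBD_VERSION, LUSTRE_MDS_VERSION, LUSTRE_DLM_VERSION or
 * LUSTRE_LOG_VERSION, depending on the request family.
 */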
3148 static int mdt_msg_check_version(struct lustre_msg *msg)
3149 {
3150         int rc;
3151
3152         switch (lustre_msg_get_opc(msg)) {
3153         case MDS_CONNECT:
3154         case MDS_DISCONNECT:
3155         case OBD_PING:
3156         case SEC_CTX_INIT:
3157         case SEC_CTX_INIT_CONT:
3158         case SEC_CTX_FINI:
3159         case OBD_IDX_READ:
3160                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
3161                 if (rc)
3162                         CERROR("bad opc %u version %08x, expecting %08x\n",
3163                                lustre_msg_get_opc(msg),
3164                                lustre_msg_get_version(msg),
3165                                LUSTRE_OBD_VERSION);
3166                 break;
3167         case MDS_GETSTATUS:
3168         case MDS_GETATTR:
3169         case MDS_GETATTR_NAME:
3170         case MDS_STATFS:
3171         case MDS_READPAGE:
3172         case MDS_WRITEPAGE:
3173         case MDS_IS_SUBDIR:
3174         case MDS_REINT:
3175         case MDS_CLOSE:
3176         case MDS_DONE_WRITING:
3177         case MDS_PIN:
3178         case MDS_SYNC:
3179         case MDS_GETXATTR:
3180         case MDS_SETXATTR:
3181         case MDS_SET_INFO:
3182         case MDS_GET_INFO:
3183         case MDS_QUOTACHECK:
3184         case MDS_QUOTACTL:
3185         case QUOTA_DQACQ:
3186         case QUOTA_DQREL:
3187         case SEQ_QUERY:
3188         case FLD_QUERY:
3189                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
3190                 if (rc)
3191                         CERROR("bad opc %u version %08x, expecting %08x\n",
3192                                lustre_msg_get_opc(msg),
3193                                lustre_msg_get_version(msg),
3194                                LUSTRE_MDS_VERSION);
3195                 break;
3196         case LDLM_ENQUEUE:
3197         case LDLM_CONVERT:
3198         case LDLM_BL_CALLBACK:
3199         case LDLM_CP_CALLBACK:
3200                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
3201                 if (rc)
3202                         CERROR("bad opc %u version %08x, expecting %08x\n",
3203                                lustre_msg_get_opc(msg),
3204                                lustre_msg_get_version(msg),
3205                                LUSTRE_DLM_VERSION);
3206                 break;
3207         case OBD_LOG_CANCEL:
3208         case LLOG_ORIGIN_HANDLE_CREATE:
3209         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
3210         case LLOG_ORIGIN_HANDLE_READ_HEADER:
3211         case LLOG_ORIGIN_HANDLE_CLOSE:
3212         case LLOG_ORIGIN_HANDLE_DESTROY:
3213         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
3214         case LLOG_CATINFO:
3215                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
3216                 if (rc)
3217                         CERROR("bad opc %u version %08x, expecting %08x\n",
3218                                lustre_msg_get_opc(msg),
3219                                lustre_msg_get_version(msg),
3220                                LUSTRE_LOG_VERSION);
3221                 break;
3222         default:
3223                 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
3224                 rc = -ENOTSUPP;
3225         }
3226         return rc;
3227 }
3228
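/*
 * Top-level dispatch for one request: check the message version, run the
 * recovery filter, then look up the handler for this opcode in the
 * supported slice and invoke it through mdt_req_handle().
 */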
3229 static int mdt_handle0(struct ptlrpc_request *req,
3230                        struct mdt_thread_info *info,
3231                        struct mdt_opc_slice *supported)
3232 {
3233         struct mdt_handler *h;
3234         struct lustre_msg  *msg;
3235         int                 rc;
3236
3237         ENTRY;
3238
3239         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
3240                 RETURN(0);
3241
3242         LASSERT(current->journal_info == NULL);
3243
3244         msg = req->rq_reqmsg;
3245         rc = mdt_msg_check_version(msg);
3246         if (likely(rc == 0)) {
3247                 rc = mdt_recovery(info);
3248                 if (likely(rc == +1)) {
3249                         h = mdt_handler_find(lustre_msg_get_opc(msg),
3250                                              supported);
3251                         if (likely(h != NULL)) {
3252                                 rc = mdt_req_handle(info, h, req);
3253                         } else {
3254                                 CERROR("Unsupported opcode: 0x%x\n",
3255                                        lustre_msg_get_opc(msg));
3256                                 req->rq_status = -ENOTSUPP;
3257                                 rc = ptlrpc_error(req);
3258                                 RETURN(rc);
3259                         }
3260                 }
3261         } else
3262                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
3263         RETURN(rc);
3264 }
3265
3266 /*
3267  * MDT handler function called by a ptlrpc service thread when a request arrives.
3268  *
3269  * XXX common "target" functionality should be factored into a separate module
3270  * shared by mdt, ost and stand-alone services like fld.
3271  */
3272 static int mdt_handle_common(struct ptlrpc_request *req,
3273                              struct mdt_opc_slice *supported)
3274 {
3275         struct lu_env          *env;
3276         struct mdt_thread_info *info;
3277         int                     rc;
3278         ENTRY;
3279
3280         env = req->rq_svc_thread->t_env;
3281         LASSERT(env != NULL);
3282         LASSERT(env->le_ses != NULL);
3283         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
3284         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3285         LASSERT(info != NULL);
3286
3287         mdt_thread_info_init(req, info);
3288
3289         rc = mdt_handle0(req, info, supported);
3290
3291         mdt_thread_info_fini(info);
3292         RETURN(rc);
3293 }
3294
3295 /*
3296  * This is called from the recovery code as the handler for _all_ RPC types,
3297  * including FLD and SEQ.
3298  */
3299 int mdt_recovery_handle(struct ptlrpc_request *req)
3300 {
3301         int rc;
3302         ENTRY;
3303
3304         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3305         case FLD_QUERY:
3306                 rc = mdt_handle_common(req, mdt_fld_handlers);
3307                 break;
3308         case SEQ_QUERY:
3309                 rc = mdt_handle_common(req, mdt_seq_handlers);
3310                 break;
3311         default:
3312                 rc = mdt_handle_common(req, mdt_regular_handlers);
3313                 break;
3314         }
3315
3316         RETURN(rc);
3317 }
3318
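/*
 * Thin entry points for the individual MDT services; they differ only in the
 * handler slice passed to mdt_handle_common().
 */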
3319 static int mdt_regular_handle(struct ptlrpc_request *req)
3320 {
3321         return mdt_handle_common(req, mdt_regular_handlers);
3322 }
3323
3324 static int mdt_readpage_handle(struct ptlrpc_request *req)
3325 {
3326         return mdt_handle_common(req, mdt_readpage_handlers);
3327 }
3328
3329 static int mdt_xmds_handle(struct ptlrpc_request *req)
3330 {
3331         return mdt_handle_common(req, mdt_xmds_handlers);
3332 }
3333
3334 static int mdt_mdsc_handle(struct ptlrpc_request *req)
3335 {
3336         return mdt_handle_common(req, mdt_seq_handlers);
3337 }
3338
3339 static int mdt_mdss_handle(struct ptlrpc_request *req)
3340 {
3341         return mdt_handle_common(req, mdt_seq_handlers);
3342 }
3343
3344 static int mdt_dtss_handle(struct ptlrpc_request *req)
3345 {
3346         return mdt_handle_common(req, mdt_seq_handlers);
3347 }
3348
3349 static int mdt_fld_handle(struct ptlrpc_request *req)
3350 {
3351         return mdt_handle_common(req, mdt_fld_handlers);
3352 }
3353
3354 enum mdt_it_code {
3355         MDT_IT_OPEN,
3356         MDT_IT_OCREAT,
3357         MDT_IT_CREATE,
3358         MDT_IT_GETATTR,
3359         MDT_IT_READDIR,
3360         MDT_IT_LOOKUP,
3361         MDT_IT_UNLINK,
3362         MDT_IT_TRUNC,
3363         MDT_IT_GETXATTR,
3364         MDT_IT_LAYOUT,
3365         MDT_IT_QUOTA,
3366         MDT_IT_NR
3367 };
3368
3369 static int mdt_intent_getattr(enum mdt_it_code opcode,
3370                               struct mdt_thread_info *info,
3371                               struct ldlm_lock **,
3372                               __u64);
3373 static int mdt_intent_reint(enum mdt_it_code opcode,
3374                             struct mdt_thread_info *info,
3375                             struct ldlm_lock **,
3376                             __u64);
3377
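/*
 * Per-intent dispatch table: request format, handler flags, the handler to
 * call and, for reint-style intents, the REINT_* opcode the request must
 * carry (checked in mdt_intent_reint()).
 */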
3378 static struct mdt_it_flavor {
3379         const struct req_format *it_fmt;
3380         __u32                    it_flags;
3381         int                    (*it_act)(enum mdt_it_code,
3382                                          struct mdt_thread_info *,
3383                                          struct ldlm_lock **,
3384                                          __u64);
3385         long                     it_reint;
3386 } mdt_it_flavor[] = {
3387         [MDT_IT_OPEN]     = {
3388                 .it_fmt   = &RQF_LDLM_INTENT,
3389                 /*.it_flags = HABEO_REFERO,*/
3390                 .it_flags = 0,
3391                 .it_act   = mdt_intent_reint,
3392                 .it_reint = REINT_OPEN
3393         },
3394         [MDT_IT_OCREAT]   = {
3395                 .it_fmt   = &RQF_LDLM_INTENT,
3396                 .it_flags = MUTABOR,
3397                 .it_act   = mdt_intent_reint,
3398                 .it_reint = REINT_OPEN
3399         },
3400         [MDT_IT_CREATE]   = {
3401                 .it_fmt   = &RQF_LDLM_INTENT,
3402                 .it_flags = MUTABOR,
3403                 .it_act   = mdt_intent_reint,
3404                 .it_reint = REINT_CREATE
3405         },
3406         [MDT_IT_GETATTR]  = {
3407                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3408                 .it_flags = HABEO_REFERO,
3409                 .it_act   = mdt_intent_getattr
3410         },
3411         [MDT_IT_READDIR]  = {
3412                 .it_fmt   = NULL,
3413                 .it_flags = 0,
3414                 .it_act   = NULL
3415         },
3416         [MDT_IT_LOOKUP]   = {
3417                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3418                 .it_flags = HABEO_REFERO,
3419                 .it_act   = mdt_intent_getattr
3420         },
3421         [MDT_IT_UNLINK]   = {
3422                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3423                 .it_flags = MUTABOR,
3424                 .it_act   = NULL,
3425                 .it_reint = REINT_UNLINK
3426         },
3427         [MDT_IT_TRUNC]    = {
3428                 .it_fmt   = NULL,
3429                 .it_flags = MUTABOR,
3430                 .it_act   = NULL
3431         },
3432         [MDT_IT_GETXATTR] = {
3433                 .it_fmt   = NULL,
3434                 .it_flags = 0,
3435                 .it_act   = NULL
3436         },
3437         [MDT_IT_LAYOUT] = {
3438                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3439                 .it_flags = HABEO_REFERO,
3440                 .it_act   = mdt_intent_getattr
3441         }
3442 };
3443
3444 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3445                             struct ldlm_lock **lockp,
3446                             struct ldlm_lock *new_lock,
3447                             struct mdt_lock_handle *lh,
3448                             __u64 flags)
3449 {
3450         struct ptlrpc_request  *req = mdt_info_req(info);
3451         struct ldlm_lock       *lock = *lockp;
3452
3453         /*
3454          * Take the new lock only when a possible resend did not already
3455          * find one.
3456          */
3457         if (new_lock == NULL)
3458                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3459
3460         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3461                 lh->mlh_reg_lh.cookie = 0;
3462                 RETURN(0);
3463         }
3464
3465         LASSERTF(new_lock != NULL,
3466                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3467
3468         /*
3469          * If we've already given this lock to a client once, then we should
3470          * have no readers or writers.  Otherwise, we should have one reader
3471          * _or_ writer ref (which will be zeroed below) before returning the
3472          * lock to a client.
3473          */
3474         if (new_lock->l_export == req->rq_export) {
3475                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3476         } else {
3477                 LASSERT(new_lock->l_export == NULL);
3478                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3479         }
3480
3481         *lockp = new_lock;
3482
3483         if (new_lock->l_export == req->rq_export) {
3484                 /*
3485                  * Already gave this to the client, which means that we
3486                  * reconstructed a reply.
3487                  */
3488                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3489                         MSG_RESENT);
3490                 lh->mlh_reg_lh.cookie = 0;
3491                 RETURN(ELDLM_LOCK_REPLACED);
3492         }
3493
3494         /*
3495          * Fixup the lock to be given to the client.
3496          */
3497         lock_res_and_lock(new_lock);
3498         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3499          * possible blocking AST. */
3500         while (new_lock->l_readers > 0) {
3501                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3502                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3503                 new_lock->l_readers--;
3504         }
3505         while (new_lock->l_writers > 0) {
3506                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3507                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3508                 new_lock->l_writers--;
3509         }
3510
3511         new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3512         new_lock->l_blocking_ast = lock->l_blocking_ast;
3513         new_lock->l_completion_ast = lock->l_completion_ast;
3514         new_lock->l_remote_handle = lock->l_remote_handle;
3515         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3516
3517         unlock_res_and_lock(new_lock);
3518
3519         cfs_hash_add(new_lock->l_export->exp_lock_hash,
3520                      &new_lock->l_remote_handle,
3521                      &new_lock->l_exp_hash);
3522
3523         LDLM_LOCK_RELEASE(new_lock);
3524         lh->mlh_reg_lh.cookie = 0;
3525
3526         RETURN(ELDLM_LOCK_REPLACED);
3527 }
3528
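/*
 * For a MSG_RESENT request, look up the lock the client already holds by its
 * remote handle and restore its cookie and mode into the lock handle slot;
 * if no such lock exists and the xid is not the last one seen, clear
 * MSG_RESENT so the request is handled as a fresh one.
 */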
3529 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3530                                     struct ldlm_lock *new_lock,
3531                                     struct ldlm_lock **old_lock,
3532                                     struct mdt_lock_handle *lh)
3533 {
3534         struct ptlrpc_request  *req = mdt_info_req(info);
3535         struct obd_export      *exp = req->rq_export;
3536         struct lustre_handle    remote_hdl;
3537         struct ldlm_request    *dlmreq;
3538         struct ldlm_lock       *lock;
3539
3540         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3541                 return;
3542
3543         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3544         remote_hdl = dlmreq->lock_handle[0];
3545
3546         /* In the function below, .hs_keycmp resolves to
3547          * ldlm_export_lock_keycmp() */
3548         /* coverity[overrun-buffer-val] */
3549         lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3550         if (lock) {
3551                 if (lock != new_lock) {
3552                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3553                         lh->mlh_reg_mode = lock->l_granted_mode;
3554
3555                         LDLM_DEBUG(lock, "Restoring lock cookie");
3556                         DEBUG_REQ(D_DLMTRACE, req,
3557                                   "restoring lock cookie "LPX64,
3558                                   lh->mlh_reg_lh.cookie);
3559                         if (old_lock)
3560                                 *old_lock = LDLM_LOCK_GET(lock);
3561                         cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3562                         return;
3563                 }
3564
3565                 cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3566         }
3567
3568         /*
3569          * If the xid matches, then we know this is a resent request and allow
3570          * it. (It is probably an OPEN, for which we do not send a lock.)
3571          */
3572         if (req_xid_is_last(req))
3573                 return;
3574
3575         /*
3576          * This remote handle isn't enqueued, so we never received or processed
3577          * this request.  Clear MSG_RESENT, because it can be handled like any
3578          * normal request now.
3579          */
3580         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3581
3582         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3583                   remote_hdl.cookie);
3584 }
3585
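/*
 * Serve LOOKUP/GETATTR intents: pick the inodebits to grant, switch to the
 * user's credentials, reuse a lock from a possible resend, take the name
 * lock via mdt_getattr_name_lock() and either hand the lock back to the
 * client through mdt_intent_lock_replace() or abort it with
 * ELDLM_LOCK_ABORTED.
 */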
3586 static int mdt_intent_getattr(enum mdt_it_code opcode,
3587                               struct mdt_thread_info *info,
3588                               struct ldlm_lock **lockp,
3589                               __u64 flags)
3590 {
3591         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3592         struct ldlm_lock       *new_lock = NULL;
3593         __u64                   child_bits;
3594         struct ldlm_reply      *ldlm_rep;
3595         struct ptlrpc_request  *req;
3596         struct mdt_body        *reqbody;
3597         struct mdt_body        *repbody;
3598         int                     rc, rc2;
3599         ENTRY;
3600
3601         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
3602         LASSERT(reqbody);
3603
3604         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
3605         LASSERT(repbody);
3606
3607         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
3608         repbody->eadatasize = 0;
3609         repbody->aclsize = 0;
3610
3611         switch (opcode) {
3612         case MDT_IT_LOOKUP:
3613                 child_bits = MDS_INODELOCK_LOOKUP;
3614                 break;
3615         case MDT_IT_GETATTR:
3616                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
3617                 break;
3618         case MDT_IT_LAYOUT: {
3619                 static int printed = 0;
3620
3621                 if (!printed) {
3622                         CERROR("layout lock not supported by this version\n");
3623                         printed = 1;
3624                 }
3625                 GOTO(out_shrink, rc = -EINVAL);
3626                 break;
3627         }
3628         default:
3629                 CERROR("Unsupported intent (%d)\n", opcode);
3630                 GOTO(out_shrink, rc = -EINVAL);
3631         }
3632
3633         rc = mdt_init_ucred(info, reqbody);
3634         if (rc)
3635                 GOTO(out_shrink, rc);
3636
3637         req = info->mti_pill->rc_req;
3638         ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3639         mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD);
3640
3641         /* Get lock from request for possible resent case. */
3642         mdt_intent_fixup_resent(info, *lockp, &new_lock, lhc);
3643
3644         ldlm_rep->lock_policy_res2 =
3645                 mdt_getattr_name_lock(info, lhc, child_bits, ldlm_rep);
3646
3647         if (mdt_get_disposition(ldlm_rep, DISP_LOOKUP_NEG))
3648                 ldlm_rep->lock_policy_res2 = 0;
3649         if (!mdt_get_disposition(ldlm_rep, DISP_LOOKUP_POS) ||
3650             ldlm_rep->lock_policy_res2) {
3651                 lhc->mlh_reg_lh.cookie = 0ull;
3652                 GOTO(out_ucred, rc = ELDLM_LOCK_ABORTED);
3653         }
3654
3655         rc = mdt_intent_lock_replace(info, lockp, new_lock, lhc, flags);
3656         EXIT;
3657 out_ucred:
3658         mdt_exit_ucred(info);
3659 out_shrink:
3660         mdt_client_compatibility(info);
3661         rc2 = mdt_fix_reply(info);
3662         if (rc == 0)
3663                 rc = rc2;
3664         return rc;
3665 }
3666
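/*
 * Serve open/create intents: validate the reint opcode against the intent,
 * reuse a lock from a possible resend, run mdt_reint_internal() and store
 * the result in the DLM reply; -EREMOTE (cross-ref) and a still-used lock
 * handle cause the lock to be returned to the client.
 */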
3667 static int mdt_intent_reint(enum mdt_it_code opcode,
3668                             struct mdt_thread_info *info,
3669                             struct ldlm_lock **lockp,
3670                             __u64 flags)
3671 {
3672         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3673         struct ldlm_reply      *rep = NULL;
3674         long                    opc;
3675         int                     rc;
3676
3677         static const struct req_format *intent_fmts[REINT_MAX] = {
3678                 [REINT_CREATE]  = &RQF_LDLM_INTENT_CREATE,
3679                 [REINT_OPEN]    = &RQF_LDLM_INTENT_OPEN
3680         };
3681
3682         ENTRY;
3683
3684         opc = mdt_reint_opcode(info, intent_fmts);
3685         if (opc < 0)
3686                 RETURN(opc);
3687
3688         if (mdt_it_flavor[opcode].it_reint != opc) {
3689                 CERROR("Reint code %ld doesn't match intent: %d\n",
3690                        opc, opcode);
3691                 RETURN(err_serious(-EPROTO));
3692         }
3693
3694         /* Get lock from request for possible resent case. */
3695         mdt_intent_fixup_resent(info, *lockp, NULL, lhc);
3696
3697         rc = mdt_reint_internal(info, lhc, opc);
3698
3699         /* Check whether the reply has been packed successfully. */
3700         if (mdt_info_req(info)->rq_repmsg != NULL)
3701                 rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3702         if (rep == NULL)
3703                 RETURN(err_serious(-EFAULT));
3704
3705         /* MDC expects this in any case */
3706         if (rc != 0)
3707                 mdt_set_disposition(info, rep, DISP_LOOKUP_EXECD);
3708
3709         /* Cross-ref case, the lock should be returned to the client */
3710         if (rc == -EREMOTE) {
3711                 LASSERT(lustre_handle_is_used(&lhc->mlh_reg_lh));
3712                 rep->lock_policy_res2 = 0;
3713                 rc = mdt_intent_lock_replace(info, lockp, NULL, lhc, flags);
3714                 RETURN(rc);
3715         }
3716         rep->lock_policy_res2 = clear_serious(rc);
3717
3718         if (rep->lock_policy_res2 == -ENOENT &&
3719             mdt_get_disposition(rep, DISP_LOOKUP_NEG))
3720                 rep->lock_policy_res2 = 0;
3721
3722         if (rc == -ENOTCONN || rc == -ENODEV ||
3723             rc == -EOVERFLOW) { /* EOVERFLOW: VBR failure, return as error */
3724                 /*
3725                  * For the disconnect errors (ENODEV and ENOTCONN), the error
3726                  * is returned in rq_status; the client detects this at the
3727                  * ptlrpc layer and disconnects and reconnects the import
3728                  * immediately, instead of impacting the following RPCs.
3729                  */
3730                 lhc->mlh_reg_lh.cookie = 0ull;
3731                 RETURN(rc);
3732         } else {
3733                 /*
3734                  * For other cases the error is returned in the intent reply,
3735                  * and the client retrieves the result from the intent.
3736                  */
3737                  /*
3738                   * FIXME: once open lock support is finished, it should be
3739                   * checked here.
3740                   */
3741                 if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
3742                         LASSERTF(rc == 0, "Error occurred but lock handle "
3743                                  "is still in use\n");
3744                         rep->lock_policy_res2 = 0;
3745                         rc = mdt_intent_lock_replace(info, lockp, NULL, lhc, flags);
3746                         RETURN(rc);