1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #ifndef EXPORT_SYMTAB
50 # define EXPORT_SYMTAB
51 #endif
52 #define DEBUG_SUBSYSTEM S_MDS
53
54 #include <linux/module.h>
55 /*
56  * struct OBD_{ALLOC,FREE}*()
57  */
58 #include <obd_support.h>
59 /* struct ptlrpc_request */
60 #include <lustre_net.h>
61 /* struct obd_export */
62 #include <lustre_export.h>
63 /* struct obd_device */
64 #include <obd.h>
65 /* lu2dt_dev() */
66 #include <dt_object.h>
67 #include <lustre_mds.h>
68 #include <lustre_mdt.h>
69 #include "mdt_internal.h"
70 #ifdef HAVE_QUOTA_SUPPORT
71 # include <lustre_quota.h>
72 #endif
73 #include <lustre_acl.h>
74 #include <lustre_param.h>
75 #include <lustre_fsfilt.h>
76
77 mdl_mode_t mdt_mdl_lock_modes[] = {
78         [LCK_MINMODE] = MDL_MINMODE,
79         [LCK_EX]      = MDL_EX,
80         [LCK_PW]      = MDL_PW,
81         [LCK_PR]      = MDL_PR,
82         [LCK_CW]      = MDL_CW,
83         [LCK_CR]      = MDL_CR,
84         [LCK_NL]      = MDL_NL,
85         [LCK_GROUP]   = MDL_GROUP
86 };
87
88 ldlm_mode_t mdt_dlm_lock_modes[] = {
89         [MDL_MINMODE] = LCK_MINMODE,
90         [MDL_EX]      = LCK_EX,
91         [MDL_PW]      = LCK_PW,
92         [MDL_PR]      = LCK_PR,
93         [MDL_CW]      = LCK_CW,
94         [MDL_CR]      = LCK_CR,
95         [MDL_NL]      = LCK_NL,
96         [MDL_GROUP]   = LCK_GROUP
97 };
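/*
 * Editorial note (not part of the original file): a minimal usage sketch of
 * the translation tables above, assuming mdt_dlm_mode2mdl_mode() and
 * mdt_mdl_mode2dlm_mode() (used in mdt_lock_pdo_mode() below) simply index
 * into them:
 *
 *     mdl_mode_t  m = mdt_mdl_lock_modes[LCK_PW];   yields MDL_PW
 *     ldlm_mode_t l = mdt_dlm_lock_modes[MDL_PW];   yields LCK_PW
 *
 * i.e. the two arrays are inverse mappings between LDLM and MDL lock modes.
 */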
98
99 /*
100  * Initialized in mdt_mod_init().
101  */
102 static unsigned long mdt_num_threads;
103 static unsigned long mdt_min_threads;
104 static unsigned long mdt_max_threads;
105
106 /* ptlrpc request handlers for the MDT. All handlers are
107  * grouped into several slices (struct mdt_opc_slice)
108  * and stored in an array, mdt_handlers[].
109  */
110 struct mdt_handler {
111         /* The name of this handler. */
112         const char *mh_name;
113         /* Fail id for this handler, checked at the beginning of this handler */
114         int         mh_fail_id;
115         /* Operation code for this handler */
116         __u32       mh_opc;
117         /* flags are listed in enum mdt_handler_flags below. */
118         __u32       mh_flags;
119         /* The actual handler function to execute. */
120         int (*mh_act)(struct mdt_thread_info *info);
121         /* Request format for this request. */
122         const struct req_format *mh_fmt;
123 };
124
125 enum mdt_handler_flags {
126         /*
127          * struct mdt_body is passed in the incoming message, and the object
128          * identified by its fid exists on disk.
129          *
130          * "habeo corpus" == "I have a body"
131          */
132         HABEO_CORPUS = (1 << 0),
133         /*
134          * struct ldlm_request is passed in the incoming message.
135          *
136          * "habeo clavis" == "I have a key"
137          */
138         HABEO_CLAVIS = (1 << 1),
139         /*
140          * this request has a fixed reply format, so that the reply message
141          * can be packed by generic code.
142          *
143          * "habeo refero" == "I have a reply"
144          */
145         HABEO_REFERO = (1 << 2),
146         /*
147          * this request will modify something, so check whether the filesystem
148          * is readonly or not, then return -EROFS to client asap if necessary.
149          *
150          * "mutabor" == "I shall modify"
151          */
152         MUTABOR      = (1 << 3)
153 };
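/*
 * Editorial note (not part of the original file): a hypothetical, illustrative
 * mdt_handler entry showing how the fields and flags above combine; the real
 * handler slices are defined later in this file and may be built with helper
 * macros instead:
 *
 *     static struct mdt_handler example_getattr_handler = {
 *             .mh_name    = "mds_getattr",
 *             .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
 *             .mh_opc     = MDS_GETATTR,
 *             .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
 *             .mh_act     = mdt_getattr,
 *             .mh_fmt     = &RQF_MDS_GETATTR
 *     };
 */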
154
155 struct mdt_opc_slice {
156         __u32               mos_opc_start;
157         int                 mos_opc_end;
158         struct mdt_handler *mos_hs;
159 };
160
161 static struct mdt_opc_slice mdt_regular_handlers[];
162 static struct mdt_opc_slice mdt_readpage_handlers[];
163 static struct mdt_opc_slice mdt_xmds_handlers[];
164 static struct mdt_opc_slice mdt_seq_handlers[];
165 static struct mdt_opc_slice mdt_fld_handlers[];
166
167 static struct mdt_device *mdt_dev(struct lu_device *d);
168 static int mdt_regular_handle(struct ptlrpc_request *req);
169 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
170 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
171                         struct getinfo_fid2path *fp);
172
173 static const struct lu_object_operations mdt_obj_ops;
174
175 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
176 {
177         if (!rep)
178                 return 0;
179         return (rep->lock_policy_res1 & flag);
180 }
181
182 void mdt_clear_disposition(struct mdt_thread_info *info,
183                            struct ldlm_reply *rep, int flag)
184 {
185         if (info)
186                 info->mti_opdata &= ~flag;
187         if (rep)
188                 rep->lock_policy_res1 &= ~flag;
189 }
190
191 void mdt_set_disposition(struct mdt_thread_info *info,
192                          struct ldlm_reply *rep, int flag)
193 {
194         if (info)
195                 info->mti_opdata |= flag;
196         if (rep)
197                 rep->lock_policy_res1 |= flag;
198 }
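/*
 * Editorial note (not part of the original file): a minimal usage sketch of
 * the disposition helpers above. mdt_getattr_name_lock() below, for example,
 * records intent dispositions in both the thread info and the ldlm reply:
 *
 *     mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
 *     ...
 *     mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);  (name not found)
 *
 * and mdt_get_disposition(ldlm_rep, flag) tests whether a given bit is set.
 */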
199
200 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
201 {
202         lh->mlh_pdo_hash = 0;
203         lh->mlh_reg_mode = lm;
204         lh->mlh_type = MDT_REG_LOCK;
205 }
206
207 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
208                        const char *name, int namelen)
209 {
210         lh->mlh_reg_mode = lm;
211         lh->mlh_type = MDT_PDO_LOCK;
212
213         if (name != NULL) {
214                 LASSERT(namelen > 0);
215                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
216         } else {
217                 LASSERT(namelen == 0);
218                 lh->mlh_pdo_hash = 0ull;
219         }
220 }
221
222 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
223                               struct mdt_lock_handle *lh)
224 {
225         mdl_mode_t mode;
226         ENTRY;
227
228         /*
229          * Any dir access needs a couple of locks:
230          *
231          * 1) on the part of the dir we are going to look up in or modify;
232          *
233          * 2) on the whole dir to protect it from concurrent splitting and/or
234          * to flush the client's cache for readdir().
235          *
236          * So, for a given mode and object, this routine decides what lock
237          * mode to use for lock #2:
238          *
239          * 1) if the caller is going to look up in the dir, then we only need
240          * to protect the dir from being split - LCK_CR;
241          *
242          * 2) if the caller is going to modify the dir, then we need to
243          * protect the dir from being split and to flush the cache - LCK_CW;
244          *
245          * 3) if the caller is going to modify the dir and that dir seems
246          * ready for splitting, then we need to protect it from any type of
247          * access (lookup/modify/split) - LCK_EX --bzzz
248          */
249
250         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
251         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
252
253         /*
254          * Ask the underlying layer for its opinion on the preferred PDO lock
255          * mode, passing the access type as the regular lock mode:
256          *
257          * - MDL_MINMODE means that the lower layer does not want to specify
258          * a lock mode;
259          *
260          * - MDL_NL means that no PDO lock should be taken. This is used in
261          * some cases, e.g. for non-splittable directories there is no need
262          * to use PDO locks at all.
263          */
264         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
265                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
266
267         if (mode != MDL_MINMODE) {
268                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
269         } else {
270                 /*
271                  * The lower layer does not want to specify a locking mode.
272                  * We do it ourselves. No special protection is needed, just
273                  * flush the client's cache on modification and allow
274                  * concurrent modification.
275                  */
276                 switch (lh->mlh_reg_mode) {
277                 case LCK_EX:
278                         lh->mlh_pdo_mode = LCK_EX;
279                         break;
280                 case LCK_PR:
281                         lh->mlh_pdo_mode = LCK_CR;
282                         break;
283                 case LCK_PW:
284                         lh->mlh_pdo_mode = LCK_CW;
285                         break;
286                 default:
287                         CERROR("Unexpected lock type (0x%x)\n",
288                                (int)lh->mlh_reg_mode);
289                         LBUG();
290                 }
291         }
292
293         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
294         EXIT;
295 }
296
297 static int mdt_getstatus(struct mdt_thread_info *info)
298 {
299         struct mdt_device *mdt  = info->mti_mdt;
300         struct md_device  *next = mdt->mdt_child;
301         struct mdt_body   *repbody;
302         int                rc;
303
304         ENTRY;
305
306         rc = mdt_check_ucred(info);
307         if (rc)
308                 RETURN(err_serious(rc));
309
310         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
311                 RETURN(err_serious(-ENOMEM));
312
313         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
314         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
315         if (rc != 0)
316                 RETURN(rc);
317
318         repbody->valid |= OBD_MD_FLID;
319
320         if (mdt->mdt_opts.mo_mds_capa &&
321             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
322                 struct mdt_object  *root;
323                 struct lustre_capa *capa;
324
325                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
326                 if (IS_ERR(root))
327                         RETURN(PTR_ERR(root));
328
329                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
330                 LASSERT(capa);
331                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
332                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
333                                  0);
334                 mdt_object_put(info->mti_env, root);
335                 if (rc == 0)
336                         repbody->valid |= OBD_MD_FLMDSCAPA;
337         }
338
339         RETURN(rc);
340 }
341
342 static int mdt_statfs(struct mdt_thread_info *info)
343 {
344         struct md_device      *next  = info->mti_mdt->mdt_child;
345         struct ptlrpc_service *svc;
346         struct obd_statfs     *osfs;
347         int                    rc;
348
349         ENTRY;
350
351         svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;
352
353         /* This will trigger a watchdog timeout */
354         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
355                          (MDT_SERVICE_WATCHDOG_FACTOR *
356                           at_get(&svc->srv_at_estimate)) + 1);
357
358         rc = mdt_check_ucred(info);
359         if (rc)
360                 RETURN(err_serious(rc));
361
362         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
363                 rc = err_serious(-ENOMEM);
364         } else {
365                 osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
366                 rc = next->md_ops->mdo_statfs(info->mti_env, next,
367                                               &info->mti_u.ksfs);
368                 statfs_pack(osfs, &info->mti_u.ksfs);
369         }
370         RETURN(rc);
371 }
372
373 /**
374  * Pack SOM attributes into the reply.
375  * Call under a DLM UPDATE lock.
376  */
377 static void mdt_pack_size2body(struct mdt_thread_info *info,
378                                struct mdt_object *mo)
379 {
380         struct mdt_body *b;
381         struct md_attr *ma = &info->mti_attr;
382
383         LASSERT(ma->ma_attr.la_valid & LA_MODE);
384         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
385
386         /* Check if Size-on-MDS is supported, if this is a regular file, if
387          * SOM is enabled on the object, and if the SOM cache exists and is
388          * valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
389         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
390             !S_ISREG(ma->ma_attr.la_mode) ||
391             !mdt_object_is_som_enabled(mo) ||
392             !(ma->ma_valid & MA_SOM))
393                 return;
394
395         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
396         b->size = ma->ma_som->msd_size;
397         b->blocks = ma->ma_som->msd_blocks;
398 }
399
400 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
401                         const struct lu_attr *attr, const struct lu_fid *fid)
402 {
403         struct md_attr *ma = &info->mti_attr;
404
405         LASSERT(ma->ma_valid & MA_INODE);
406
407         b->atime      = attr->la_atime;
408         b->mtime      = attr->la_mtime;
409         b->ctime      = attr->la_ctime;
410         b->mode       = attr->la_mode;
411         b->size       = attr->la_size;
412         b->blocks     = attr->la_blocks;
413         b->uid        = attr->la_uid;
414         b->gid        = attr->la_gid;
415         b->flags      = attr->la_flags;
416         b->nlink      = attr->la_nlink;
417         b->rdev       = attr->la_rdev;
418
419         /* XXX: should pack the reply body according to lu_valid */
420         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
421                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
422                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
423                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
424
425         if (!S_ISREG(attr->la_mode)) {
426                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
427         } else if (ma->ma_need & MA_LOV && ma->ma_lmm_size == 0) {
428                 /* This means no objects are allocated on the OSTs. */
429                 LASSERT(!(ma->ma_valid & MA_LOV));
430                 /* just ignore blocks occupied by extended attributes on MDS */
431                 b->blocks = 0;
432                 /* if no objects are allocated on the OSTs, the size on the MDS is valid. b=22272 */
433                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
434         }
435
436         if (fid) {
437                 b->fid1 = *fid;
438                 b->valid |= OBD_MD_FLID;
439
440                 /* FIXME: these should be fixed when the new igif is ready. */
441                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
442                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
443                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
444
445                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
446                                 PFID(fid), b->nlink, b->mode, b->size);
447         }
448
449         if (info)
450                 mdt_body_reverse_idmap(info, b);
451
452         if (b->valid & OBD_MD_FLSIZE)
453                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
454                        PFID(fid), b->size);
455 }
456
457 static inline int mdt_body_has_lov(const struct lu_attr *la,
458                                    const struct mdt_body *body)
459 {
460         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
461                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
462 }
463
464 static int mdt_getattr_internal(struct mdt_thread_info *info,
465                                 struct mdt_object *o, int ma_need)
466 {
467         struct md_object        *next = mdt_object_child(o);
468         const struct mdt_body   *reqbody = info->mti_body;
469         struct ptlrpc_request   *req = mdt_info_req(info);
470         struct md_attr          *ma = &info->mti_attr;
471         struct lu_attr          *la = &ma->ma_attr;
472         struct req_capsule      *pill = info->mti_pill;
473         const struct lu_env     *env = info->mti_env;
474         struct mdt_body         *repbody;
475         struct lu_buf           *buffer = &info->mti_buf;
476         int                     rc;
477         ENTRY;
478
479         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
480                 RETURN(err_serious(-ENOMEM));
481
482         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
483
484         ma->ma_valid = 0;
485
486         rc = mdt_object_exists(o);
487         if (rc < 0) {
488                 /* This object is located on remote node.*/
489                 repbody->fid1 = *mdt_object_fid(o);
490                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
491                 RETURN(0);
492         }
493
494         buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
495         buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);
496
497         /* If it is a dir object and the client requests MEA, return the MEA. */
498         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
499             reqbody->valid & OBD_MD_MEA) {
500                 /* Assumption: MDT_MD size is enough for lmv size. */
501                 ma->ma_lmv = buffer->lb_buf;
502                 ma->ma_lmv_size = buffer->lb_len;
503                 ma->ma_need = MA_LMV | MA_INODE;
504         } else {
505                 ma->ma_lmm = buffer->lb_buf;
506                 ma->ma_lmm_size = buffer->lb_len;
507                 ma->ma_need = MA_LOV | MA_INODE;
508         }
509
510         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
511             reqbody->valid & OBD_MD_FLDIREA  &&
512             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
513                 /* get default stripe info for this dir. */
514                 ma->ma_need |= MA_LOV_DEF;
515         }
516         ma->ma_need |= ma_need;
517         if (ma->ma_need & MA_SOM)
518                 ma->ma_som = &info->mti_u.som.data;
519
520         rc = mo_attr_get(env, next, ma);
521         if (unlikely(rc)) {
522                 CERROR("getattr error for "DFID": %d\n",
523                         PFID(mdt_object_fid(o)), rc);
524                 RETURN(rc);
525         }
526
527         if (likely(ma->ma_valid & MA_INODE))
528                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
529         else
530                 RETURN(-EFAULT);
531
532         if (mdt_body_has_lov(la, reqbody)) {
533                 if (ma->ma_valid & MA_LOV) {
534                         LASSERT(ma->ma_lmm_size);
535                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
536                         repbody->eadatasize = ma->ma_lmm_size;
537                         if (S_ISDIR(la->la_mode))
538                                 repbody->valid |= OBD_MD_FLDIREA;
539                         else
540                                 repbody->valid |= OBD_MD_FLEASIZE;
541                 }
542                 if (ma->ma_valid & MA_LMV) {
543                         LASSERT(S_ISDIR(la->la_mode));
544                         repbody->eadatasize = ma->ma_lmv_size;
545                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
546                 }
547         } else if (S_ISLNK(la->la_mode) &&
548                    reqbody->valid & OBD_MD_LINKNAME) {
549                 buffer->lb_buf = ma->ma_lmm;
550                 buffer->lb_len = reqbody->eadatasize;
551                 rc = mo_readlink(env, next, buffer);
552                 if (unlikely(rc <= 0)) {
553                         CERROR("readlink failed: %d\n", rc);
554                         rc = -EFAULT;
555                 } else {
556                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
557                                  rc -= 2;
558                         repbody->valid |= OBD_MD_LINKNAME;
559                         repbody->eadatasize = rc;
560                         /* NULL terminate */
561                         ((char*)ma->ma_lmm)[rc - 1] = 0;
562                         CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
563                                (char*)ma->ma_lmm, rc);
564                         rc = 0;
565                 }
566         }
567
568         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
569                 repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
570                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
571                 repbody->valid |= OBD_MD_FLMODEASIZE;
572                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
573                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
574                        repbody->max_cookiesize);
575         }
576
577         if (exp_connect_rmtclient(info->mti_exp) &&
578             reqbody->valid & OBD_MD_FLRMTPERM) {
579                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
580
581                 /* mdt_getattr_lock only */
582                 rc = mdt_pack_remote_perm(info, o, buf);
583                 if (rc) {
584                         repbody->valid &= ~OBD_MD_FLRMTPERM;
585                         repbody->aclsize = 0;
586                         RETURN(rc);
587                 } else {
588                         repbody->valid |= OBD_MD_FLRMTPERM;
589                         repbody->aclsize = sizeof(struct mdt_remote_perm);
590                 }
591         }
592 #ifdef CONFIG_FS_POSIX_ACL
593         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
594                  (reqbody->valid & OBD_MD_FLACL)) {
595                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
596                 buffer->lb_len = req_capsule_get_size(pill,
597                                                       &RMF_ACL, RCL_SERVER);
598                 if (buffer->lb_len > 0) {
599                         rc = mo_xattr_get(env, next, buffer,
600                                           XATTR_NAME_ACL_ACCESS);
601                         if (rc < 0) {
602                                 if (rc == -ENODATA) {
603                                         repbody->aclsize = 0;
604                                         repbody->valid |= OBD_MD_FLACL;
605                                         rc = 0;
606                                 } else if (rc == -EOPNOTSUPP) {
607                                         rc = 0;
608                                 } else {
609                                         CERROR("got acl size: %d\n", rc);
610                                 }
611                         } else {
612                                 repbody->aclsize = rc;
613                                 repbody->valid |= OBD_MD_FLACL;
614                                 rc = 0;
615                         }
616                 }
617         }
618 #endif
619
620         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
621             info->mti_mdt->mdt_opts.mo_mds_capa &&
622             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
623                 struct lustre_capa *capa;
624
625                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
626                 LASSERT(capa);
627                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
628                 rc = mo_capa_get(env, next, capa, 0);
629                 if (rc)
630                         RETURN(rc);
631                 repbody->valid |= OBD_MD_FLMDSCAPA;
632         }
633         RETURN(rc);
634 }
635
636 static int mdt_renew_capa(struct mdt_thread_info *info)
637 {
638         struct mdt_object  *obj = info->mti_object;
639         struct mdt_body    *body;
640         struct lustre_capa *capa, *c;
641         int rc;
642         ENTRY;
643
644         /* If the object doesn't exist, or the server has disabled capabilities,
645          * return directly; the client will see that the OBD_MD_FLOSSCAPA flag
646          * is not set in body->valid.
647          */
648         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
649             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
650                 RETURN(0);
651
652         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
653         LASSERT(body != NULL);
654
655         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
656         LASSERT(c);
657
658         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
659         LASSERT(capa);
660
661         *capa = *c;
662         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
663         if (rc == 0)
664                 body->valid |= OBD_MD_FLOSSCAPA;
665         RETURN(rc);
666 }
667
668 static int mdt_getattr(struct mdt_thread_info *info)
669 {
670         struct mdt_object       *obj = info->mti_object;
671         struct req_capsule      *pill = info->mti_pill;
672         struct mdt_body         *reqbody;
673         struct mdt_body         *repbody;
674         mode_t                   mode;
675         int                      md_size;
676         int rc;
677         ENTRY;
678
679         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
680         LASSERT(reqbody);
681
682         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
683                 rc = req_capsule_server_pack(pill);
684                 if (unlikely(rc))
685                         RETURN(err_serious(rc));
686                 rc = mdt_renew_capa(info);
687                 GOTO(out_shrink, rc);
688         }
689
690         LASSERT(obj != NULL);
691         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
692
693         mode = lu_object_attr(&obj->mot_obj.mo_lu);
694         if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
695             (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
696                 md_size = reqbody->eadatasize;
697         else
698                 md_size = info->mti_mdt->mdt_max_mdsize;
699
700         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);
701
702         rc = req_capsule_server_pack(pill);
703         if (unlikely(rc != 0))
704                 RETURN(err_serious(rc));
705
706         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
707         LASSERT(repbody != NULL);
708         repbody->eadatasize = 0;
709         repbody->aclsize = 0;
710
711         if (reqbody->valid & OBD_MD_FLRMTPERM)
712                 rc = mdt_init_ucred(info, reqbody);
713         else
714                 rc = mdt_check_ucred(info);
715         if (unlikely(rc))
716                 GOTO(out_shrink, rc);
717
718         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
719         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
720
721         /*
722          * Don't check the capability at all, because rename might getattr
723          * for a remote object, and at that time no capability is available.
724          */
725         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
726         rc = mdt_getattr_internal(info, obj, 0);
727         if (reqbody->valid & OBD_MD_FLRMTPERM)
728                 mdt_exit_ucred(info);
729         EXIT;
730 out_shrink:
731         mdt_shrink_reply(info);
732         return rc;
733 }
734
735 static int mdt_is_subdir(struct mdt_thread_info *info)
736 {
737         struct mdt_object     *o = info->mti_object;
738         struct req_capsule    *pill = info->mti_pill;
739         const struct mdt_body *body = info->mti_body;
740         struct mdt_body       *repbody;
741         int                    rc;
742         ENTRY;
743
744         LASSERT(o != NULL);
745
746         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
747
748         /*
749          * We save the last checked parent fid in @repbody->fid1 for the
750          * remote directory case.
751          */
752         LASSERT(fid_is_sane(&body->fid2));
753         LASSERT(mdt_object_exists(o) > 0);
754         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
755                            &body->fid2, &repbody->fid1);
756         if (rc == 0 || rc == -EREMOTE)
757                 repbody->valid |= OBD_MD_FLID;
758
759         RETURN(rc);
760 }
761
762 static int mdt_raw_lookup(struct mdt_thread_info *info,
763                           struct mdt_object *parent,
764                           const struct lu_name *lname,
765                           struct ldlm_reply *ldlm_rep)
766 {
767         struct md_object *next = mdt_object_child(info->mti_object);
768         const struct mdt_body *reqbody = info->mti_body;
769         struct lu_fid *child_fid = &info->mti_tmp_fid1;
770         struct mdt_body *repbody;
771         int rc;
772         ENTRY;
773
774         if (reqbody->valid != OBD_MD_FLID)
775                 RETURN(0);
776
777         LASSERT(!info->mti_cross_ref);
778
779         /* Only get the fid of this object by name */
780         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
781                         &info->mti_spec);
782 #if 0
783         /* XXX is raw_lookup possible as intent operation? */
784         if (rc != 0) {
785                 if (rc == -ENOENT)
786                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
787                 RETURN(rc);
788         } else
789                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
790
791         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
792 #endif
793         if (rc == 0) {
794                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
795                 repbody->fid1 = *child_fid;
796                 repbody->valid = OBD_MD_FLID;
797         }
798         RETURN(1);
799 }
800
801 /*
802  * The UPDATE lock should be taken against the parent and released before exit;
803  * the child_bits lock should be taken against the child and returned back:
804  *            (1) a normal request should release the child lock;
805  *            (2) an intent request will grant the lock to the client.
806  */
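/*
 * Editorial note (not part of the original file): for the "normal request"
 * case above, the caller is expected to drop the returned child lock itself,
 * as mdt_getattr_name() below does:
 *
 *     rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
 *     if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
 *             ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
 *             lhc->mlh_reg_lh.cookie = 0;
 *     }
 */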
807 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
808                                  struct mdt_lock_handle *lhc,
809                                  __u64 child_bits,
810                                  struct ldlm_reply *ldlm_rep)
811 {
812         struct ptlrpc_request  *req       = mdt_info_req(info);
813         struct mdt_body        *reqbody   = NULL;
814         struct mdt_object      *parent    = info->mti_object;
815         struct mdt_object      *child;
816         struct md_object       *next      = mdt_object_child(parent);
817         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
818         struct lu_name         *lname     = NULL;
819         const char             *name      = NULL;
820         int                     namelen   = 0;
821         struct mdt_lock_handle *lhp;
822         struct ldlm_lock       *lock;
823         struct ldlm_res_id     *res_id;
824         int                     is_resent;
825         int                     ma_need = 0;
826         int                     rc;
827
828         ENTRY;
829
830         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
831         LASSERT(ergo(is_resent,
832                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
833
834         LASSERT(parent != NULL);
835         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
836         if (name == NULL)
837                 RETURN(err_serious(-EFAULT));
838
839         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
840                                        RCL_CLIENT) - 1;
841         if (!info->mti_cross_ref) {
842                 /*
843                  * XXX: The "namelen == 0" check is for getattr by fid
844                  * (OBD_CONNECT_ATTRFID); otherwise an empty name is not
845                  * allowed, i.e. the name must contain at least one character
846                  * plus the terminating '\0'.
847                  */
848                 if (namelen == 0) {
849                         reqbody = req_capsule_client_get(info->mti_pill,
850                                                          &RMF_MDT_BODY);
851                         LASSERT(fid_is_sane(&reqbody->fid2));
852                         name = NULL;
853
854                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
855                                "ldlm_rep = %p\n",
856                                PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
857                                ldlm_rep);
858                 } else {
859                         lname = mdt_name(info->mti_env, (char *)name, namelen);
860                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
861                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
862                                name, ldlm_rep);
863                 }
864         }
865         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
866
867         rc = mdt_object_exists(parent);
868         if (unlikely(rc == 0)) {
869                 LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
870                                 &parent->mot_obj.mo_lu,
871                                 "Parent doesn't exist!\n");
872                 RETURN(-ESTALE);
873         } else if (!info->mti_cross_ref) {
874                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
875                          PFID(mdt_object_fid(parent)));
876         }
877         if (lname) {
878                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
879                 if (rc != 0) {
880                         if (rc > 0)
881                                 rc = 0;
882                         RETURN(rc);
883                 }
884         }
885
886         if (info->mti_cross_ref) {
887                 /* Only getattr on the child. Parent is on another node. */
888                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
889                 child = parent;
890                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
891                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
892
893                 if (is_resent) {
894                         /* Do not take lock for resent case. */
895                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
896                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
897                                  lhc->mlh_reg_lh.cookie);
898                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
899                                                 &lock->l_resource->lr_name));
900                         LDLM_LOCK_PUT(lock);
901                         rc = 0;
902                 } else {
903                         mdt_lock_handle_init(lhc);
904                         mdt_lock_reg_init(lhc, LCK_PR);
905
906                         /*
907                          * The object's name is on another MDS; no lookup
908                          * lock is needed here, but an update lock is.
909                          */
910                         child_bits &= ~MDS_INODELOCK_LOOKUP;
911                         child_bits |= MDS_INODELOCK_UPDATE;
912
913                         rc = mdt_object_lock(info, child, lhc, child_bits,
914                                              MDT_LOCAL_LOCK);
915                 }
916                 if (rc == 0) {
917                         /* Finally, we can get attr for child. */
918                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
919                                          BYPASS_CAPA);
920                         rc = mdt_getattr_internal(info, child, 0);
921                         if (unlikely(rc != 0))
922                                 mdt_object_unlock(info, child, lhc, 1);
923                 }
924                 RETURN(rc);
925         }
926
927         /* step 1: lock parent */
928         lhp = &info->mti_lh[MDT_LH_PARENT];
929         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
930         rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
931                              MDT_LOCAL_LOCK);
932
933         if (unlikely(rc != 0))
934                 RETURN(rc);
935
936         if (lname) {
937                 /* step 2: lookup child's fid by name */
938                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
939                                 &info->mti_spec);
940
941                 if (rc != 0) {
942                         if (rc == -ENOENT)
943                                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
944                         GOTO(out_parent, rc);
945                 } else
946                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
947         } else {
948                 *child_fid = reqbody->fid2;
949                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
950         }
951
952         /*
953          * step 3: find the child object by fid & lock it,
954          *         regardless of whether it is local or remote.
955          */
956         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
957
958         if (unlikely(IS_ERR(child)))
959                 GOTO(out_parent, rc = PTR_ERR(child));
960         if (is_resent) {
961                 /* Do not take lock for resent case. */
962                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
963                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
964                          lhc->mlh_reg_lh.cookie);
965
966                 res_id = &lock->l_resource->lr_name;
967                 if (!fid_res_name_eq(mdt_object_fid(child),
968                                     &lock->l_resource->lr_name)) {
969                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
970                                                  &lock->l_resource->lr_name),
971                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
972                                  (unsigned long)res_id->name[0],
973                                  (unsigned long)res_id->name[1],
974                                  (unsigned long)res_id->name[2],
975                                  PFID(mdt_object_fid(parent)));
976                           CWARN("Although resent, did not get the child lock, "
977                                 "parent: "DFID" child: "DFID"\n",
978                                 PFID(mdt_object_fid(parent)),
979                                 PFID(mdt_object_fid(child)));
980                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
981                           LDLM_LOCK_PUT(lock);
982                           GOTO(relock, 0);
983                 }
984                 LDLM_LOCK_PUT(lock);
985                 rc = 0;
986         } else {
987                 struct md_attr *ma;
988 relock:
989                 ma = &info->mti_attr;
990
991                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
992                 mdt_lock_handle_init(lhc);
993                 mdt_lock_reg_init(lhc, LCK_PR);
994
995                 if (mdt_object_exists(child) == 0) {
996                         LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
997                                         &child->mot_obj.mo_lu,
998                                         "Object doesn't exist!\n");
999                         GOTO(out_child, rc = -ESTALE);
1000                 }
1001
1002                 ma->ma_valid = 0;
1003                 ma->ma_need = MA_INODE;
1004                 rc = mo_attr_get(info->mti_env, next, ma);
1005                 if (unlikely(rc != 0))
1006                         GOTO(out_child, rc);
1007
1008                 /* If the file has not been changed for some time, we return
1009                  * not only a LOOKUP lock, but also an UPDATE lock, and this
1010                  * might save us an RPC on a later STAT. For directories, it
1011                  * also lets negative dentries start working for this dir. */
1012                 if (ma->ma_valid & MA_INODE &&
1013                     ma->ma_attr.la_valid & LA_CTIME &&
1014                     info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1015                     ma->ma_attr.la_ctime < cfs_time_current_sec())
1016                         child_bits |= MDS_INODELOCK_UPDATE;
1017
1018                 rc = mdt_object_lock(info, child, lhc, child_bits,
1019                                      MDT_CROSS_LOCK);
1020
1021                 if (unlikely(rc != 0))
1022                         GOTO(out_child, rc);
1023         }
1024
1025         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1026         /* Get MA_SOM attributes if update lock is given. */
1027         if (lock &&
1028             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1029             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1030                 ma_need = MA_SOM;
1031
1032         /* finally, we can get attr for child. */
1033         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1034         rc = mdt_getattr_internal(info, child, ma_need);
1035         if (unlikely(rc != 0)) {
1036                 mdt_object_unlock(info, child, lhc, 1);
1037         } else if (lock) {
1038                 /* Debugging code. */
1039                 res_id = &lock->l_resource->lr_name;
1040                 LDLM_DEBUG(lock, "Returning lock to client");
1041                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1042                                          &lock->l_resource->lr_name),
1043                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1044                          (unsigned long)res_id->name[0],
1045                          (unsigned long)res_id->name[1],
1046                          (unsigned long)res_id->name[2],
1047                          PFID(mdt_object_fid(child)));
1048                 mdt_pack_size2body(info, child);
1049         }
1050         if (lock)
1051                 LDLM_LOCK_PUT(lock);
1052
1053         EXIT;
1054 out_child:
1055         mdt_object_put(info->mti_env, child);
1056 out_parent:
1057         mdt_object_unlock(info, parent, lhp, 1);
1058         return rc;
1059 }
1060
1061 /* normal handler: should release the child lock */
1062 static int mdt_getattr_name(struct mdt_thread_info *info)
1063 {
1064         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1065         struct mdt_body        *reqbody;
1066         struct mdt_body        *repbody;
1067         int rc;
1068         ENTRY;
1069
1070         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1071         LASSERT(reqbody != NULL);
1072         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1073         LASSERT(repbody != NULL);
1074
1075         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1076         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1077         repbody->eadatasize = 0;
1078         repbody->aclsize = 0;
1079
1080         rc = mdt_init_ucred(info, reqbody);
1081         if (unlikely(rc))
1082                 GOTO(out_shrink, rc);
1083
1084         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1085         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1086                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1087                 lhc->mlh_reg_lh.cookie = 0;
1088         }
1089         mdt_exit_ucred(info);
1090         EXIT;
1091 out_shrink:
1092         mdt_shrink_reply(info);
1093         return rc;
1094 }
1095
1096 static const struct lu_device_operations mdt_lu_ops;
1097
1098 static int lu_device_is_mdt(struct lu_device *d)
1099 {
1100         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1101 }
1102
1103 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1104                          void *karg, void *uarg);
1105
1106 static int mdt_set_info(struct mdt_thread_info *info)
1107 {
1108         struct ptlrpc_request *req = mdt_info_req(info);
1109         char *key;
1110         void *val;
1111         int keylen, vallen, rc = 0;
1112         ENTRY;
1113
1114         rc = req_capsule_server_pack(info->mti_pill);
1115         if (rc)
1116                 RETURN(rc);
1117
1118         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1119         if (key == NULL) {
1120                 DEBUG_REQ(D_HA, req, "no set_info key");
1121                 RETURN(-EFAULT);
1122         }
1123
1124         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1125                                       RCL_CLIENT);
1126
1127         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1128         if (val == NULL) {
1129                 DEBUG_REQ(D_HA, req, "no set_info val");
1130                 RETURN(-EFAULT);
1131         }
1132
1133         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1134                                       RCL_CLIENT);
1135
1136         /* Swab any part of val that needs swabbing here */
1137         if (KEY_IS(KEY_READ_ONLY)) {
1138                 req->rq_status = 0;
1139                 lustre_msg_set_status(req->rq_repmsg, 0);
1140
1141                 cfs_spin_lock(&req->rq_export->exp_lock);
1142                 if (*(__u32 *)val)
1143                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1144                 else
1145                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1146                 cfs_spin_unlock(&req->rq_export->exp_lock);
1147
1148         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1149                 struct changelog_setinfo *cs =
1150                         (struct changelog_setinfo *)val;
1151                 if (vallen != sizeof(*cs)) {
1152                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1153                         RETURN(-EINVAL);
1154                 }
1155                 if (ptlrpc_req_need_swab(req)) {
1156                         __swab64s(&cs->cs_recno);
1157                         __swab32s(&cs->cs_id);
1158                 }
1159
1160                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1161                                    vallen, val, NULL);
1162                 lustre_msg_set_status(req->rq_repmsg, rc);
1163
1164         } else {
1165                 RETURN(-EINVAL);
1166         }
1167         RETURN(0);
1168 }
1169
1170 static int mdt_connect(struct mdt_thread_info *info)
1171 {
1172         int rc;
1173         struct ptlrpc_request *req;
1174
1175         req = mdt_info_req(info);
1176         rc = target_handle_connect(req);
1177         if (rc == 0) {
1178                 LASSERT(req->rq_export != NULL);
1179                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1180                 rc = mdt_init_sec_level(info);
1181                 if (rc == 0)
1182                         rc = mdt_init_idmap(info);
1183                 if (rc != 0)
1184                         obd_disconnect(class_export_get(req->rq_export));
1185         } else {
1186                 rc = err_serious(rc);
1187         }
1188         return rc;
1189 }
1190
1191 static int mdt_disconnect(struct mdt_thread_info *info)
1192 {
1193         int rc;
1194         ENTRY;
1195
1196         rc = target_handle_disconnect(mdt_info_req(info));
1197         if (rc)
1198                 rc = err_serious(rc);
1199         RETURN(rc);
1200 }
1201
1202 static int mdt_sendpage(struct mdt_thread_info *info,
1203                         struct lu_rdpg *rdpg)
1204 {
1205         struct ptlrpc_request   *req = mdt_info_req(info);
1206         struct obd_export       *exp = req->rq_export;
1207         struct ptlrpc_bulk_desc *desc;
1208         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1209         int                      tmpcount;
1210         int                      tmpsize;
1211         int                      timeout;
1212         int                      i;
1213         int                      rc;
1214         ENTRY;
1215
1216         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1217                                     MDS_BULK_PORTAL);
1218         if (desc == NULL)
1219                 RETURN(-ENOMEM);
1220
1221         for (i = 0, tmpcount = rdpg->rp_count;
1222                 i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
1223                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1224                 ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
1225         }
1226
1227         LASSERT(desc->bd_nob == rdpg->rp_count);
1228         rc = sptlrpc_svc_wrap_bulk(req, desc);
1229         if (rc)
1230                 GOTO(free_desc, rc);
1231
1232         rc = ptlrpc_start_bulk_transfer(desc);
1233         if (rc)
1234                 GOTO(free_desc, rc);
1235
1236         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1237                 GOTO(abort_bulk, rc = 0);
1238
1239         do {
1240                 timeout = (int) req->rq_deadline - cfs_time_current_sec();
1241                 if (timeout < 0)
1242                         CERROR("Req deadline already passed %lu (now: %lu)\n",
1243                                req->rq_deadline, cfs_time_current_sec());
1244                 *lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(max(timeout, 1)),
1245                                             cfs_time_seconds(1), NULL, NULL);
1246                 rc = l_wait_event(desc->bd_waitq,
1247                                   !ptlrpc_server_bulk_active(desc) ||
1248                                   exp->exp_failed ||
1249                                   exp->exp_abort_active_req, lwi);
1250                 LASSERT (rc == 0 || rc == -ETIMEDOUT);
1251         } while ((rc == -ETIMEDOUT) &&
1252                  (req->rq_deadline > cfs_time_current_sec()));
1253
1254         if (rc == 0) {
1255                 if (desc->bd_success &&
1256                     desc->bd_nob_transferred == rdpg->rp_count)
1257                         GOTO(free_desc, rc);
1258
1259                 rc = -ETIMEDOUT;
1260                 if (exp->exp_abort_active_req || exp->exp_failed)
1261                         GOTO(abort_bulk, rc);
1262         }
1263
1264         DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
1265                   (rc == -ETIMEDOUT) ? "timeout" : "network error",
1266                   desc->bd_nob_transferred, rdpg->rp_count,
1267                   exp->exp_client_uuid.uuid,
1268                   exp->exp_connection->c_remote_uuid.uuid);
1269
1270         class_fail_export(exp);
1271
1272         EXIT;
1273 abort_bulk:
1274         ptlrpc_abort_bulk(desc);
1275 free_desc:
1276         ptlrpc_free_bulk(desc);
1277         return rc;
1278 }
1279
1280 #ifdef HAVE_SPLIT_SUPPORT
1281 /*
1282  * Retrieve dir entries from the page and insert them into the slave object.
1283  * Actually, this should be in the osd layer, but since it will not be in the
1284  * final product, just do it here and do not define any more moo API for it.
1285  */
1286 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1287                               int size)
1288 {
1289         struct mdt_object *object = info->mti_object;
1290         struct lu_fid *lf = &info->mti_tmp_fid2;
1291         struct md_attr *ma = &info->mti_attr;
1292         struct lu_dirpage *dp;
1293         struct lu_dirent *ent;
1294         int rc = 0, offset = 0;
1295         ENTRY;
1296
1297         /* Make sure we have at least one entry. */
1298         if (size == 0)
1299                 RETURN(-EINVAL);
1300
1301         /*
1302          * Disable trans for this name insert, since the whole operation
1303          * would otherwise include many transactions.
1304          */
1305         info->mti_no_need_trans = 1;
1306         /*
1307          * For write_dir_page there is no need to update the parent's ctime,
1308          * and no permission check is done for name_insert.
1309          */
1310         ma->ma_attr.la_ctime = 0;
1311         ma->ma_attr.la_valid = LA_MODE;
1312         ma->ma_valid = MA_INODE;
1313
1314         cfs_kmap(page);
1315         dp = page_address(page);
1316         offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
1317
1318         for (ent = lu_dirent_start(dp); ent != NULL;
1319              ent = lu_dirent_next(ent)) {
1320                 struct lu_name *lname;
1321                 char *name;
1322
1323                 if (le16_to_cpu(ent->lde_namelen) == 0)
1324                         continue;
1325
1326                 fid_le_to_cpu(lf, &ent->lde_fid);
1327                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1328                         ma->ma_attr.la_mode = S_IFDIR;
1329                 else
1330                         ma->ma_attr.la_mode = 0;
1331                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1332                 if (name == NULL)
1333                         GOTO(out, rc = -ENOMEM);
1334
1335                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1336                 lname = mdt_name(info->mti_env, name,
1337                                  le16_to_cpu(ent->lde_namelen));
1338                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1339                 rc = mdo_name_insert(info->mti_env,
1340                                      md_object_next(&object->mot_obj),
1341                                      lname, lf, ma);
1342                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1343                 if (rc) {
1344                         CERROR("Can't insert %*.*s, rc %d\n",
1345                                le16_to_cpu(ent->lde_namelen),
1346                                le16_to_cpu(ent->lde_namelen),
1347                                ent->lde_name, rc);
1348                         GOTO(out, rc);
1349                 }
1350
1351                 offset += lu_dirent_size(ent);
1352                 if (offset >= size)
1353                         break;
1354         }
1355         EXIT;
1356 out:
1357         cfs_kunmap(page);
1358         return rc;
1359 }
1360
1361 static int mdt_bulk_timeout(void *data)
1362 {
1363         ENTRY;
1364
1365         CERROR("mdt bulk transfer timeout\n");
1366
1367         RETURN(1);
1368 }
1369
1370 static int mdt_writepage(struct mdt_thread_info *info)
1371 {
1372         struct ptlrpc_request   *req = mdt_info_req(info);
1373         struct mdt_body         *reqbody;
1374         struct l_wait_info      *lwi;
1375         struct ptlrpc_bulk_desc *desc;
1376         struct page             *page;
1377         int                rc;
1378         ENTRY;
1379
1380
1381         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1382         if (reqbody == NULL)
1383                 RETURN(err_serious(-EFAULT));
1384
1385         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1386         if (desc == NULL)
1387                 RETURN(err_serious(-ENOMEM));
1388
1389         /* allocate the page for the desc */
1390         page = cfs_alloc_page(CFS_ALLOC_STD);
1391         if (page == NULL)
1392                 GOTO(desc_cleanup, rc = -ENOMEM);
1393
1394         CDEBUG(D_INFO, "Received page offset %d size %d \n",
1395                (int)reqbody->size, (int)reqbody->nlink);
1396
1397         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1398                               (int)reqbody->nlink);
1399
1400         rc = sptlrpc_svc_prep_bulk(req, desc);
1401         if (rc != 0)
1402                 GOTO(cleanup_page, rc);
1403         /*
1404          * Check if client was evicted while we were doing i/o before touching
1405          * network.
1406          */
1407         OBD_ALLOC_PTR(lwi);
1408         if (!lwi)
1409                 GOTO(cleanup_page, rc = -ENOMEM);
1410
1411         if (desc->bd_export->exp_failed)
1412                 rc = -ENOTCONN;
1413         else
1414                 rc = ptlrpc_start_bulk_transfer (desc);
1415         if (rc == 0) {
1416                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
1417                                             mdt_bulk_timeout, desc);
1418                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1419                                   desc->bd_export->exp_failed, lwi);
1420                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1421                 if (rc == -ETIMEDOUT) {
1422                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1423                         ptlrpc_abort_bulk(desc);
1424                 } else if (desc->bd_export->exp_failed) {
1425                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1426                         rc = -ENOTCONN;
1427                         ptlrpc_abort_bulk(desc);
1428                 } else if (!desc->bd_success ||
1429                            desc->bd_nob_transferred != desc->bd_nob) {
1430                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1431                                   desc->bd_success ?
1432                                   "truncated" : "network error on",
1433                                   desc->bd_nob_transferred, desc->bd_nob);
1434                         /* XXX should this be a different errno? */
1435                         rc = -ETIMEDOUT;
1436                 }
1437         } else {
1438                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1439         }
1440         if (rc)
1441                 GOTO(cleanup_lwi, rc);
1442         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1443
1444 cleanup_lwi:
1445         OBD_FREE_PTR(lwi);
1446 cleanup_page:
1447         cfs_free_page(page);
1448 desc_cleanup:
1449         ptlrpc_free_bulk(desc);
1450         RETURN(rc);
1451 }
1452 #endif
1453
1454 static int mdt_readpage(struct mdt_thread_info *info)
1455 {
1456         struct mdt_object *object = info->mti_object;
1457         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1458         struct mdt_body   *reqbody;
1459         struct mdt_body   *repbody;
1460         int                rc;
1461         int                i;
1462         ENTRY;
1463
1464         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1465                 RETURN(err_serious(-ENOMEM));
1466
1467         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1468         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1469         if (reqbody == NULL || repbody == NULL)
1470                 RETURN(err_serious(-EFAULT));
1471
1472         /*
1473          * Prepare @rdpg before calling the lower layers and the transfer
1474          * itself. Here reqbody->size contains the offset to start reading
1475          * from, and reqbody->nlink contains the number of bytes to read.
1476          */
1477         rdpg->rp_hash = reqbody->size;
1478         if (rdpg->rp_hash != reqbody->size) {
1479                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1480                        rdpg->rp_hash, reqbody->size);
1481                 RETURN(-EFAULT);
1482         }
1483
1484         rdpg->rp_attrs = reqbody->mode;
1485         rdpg->rp_count  = reqbody->nlink;
1486         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT;
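        /*
         * Editor's note, illustrative arithmetic (not from the original
         * source): with a 4096-byte CFS_PAGE_SIZE (CFS_PAGE_SHIFT == 12),
         * a request for rp_count = 6000 bytes rounds up to
         * (6000 + 4095) >> 12 = 2 pages.
         */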
1487         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1488         if (rdpg->rp_pages == NULL)
1489                 RETURN(-ENOMEM);
1490
1491         for (i = 0; i < rdpg->rp_npages; ++i) {
1492                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1493                 if (rdpg->rp_pages[i] == NULL)
1494                         GOTO(free_rdpg, rc = -ENOMEM);
1495         }
1496
1497         /* call lower layers to fill allocated pages with directory data */
1498         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1499         if (rc)
1500                 GOTO(free_rdpg, rc);
1501
1502         /* send pages to client */
1503         rc = mdt_sendpage(info, rdpg);
1504
1505         EXIT;
1506 free_rdpg:
1507
1508         for (i = 0; i < rdpg->rp_npages; i++)
1509                 if (rdpg->rp_pages[i] != NULL)
1510                         cfs_free_page(rdpg->rp_pages[i]);
1511         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1512
1513         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1514                 RETURN(0);
1515
1516         return rc;
1517 }
1518
1519 static int mdt_reint_internal(struct mdt_thread_info *info,
1520                               struct mdt_lock_handle *lhc,
1521                               __u32 op)
1522 {
1523         struct req_capsule      *pill = info->mti_pill;
1524         struct mdt_device       *mdt = info->mti_mdt;
1525         struct md_quota         *mq = md_quota(info->mti_env);
1526         struct mdt_body         *repbody;
1527         int                      rc = 0;
1528         ENTRY;
1529
1530         /* pack reply */
1531         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1532                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1533                                      mdt->mdt_max_mdsize);
1534         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1535                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1536                                      mdt->mdt_max_cookiesize);
1537
1538         rc = req_capsule_server_pack(pill);
1539         if (rc != 0) {
1540                 CERROR("Can't pack response, rc %d\n", rc);
1541                 RETURN(err_serious(rc));
1542         }
1543
1544         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1545                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1546                 LASSERT(repbody);
1547                 repbody->eadatasize = 0;
1548                 repbody->aclsize = 0;
1549         }
1550
1551         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
1552                 GOTO(out_shrink, rc = err_serious(-EFAULT));
1553
1554         rc = mdt_reint_unpack(info, op);
1555         if (rc != 0) {
1556                 CERROR("Can't unpack reint, rc %d\n", rc);
1557                 GOTO(out_shrink, rc = err_serious(rc));
1558         }
1559
1560         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1561
1562         /* for replay no cookie / lmm is needed, because the client already has them */
1563         if (info->mti_spec.no_create == 1)  {
1564                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1565                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1566
1567                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1568                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1569                                              0);
1570         }
1571
1572         rc = mdt_init_ucred_reint(info);
1573         if (rc)
1574                 GOTO(out_shrink, rc);
1575
1576         rc = mdt_fix_attr_ucred(info, op);
1577         if (rc != 0)
1578                 GOTO(out_ucred, rc = err_serious(rc));
1579
1580         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1581                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1582                 GOTO(out_ucred, rc);
1583         }
1584         mq->mq_exp = info->mti_exp;
1585         rc = mdt_reint_rec(info, lhc);
1586         EXIT;
1587 out_ucred:
1588         mdt_exit_ucred(info);
1589 out_shrink:
1590         mdt_shrink_reply(info);
1591         return rc;
1592 }
1593
1594 static long mdt_reint_opcode(struct mdt_thread_info *info,
1595                              const struct req_format **fmt)
1596 {
1597         struct mdt_rec_reint *rec;
1598         long opc;
1599
1600         opc = err_serious(-EFAULT);
1601         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1602         if (rec != NULL) {
1603                 opc = rec->rr_opcode;
1604                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1605                 if (opc < REINT_MAX && fmt[opc] != NULL)
1606                         req_capsule_extend(info->mti_pill, fmt[opc]);
1607                 else {
1608                         CERROR("Unsupported opc: %ld\n", opc);
1609                         opc = err_serious(opc);
1610                 }
1611         }
1612         return opc;
1613 }
1614
1615 static int mdt_reint(struct mdt_thread_info *info)
1616 {
1617         long opc;
1618         int  rc;
1619
1620         static const struct req_format *reint_fmts[REINT_MAX] = {
1621                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1622                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1623                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1624                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1625                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1626                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1627                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1628         };
1629
1630         ENTRY;
1631
1632         opc = mdt_reint_opcode(info, reint_fmts);
1633         if (opc >= 0) {
1634                 /*
1635                  * No lock can be passed here from the client to the reint
1636                  * code path.
1637                  */
1638                 rc = mdt_reint_internal(info, NULL, opc);
1639         } else {
1640                 rc = opc;
1641         }
1642
1643         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1644         RETURN(rc);
1645 }
1646
1647 /* this should sync the whole device */
1648 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1649 {
1650         struct dt_device *dt = mdt->mdt_bottom;
1651         int rc;
1652         ENTRY;
1653
1654         rc = dt->dd_ops->dt_sync(env, dt);
1655         RETURN(rc);
1656 }
1657
1658 /* this should sync this object */
1659 static int mdt_object_sync(struct mdt_thread_info *info)
1660 {
1661         struct md_object *next;
1662         int rc;
1663         ENTRY;
1664
1665         if (!mdt_object_exists(info->mti_object)) {
1666                 CWARN("Non-existent object "DFID"!\n",
1667                       PFID(mdt_object_fid(info->mti_object)));
1668                 RETURN(-ESTALE);
1669         }
1670         next = mdt_object_child(info->mti_object);
1671         rc = mo_object_sync(info->mti_env, next);
1672
1673         RETURN(rc);
1674 }
1675
1676 static int mdt_sync(struct mdt_thread_info *info)
1677 {
1678         struct req_capsule *pill = info->mti_pill;
1679         struct mdt_body *body;
1680         int rc;
1681         ENTRY;
1682
1683         /* The fid may be zero, so we call req_capsule_set() manually */
1684         req_capsule_set(pill, &RQF_MDS_SYNC);
1685
1686         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1687         if (body == NULL)
1688                 RETURN(err_serious(-EINVAL));
1689
1690         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1691                 RETURN(err_serious(-ENOMEM));
1692
1693         if (fid_seq(&body->fid1) == 0) {
1694                 /* sync the whole device */
1695                 rc = req_capsule_server_pack(pill);
1696                 if (rc == 0)
1697                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1698                 else
1699                         rc = err_serious(rc);
1700         } else {
1701                 /* sync an object */
1702                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1703                 if (rc == 0) {
1704                         rc = mdt_object_sync(info);
1705                         if (rc == 0) {
1706                                 struct md_object *next;
1707                                 const struct lu_fid *fid;
1708                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1709
1710                                 next = mdt_object_child(info->mti_object);
1711                                 info->mti_attr.ma_need = MA_INODE;
1712                                 info->mti_attr.ma_valid = 0;
1713                                 rc = mo_attr_get(info->mti_env, next,
1714                                                  &info->mti_attr);
1715                                 if (rc == 0) {
1716                                         body = req_capsule_server_get(pill,
1717                                                                 &RMF_MDT_BODY);
1718                                         fid = mdt_object_fid(info->mti_object);
1719                                         mdt_pack_attr2body(info, body, la, fid);
1720                                 }
1721                         }
1722                 } else
1723                         rc = err_serious(rc);
1724         }
1725         RETURN(rc);
1726 }
1727
1728 #ifdef HAVE_QUOTA_SUPPORT
1729 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
1730 {
1731         struct obd_quotactl *oqctl;
1732         struct req_capsule *pill = info->mti_pill;
1733         struct obd_export *exp = info->mti_exp;
1734         struct md_quota *mq = md_quota(info->mti_env);
1735         struct md_device *next = info->mti_mdt->mdt_child;
1736         int rc;
1737         ENTRY;
1738
1739         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1740         if (oqctl == NULL)
1741                 RETURN(-EPROTO);
1742
1743         /* remote client has no permission for quotacheck */
1744         if (unlikely(exp_connect_rmtclient(exp)))
1745                 RETURN(-EPERM);
1746
1747         rc = req_capsule_server_pack(pill);
1748         if (rc)
1749                 RETURN(rc);
1750
1751         mq->mq_exp = exp;
1752         rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next,
1753                                                oqctl->qc_type);
1754         RETURN(rc);
1755 }
1756
1757 static int mdt_quotactl_handle(struct mdt_thread_info *info)
1758 {
1759         struct obd_quotactl *oqctl, *repoqc;
1760         struct req_capsule *pill = info->mti_pill;
1761         struct obd_export *exp = info->mti_exp;
1762         struct md_quota *mq = md_quota(info->mti_env);
1763         struct md_device *next = info->mti_mdt->mdt_child;
1764         const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
1765         int id, rc;
1766         ENTRY;
1767
1768         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1769         if (oqctl == NULL)
1770                 RETURN(-EPROTO);
1771
1772         id = oqctl->qc_id;
1773         if (exp_connect_rmtclient(exp)) {
1774                 struct ptlrpc_request *req = mdt_info_req(info);
1775                 struct mdt_export_data *med = mdt_req2med(req);
1776                 struct lustre_idmap_table *idmap = med->med_idmap;
1777
1778                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1779                              oqctl->qc_cmd != Q_GETINFO))
1780                         RETURN(-EPERM);
1781
1782
1783                 if (oqctl->qc_type == USRQUOTA)
1784                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1785                                                      oqctl->qc_id);
1786                 else if (oqctl->qc_type == GRPQUOTA)
1787                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1788                                                      oqctl->qc_id);
1789                 else
1790                         RETURN(-EINVAL);
1791
1792                 if (id == CFS_IDMAP_NOTFOUND) {
1793                         CDEBUG(D_QUOTA, "no mapping for id %u\n",
1794                                oqctl->qc_id);
1795                         RETURN(-EACCES);
1796                 }
1797         }
1798
1799         rc = req_capsule_server_pack(pill);
1800         if (rc)
1801                 RETURN(rc);
1802
1803         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
1804         LASSERT(repoqc != NULL);
1805
1806         mq->mq_exp = exp;
1807         switch (oqctl->qc_cmd) {
1808         case Q_QUOTAON:
1809                 rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
1810                 break;
1811         case Q_QUOTAOFF:
1812                 rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type);
1813                 break;
1814         case Q_SETINFO:
1815                 rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
1816                                       &oqctl->qc_dqinfo);
1817                 break;
1818         case Q_GETINFO:
1819                 rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
1820                                       &oqctl->qc_dqinfo);
1821                 break;
1822         case Q_SETQUOTA:
1823                 rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
1824                                        &oqctl->qc_dqblk);
1825                 break;
1826         case Q_GETQUOTA:
1827                 rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
1828                                        &oqctl->qc_dqblk);
1829                 break;
1830         case Q_GETOINFO:
1831                 rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
1832                                        &oqctl->qc_dqinfo);
1833                 break;
1834         case Q_GETOQUOTA:
1835                 rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
1836                                         &oqctl->qc_dqblk);
1837                 break;
1838         case LUSTRE_Q_INVALIDATE:
1839                 rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
1840                 break;
1841         case LUSTRE_Q_FINVALIDATE:
1842                 rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
1843                 break;
1844         default:
1845                 CERROR("unsupported mdt_quotactl command: %d\n",
1846                        oqctl->qc_cmd);
1847                 RETURN(-EFAULT);
1848         }
1849
1850         *repoqc = *oqctl;
1851         RETURN(rc);
1852 }
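/*
 * Editor's note on the remote-client path above: the uid/gid carried in the
 * request is first translated through the export's idmap table, so e.g. a
 * Q_GETQUOTA for a remote uid is looked up under the corresponding local id,
 * and commands other than Q_GETQUOTA/Q_GETINFO are refused with -EPERM for
 * such clients.
 */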
1853 #endif
1854
1855
1856 /*
1857  * OBD PING and other handlers.
1858  */
1859 static int mdt_obd_ping(struct mdt_thread_info *info)
1860 {
1861         int rc;
1862         ENTRY;
1863
1864         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
1865
1866         rc = target_handle_ping(mdt_info_req(info));
1867         if (rc < 0)
1868                 rc = err_serious(rc);
1869         RETURN(rc);
1870 }
1871
1872 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
1873 {
1874         return err_serious(-EOPNOTSUPP);
1875 }
1876
1877 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
1878 {
1879         return err_serious(-EOPNOTSUPP);
1880 }
1881
1882
1883 /*
1884  * LLOG handlers.
1885  */
1886
1887 /** clone llog ctxt from child (mdd)
1888  * This allows remote llog (replicator) access.
1889  * We can either pass all llog RPCs (eg mdt_llog_create) on to the child
1890  * where the context was originally set up, or we can handle them directly.
1891  * We choose the latter, but that means any llog contexts set up by the
1892  * child must be accessible by the mdt.  So we clone the context into our
1893  * context list here.
1894  */
1895 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
1896                                int idx)
1897 {
1898         struct md_device  *next = mdt->mdt_child;
1899         struct llog_ctxt *ctxt;
1900         int rc;
1901
1902         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
1903                 return 0;
1904
1905         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
1906         if (rc || ctxt == NULL) {
1907                 CERROR("Can't get mdd ctxt %d\n", rc);
1908                 return rc;
1909         }
1910
1911         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
1912         if (rc)
1913                 CERROR("Can't set mdt ctxt %d\n", rc);
1914
1915         return rc;
1916 }
1917
1918 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
1919                                  struct mdt_device *mdt, int idx)
1920 {
1921         struct llog_ctxt *ctxt;
1922
1923         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
1924         if (ctxt == NULL)
1925                 return 0;
1926         /* Put once for the get we just did, and once for the clone */
1927         llog_ctxt_put(ctxt);
1928         llog_ctxt_put(ctxt);
1929         return 0;
1930 }
1931
1932 static int mdt_llog_create(struct mdt_thread_info *info)
1933 {
1934         int rc;
1935
1936         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
1937         rc = llog_origin_handle_create(mdt_info_req(info));
1938         return (rc < 0 ? err_serious(rc) : rc);
1939 }
1940
1941 static int mdt_llog_destroy(struct mdt_thread_info *info)
1942 {
1943         int rc;
1944
1945         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
1946         rc = llog_origin_handle_destroy(mdt_info_req(info));
1947         return (rc < 0 ? err_serious(rc) : rc);
1948 }
1949
1950 static int mdt_llog_read_header(struct mdt_thread_info *info)
1951 {
1952         int rc;
1953
1954         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
1955         rc = llog_origin_handle_read_header(mdt_info_req(info));
1956         return (rc < 0 ? err_serious(rc) : rc);
1957 }
1958
1959 static int mdt_llog_next_block(struct mdt_thread_info *info)
1960 {
1961         int rc;
1962
1963         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
1964         rc = llog_origin_handle_next_block(mdt_info_req(info));
1965         return (rc < 0 ? err_serious(rc) : rc);
1966 }
1967
1968 static int mdt_llog_prev_block(struct mdt_thread_info *info)
1969 {
1970         int rc;
1971
1972         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
1973         rc = llog_origin_handle_prev_block(mdt_info_req(info));
1974         return (rc < 0 ? err_serious(rc) : rc);
1975 }
1976
1977
1978 /*
1979  * DLM handlers.
1980  */
1981 static struct ldlm_callback_suite cbs = {
1982         .lcs_completion = ldlm_server_completion_ast,
1983         .lcs_blocking   = ldlm_server_blocking_ast,
1984         .lcs_glimpse    = NULL
1985 };
1986
1987 static int mdt_enqueue(struct mdt_thread_info *info)
1988 {
1989         struct ptlrpc_request *req;
1990         int rc;
1991
1992         /*
1993          * info->mti_dlm_req already contains swapped and (if necessary)
1994          * converted dlm request.
1995          */
1996         LASSERT(info->mti_dlm_req != NULL);
1997
1998         req = mdt_info_req(info);
1999         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2000                                   req, info->mti_dlm_req, &cbs);
2001         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2002         return rc ? err_serious(rc) : req->rq_status;
2003 }
2004
2005 static int mdt_convert(struct mdt_thread_info *info)
2006 {
2007         int rc;
2008         struct ptlrpc_request *req;
2009
2010         LASSERT(info->mti_dlm_req);
2011         req = mdt_info_req(info);
2012         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2013         return rc ? err_serious(rc) : req->rq_status;
2014 }
2015
2016 static int mdt_bl_callback(struct mdt_thread_info *info)
2017 {
2018         CERROR("bl callbacks should not happen on MDS\n");
2019         LBUG();
2020         return err_serious(-EOPNOTSUPP);
2021 }
2022
2023 static int mdt_cp_callback(struct mdt_thread_info *info)
2024 {
2025         CERROR("cp callbacks should not happen on MDS\n");
2026         LBUG();
2027         return err_serious(-EOPNOTSUPP);
2028 }
2029
2030 /*
2031  * sec context handlers
2032  */
2033 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2034 {
2035         int rc;
2036
2037         rc = mdt_handle_idmap(info);
2038
2039         if (unlikely(rc)) {
2040                 struct ptlrpc_request *req = mdt_info_req(info);
2041                 __u32                  opc;
2042
2043                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2044                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2045                         sptlrpc_svc_ctx_invalidate(req);
2046         }
2047
2048         OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);
2049
2050         return rc;
2051 }
2052
2053 static struct mdt_object *mdt_obj(struct lu_object *o)
2054 {
2055         LASSERT(lu_device_is_mdt(o->lo_dev));
2056         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2057 }
2058
2059 struct mdt_object *mdt_object_find(const struct lu_env *env,
2060                                    struct mdt_device *d,
2061                                    const struct lu_fid *f)
2062 {
2063         struct lu_object *o;
2064         struct mdt_object *m;
2065         ENTRY;
2066
2067         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2068         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2069         if (unlikely(IS_ERR(o)))
2070                 m = (struct mdt_object *)o;
2071         else
2072                 m = mdt_obj(o);
2073         RETURN(m);
2074 }
2075
2076 /**
2077  * Asynchronous commit for the mdt device.
2078  *
2079  * Pass the asynchronous commit call down the MDS stack.
2080  *
2081  * \param env environment
2082  * \param mdt the mdt device
2083  */
2084 static void mdt_device_commit_async(const struct lu_env *env,
2085                                     struct mdt_device *mdt)
2086 {
2087         struct dt_device *dt = mdt->mdt_bottom;
2088         int rc;
2089
2090         rc = dt->dd_ops->dt_commit_async(env, dt);
2091         if (unlikely(rc != 0))
2092                 CWARN("async commit start failed with rc = %d\n", rc);
2093 }
2094
2095 /**
2096  * Mark the lock as "synchronous".
2097  *
2098  * Mark the lock to defer transaction commit to the unlock time.
2099  *
2100  * \param lock the lock to mark as "synchronous"
2101  *
2102  * \see mdt_is_lock_sync
2103  * \see mdt_save_lock
2104  */
2105 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2106 {
2107         lock->l_ast_data = (void*)1;
2108 }
2109
2110 /**
2111  * Check whether the lock is "synchronous" or not.
2112  *
2113  * \param lock the lock to check
2114  * \retval 1 the lock is "synchronous"
2115  * \retval 0 the lock isn't "synchronous"
2116  *
2117  * \see mdt_set_lock_sync
2118  * \see mdt_save_lock
2119  */
2120 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2121 {
2122         return lock->l_ast_data != NULL;
2123 }
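/*
 * Usage sketch (editor's note, derived from the code in this file): with
 * Commit-on-Share enabled, mdt_blocking_ast() marks a conflicting PW/EX lock
 * with mdt_set_lock_sync(), and mdt_save_lock() later checks the flag to
 * start an asynchronous commit before saving the lock:
 *
 *      if (mdt_is_lock_sync(lock))
 *              mdt_device_commit_async(info->mti_env, mdt);
 */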
2124
2125 /**
2126  * Blocking AST for mdt locks.
2127  *
2128  * Starts a transaction commit in case of a COS lock conflict, or
2129  * defers such a commit to mdt_save_lock.
2130  *
2131  * \param lock the lock which blocks a request or a cancelling lock
2132  * \param desc unused
2133  * \param data unused
2134  * \param flag indicates whether this is a cancelling or a blocking callback
2135  * \retval 0
2136  * \see ldlm_blocking_ast_nocheck
2137  */
2138 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2139                      void *data, int flag)
2140 {
2141         struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
2142         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2143         int rc;
2144         ENTRY;
2145
2146         if (flag == LDLM_CB_CANCELING)
2147                 RETURN(0);
2148         lock_res_and_lock(lock);
2149         if (lock->l_blocking_ast != mdt_blocking_ast) {
2150                 unlock_res_and_lock(lock);
2151                 RETURN(0);
2152         }
2153         if (mdt_cos_is_enabled(mdt) &&
2154             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2155             lock->l_blocking_lock != NULL &&
2156             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2157                 mdt_set_lock_sync(lock);
2158         }
2159         rc = ldlm_blocking_ast_nocheck(lock);
2160
2161         /* There is no lock conflict if l_blocking_lock == NULL; it
2162          * indicates a blocking ast sent from ldlm_lock_decref_internal
2163          * when the last reference to a local lock was released */
2164         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2165                 struct lu_env env;
2166
2167                 rc = lu_env_init(&env, LCT_MD_THREAD);
2168                 if (unlikely(rc != 0))
2169                         CWARN("lu_env initialization failed with rc = %d, "
2170                               "cannot start asynchronous commit\n", rc);
2171                 else
2172                         mdt_device_commit_async(&env, mdt);
2173                 lu_env_fini(&env);
2174         }
2175         RETURN(rc);
2176 }
2177
2178 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2179                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2180 {
2181         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2182         ldlm_policy_data_t *policy = &info->mti_policy;
2183         struct ldlm_res_id *res_id = &info->mti_res_id;
2184         int rc;
2185         ENTRY;
2186
2187         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2188         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2189         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2190         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2191
2192         if (mdt_object_exists(o) < 0) {
2193                 if (locality == MDT_CROSS_LOCK) {
2194                         /* cross-ref object fix */
2195                         ibits &= ~MDS_INODELOCK_UPDATE;
2196                         ibits |= MDS_INODELOCK_LOOKUP;
2197                 } else {
2198                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2199                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2200                 }
2201                 /* No PDO lock on remote object */
2202                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2203         }
2204
2205         if (lh->mlh_type == MDT_PDO_LOCK) {
2206                 /* check for existence after the object is locked */
2207                 if (mdt_object_exists(o) == 0) {
2208                         /* Non-existent object shouldn't have PDO lock */
2209                         RETURN(-ESTALE);
2210                 } else {
2211                         /* Non-dir object shouldn't have PDO lock */
2212                         LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
2213                 }
2214         }
2215
2216         memset(policy, 0, sizeof(*policy));
2217         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2218
2219         /*
2220          * Take a PDO lock on the whole directory and build the correct
2221          * @res_id for a lock on part of the directory.
2222          */
2223         if (lh->mlh_pdo_hash != 0) {
2224                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2225                 mdt_lock_pdo_mode(info, o, lh);
2226                 if (lh->mlh_pdo_mode != LCK_NL) {
2227                         /*
2228                          * Do not use LDLM_FL_LOCAL_ONLY for the parallel lock;
2229                          * it is never going to be sent to the client and we do
2230                          * not want it slowed down due to possible cancels.
2231                          */
2232                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2233                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2234                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2235                                           &info->mti_exp->exp_handle.h_cookie);
2236                         if (unlikely(rc))
2237                                 RETURN(rc);
2238                 }
2239
2240                 /*
2241                  * Finish res_id initialization by adding the name hash,
2242                  * marking the part of the directory being modified.
2243                  */
2244                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2245         }
2246
2247         policy->l_inodebits.bits = ibits;
2248
2249         /*
2250          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2251          * going to be sent to the client. If it is, the mdt_intent_policy()
2252          * path will fix it up and turn the FL_LOCAL flag off.
2253          */
2254         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2255                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2256                           &info->mti_exp->exp_handle.h_cookie);
2257         if (rc)
2258                 mdt_object_unlock(info, o, lh, 1);
2259         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2260                  lh->mlh_pdo_hash != 0 &&
2261                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2262                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 10);
2263         }
2264
2265         RETURN(rc);
2266 }
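/*
 * PDO locking sketch (editor's note): for a modification inside a directory,
 * mdt_object_lock() above may take two locks on the same fid: first a lock on
 * the whole directory in lh->mlh_pdo_mode, then a second lock whose res_id
 * additionally carries the name hash in LUSTRE_RES_ID_HSH_OFF.  Two
 * operations on different names in the same directory therefore only contend
 * on the weaker whole-directory lock.
 */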
2267
2268 /**
2269  * Save a lock within request object.
2270  *
2271  * Either keep the lock referenced until client ACK or transaction
2272  * commit happens, or release the lock immediately, depending on the
2273  * input parameters. If COS is ON, a write lock is converted to a COS
2274  * lock before saving.
2275  *
2276  * \param info thread info object
2277  * \param h lock handle
2278  * \param mode lock mode
2279  * \param decref force immediate lock releasing
2280  */
2281 static
2282 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2283                    ldlm_mode_t mode, int decref)
2284 {
2285         ENTRY;
2286
2287         if (lustre_handle_is_used(h)) {
2288                 if (decref || !info->mti_has_trans ||
2289                     !(mode & (LCK_PW | LCK_EX))){
2290                         mdt_fid_unlock(h, mode);
2291                 } else {
2292                         struct mdt_device *mdt = info->mti_mdt;
2293                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2294                         struct ptlrpc_request *req = mdt_info_req(info);
2295                         int no_ack = 0;
2296
2297                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2298                                  h->cookie);
2299                         CDEBUG(D_HA, "request = %p reply state = %p"
2300                                " transno = "LPD64"\n",
2301                                req, req->rq_reply_state, req->rq_transno);
2302                         if (mdt_cos_is_enabled(mdt)) {
2303                                 no_ack = 1;
2304                                 ldlm_lock_downgrade(lock, LCK_COS);
2305                                 mode = LCK_COS;
2306                         }
2307                         ptlrpc_save_lock(req, h, mode, no_ack);
2308                         if (mdt_is_lock_sync(lock)) {
2309                                 CDEBUG(D_HA, "found sync-lock,"
2310                                        " async commit started\n");
2311                                 mdt_device_commit_async(info->mti_env,
2312                                                         mdt);
2313                         }
2314                         LDLM_LOCK_PUT(lock);
2315                 }
2316                 h->cookie = 0ull;
2317         }
2318
2319         EXIT;
2320 }
2321
2322 /**
2323  * Unlock mdt object.
2324  *
2325  * Immediately release the regular lock and the PDO lock, or save the
2326  * locks in the request and keep them referenced until client ACK or
2327  * transaction commit.
2328  *
2329  * \param info thread info object
2330  * \param o mdt object
2331  * \param lh mdt lock handle referencing regular and PDO locks
2332  * \param decref force immediate lock releasing
2333  */
2334 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2335                        struct mdt_lock_handle *lh, int decref)
2336 {
2337         ENTRY;
2338
2339         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2340         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2341
2342         EXIT;
2343 }
2344
2345 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2346                                         const struct lu_fid *f,
2347                                         struct mdt_lock_handle *lh,
2348                                         __u64 ibits)
2349 {
2350         struct mdt_object *o;
2351
2352         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2353         if (!IS_ERR(o)) {
2354                 int rc;
2355
2356                 rc = mdt_object_lock(info, o, lh, ibits,
2357                                      MDT_LOCAL_LOCK);
2358                 if (rc != 0) {
2359                         mdt_object_put(info->mti_env, o);
2360                         o = ERR_PTR(rc);
2361                 }
2362         }
2363         return o;
2364 }
2365
2366 void mdt_object_unlock_put(struct mdt_thread_info * info,
2367                            struct mdt_object * o,
2368                            struct mdt_lock_handle *lh,
2369                            int decref)
2370 {
2371         mdt_object_unlock(info, o, lh, decref);
2372         mdt_object_put(info->mti_env, o);
2373 }
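/*
 * Typical pairing of the helpers above (editor's sketch; the variable names
 * and the decref choice are illustrative only):
 *
 *      o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
 *      if (IS_ERR(o))
 *              RETURN(PTR_ERR(o));
 *      rc = ...;                  (operate on the locked object)
 *      mdt_object_unlock_put(info, o, lh, rc != 0);
 *
 * A non-zero "decref" forces immediate lock release instead of saving the
 * lock until client ACK or transaction commit (see mdt_save_lock()).
 */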
2374
2375 static struct mdt_handler *mdt_handler_find(__u32 opc,
2376                                             struct mdt_opc_slice *supported)
2377 {
2378         struct mdt_opc_slice *s;
2379         struct mdt_handler   *h;
2380
2381         h = NULL;
2382         for (s = supported; s->mos_hs != NULL; s++) {
2383                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2384                         h = s->mos_hs + (opc - s->mos_opc_start);
2385                         if (likely(h->mh_opc != 0))
2386                                 LASSERTF(h->mh_opc == opc,
2387                                          "opcode mismatch %d != %d\n",
2388                                          h->mh_opc, opc);
2389                         else
2390                                 h = NULL; /* unsupported opc */
2391                         break;
2392                 }
2393         }
2394         return h;
2395 }
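/*
 * Illustrative example (editor's note; the table name and the opcode range
 * are assumptions, not taken from this file): a slice covering the MDS
 * opcodes would contain an entry such as
 *
 *      { .mos_opc_start = MDS_GETATTR,
 *        .mos_opc_end   = MDS_LAST_OPC,
 *        .mos_hs        = mdt_mds_ops },
 *
 * so that mdt_handler_find(MDS_GETATTR, slice) returns &mdt_mds_ops[0] after
 * verifying that its mh_opc matches the requested opcode.
 */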
2396
2397 static int mdt_lock_resname_compat(struct mdt_device *m,
2398                                    struct ldlm_request *req)
2399 {
2400         /* XXX something... later. */
2401         return 0;
2402 }
2403
2404 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2405 {
2406         /* XXX something... later. */
2407         return 0;
2408 }
2409
2410 /*
2411  * Generic code handling requests that have struct mdt_body passed in:
2412  *
2413  *  - extract mdt_body from request and save it in @info, if present;
2414  *
2415  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2416  *  @info;
2417  *
2418  *  - if HABEO_CORPUS flag is set for this request type check whether object
2419  *  actually exists on storage (lu_object_exists()).
2420  *
2421  */
2422 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2423 {
2424         const struct mdt_body    *body;
2425         struct mdt_object        *obj;
2426         const struct lu_env      *env;
2427         struct req_capsule       *pill;
2428         int                       rc;
2429         ENTRY;
2430
2431         env = info->mti_env;
2432         pill = info->mti_pill;
2433
2434         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2435         if (body == NULL)
2436                 RETURN(-EFAULT);
2437
2438         if (!(body->valid & OBD_MD_FLID))
2439                 RETURN(0);
2440
2441         if (!fid_is_sane(&body->fid1)) {
2442                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2443                 RETURN(-EINVAL);
2444         }
2445
2446         /*
2447          * Do not get size or any capa fields before we check that the request
2448          * actually contains a capa. There are some requests which do not, for
2449          * instance MDS_IS_SUBDIR.
2450          */
2451         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2452             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2453                 mdt_set_capainfo(info, 0, &body->fid1,
2454                                  req_capsule_client_get(pill, &RMF_CAPA1));
2455
2456         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2457         if (!IS_ERR(obj)) {
2458                 if ((flags & HABEO_CORPUS) &&
2459                     !mdt_object_exists(obj)) {
2460                         mdt_object_put(env, obj);
2461                         /* for capability renew ENOENT will be handled in
2462                          * mdt_renew_capa */
2463                         if (body->valid & OBD_MD_FLOSSCAPA)
2464                                 rc = 0;
2465                         else
2466                                 rc = -ENOENT;
2467                 } else {
2468                         info->mti_object = obj;
2469                         rc = 0;
2470                 }
2471         } else
2472                 rc = PTR_ERR(obj);
2473
2474         RETURN(rc);
2475 }
2476
2477 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2478 {
2479         struct req_capsule *pill = info->mti_pill;
2480         int rc;
2481         ENTRY;
2482
2483         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2484                 rc = mdt_body_unpack(info, flags);
2485         else
2486                 rc = 0;
2487
2488         if (rc == 0 && (flags & HABEO_REFERO)) {
2489                 struct mdt_device *mdt = info->mti_mdt;
2490
2491                 /* Pack reply. */
2492
2493                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2494                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2495                                              mdt->mdt_max_mdsize);
2496                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2497                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
2498                                              mdt->mdt_max_cookiesize);
2499
2500                 rc = req_capsule_server_pack(pill);
2501         }
2502         RETURN(rc);
2503 }
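/*
 * Editor's summary of the handler flags as they are used in this file:
 *   HABEO_CORPUS - the object named by mdt_body must exist on storage
 *                  (checked in mdt_body_unpack());
 *   HABEO_REFERO - the reply is packed here in mdt_unpack_req_pack_rep();
 *   HABEO_CLAVIS - the request carries an ldlm request, unpacked in
 *                  mdt_req_handle();
 *   MUTABOR      - the operation modifies the filesystem and is refused
 *                  with -EROFS on read-only exports.
 */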
2504
2505 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2506 {
2507         struct md_device *next = m->mdt_child;
2508
2509         return next->md_ops->mdo_init_capa_ctxt(env, next,
2510                                                 m->mdt_opts.mo_mds_capa,
2511                                                 m->mdt_capa_timeout,
2512                                                 m->mdt_capa_alg,
2513                                                 m->mdt_capa_keys);
2514 }
2515
2516 /*
2517  * Invoke handler for this request opc. Also do necessary preprocessing
2518  * (according to handler ->mh_flags), and post-processing (setting of
2519  * ->last_{xid,committed}).
2520  */
2521 static int mdt_req_handle(struct mdt_thread_info *info,
2522                           struct mdt_handler *h, struct ptlrpc_request *req)
2523 {
2524         int   rc, serious = 0;
2525         __u32 flags;
2526
2527         ENTRY;
2528
2529         LASSERT(h->mh_act != NULL);
2530         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2531         LASSERT(current->journal_info == NULL);
2532
2533         /*
2534          * Check for the various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2535          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2536          * etc., without talking to the mdt authors first. Checking the same
2537          * thing there again is useless, and returning a 0 error without packing
2538          * a reply is buggy! Handlers either pack a reply or return an error.
2539          *
2540          * We return 0 here and do not send any reply, in order to emulate a
2541          * network failure. Do not send any reply if any of the NET related
2542          * fail_ids has occurred.
2543          */
2544         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2545                 RETURN(0);
2546
2547         rc = 0;
2548         flags = h->mh_flags;
2549         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2550
2551         if (h->mh_fmt != NULL) {
2552                 req_capsule_set(info->mti_pill, h->mh_fmt);
2553                 rc = mdt_unpack_req_pack_rep(info, flags);
2554         }
2555
2556         if (rc == 0 && flags & MUTABOR &&
2557             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2558                 /* should it be rq_status? */
2559                 rc = -EROFS;
2560
2561         if (rc == 0 && flags & HABEO_CLAVIS) {
2562                 struct ldlm_request *dlm_req;
2563
2564                 LASSERT(h->mh_fmt != NULL);
2565
2566                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2567                 if (dlm_req != NULL) {
2568                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2569                                         LDLM_IBITS &&
2570                                      dlm_req->lock_desc.l_policy_data.\
2571                                         l_inodebits.bits == 0)) {
2572                                 /*
2573                                  * A lock without inodebits makes no sense and
2574                                  * will oops later in ldlm. If a client fails
2575                                  * to set such bits, do not trigger an ASSERTION.
2576                                  *
2577                                  * For the liblustre flock case, it may be zero.
2578                                  */
2579                                 rc = -EPROTO;
2580                         } else {
2581                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2582                                         rc = mdt_lock_resname_compat(
2583                                                                 info->mti_mdt,
2584                                                                 dlm_req);
2585                                 info->mti_dlm_req = dlm_req;
2586                         }
2587                 } else {
2588                         rc = -EFAULT;
2589                 }
2590         }
2591
2592         /* capability settings changed via /proc, so the ctxt needs to be reinitialized */
2593         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2594                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2595                 info->mti_mdt->mdt_capa_conf = 0;
2596         }
2597
2598         if (likely(rc == 0)) {
2599                 /*
2600                  * Process the request; there can be two types of rc:
2601                  * 1) errors with msg unpack/pack and other failures outside
2602                  * the operation itself -- these count as serious errors;
2603                  * 2) errors during the fs operation, which should be placed
2604                  * in rq_status only.
2605                  */
2606                 rc = h->mh_act(info);
2607                 if (rc == 0 &&
2608                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2609                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2610                                   "pack reply and returned 0 error\n",
2611                                   h->mh_name);
2612                         LBUG();
2613                 }
2614                 serious = is_serious(rc);
2615                 rc = clear_serious(rc);
2616         } else
2617                 serious = 1;
2618
2619         req->rq_status = rc;
2620
2621         /*
2622          * ELDLM_* codes which > 0 should be in rq_status only as well as
2623          * all non-serious errors.
2624          */
2625         if (rc > 0 || !serious)
2626                 rc = 0;
2627
2628         LASSERT(current->journal_info == NULL);
2629
2630         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2631             info->mti_mdt->mdt_opts.mo_compat_resname) {
2632                 struct ldlm_reply *dlmrep;
2633
2634                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2635                 if (dlmrep != NULL)
2636                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2637         }
2638
2639         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2640         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
2641                 target_committed_to_req(req);
2642
2643         if (unlikely(req_is_replay(req) &&
2644                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2645                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2646                 LBUG();
2647         }
2648
2649         target_send_reply(req, rc, info->mti_fail_id);
2650         RETURN(0);
2651 }
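/*
 * Editor's note on the error convention used above: handlers tag failures
 * that happen outside the filesystem operation itself (unpack/pack errors
 * and other internal failures) with err_serious(); mdt_req_handle() then
 * uses is_serious()/clear_serious() to decide whether the value is returned
 * at the RPC level or only placed in req->rq_status.  ELDLM_* codes > 0 and
 * all non-serious errors end up in rq_status only.
 */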
2652
2653 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2654 {
2655         lh->mlh_type = MDT_NUL_LOCK;
2656         lh->mlh_reg_lh.cookie = 0ull;
2657         lh->mlh_reg_mode = LCK_MINMODE;
2658         lh->mlh_pdo_lh.cookie = 0ull;
2659         lh->mlh_pdo_mode = LCK_MINMODE;
2660 }
2661
2662 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2663 {
2664         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2665         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2666 }
2667
2668 /*
2669  * Initialize fields of struct mdt_thread_info. Other fields are left
2670  * uninitialized, because it is too expensive to zero out the whole
2671  * mdt_thread_info (> 1K) on each request arrival.
2672  */
2673 static void mdt_thread_info_init(struct ptlrpc_request *req,
2674                                  struct mdt_thread_info *info)
2675 {
2676         int i;
2677         struct md_capainfo *ci;
2678
2679         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2680         info->mti_pill = &req->rq_pill;
2681
2682         /* lock handle */
2683         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2684                 mdt_lock_handle_init(&info->mti_lh[i]);
2685
2686         /* mdt device: it can be NULL while CONNECT */
2687         if (req->rq_export) {
2688                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2689                 info->mti_exp = req->rq_export;
2690         } else
2691                 info->mti_mdt = NULL;
2692         info->mti_env = req->rq_svc_thread->t_env;
2693         ci = md_capainfo(info->mti_env);
2694         memset(ci, 0, sizeof *ci);
2695         if (req->rq_export) {
2696                 if (exp_connect_rmtclient(req->rq_export))
2697                         ci->mc_auth = LC_ID_CONVERT;
2698                 else if (req->rq_export->exp_connect_flags &
2699                          OBD_CONNECT_MDS_CAPA)
2700                         ci->mc_auth = LC_ID_PLAIN;
2701                 else
2702                         ci->mc_auth = LC_ID_NONE;
2703         }
2704
2705         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
2706         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
2707         info->mti_mos = NULL;
2708
2709         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
2710         info->mti_body = NULL;
2711         info->mti_object = NULL;
2712         info->mti_dlm_req = NULL;
2713         info->mti_has_trans = 0;
2714         info->mti_no_need_trans = 0;
2715         info->mti_cross_ref = 0;
2716         info->mti_opdata = 0;
2717
2718         /* Do not check for split by default. */
2719         info->mti_spec.sp_ck_split = 0;
2720         info->mti_spec.no_create = 0;
2721 }
2722
2723 static void mdt_thread_info_fini(struct mdt_thread_info *info)
2724 {
2725         int i;
2726
2727         req_capsule_fini(info->mti_pill);
2728         if (info->mti_object != NULL) {
2729                 /*
2730                  * freeing an object may lead to OSD level transaction, do not
2731                  * let it mess with MDT. bz19385.
2732                  */
2733                 info->mti_no_need_trans = 1;
2734                 mdt_object_put(info->mti_env, info->mti_object);
2735                 info->mti_object = NULL;
2736         }
2737         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2738                 mdt_lock_handle_fini(&info->mti_lh[i]);
2739         info->mti_env = NULL;
2740 }
2741
2742 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
2743                                        struct obd_device *obd, int *process)
2744 {
2745         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2746         case MDS_CONNECT: /* This will never get here, but for completeness. */
2747         case OST_CONNECT: /* This will never get here, but for completeness. */
2748         case MDS_DISCONNECT:
2749         case OST_DISCONNECT:
2750                *process = 1;
2751                RETURN(0);
2752
2753         case MDS_CLOSE:
2754         case MDS_DONE_WRITING:
2755         case MDS_SYNC: /* used in unmounting */
2756         case OBD_PING:
2757         case MDS_REINT:
2758         case SEQ_QUERY:
2759         case FLD_QUERY:
2760         case LDLM_ENQUEUE:
2761                 *process = target_queue_recovery_request(req, obd);
2762                 RETURN(0);
2763
2764         default:
2765                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
2766                 *process = -EAGAIN;
2767                 RETURN(0);
2768         }
2769 }
2770
2771 /*
2772  * Handle recovery. Return:
2773  *        +1: continue request processing;
2774  *       -ve: abort immediately with the given error code;
2775  *         0: send reply with error code in req->rq_status;
2776  */
2777 static int mdt_recovery(struct mdt_thread_info *info)
2778 {
2779         struct ptlrpc_request *req = mdt_info_req(info);
2780         struct obd_device *obd;
2781
2782         ENTRY;
2783
2784         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2785         case MDS_CONNECT:
2786         case SEC_CTX_INIT:
2787         case SEC_CTX_INIT_CONT:
2788         case SEC_CTX_FINI:
2789                 {
2790 #if 0
2791                         int rc;
2792
2793                         rc = mdt_handle_idmap(info);
2794                         if (rc)
2795                                 RETURN(rc);
2796                         else
2797 #endif
2798                                 RETURN(+1);
2799                 }
2800         }
2801
2802         if (unlikely(!class_connected_export(req->rq_export))) {
2803                 CERROR("operation %d on unconnected MDS from %s\n",
2804                        lustre_msg_get_opc(req->rq_reqmsg),
2805                        libcfs_id2str(req->rq_peer));
2806                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
2807                  * mds_A will get -ENOTCONN (especially ping requests),
2808                  * which triggers the mds_A deactivate timeout; then, when
2809                  * mds_A cleans up, the cleanup process will be suspended since
2810                  * the deactivate timeout is not zero.
2811                  */
2812                 req->rq_status = -ENOTCONN;
2813                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
2814                 RETURN(0);
2815         }
2816
2817         /* sanity check: if the xid matches, the request must be marked as a
2818          * resent or replayed */
2819         if (req_xid_is_last(req)) {
2820                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
2821                       (MSG_RESENT | MSG_REPLAY))) {
2822                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
2823                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
2824                                   lustre_msg_get_flags(req->rq_reqmsg));
2825                         LBUG();
2826                         req->rq_status = -ENOTCONN;
2827                         RETURN(-ENOTCONN);
2828                 }
2829         }
2830
2831         /* else: note the opposite is not always true; a RESENT req after a
2832          * failover will usually not match the last_xid, since it was likely
2833          * never committed. A REPLAYed request will almost never match the
2834          * last xid, however it could for a committed, but still retained,
2835          * open. */
2836
2837         obd = req->rq_export->exp_obd;
2838
2839         /* Check whether the target is still recovering and filter the request. */
2840         if (unlikely(obd->obd_recovering)) {
2841                 int rc;
2842                 int should_process;
2843                 DEBUG_REQ(D_INFO, req, "Got new replay");
2844                 rc = mdt_filter_recovery_request(req, obd, &should_process);
2845                 if (rc != 0 || !should_process)
2846                         RETURN(rc);
2847                 else if (should_process < 0) {
2848                         req->rq_status = should_process;
2849                         rc = ptlrpc_error(req);
2850                         RETURN(rc);
2851                 }
2852         }
2853         RETURN(+1);
2854 }
2855
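/*
 * Verify that the request was built against the protocol version expected for
 * its opcode family: LUSTRE_OBD_VERSION for connect/ping/security-context
 * requests, LUSTRE_MDS_VERSION for metadata and quota requests,
 * LUSTRE_DLM_VERSION for lock requests and LUSTRE_LOG_VERSION for llog
 * requests.  Unknown opcodes are refused with -ENOTSUPP.
 */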
2856 static int mdt_msg_check_version(struct lustre_msg *msg)
2857 {
2858         int rc;
2859
2860         switch (lustre_msg_get_opc(msg)) {
2861         case MDS_CONNECT:
2862         case MDS_DISCONNECT:
2863         case OBD_PING:
2864         case SEC_CTX_INIT:
2865         case SEC_CTX_INIT_CONT:
2866         case SEC_CTX_FINI:
2867                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2868                 if (rc)
2869                         CERROR("bad opc %u version %08x, expecting %08x\n",
2870                                lustre_msg_get_opc(msg),
2871                                lustre_msg_get_version(msg),
2872                                LUSTRE_OBD_VERSION);
2873                 break;
2874         case MDS_GETSTATUS:
2875         case MDS_GETATTR:
2876         case MDS_GETATTR_NAME:
2877         case MDS_STATFS:
2878         case MDS_READPAGE:
2879         case MDS_WRITEPAGE:
2880         case MDS_IS_SUBDIR:
2881         case MDS_REINT:
2882         case MDS_CLOSE:
2883         case MDS_DONE_WRITING:
2884         case MDS_PIN:
2885         case MDS_SYNC:
2886         case MDS_GETXATTR:
2887         case MDS_SETXATTR:
2888         case MDS_SET_INFO:
2889         case MDS_GET_INFO:
2890         case MDS_QUOTACHECK:
2891         case MDS_QUOTACTL:
2892         case QUOTA_DQACQ:
2893         case QUOTA_DQREL:
2894         case SEQ_QUERY:
2895         case FLD_QUERY:
2896                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2897                 if (rc)
2898                         CERROR("bad opc %u version %08x, expecting %08x\n",
2899                                lustre_msg_get_opc(msg),
2900                                lustre_msg_get_version(msg),
2901                                LUSTRE_MDS_VERSION);
2902                 break;
2903         case LDLM_ENQUEUE:
2904         case LDLM_CONVERT:
2905         case LDLM_BL_CALLBACK:
2906         case LDLM_CP_CALLBACK:
2907                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2908                 if (rc)
2909                         CERROR("bad opc %u version %08x, expecting %08x\n",
2910                                lustre_msg_get_opc(msg),
2911                                lustre_msg_get_version(msg),
2912                                LUSTRE_DLM_VERSION);
2913                 break;
2914         case OBD_LOG_CANCEL:
2915         case LLOG_ORIGIN_HANDLE_CREATE:
2916         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2917         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2918         case LLOG_ORIGIN_HANDLE_CLOSE:
2919         case LLOG_ORIGIN_HANDLE_DESTROY:
2920         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2921         case LLOG_CATINFO:
2922                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2923                 if (rc)
2924                         CERROR("bad opc %u version %08x, expecting %08x\n",
2925                                lustre_msg_get_opc(msg),
2926                                lustre_msg_get_version(msg),
2927                                LUSTRE_LOG_VERSION);
2928                 break;
2929         default:
2930                 CERROR("MDS unknown opcode %u\n", lustre_msg_get_opc(msg));
2931                 rc = -ENOTSUPP;
2932         }
2933         return rc;
2934 }
2935
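/*
 * Common processing for a single request: check the message version, run the
 * recovery checks in mdt_recovery() and, if processing may continue, look up
 * the handler for this opcode in the supplied slice and invoke it through
 * mdt_req_handle().  Requests with an unsupported opcode are answered with
 * -ENOTSUPP.
 */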
2936 static int mdt_handle0(struct ptlrpc_request *req,
2937                        struct mdt_thread_info *info,
2938                        struct mdt_opc_slice *supported)
2939 {
2940         struct mdt_handler *h;
2941         struct lustre_msg  *msg;
2942         int                 rc;
2943
2944         ENTRY;
2945
2946         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
2947                 RETURN(0);
2948
2949         LASSERT(current->journal_info == NULL);
2950
2951         msg = req->rq_reqmsg;
2952         rc = mdt_msg_check_version(msg);
2953         if (likely(rc == 0)) {
2954                 rc = mdt_recovery(info);
2955                 if (likely(rc == +1)) {
2956                         h = mdt_handler_find(lustre_msg_get_opc(msg),
2957                                              supported);
2958                         if (likely(h != NULL)) {
2959                                 rc = mdt_req_handle(info, h, req);
2960                         } else {
2961                                 CERROR("Unsupported opcode: 0x%x\n",
2962                                        lustre_msg_get_opc(msg));
2963                                 req->rq_status = -ENOTSUPP;
2964                                 rc = ptlrpc_error(req);
2965                                 RETURN(rc);
2966                         }
2967                 }
2968         } else
2969                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
2970         RETURN(rc);
2971 }
2972
2973 /*
2974  * MDT handler function invoked by the ptlrpc service thread for each request.
2975  *
2976  * XXX common "target" functionality should be factored into a separate module
2977  * shared by mdt, ost and stand-alone services like fld.
2978  */
2979 static int mdt_handle_common(struct ptlrpc_request *req,
2980                              struct mdt_opc_slice *supported)
2981 {
2982         struct lu_env          *env;
2983         struct mdt_thread_info *info;
2984         int                     rc;
2985         ENTRY;
2986
2987         env = req->rq_svc_thread->t_env;
2988         LASSERT(env != NULL);
2989         LASSERT(env->le_ses != NULL);
2990         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
2991         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
2992         LASSERT(info != NULL);
2993
2994         mdt_thread_info_init(req, info);
2995
2996         rc = mdt_handle0(req, info, supported);
2997
2998         mdt_thread_info_fini(info);
2999         RETURN(rc);
3000 }
3001
3002 /*
3003  * This is called from the recovery code as the handler of _all_ RPC types,
3004  * including FLD and SEQ requests.
3005  */
3006 int mdt_recovery_handle(struct ptlrpc_request *req)
3007 {
3008         int rc;
3009         ENTRY;
3010
3011         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3012         case FLD_QUERY:
3013                 rc = mdt_handle_common(req, mdt_fld_handlers);
3014                 break;
3015         case SEQ_QUERY:
3016                 rc = mdt_handle_common(req, mdt_seq_handlers);
3017                 break;
3018         default:
3019                 rc = mdt_handle_common(req, mdt_regular_handlers);
3020                 break;
3021         }
3022
3023         RETURN(rc);
3024 }
3025
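/*
 * Thin wrappers binding each request source to its opcode slice; they are
 * presumably installed as the request handlers of the corresponding ptlrpc
 * services.  Note that mdt_mdsc_handle(), mdt_mdss_handle() and
 * mdt_dtss_handle() all dispatch through the same mdt_seq_handlers slice.
 */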
3026 static int mdt_regular_handle(struct ptlrpc_request *req)
3027 {
3028         return mdt_handle_common(req, mdt_regular_handlers);
3029 }
3030
3031 static int mdt_readpage_handle(struct ptlrpc_request *req)
3032 {
3033         return mdt_handle_common(req, mdt_readpage_handlers);
3034 }
3035
3036 static int mdt_xmds_handle(struct ptlrpc_request *req)
3037 {
3038         return mdt_handle_common(req, mdt_xmds_handlers);
3039 }
3040
3041 static int mdt_mdsc_handle(struct ptlrpc_request *req)
3042 {
3043         return mdt_handle_common(req, mdt_seq_handlers);
3044 }
3045
3046 static int mdt_mdss_handle(struct ptlrpc_request *req)
3047 {
3048         return mdt_handle_common(req, mdt_seq_handlers);
3049 }
3050
3051 static int mdt_dtss_handle(struct ptlrpc_request *req)
3052 {
3053         return mdt_handle_common(req, mdt_seq_handlers);
3054 }
3055
3056 static int mdt_fld_handle(struct ptlrpc_request *req)
3057 {
3058         return mdt_handle_common(req, mdt_fld_handlers);
3059 }
3060
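/*
 * Intent codes understood by the MDT.  mdt_it_flavor[] below maps each code
 * to the request format it expects (it_fmt), the handler flags (it_flags,
 * e.g. HABEO_REFERO or MUTABOR), the action callback (it_act) and, for
 * modifying intents, the corresponding reint opcode (it_reint).
 */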
3061 enum mdt_it_code {
3062         MDT_IT_OPEN,
3063         MDT_IT_OCREAT,
3064         MDT_IT_CREATE,
3065         MDT_IT_GETATTR,
3066         MDT_IT_READDIR,
3067         MDT_IT_LOOKUP,
3068         MDT_IT_UNLINK,
3069         MDT_IT_TRUNC,
3070         MDT_IT_GETXATTR,
3071         MDT_IT_NR
3072 };
3073
3074 static int mdt_intent_getattr(enum mdt_it_code opcode,
3075                               struct mdt_thread_info *info,
3076                               struct ldlm_lock **,
3077                               int);
3078 static int mdt_intent_reint(enum mdt_it_code opcode,
3079                             struct mdt_thread_info *info,
3080                             struct ldlm_lock **,
3081                             int);
3082
3083 static struct mdt_it_flavor {
3084         const struct req_format *it_fmt;
3085         __u32                    it_flags;
3086         int                    (*it_act)(enum mdt_it_code,
3087                                          struct mdt_thread_info *,
3088                                          struct ldlm_lock **,
3089                                          int);
3090         long                     it_reint;
3091 } mdt_it_flavor[] = {
3092         [MDT_IT_OPEN]     = {
3093                 .it_fmt   = &RQF_LDLM_INTENT,
3094                 /*.it_flags = HABEO_REFERO,*/
3095                 .it_flags = 0,
3096                 .it_act   = mdt_intent_reint,
3097                 .it_reint = REINT_OPEN
3098         },
3099         [MDT_IT_OCREAT]   = {
3100                 .it_fmt   = &RQF_LDLM_INTENT,
3101                 .it_flags = MUTABOR,
3102                 .it_act   = mdt_intent_reint,
3103                 .it_reint = REINT_OPEN
3104         },
3105         [MDT_IT_CREATE]   = {
3106                 .it_fmt   = &RQF_LDLM_INTENT,
3107                 .it_flags = MUTABOR,
3108                 .it_act   = mdt_intent_reint,
3109                 .it_reint = REINT_CREATE
3110         },
3111         [MDT_IT_GETATTR]  = {
3112                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3113                 .it_flags = HABEO_REFERO,
3114                 .it_act   = mdt_intent_getattr
3115         },
3116         [MDT_IT_READDIR]  = {
3117                 .it_fmt   = NULL,
3118                 .it_flags = 0,
3119                 .it_act   = NULL
3120         },
3121         [MDT_IT_LOOKUP]   = {
3122                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3123                 .it_flags = HABEO_REFERO,
3124                 .it_act   = mdt_intent_getattr
3125         },
3126         [MDT_IT_UNLINK]   = {
3127                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3128                 .it_flags = MUTABOR,
3129                 .it_act   = NULL,
3130                 .it_reint = REINT_UNLINK
3131         },
3132         [MDT_IT_TRUNC]    = {
3133                 .it_fmt   = NULL,
3134                 .it_flags = MUTABOR,
3135                 .it_act   = NULL
3136         },
3137         [MDT_IT_GETXATTR] = {
3138                 .it_fmt   = NULL,
3139                 .it_flags = 0,
3140                 .it_act   = NULL
3141         }
3142 };
3143
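/*
 * Replace the lock LDLM passed in (*lockp) with the lock that was actually
 * taken while executing the intent.  For a resent request whose lock was
 * already granted to this export, the existing lock is reused and the reply
 * reconstructed; otherwise the reader/writer references are dropped, the lock
 * is handed over to the client's export and ELDLM_LOCK_REPLACED is returned.
 */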
3144 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3145                             struct ldlm_lock **lockp,
3146                             struct ldlm_lock *new_lock,
3147                             struct mdt_lock_handle *lh,
3148                             int flags)
3149 {
3150         struct ptlrpc_request  *req = mdt_info_req(info);
3151         struct ldlm_lock       *lock = *lockp;
3152
3153         /*
3154          * Get a new lock only for cases where a possible resend did not
3155          * find any existing lock.
3156          */
3157         if (new_lock == NULL)
3158                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3159
3160         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3161                 lh->mlh_reg_lh.cookie = 0;
3162                 RETURN(0);
3163         }
3164
3165         LASSERTF(new_lock != NULL,
3166                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3167
3168         /*
3169          * If we've already given this lock to a client once, then we should
3170          * have no readers or writers.  Otherwise, we should have one reader
3171          * _or_ writer ref (which will be zeroed below) before returning the
3172          * lock to a client.
3173          */
3174         if (new_lock->l_export == req->rq_export) {
3175                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3176         } else {
3177                 LASSERT(new_lock->l_export == NULL);
3178                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3179         }
3180
3181         *lockp = new_lock;
3182
3183         if (new_lock->l_export == req->rq_export) {
3184                 /*
3185                  * We already gave this lock to the client, which means
3186                  * that we are reconstructing a reply to a resent request.
3187                  */
3188                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3189                         MSG_RESENT);
3190                 lh->mlh_reg_lh.cookie = 0;
3191                 RETURN(ELDLM_LOCK_REPLACED);
3192         }
3193
3194         /*
3195          * Fixup the lock to be given to the client.
3196          */
3197         lock_res_and_lock(new_lock);
3198         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3199          * a possible blocking AST. */
3200         while (new_lock->l_readers > 0) {
3201                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3202                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3203                 new_lock->l_readers--;
3204         }
3205         while (new_lock->l_writers > 0) {
3206                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3207                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3208                 new_lock->l_writers--;
3209         }
3210
3211         new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3212         new_lock->l_blocking_ast = lock->l_blocking_ast;
3213         new_lock->l_completion_ast = lock->l_completion_ast;
3214         new_lock->l_remote_handle = lock->l_remote_handle;
3215         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3216
3217         unlock_res_and_lock(new_lock);
3218
3219         cfs_hash_add(new_lock->l_export->exp_lock_hash,
3220                      &new_lock->l_remote_handle,
3221                      &new_lock->l_exp_hash);
3222
3223         LDLM_LOCK_RELEASE(new_lock);
3224         lh->mlh_reg_lh.cookie = 0;
3225
3226         RETURN(ELDLM_LOCK_REPLACED);
3227 }
3228
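/*
 * For a resent request (MSG_RESENT), inspect the DLM request sent by the
 * client and use the remote lock handle it carries to look for a lock that
 * was already granted on a previous attempt, so that the caller can reuse it
 * instead of enqueueing a new one.
 */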
3229 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3230                                     struct ldlm_lock *new_lock,
3231                                     struct ldlm_lock **old_lock,
3232                                     struct mdt_lock_handle *lh)
3233 {
3234         struct ptlrpc_request  *req = mdt_info_req(info);
3235         struct obd_export      *exp = req->rq_export;
3236         struct lustre_handle    remote_hdl;
3237         struct ldlm_request    *dlmreq;
3238         struct ldlm_lock       *lock;
3239
3240         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3241                 return;
3242
3243         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);