b=22637 MDS returns OBD_MD_FLSIZE to client only when no OSS object allocated
[fs/lustre-release.git] / lustre/mdt/mdt_handler.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #ifndef EXPORT_SYMTAB
50 # define EXPORT_SYMTAB
51 #endif
52 #define DEBUG_SUBSYSTEM S_MDS
53
54 #include <linux/module.h>
55 /*
56  * OBD_{ALLOC,FREE}*() macros
57  */
58 #include <obd_support.h>
59 /* struct ptlrpc_request */
60 #include <lustre_net.h>
61 /* struct obd_export */
62 #include <lustre_export.h>
63 /* struct obd_device */
64 #include <obd.h>
65 /* lu2dt_dev() */
66 #include <dt_object.h>
67 #include <lustre_mds.h>
68 #include <lustre_mdt.h>
69 #include "mdt_internal.h"
70 #ifdef HAVE_QUOTA_SUPPORT
71 # include <lustre_quota.h>
72 #endif
73 #include <lustre_acl.h>
74 #include <lustre_param.h>
75 #include <lustre_fsfilt.h>
76
77 mdl_mode_t mdt_mdl_lock_modes[] = {
78         [LCK_MINMODE] = MDL_MINMODE,
79         [LCK_EX]      = MDL_EX,
80         [LCK_PW]      = MDL_PW,
81         [LCK_PR]      = MDL_PR,
82         [LCK_CW]      = MDL_CW,
83         [LCK_CR]      = MDL_CR,
84         [LCK_NL]      = MDL_NL,
85         [LCK_GROUP]   = MDL_GROUP
86 };
87
88 ldlm_mode_t mdt_dlm_lock_modes[] = {
89         [MDL_MINMODE] = LCK_MINMODE,
90         [MDL_EX]      = LCK_EX,
91         [MDL_PW]      = LCK_PW,
92         [MDL_PR]      = LCK_PR,
93         [MDL_CW]      = LCK_CW,
94         [MDL_CR]      = LCK_CR,
95         [MDL_NL]      = LCK_NL,
96         [MDL_GROUP]   = LCK_GROUP
97 };
98
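/*
 * For illustration only: the two tables above are indexed directly by lock
 * mode value, so LDLM and MDL lock modes can be converted by a simple array
 * lookup.  The converters used further down, mdt_dlm_mode2mdl_mode() and
 * mdt_mdl_mode2dlm_mode(), are assumed to be built on top of these tables,
 * roughly as in the minimal sketch below; the actual helpers are defined
 * elsewhere in the MDT code (presumably mdt_internal.h), so the sketch is
 * kept under "#if 0".
 */
#if 0
static inline mdl_mode_t mdt_dlm_mode2mdl_mode(ldlm_mode_t mode)
{
        LASSERT(mode >= LCK_MINMODE && mode <= LCK_GROUP);
        return mdt_mdl_lock_modes[mode];
}

static inline ldlm_mode_t mdt_mdl_mode2dlm_mode(mdl_mode_t mode)
{
        LASSERT(mode >= MDL_MINMODE && mode <= MDL_GROUP);
        return mdt_dlm_lock_modes[mode];
}
#endif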
99 /*
100  * Initialized in mdt_mod_init().
101  */
102 static unsigned long mdt_num_threads;
103 static unsigned long mdt_min_threads;
104 static unsigned long mdt_max_threads;
105
106 /* ptlrpc request handler for MDT. All handlers are
107  * grouped into several slices - struct mdt_opc_slice,
108  * and stored in an array - mdt_handlers[].
109  */
110 struct mdt_handler {
111         /* The name of this handler. */
112         const char *mh_name;
113         /* Fail id for this handler, checked at the start of the handler. */
114         int         mh_fail_id;
115         /* Operation code for this handler */
116         __u32       mh_opc;
117         /* flags are listed in enum mdt_handler_flags below. */
118         __u32       mh_flags;
119         /* The actual handler function to execute. */
120         int (*mh_act)(struct mdt_thread_info *info);
121         /* Request format for this request. */
122         const struct req_format *mh_fmt;
123 };
124
125 enum mdt_handler_flags {
126         /*
127          * struct mdt_body is passed in the incoming message, and the object
128          * identified by its fid exists on disk.
129          *
130          * "habeo corpus" == "I have a body"
131          */
132         HABEO_CORPUS = (1 << 0),
133         /*
134          * struct ldlm_request is passed in the incoming message.
135          *
136          * "habeo clavis" == "I have a key"
137          */
138         HABEO_CLAVIS = (1 << 1),
139         /*
140          * this request has a fixed reply format, so the reply message can
141          * be packed by generic code.
142          *
143          * "habeo refero" == "I have a reply"
144          */
145         HABEO_REFERO = (1 << 2),
146         /*
147          * this request will modify something, so check whether the filesystem
148          * is read-only and, if so, return -EROFS to the client as soon as possible.
149          *
150          * "mutabor" == "I shall modify"
151          */
152         MUTABOR      = (1 << 3)
153 };
154
155 struct mdt_opc_slice {
156         __u32               mos_opc_start;
157         int                 mos_opc_end;
158         struct mdt_handler *mos_hs;
159 };
160
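/*
 * For illustration only (a hedged sketch, not code from this file): a
 * handler entry binds an opcode to its handler function, flags, and request
 * format, and an opcode slice covers a contiguous opcode range.  A
 * hypothetical fragment for a regular MDS opcode might look like the
 * following; the real tables, declared immediately below, are defined later
 * in this file together with the macros that build them.
 */
#if 0
static struct mdt_handler mdt_mds_ops_sketch[] = {
        {
                .mh_name    = "mds_getattr",
                .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
                .mh_opc     = MDS_GETATTR,
                .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
                .mh_act     = mdt_getattr,
                .mh_fmt     = &RQF_MDS_GETATTR
        },
};

static struct mdt_opc_slice mdt_sketch_handlers[] = {
        {
                .mos_opc_start = MDS_GETATTR,
                .mos_opc_end   = MDS_LAST_OPC,
                .mos_hs        = mdt_mds_ops_sketch
        },
        {
                .mos_hs        = NULL
        }
};
#endif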
161 static struct mdt_opc_slice mdt_regular_handlers[];
162 static struct mdt_opc_slice mdt_readpage_handlers[];
163 static struct mdt_opc_slice mdt_xmds_handlers[];
164 static struct mdt_opc_slice mdt_seq_handlers[];
165 static struct mdt_opc_slice mdt_fld_handlers[];
166
167 static struct mdt_device *mdt_dev(struct lu_device *d);
168 static int mdt_regular_handle(struct ptlrpc_request *req);
169 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
170 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
171                         struct getinfo_fid2path *fp);
172
173 static const struct lu_object_operations mdt_obj_ops;
174
175 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
176 {
177         if (!rep)
178                 return 0;
179         return (rep->lock_policy_res1 & flag);
180 }
181
182 void mdt_clear_disposition(struct mdt_thread_info *info,
183                            struct ldlm_reply *rep, int flag)
184 {
185         if (info)
186                 info->mti_opdata &= ~flag;
187         if (rep)
188                 rep->lock_policy_res1 &= ~flag;
189 }
190
191 void mdt_set_disposition(struct mdt_thread_info *info,
192                          struct ldlm_reply *rep, int flag)
193 {
194         if (info)
195                 info->mti_opdata |= flag;
196         if (rep)
197                 rep->lock_policy_res1 |= flag;
198 }
199
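/*
 * Illustrative usage (a sketch mirroring the intent handling later in this
 * file, e.g. mdt_getattr_name_lock()): dispositions are recorded both in the
 * thread info and in the DLM reply, so the client can tell which steps of an
 * intent were executed and with what result.
 */
#if 0
        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
        rc = mdo_lookup(info->mti_env, next, lname, child_fid, &info->mti_spec);
        if (rc == -ENOENT)
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
        else if (rc == 0)
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
#endif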
200 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
201 {
202         lh->mlh_pdo_hash = 0;
203         lh->mlh_reg_mode = lm;
204         lh->mlh_type = MDT_REG_LOCK;
205 }
206
207 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
208                        const char *name, int namelen)
209 {
210         lh->mlh_reg_mode = lm;
211         lh->mlh_type = MDT_PDO_LOCK;
212
213         if (name != NULL) {
214                 LASSERT(namelen > 0);
215                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
216         } else {
217                 LASSERT(namelen == 0);
218                 lh->mlh_pdo_hash = 0ull;
219         }
220 }
221
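/*
 * Illustrative usage (a sketch mirroring calls made later in this file): a
 * whole-object lock handle is set up with mdt_lock_reg_init(), while a
 * parent directory lock that only needs to serialize on a single name hash
 * is set up with mdt_lock_pdo_init() and that name.
 */
#if 0
        mdt_lock_reg_init(lhc, LCK_PR);                /* lock on the child  */
        mdt_lock_pdo_init(lhp, LCK_PR, name, namelen); /* PDO lock on parent */
#endif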
222 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
223                               struct mdt_lock_handle *lh)
224 {
225         mdl_mode_t mode;
226         ENTRY;
227
228         /*
229          * Any dir access needs a couple of locks:
230          *
231          * 1) one on the part of the dir we are going to look up in or modify;
232          *
233          * 2) one on the whole dir to protect it from concurrent splitting and/or
234          * to flush the client's cache for readdir().
235          *
236          * So, for a given mode and object, this routine decides what lock mode
237          * to use for lock #2:
238          *
239          * 1) if the caller is going to look up in the dir, we only need to
240          * protect the dir from being split - LCK_CR;
241          *
242          * 2) if the caller is going to modify the dir, we need to protect it
243          * from being split and to flush the cache - LCK_CW;
244          *
245          * 3) if the caller is going to modify the dir and that dir seems ready
246          * for splitting, we need to protect it from any type of access
247          * (lookup/modify/split) - LCK_EX. --bzzz
248          */
249
250         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
251         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
252
253         /*
254          * Ask the underlying layer for its opinion on the preferred PDO lock
255          * mode, passing the access type as the regular lock mode:
256          *
257          * - MDL_MINMODE means that the lower layer does not want to specify a
258          * lock mode;
259          *
260          * - MDL_NL means that no PDO lock should be taken. This is used in some
261          * cases, e.g. for non-splittable directories there is no need to use
262          * PDO locks at all.
263          */
264         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
265                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
266
267         if (mode != MDL_MINMODE) {
268                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
269         } else {
270                 /*
271                  * The lower layer does not want to specify a locking mode. We
272                  * do it ourselves. No special protection is needed, just flush
273                  * the client's cache on modification and allow concurrent
274                  * modification.
275                  */
276                 switch (lh->mlh_reg_mode) {
277                 case LCK_EX:
278                         lh->mlh_pdo_mode = LCK_EX;
279                         break;
280                 case LCK_PR:
281                         lh->mlh_pdo_mode = LCK_CR;
282                         break;
283                 case LCK_PW:
284                         lh->mlh_pdo_mode = LCK_CW;
285                         break;
286                 default:
287                         CERROR("Unexpected lock mode (0x%x)\n",
288                                (int)lh->mlh_reg_mode);
289                         LBUG();
290                 }
291         }
292
293         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
294         EXIT;
295 }
296
297 static int mdt_getstatus(struct mdt_thread_info *info)
298 {
299         struct mdt_device *mdt  = info->mti_mdt;
300         struct md_device  *next = mdt->mdt_child;
301         struct mdt_body   *repbody;
302         int                rc;
303
304         ENTRY;
305
306         rc = mdt_check_ucred(info);
307         if (rc)
308                 RETURN(err_serious(rc));
309
310         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
311                 RETURN(err_serious(-ENOMEM));
312
313         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
314         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
315         if (rc != 0)
316                 RETURN(rc);
317
318         repbody->valid |= OBD_MD_FLID;
319
320         if (mdt->mdt_opts.mo_mds_capa &&
321             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
322                 struct mdt_object  *root;
323                 struct lustre_capa *capa;
324
325                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
326                 if (IS_ERR(root))
327                         RETURN(PTR_ERR(root));
328
329                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
330                 LASSERT(capa);
331                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
332                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
333                                  0);
334                 mdt_object_put(info->mti_env, root);
335                 if (rc == 0)
336                         repbody->valid |= OBD_MD_FLMDSCAPA;
337         }
338
339         RETURN(rc);
340 }
341
342 static int mdt_statfs(struct mdt_thread_info *info)
343 {
344         struct md_device      *next  = info->mti_mdt->mdt_child;
345         struct ptlrpc_service *svc;
346         struct obd_statfs     *osfs;
347         int                    rc;
348
349         ENTRY;
350
351         svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;
352
353         /* This will trigger a watchdog timeout */
354         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
355                          (MDT_SERVICE_WATCHDOG_FACTOR *
356                           at_get(&svc->srv_at_estimate)) + 1);
357
358         rc = mdt_check_ucred(info);
359         if (rc)
360                 RETURN(err_serious(rc));
361
362         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
363                 rc = err_serious(-ENOMEM);
364         } else {
365                 osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
366                 rc = next->md_ops->mdo_statfs(info->mti_env, next,
367                                               &info->mti_u.ksfs);
368                 statfs_pack(osfs, &info->mti_u.ksfs);
369         }
370         RETURN(rc);
371 }
372
373 /**
374  * Pack SOM attributes into the reply.
375  * Call under a DLM UPDATE lock.
376  */
377 static void mdt_pack_size2body(struct mdt_thread_info *info,
378                                struct mdt_object *mo)
379 {
380         struct mdt_body *b;
381         struct md_attr *ma = &info->mti_attr;
382
383         LASSERT(ma->ma_attr.la_valid & LA_MODE);
384         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
385
386         /* Check if Size-on-MDS is supported, if this is a regular file, if
387          * SOM is enabled on the object, and if the SOM cache exists and is
388          * valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
389         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
390             !S_ISREG(ma->ma_attr.la_mode) ||
391             !mdt_object_is_som_enabled(mo) ||
392             !(ma->ma_valid & MA_SOM))
393                 return;
394
395         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
396         b->size = ma->ma_som->msd_size;
397         b->blocks = ma->ma_som->msd_blocks;
398 }
399
400 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
401                         const struct lu_attr *attr, const struct lu_fid *fid)
402 {
403         struct md_attr          *ma  = &info->mti_attr;
404
405         LASSERT(ma->ma_valid & MA_INODE);
406
407         /* XXX: should pack the reply body according to la_valid */
408         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
409                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
410                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
411                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
412
413         if (!S_ISREG(attr->la_mode)) {
414                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
415         } else if (ma->ma_need & MA_LOV && ma->ma_lmm_size == 0) {
416                 /* This means no objects are allocated on the OSTs. */
417                 LASSERT(!(ma->ma_valid & MA_LOV));
418                 LASSERT(attr->la_blocks == 0);
419                 /* If no objects are allocated on the OSTs, the size on the MDS is valid. b=22272 */
420                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
421         }
422
423         b->atime      = attr->la_atime;
424         b->mtime      = attr->la_mtime;
425         b->ctime      = attr->la_ctime;
426         b->mode       = attr->la_mode;
427         b->size       = attr->la_size;
428         b->blocks     = attr->la_blocks;
429         b->uid        = attr->la_uid;
430         b->gid        = attr->la_gid;
431         b->flags      = attr->la_flags;
432         b->nlink      = attr->la_nlink;
433         b->rdev       = attr->la_rdev;
434
435         if (fid) {
436                 b->fid1 = *fid;
437                 b->valid |= OBD_MD_FLID;
438
439                 /* FIXME: these should be fixed when the new igif is ready. */
440                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
441                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
442                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
443
444                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
445                                 PFID(fid), b->nlink, b->mode, b->size);
446         }
447
448         if (info)
449                 mdt_body_reverse_idmap(info, b);
450 }
451
452 static inline int mdt_body_has_lov(const struct lu_attr *la,
453                                    const struct mdt_body *body)
454 {
455         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
456                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
457 }
458
459 static int mdt_getattr_internal(struct mdt_thread_info *info,
460                                 struct mdt_object *o, int ma_need)
461 {
462         struct md_object        *next = mdt_object_child(o);
463         const struct mdt_body   *reqbody = info->mti_body;
464         struct ptlrpc_request   *req = mdt_info_req(info);
465         struct md_attr          *ma = &info->mti_attr;
466         struct lu_attr          *la = &ma->ma_attr;
467         struct req_capsule      *pill = info->mti_pill;
468         const struct lu_env     *env = info->mti_env;
469         struct mdt_body         *repbody;
470         struct lu_buf           *buffer = &info->mti_buf;
471         int                     rc;
472         ENTRY;
473
474         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
475                 RETURN(err_serious(-ENOMEM));
476
477         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
478
479         ma->ma_valid = 0;
480
481         rc = mdt_object_exists(o);
482         if (rc < 0) {
483                 /* This object is located on a remote node. */
484                 repbody->fid1 = *mdt_object_fid(o);
485                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
486                 RETURN(0);
487         }
488
489         buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
490         buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);
491
492         /* If it is a dir object and the client requests the MEA, then return the MEA */
493         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
494             reqbody->valid & OBD_MD_MEA) {
495                 /* Assumption: MDT_MD size is enough for lmv size. */
496                 ma->ma_lmv = buffer->lb_buf;
497                 ma->ma_lmv_size = buffer->lb_len;
498                 ma->ma_need = MA_LMV | MA_INODE;
499         } else {
500                 ma->ma_lmm = buffer->lb_buf;
501                 ma->ma_lmm_size = buffer->lb_len;
502                 ma->ma_need = MA_LOV | MA_INODE;
503         }
504
505         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
506             reqbody->valid & OBD_MD_FLDIREA  &&
507             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
508                 /* get default stripe info for this dir. */
509                 ma->ma_need |= MA_LOV_DEF;
510         }
511         ma->ma_need |= ma_need;
512         if (ma->ma_need & MA_SOM)
513                 ma->ma_som = &info->mti_u.som.data;
514
515         rc = mo_attr_get(env, next, ma);
516         if (unlikely(rc)) {
517                 CERROR("getattr error for "DFID": %d\n",
518                         PFID(mdt_object_fid(o)), rc);
519                 RETURN(rc);
520         }
521
522         if (likely(ma->ma_valid & MA_INODE))
523                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
524         else
525                 RETURN(-EFAULT);
526
527         if (mdt_body_has_lov(la, reqbody)) {
528                 if (ma->ma_valid & MA_LOV) {
529                         LASSERT(ma->ma_lmm_size);
530                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
531                         repbody->eadatasize = ma->ma_lmm_size;
532                         if (S_ISDIR(la->la_mode))
533                                 repbody->valid |= OBD_MD_FLDIREA;
534                         else
535                                 repbody->valid |= OBD_MD_FLEASIZE;
536                 }
537                 if (ma->ma_valid & MA_LMV) {
538                         LASSERT(S_ISDIR(la->la_mode));
539                         repbody->eadatasize = ma->ma_lmv_size;
540                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
541                 }
542                 if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
543                         repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
544                 }
545         } else if (S_ISLNK(la->la_mode) &&
546                    reqbody->valid & OBD_MD_LINKNAME) {
547                 buffer->lb_buf = ma->ma_lmm;
548                 buffer->lb_len = reqbody->eadatasize;
549                 rc = mo_readlink(env, next, buffer);
550                 if (unlikely(rc <= 0)) {
551                         CERROR("readlink failed: %d\n", rc);
552                         rc = -EFAULT;
553                 } else {
554                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
555                                  rc -= 2;
556                         repbody->valid |= OBD_MD_LINKNAME;
557                         repbody->eadatasize = rc;
558                         /* NULL terminate */
559                         ((char*)ma->ma_lmm)[rc - 1] = 0;
560                         CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
561                                (char*)ma->ma_lmm, rc);
562                         rc = 0;
563                 }
564         }
565
566         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
567                 repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
568                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
569                 repbody->valid |= OBD_MD_FLMODEASIZE;
570                 CDEBUG(D_INODE, "Changing the max_mdsize and max_cookiesize "
571                        "to %d:%d\n", repbody->max_mdsize,
572                        repbody->max_cookiesize);
573         }
574
575         if (exp_connect_rmtclient(info->mti_exp) &&
576             reqbody->valid & OBD_MD_FLRMTPERM) {
577                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
578
579                 /* mdt_getattr_lock only */
580                 rc = mdt_pack_remote_perm(info, o, buf);
581                 if (rc) {
582                         repbody->valid &= ~OBD_MD_FLRMTPERM;
583                         repbody->aclsize = 0;
584                         RETURN(rc);
585                 } else {
586                         repbody->valid |= OBD_MD_FLRMTPERM;
587                         repbody->aclsize = sizeof(struct mdt_remote_perm);
588                 }
589         }
590 #ifdef CONFIG_FS_POSIX_ACL
591         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
592                  (reqbody->valid & OBD_MD_FLACL)) {
593                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
594                 buffer->lb_len = req_capsule_get_size(pill,
595                                                       &RMF_ACL, RCL_SERVER);
596                 if (buffer->lb_len > 0) {
597                         rc = mo_xattr_get(env, next, buffer,
598                                           XATTR_NAME_ACL_ACCESS);
599                         if (rc < 0) {
600                                 if (rc == -ENODATA) {
601                                         repbody->aclsize = 0;
602                                         repbody->valid |= OBD_MD_FLACL;
603                                         rc = 0;
604                                 } else if (rc == -EOPNOTSUPP) {
605                                         rc = 0;
606                                 } else {
607                                         CERROR("got acl size: %d\n", rc);
608                                 }
609                         } else {
610                                 repbody->aclsize = rc;
611                                 repbody->valid |= OBD_MD_FLACL;
612                                 rc = 0;
613                         }
614                 }
615         }
616 #endif
617
618         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
619             info->mti_mdt->mdt_opts.mo_mds_capa &&
620             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
621                 struct lustre_capa *capa;
622
623                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
624                 LASSERT(capa);
625                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
626                 rc = mo_capa_get(env, next, capa, 0);
627                 if (rc)
628                         RETURN(rc);
629                 repbody->valid |= OBD_MD_FLMDSCAPA;
630         }
631         RETURN(rc);
632 }
633
634 static int mdt_renew_capa(struct mdt_thread_info *info)
635 {
636         struct mdt_object  *obj = info->mti_object;
637         struct mdt_body    *body;
638         struct lustre_capa *capa, *c;
639         int rc;
640         ENTRY;
641
642         /* If the object doesn't exist, or the server has disabled capabilities,
643          * return directly; the client will find the OBD_MD_FLOSSCAPA flag not
644          * set in body->valid.
645          */
646         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
647             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
648                 RETURN(0);
649
650         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
651         LASSERT(body != NULL);
652
653         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
654         LASSERT(c);
655
656         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
657         LASSERT(capa);
658
659         *capa = *c;
660         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
661         if (rc == 0)
662                 body->valid |= OBD_MD_FLOSSCAPA;
663         RETURN(rc);
664 }
665
666 static int mdt_getattr(struct mdt_thread_info *info)
667 {
668         struct mdt_object       *obj = info->mti_object;
669         struct req_capsule      *pill = info->mti_pill;
670         struct mdt_body         *reqbody;
671         struct mdt_body         *repbody;
672         mode_t                   mode;
673         int                      md_size;
674         int rc;
675         ENTRY;
676
677         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
678         LASSERT(reqbody);
679
680         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
681                 rc = req_capsule_server_pack(pill);
682                 if (unlikely(rc))
683                         RETURN(err_serious(rc));
684                 rc = mdt_renew_capa(info);
685                 GOTO(out_shrink, rc);
686         }
687
688         LASSERT(obj != NULL);
689         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
690
691         mode = lu_object_attr(&obj->mot_obj.mo_lu);
692         if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
693             (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
694                 md_size = reqbody->eadatasize;
695         else
696                 md_size = info->mti_mdt->mdt_max_mdsize;
697
698         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);
699
700         rc = req_capsule_server_pack(pill);
701         if (unlikely(rc != 0))
702                 RETURN(err_serious(rc));
703
704         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
705         LASSERT(repbody != NULL);
706         repbody->eadatasize = 0;
707         repbody->aclsize = 0;
708
709         if (reqbody->valid & OBD_MD_FLRMTPERM)
710                 rc = mdt_init_ucred(info, reqbody);
711         else
712                 rc = mdt_check_ucred(info);
713         if (unlikely(rc))
714                 GOTO(out_shrink, rc);
715
716         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
717         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
718
719         /*
720          * Don't check the capability at all, because rename might do a getattr
721          * for a remote object, and at that time no capability is available.
722          */
723         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
724         rc = mdt_getattr_internal(info, obj, 0);
725         if (reqbody->valid & OBD_MD_FLRMTPERM)
726                 mdt_exit_ucred(info);
727         EXIT;
728 out_shrink:
729         mdt_shrink_reply(info);
730         return rc;
731 }
732
733 static int mdt_is_subdir(struct mdt_thread_info *info)
734 {
735         struct mdt_object     *o = info->mti_object;
736         struct req_capsule    *pill = info->mti_pill;
737         const struct mdt_body *body = info->mti_body;
738         struct mdt_body       *repbody;
739         int                    rc;
740         ENTRY;
741
742         LASSERT(o != NULL);
743
744         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
745
746         /*
747          * We save the last checked parent fid in @repbody->fid1 for the
748          * remote directory case.
749          */
750         LASSERT(fid_is_sane(&body->fid2));
751         LASSERT(mdt_object_exists(o) > 0);
752         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
753                            &body->fid2, &repbody->fid1);
754         if (rc == 0 || rc == -EREMOTE)
755                 repbody->valid |= OBD_MD_FLID;
756
757         RETURN(rc);
758 }
759
760 static int mdt_raw_lookup(struct mdt_thread_info *info,
761                           struct mdt_object *parent,
762                           const struct lu_name *lname,
763                           struct ldlm_reply *ldlm_rep)
764 {
765         struct md_object *next = mdt_object_child(info->mti_object);
766         const struct mdt_body *reqbody = info->mti_body;
767         struct lu_fid *child_fid = &info->mti_tmp_fid1;
768         struct mdt_body *repbody;
769         int rc;
770         ENTRY;
771
772         if (reqbody->valid != OBD_MD_FLID)
773                 RETURN(0);
774
775         LASSERT(!info->mti_cross_ref);
776
777         /* Only get the fid of this object by name. */
778         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
779                         &info->mti_spec);
780 #if 0
781         /* XXX is raw_lookup possible as intent operation? */
782         if (rc != 0) {
783                 if (rc == -ENOENT)
784                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
785                 RETURN(rc);
786         } else
787                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
788
789         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
790 #endif
791         if (rc == 0) {
792                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
793                 repbody->fid1 = *child_fid;
794                 repbody->valid = OBD_MD_FLID;
795         }
796         RETURN(1);
797 }
798
799 /*
800  * The UPDATE lock should be taken against the parent and released before exit;
801  * the child_bits lock should be taken against the child and returned back:
802  *            (1) a normal request should release the child lock;
803  *            (2) an intent request will grant the lock to the client.
804  */
805 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
806                                  struct mdt_lock_handle *lhc,
807                                  __u64 child_bits,
808                                  struct ldlm_reply *ldlm_rep)
809 {
810         struct ptlrpc_request  *req       = mdt_info_req(info);
811         struct mdt_body        *reqbody   = NULL;
812         struct mdt_object      *parent    = info->mti_object;
813         struct mdt_object      *child;
814         struct md_object       *next      = mdt_object_child(parent);
815         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
816         struct lu_name         *lname     = NULL;
817         const char             *name      = NULL;
818         int                     namelen   = 0;
819         struct mdt_lock_handle *lhp;
820         struct ldlm_lock       *lock;
821         struct ldlm_res_id     *res_id;
822         int                     is_resent;
823         int                     ma_need = 0;
824         int                     rc;
825
826         ENTRY;
827
828         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
829         LASSERT(ergo(is_resent,
830                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
831
832         LASSERT(parent != NULL);
833         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
834         if (name == NULL)
835                 RETURN(err_serious(-EFAULT));
836
837         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
838                                        RCL_CLIENT) - 1;
839         if (!info->mti_cross_ref) {
840                 /*
841                  * XXX: The check for "namelen == 0" is for getattr by fid
842                  * (OBD_CONNECT_ATTRFID); otherwise an empty name is not allowed,
843                  * that is, the name must contain at least one character plus
844                  * the terminating '\0'.
845                  */
846                 if (namelen == 0) {
847                         reqbody = req_capsule_client_get(info->mti_pill,
848                                                          &RMF_MDT_BODY);
849                         LASSERT(fid_is_sane(&reqbody->fid2));
850                         name = NULL;
851
852                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
853                                "ldlm_rep = %p\n",
854                                PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
855                                ldlm_rep);
856                 } else {
857                         lname = mdt_name(info->mti_env, (char *)name, namelen);
858                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
859                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
860                                name, ldlm_rep);
861                 }
862         }
863         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
864
865         rc = mdt_object_exists(parent);
866         if (unlikely(rc == 0)) {
867                 LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
868                                 &parent->mot_obj.mo_lu,
869                                 "Parent doesn't exist!\n");
870                 RETURN(-ESTALE);
871         } else if (!info->mti_cross_ref) {
872                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
873                          PFID(mdt_object_fid(parent)));
874         }
875         if (lname) {
876                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
877                 if (rc != 0) {
878                         if (rc > 0)
879                                 rc = 0;
880                         RETURN(rc);
881                 }
882         }
883
884         if (info->mti_cross_ref) {
885                 /* Only getattr on the child. Parent is on another node. */
886                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
887                 child = parent;
888                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
889                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
890
891                 if (is_resent) {
892                         /* Do not take lock for resent case. */
893                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
894                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
895                                  lhc->mlh_reg_lh.cookie);
896                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
897                                                 &lock->l_resource->lr_name));
898                         LDLM_LOCK_PUT(lock);
899                         rc = 0;
900                 } else {
901                         mdt_lock_handle_init(lhc);
902                         mdt_lock_reg_init(lhc, LCK_PR);
903
904                         /*
905                          * The object's name is on another MDS; no lookup lock is
906                          * needed here, but an update lock is.
907                          */
908                         child_bits &= ~MDS_INODELOCK_LOOKUP;
909                         child_bits |= MDS_INODELOCK_UPDATE;
910
911                         rc = mdt_object_lock(info, child, lhc, child_bits,
912                                              MDT_LOCAL_LOCK);
913                 }
914                 if (rc == 0) {
915                         /* Finally, we can get attr for child. */
916                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
917                                          BYPASS_CAPA);
918                         rc = mdt_getattr_internal(info, child, 0);
919                         if (unlikely(rc != 0))
920                                 mdt_object_unlock(info, child, lhc, 1);
921                 }
922                 RETURN(rc);
923         }
924
925         /* step 1: lock parent */
926         lhp = &info->mti_lh[MDT_LH_PARENT];
927         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
928         rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
929                              MDT_LOCAL_LOCK);
930
931         if (unlikely(rc != 0))
932                 RETURN(rc);
933
934         if (lname) {
935                 /* step 2: lookup child's fid by name */
936                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
937                                 &info->mti_spec);
938
939                 if (rc != 0) {
940                         if (rc == -ENOENT)
941                                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
942                         GOTO(out_parent, rc);
943                 } else
944                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
945         } else {
946                 *child_fid = reqbody->fid2;
947                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
948         }
949
950         /*
951          * step 3: find the child object by fid and lock it,
952          *         regardless of whether it is local or remote.
953          */
954         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
955
956         if (unlikely(IS_ERR(child)))
957                 GOTO(out_parent, rc = PTR_ERR(child));
958         if (is_resent) {
959                 /* Do not take lock for resent case. */
960                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
961                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
962                          lhc->mlh_reg_lh.cookie);
963
964                 res_id = &lock->l_resource->lr_name;
965                 if (!fid_res_name_eq(mdt_object_fid(child),
966                                     &lock->l_resource->lr_name)) {
967                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
968                                                  &lock->l_resource->lr_name),
969                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
970                                  (unsigned long)res_id->name[0],
971                                  (unsigned long)res_id->name[1],
972                                  (unsigned long)res_id->name[2],
973                                  PFID(mdt_object_fid(parent)));
974                           CWARN("Although resent, still did not get the child "
975                                 "lock, parent: "DFID" child: "DFID"\n",
976                                 PFID(mdt_object_fid(parent)),
977                                 PFID(mdt_object_fid(child)));
978                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
979                           LDLM_LOCK_PUT(lock);
980                           GOTO(relock, 0);
981                 }
982                 LDLM_LOCK_PUT(lock);
983                 rc = 0;
984         } else {
985                 struct md_attr *ma;
986 relock:
987                 ma = &info->mti_attr;
988
989                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
990                 mdt_lock_handle_init(lhc);
991                 mdt_lock_reg_init(lhc, LCK_PR);
992
993                 if (mdt_object_exists(child) == 0) {
994                         LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
995                                         &child->mot_obj.mo_lu,
996                                         "Object doesn't exist!\n");
997                         GOTO(out_child, rc = -ESTALE);
998                 }
999
1000                 ma->ma_valid = 0;
1001                 ma->ma_need = MA_INODE;
1002                 rc = mo_attr_get(info->mti_env, next, ma);
1003                 if (unlikely(rc != 0))
1004                         GOTO(out_child, rc);
1005
1006                 /* If the file has not been changed for some time, we return
1007                  * not only a LOOKUP lock, but also an UPDATE lock, and this
1008                  * might save us an RPC on a later STAT. For directories, it
1009                  * also lets negative dentries start working for this dir. */
1010                 if (ma->ma_valid & MA_INODE &&
1011                     ma->ma_attr.la_valid & LA_CTIME &&
1012                     info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1013                     ma->ma_attr.la_ctime < cfs_time_current_sec())
1014                         child_bits |= MDS_INODELOCK_UPDATE;
1015
1016                 rc = mdt_object_lock(info, child, lhc, child_bits,
1017                                      MDT_CROSS_LOCK);
1018
1019                 if (unlikely(rc != 0))
1020                         GOTO(out_child, rc);
1021         }
1022
1023         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1024         /* Get MA_SOM attributes if update lock is given. */
1025         if (lock &&
1026             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1027             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1028                 ma_need = MA_SOM;
1029
1030         /* finally, we can get attr for child. */
1031         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1032         rc = mdt_getattr_internal(info, child, ma_need);
1033         if (unlikely(rc != 0)) {
1034                 mdt_object_unlock(info, child, lhc, 1);
1035         } else if (lock) {
1036                 /* Debugging code. */
1037                 res_id = &lock->l_resource->lr_name;
1038                 LDLM_DEBUG(lock, "Returning lock to client");
1039                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1040                                          &lock->l_resource->lr_name),
1041                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1042                          (unsigned long)res_id->name[0],
1043                          (unsigned long)res_id->name[1],
1044                          (unsigned long)res_id->name[2],
1045                          PFID(mdt_object_fid(child)));
1046                 mdt_pack_size2body(info, child);
1047         }
1048         if (lock)
1049                 LDLM_LOCK_PUT(lock);
1050
1051         EXIT;
1052 out_child:
1053         mdt_object_put(info->mti_env, child);
1054 out_parent:
1055         mdt_object_unlock(info, parent, lhp, 1);
1056         return rc;
1057 }
1058
1059 /* normal handler: should release the child lock */
1060 static int mdt_getattr_name(struct mdt_thread_info *info)
1061 {
1062         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1063         struct mdt_body        *reqbody;
1064         struct mdt_body        *repbody;
1065         int rc;
1066         ENTRY;
1067
1068         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1069         LASSERT(reqbody != NULL);
1070         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1071         LASSERT(repbody != NULL);
1072
1073         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1074         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1075         repbody->eadatasize = 0;
1076         repbody->aclsize = 0;
1077
1078         rc = mdt_init_ucred(info, reqbody);
1079         if (unlikely(rc))
1080                 GOTO(out_shrink, rc);
1081
1082         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1083         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1084                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1085                 lhc->mlh_reg_lh.cookie = 0;
1086         }
1087         mdt_exit_ucred(info);
1088         EXIT;
1089 out_shrink:
1090         mdt_shrink_reply(info);
1091         return rc;
1092 }
1093
1094 static const struct lu_device_operations mdt_lu_ops;
1095
1096 static int lu_device_is_mdt(struct lu_device *d)
1097 {
1098         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1099 }
1100
1101 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1102                          void *karg, void *uarg);
1103
1104 static int mdt_set_info(struct mdt_thread_info *info)
1105 {
1106         struct ptlrpc_request *req = mdt_info_req(info);
1107         char *key;
1108         void *val;
1109         int keylen, vallen, rc = 0;
1110         ENTRY;
1111
1112         rc = req_capsule_server_pack(info->mti_pill);
1113         if (rc)
1114                 RETURN(rc);
1115
1116         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1117         if (key == NULL) {
1118                 DEBUG_REQ(D_HA, req, "no set_info key");
1119                 RETURN(-EFAULT);
1120         }
1121
1122         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1123                                       RCL_CLIENT);
1124
1125         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1126         if (val == NULL) {
1127                 DEBUG_REQ(D_HA, req, "no set_info val");
1128                 RETURN(-EFAULT);
1129         }
1130
1131         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1132                                       RCL_CLIENT);
1133
1134         /* Swab any part of val that needs swabbing here */
1135         if (KEY_IS(KEY_READ_ONLY)) {
1136                 req->rq_status = 0;
1137                 lustre_msg_set_status(req->rq_repmsg, 0);
1138
1139                 cfs_spin_lock(&req->rq_export->exp_lock);
1140                 if (*(__u32 *)val)
1141                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1142                 else
1143                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1144                 cfs_spin_unlock(&req->rq_export->exp_lock);
1145
1146         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1147                 struct changelog_setinfo *cs =
1148                         (struct changelog_setinfo *)val;
1149                 if (vallen != sizeof(*cs)) {
1150                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1151                         RETURN(-EINVAL);
1152                 }
1153                 if (ptlrpc_req_need_swab(req)) {
1154                         __swab64s(&cs->cs_recno);
1155                         __swab32s(&cs->cs_id);
1156                 }
1157
1158                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1159                                    vallen, val, NULL);
1160                 lustre_msg_set_status(req->rq_repmsg, rc);
1161
1162         } else {
1163                 RETURN(-EINVAL);
1164         }
1165         RETURN(0);
1166 }
1167
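/*
 * For illustration only (a sketch under assumptions, not the actual client
 * code): the KEY_CHANGELOG_CLEAR branch above expects a struct
 * changelog_setinfo payload, i.e. the record number up to which changelog
 * records may be purged plus the changelog user id, delivered through a
 * set_info call such as the hypothetical client-side fragment below
 * (endrec and id stand in for caller-provided values).
 */
#if 0
        struct changelog_setinfo cs = { .cs_recno = endrec, .cs_id = id };

        rc = obd_set_info_async(exp, strlen(KEY_CHANGELOG_CLEAR),
                                KEY_CHANGELOG_CLEAR, sizeof(cs), &cs, NULL);
#endif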
1168 static int mdt_connect(struct mdt_thread_info *info)
1169 {
1170         int rc;
1171         struct ptlrpc_request *req;
1172
1173         req = mdt_info_req(info);
1174         rc = target_handle_connect(req);
1175         if (rc == 0) {
1176                 LASSERT(req->rq_export != NULL);
1177                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1178                 rc = mdt_init_sec_level(info);
1179                 if (rc == 0)
1180                         rc = mdt_init_idmap(info);
1181                 if (rc != 0)
1182                         obd_disconnect(class_export_get(req->rq_export));
1183         } else {
1184                 rc = err_serious(rc);
1185         }
1186         return rc;
1187 }
1188
1189 static int mdt_disconnect(struct mdt_thread_info *info)
1190 {
1191         int rc;
1192         ENTRY;
1193
1194         rc = target_handle_disconnect(mdt_info_req(info));
1195         if (rc)
1196                 rc = err_serious(rc);
1197         RETURN(rc);
1198 }
1199
1200 static int mdt_sendpage(struct mdt_thread_info *info,
1201                         struct lu_rdpg *rdpg)
1202 {
1203         struct ptlrpc_request   *req = mdt_info_req(info);
1204         struct obd_export       *exp = req->rq_export;
1205         struct ptlrpc_bulk_desc *desc;
1206         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1207         int                      tmpcount;
1208         int                      tmpsize;
1209         int                      timeout;
1210         int                      i;
1211         int                      rc;
1212         ENTRY;
1213
1214         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1215                                     MDS_BULK_PORTAL);
1216         if (desc == NULL)
1217                 RETURN(-ENOMEM);
1218
1219         for (i = 0, tmpcount = rdpg->rp_count;
1220                 i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
1221                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1222                 ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
1223         }
1224
1225         LASSERT(desc->bd_nob == rdpg->rp_count);
1226         rc = sptlrpc_svc_wrap_bulk(req, desc);
1227         if (rc)
1228                 GOTO(free_desc, rc);
1229
1230         rc = ptlrpc_start_bulk_transfer(desc);
1231         if (rc)
1232                 GOTO(free_desc, rc);
1233
1234         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1235                 GOTO(abort_bulk, rc = 0);
1236
1237         timeout = (int) req->rq_deadline - cfs_time_current_sec();
1238         if (timeout < 0)
1239                 CERROR("Req deadline already passed %lu (now: %lu)\n",
1240                        req->rq_deadline, cfs_time_current_sec());
1241         *lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(max(timeout, 1)),
1242                                     cfs_time_seconds(1), NULL, NULL);
1243         rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc) ||
1244                           exp->exp_failed || exp->exp_abort_active_req, lwi);
1245         LASSERT (rc == 0 || rc == -ETIMEDOUT);
1246
1247         if (rc == 0) {
1248                 if (desc->bd_success &&
1249                     desc->bd_nob_transferred == rdpg->rp_count)
1250                         GOTO(free_desc, rc);
1251
1252                 rc = -ETIMEDOUT;
1253                 if (exp->exp_abort_active_req || exp->exp_failed)
1254                         GOTO(abort_bulk, rc);
1255         }
1256
1257         DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
1258                   (rc == -ETIMEDOUT) ? "timeout" : "network error",
1259                   desc->bd_nob_transferred, rdpg->rp_count,
1260                   exp->exp_client_uuid.uuid,
1261                   exp->exp_connection->c_remote_uuid.uuid);
1262
1263         class_fail_export(exp);
1264
1265         EXIT;
1266 abort_bulk:
1267         ptlrpc_abort_bulk(desc);
1268 free_desc:
1269         ptlrpc_free_bulk(desc);
1270         return rc;
1271 }
1272
1273 #ifdef HAVE_SPLIT_SUPPORT
1274 /*
1275  * Retrieve dir entries from the page and insert them into the slave object.
1276  * This should really be in the osd layer, but since it will not be in the
1277  * final product, just do it here and do not define more moo APIs for it.
1278  */
1279 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1280                               int size)
1281 {
1282         struct mdt_object *object = info->mti_object;
1283         struct lu_fid *lf = &info->mti_tmp_fid2;
1284         struct md_attr *ma = &info->mti_attr;
1285         struct lu_dirpage *dp;
1286         struct lu_dirent *ent;
1287         int rc = 0, offset = 0;
1288         ENTRY;
1289
1290         /* Make sure we have at least one entry. */
1291         if (size == 0)
1292                 RETURN(-EINVAL);
1293
1294         /*
1295          * Disable transactions for this name insert, since it would otherwise
1296          * include many transactions.
1297          */
1298         info->mti_no_need_trans = 1;
1299         /*
1300          * When writing a dir page, there is no need to update the parent's
1301          * ctime, and no permission check is done for name_insert.
1302          */
1303         ma->ma_attr.la_ctime = 0;
1304         ma->ma_attr.la_valid = LA_MODE;
1305         ma->ma_valid = MA_INODE;
1306
1307         cfs_kmap(page);
1308         dp = page_address(page);
1309         offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
1310
1311         for (ent = lu_dirent_start(dp); ent != NULL;
1312              ent = lu_dirent_next(ent)) {
1313                 struct lu_name *lname;
1314                 char *name;
1315
1316                 if (le16_to_cpu(ent->lde_namelen) == 0)
1317                         continue;
1318
1319                 fid_le_to_cpu(lf, &ent->lde_fid);
1320                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1321                         ma->ma_attr.la_mode = S_IFDIR;
1322                 else
1323                         ma->ma_attr.la_mode = 0;
1324                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1325                 if (name == NULL)
1326                         GOTO(out, rc = -ENOMEM);
1327
1328                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1329                 lname = mdt_name(info->mti_env, name,
1330                                  le16_to_cpu(ent->lde_namelen));
1331                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1332                 rc = mdo_name_insert(info->mti_env,
1333                                      md_object_next(&object->mot_obj),
1334                                      lname, lf, ma);
1335                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1336                 if (rc) {
1337                         CERROR("Can't insert %*.*s, rc %d\n",
1338                                le16_to_cpu(ent->lde_namelen),
1339                                le16_to_cpu(ent->lde_namelen),
1340                                ent->lde_name, rc);
1341                         GOTO(out, rc);
1342                 }
1343
1344                 offset += lu_dirent_size(ent);
1345                 if (offset >= size)
1346                         break;
1347         }
1348         EXIT;
1349 out:
1350         cfs_kunmap(page);
1351         return rc;
1352 }
1353
1354 static int mdt_bulk_timeout(void *data)
1355 {
1356         ENTRY;
1357
1358         CERROR("mdt bulk transfer timeout\n");
1359
1360         RETURN(1);
1361 }
1362
1363 static int mdt_writepage(struct mdt_thread_info *info)
1364 {
1365         struct ptlrpc_request   *req = mdt_info_req(info);
1366         struct mdt_body         *reqbody;
1367         struct l_wait_info      *lwi;
1368         struct ptlrpc_bulk_desc *desc;
1369         struct page             *page;
1370         int                rc;
1371         ENTRY;
1372
1373
1374         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1375         if (reqbody == NULL)
1376                 RETURN(err_serious(-EFAULT));
1377
1378         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1379         if (desc == NULL)
1380                 RETURN(err_serious(-ENOMEM));
1381
1382         /* allocate the page for the desc */
1383         page = cfs_alloc_page(CFS_ALLOC_STD);
1384         if (page == NULL)
1385                 GOTO(desc_cleanup, rc = -ENOMEM);
1386
1387         CDEBUG(D_INFO, "Received page offset %d size %d \n",
1388                (int)reqbody->size, (int)reqbody->nlink);
1389
1390         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1391                               (int)reqbody->nlink);
1392
1393         rc = sptlrpc_svc_prep_bulk(req, desc);
1394         if (rc != 0)
1395                 GOTO(cleanup_page, rc);
1396         /*
1397          * Check if client was evicted while we were doing i/o before touching
1398          * network.
1399          */
1400         OBD_ALLOC_PTR(lwi);
1401         if (!lwi)
1402                 GOTO(cleanup_page, rc = -ENOMEM);
1403
1404         if (desc->bd_export->exp_failed)
1405                 rc = -ENOTCONN;
1406         else
1407                 rc = ptlrpc_start_bulk_transfer(desc);
1408         if (rc == 0) {
1409                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
1410                                             mdt_bulk_timeout, desc);
1411                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1412                                   desc->bd_export->exp_failed, lwi);
1413                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1414                 if (rc == -ETIMEDOUT) {
1415                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1416                         ptlrpc_abort_bulk(desc);
1417                 } else if (desc->bd_export->exp_failed) {
1418                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1419                         rc = -ENOTCONN;
1420                         ptlrpc_abort_bulk(desc);
1421                 } else if (!desc->bd_success ||
1422                            desc->bd_nob_transferred != desc->bd_nob) {
1423                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1424                                   desc->bd_success ?
1425                                   "truncated" : "network error on",
1426                                   desc->bd_nob_transferred, desc->bd_nob);
1427                         /* XXX should this be a different errno? */
1428                         rc = -ETIMEDOUT;
1429                 }
1430         } else {
1431                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1432         }
1433         if (rc)
1434                 GOTO(cleanup_lwi, rc);
1435         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1436
1437 cleanup_lwi:
1438         OBD_FREE_PTR(lwi);
1439 cleanup_page:
1440         cfs_free_page(page);
1441 desc_cleanup:
1442         ptlrpc_free_bulk(desc);
1443         RETURN(rc);
1444 }
1445 #endif
1446
1447 static int mdt_readpage(struct mdt_thread_info *info)
1448 {
1449         struct mdt_object *object = info->mti_object;
1450         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1451         struct mdt_body   *reqbody;
1452         struct mdt_body   *repbody;
1453         int                rc;
1454         int                i;
1455         ENTRY;
1456
1457         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1458                 RETURN(err_serious(-ENOMEM));
1459
1460         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1461         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1462         if (reqbody == NULL || repbody == NULL)
1463                 RETURN(err_serious(-EFAULT));
1464
1465         /*
1466          * Prepare @rdpg before calling the lower layers and doing the transfer
1467          * itself. Here reqbody->size contains the offset (hash) at which to
1468          * start reading and reqbody->nlink the number of bytes to read.
1469          */
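        /*
         * Worked example (an illustration added here, not part of the
         * protocol definition): assuming a 4096-byte CFS_PAGE_SIZE, a
         * request with rp_count = 5000 gives
         * rp_npages = (5000 + 4095) >> CFS_PAGE_SHIFT = 2, so two pages
         * are allocated below and filled by mo_readpage().
         */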
1470         rdpg->rp_hash = reqbody->size;
1471         if (rdpg->rp_hash != reqbody->size) {
1472                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1473                        rdpg->rp_hash, reqbody->size);
1474                 RETURN(-EFAULT);
1475         }
1476
1477         rdpg->rp_attrs = reqbody->mode;
1478         rdpg->rp_count  = reqbody->nlink;
1479         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT;
1480         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1481         if (rdpg->rp_pages == NULL)
1482                 RETURN(-ENOMEM);
1483
1484         for (i = 0; i < rdpg->rp_npages; ++i) {
1485                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1486                 if (rdpg->rp_pages[i] == NULL)
1487                         GOTO(free_rdpg, rc = -ENOMEM);
1488         }
1489
1490         /* call lower layers to fill allocated pages with directory data */
1491         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1492         if (rc)
1493                 GOTO(free_rdpg, rc);
1494
1495         /* send pages to client */
1496         rc = mdt_sendpage(info, rdpg);
1497
1498         EXIT;
1499 free_rdpg:
1500
1501         for (i = 0; i < rdpg->rp_npages; i++)
1502                 if (rdpg->rp_pages[i] != NULL)
1503                         cfs_free_page(rdpg->rp_pages[i]);
1504         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1505
1506         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1507                 RETURN(0);
1508
1509         return rc;
1510 }
1511
1512 static int mdt_reint_internal(struct mdt_thread_info *info,
1513                               struct mdt_lock_handle *lhc,
1514                               __u32 op)
1515 {
1516         struct req_capsule      *pill = info->mti_pill;
1517         struct mdt_device       *mdt = info->mti_mdt;
1518         struct md_quota         *mq = md_quota(info->mti_env);
1519         struct mdt_body         *repbody;
1520         int                      rc = 0;
1521         ENTRY;
1522
1523         /* pack reply */
1524         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1525                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1526                                      mdt->mdt_max_mdsize);
1527         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1528                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1529                                      mdt->mdt_max_cookiesize);
1530
1531         rc = req_capsule_server_pack(pill);
1532         if (rc != 0) {
1533                 CERROR("Can't pack response, rc %d\n", rc);
1534                 RETURN(err_serious(rc));
1535         }
1536
1537         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1538                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1539                 LASSERT(repbody);
1540                 repbody->eadatasize = 0;
1541                 repbody->aclsize = 0;
1542         }
1543
1544         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
1545                 GOTO(out_shrink, rc = err_serious(-EFAULT));
1546
1547         rc = mdt_reint_unpack(info, op);
1548         if (rc != 0) {
1549                 CERROR("Can't unpack reint, rc %d\n", rc);
1550                 GOTO(out_shrink, rc = err_serious(rc));
1551         }
1552
1553         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1554
1555         /* for replay no cookie / lmm is needed, the client already has them */
1556         if (info->mti_spec.no_create == 1)  {
1557                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1558                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1559
1560                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1561                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1562                                              0);
1563         }
1564
1565         rc = mdt_init_ucred_reint(info);
1566         if (rc)
1567                 GOTO(out_shrink, rc);
1568
1569         rc = mdt_fix_attr_ucred(info, op);
1570         if (rc != 0)
1571                 GOTO(out_ucred, rc = err_serious(rc));
1572
1573         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1574                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1575                 GOTO(out_ucred, rc);
1576         }
1577         mq->mq_exp = info->mti_exp;
1578         rc = mdt_reint_rec(info, lhc);
1579         EXIT;
1580 out_ucred:
1581         mdt_exit_ucred(info);
1582 out_shrink:
1583         mdt_shrink_reply(info);
1584         return rc;
1585 }
1586
1587 static long mdt_reint_opcode(struct mdt_thread_info *info,
1588                              const struct req_format **fmt)
1589 {
1590         struct mdt_rec_reint *rec;
1591         long opc;
1592
1593         opc = err_serious(-EFAULT);
1594         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1595         if (rec != NULL) {
1596                 opc = rec->rr_opcode;
1597                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1598                 if (opc < REINT_MAX && fmt[opc] != NULL)
1599                         req_capsule_extend(info->mti_pill, fmt[opc]);
1600                 else {
1601                         CERROR("Unsupported opc: %ld\n", opc);
1602                         opc = err_serious(opc);
1603                 }
1604         }
1605         return opc;
1606 }
1607
1608 static int mdt_reint(struct mdt_thread_info *info)
1609 {
1610         long opc;
1611         int  rc;
1612
1613         static const struct req_format *reint_fmts[REINT_MAX] = {
1614                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1615                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1616                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1617                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1618                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1619                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1620                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1621         };
1622
1623         ENTRY;
1624
1625         opc = mdt_reint_opcode(info, reint_fmts);
1626         if (opc >= 0) {
1627                 /*
1628                  * No lock is possible here from the client to pass down to
1629                  * the reint code path.
1630                  */
1631                 rc = mdt_reint_internal(info, NULL, opc);
1632         } else {
1633                 rc = opc;
1634         }
1635
1636         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1637         RETURN(rc);
1638 }
1639
1640 /* this should sync the whole device */
1641 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1642 {
1643         struct dt_device *dt = mdt->mdt_bottom;
1644         int rc;
1645         ENTRY;
1646
1647         rc = dt->dd_ops->dt_sync(env, dt);
1648         RETURN(rc);
1649 }
1650
1651 /* this should sync this object */
1652 static int mdt_object_sync(struct mdt_thread_info *info)
1653 {
1654         struct md_object *next;
1655         int rc;
1656         ENTRY;
1657
1658         if (!mdt_object_exists(info->mti_object)) {
1659                 CWARN("Non-existent object "DFID"!\n",
1660                       PFID(mdt_object_fid(info->mti_object)));
1661                 RETURN(-ESTALE);
1662         }
1663         next = mdt_object_child(info->mti_object);
1664         rc = mo_object_sync(info->mti_env, next);
1665
1666         RETURN(rc);
1667 }
1668
1669 static int mdt_sync(struct mdt_thread_info *info)
1670 {
1671         struct req_capsule *pill = info->mti_pill;
1672         struct mdt_body *body;
1673         int rc;
1674         ENTRY;
1675
1676         /* The fid may be zero, so we call req_capsule_set() manually */
1677         req_capsule_set(pill, &RQF_MDS_SYNC);
1678
1679         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1680         if (body == NULL)
1681                 RETURN(err_serious(-EINVAL));
1682
1683         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1684                 RETURN(err_serious(-ENOMEM));
1685
1686         if (fid_seq(&body->fid1) == 0) {
1687                 /* sync the whole device */
1688                 rc = req_capsule_server_pack(pill);
1689                 if (rc == 0)
1690                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1691                 else
1692                         rc = err_serious(rc);
1693         } else {
1694                 /* sync an object */
1695                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1696                 if (rc == 0) {
1697                         rc = mdt_object_sync(info);
1698                         if (rc == 0) {
1699                                 struct md_object *next;
1700                                 const struct lu_fid *fid;
1701                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1702
1703                                 next = mdt_object_child(info->mti_object);
1704                                 info->mti_attr.ma_need = MA_INODE;
1705                                 info->mti_attr.ma_valid = 0;
1706                                 rc = mo_attr_get(info->mti_env, next,
1707                                                  &info->mti_attr);
1708                                 if (rc == 0) {
1709                                         body = req_capsule_server_get(pill,
1710                                                                 &RMF_MDT_BODY);
1711                                         fid = mdt_object_fid(info->mti_object);
1712                                         mdt_pack_attr2body(info, body, la, fid);
1713                                 }
1714                         }
1715                 } else
1716                         rc = err_serious(rc);
1717         }
1718         RETURN(rc);
1719 }
1720
1721 #ifdef HAVE_QUOTA_SUPPORT
1722 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
1723 {
1724         struct obd_quotactl *oqctl;
1725         struct req_capsule *pill = info->mti_pill;
1726         struct obd_export *exp = info->mti_exp;
1727         struct md_quota *mq = md_quota(info->mti_env);
1728         struct md_device *next = info->mti_mdt->mdt_child;
1729         int rc;
1730         ENTRY;
1731
1732         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1733         if (oqctl == NULL)
1734                 RETURN(-EPROTO);
1735
1736         /* remote client has no permission for quotacheck */
1737         if (unlikely(exp_connect_rmtclient(exp)))
1738                 RETURN(-EPERM);
1739
1740         rc = req_capsule_server_pack(pill);
1741         if (rc)
1742                 RETURN(rc);
1743
1744         mq->mq_exp = exp;
1745         rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next,
1746                                                oqctl->qc_type);
1747         RETURN(rc);
1748 }
1749
1750 static int mdt_quotactl_handle(struct mdt_thread_info *info)
1751 {
1752         struct obd_quotactl *oqctl, *repoqc;
1753         struct req_capsule *pill = info->mti_pill;
1754         struct obd_export *exp = info->mti_exp;
1755         struct md_quota *mq = md_quota(info->mti_env);
1756         struct md_device *next = info->mti_mdt->mdt_child;
1757         const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
1758         int id, rc;
1759         ENTRY;
1760
1761         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1762         if (oqctl == NULL)
1763                 RETURN(-EPROTO);
1764
1765         id = oqctl->qc_id;
1766         if (exp_connect_rmtclient(exp)) {
1767                 struct ptlrpc_request *req = mdt_info_req(info);
1768                 struct mdt_export_data *med = mdt_req2med(req);
1769                 struct lustre_idmap_table *idmap = med->med_idmap;
1770
1771                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1772                              oqctl->qc_cmd != Q_GETINFO))
1773                         RETURN(-EPERM);
1774
1775
1776                 if (oqctl->qc_type == USRQUOTA)
1777                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1778                                                      oqctl->qc_id);
1779                 else if (oqctl->qc_type == GRPQUOTA)
1780                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1781                                                      oqctl->qc_id);
1782                 else
1783                         RETURN(-EINVAL);
1784
1785                 if (id == CFS_IDMAP_NOTFOUND) {
1786                         CDEBUG(D_QUOTA, "no mapping for id %u\n",
1787                                oqctl->qc_id);
1788                         RETURN(-EACCES);
1789                 }
1790         }
1791
1792         rc = req_capsule_server_pack(pill);
1793         if (rc)
1794                 RETURN(rc);
1795
1796         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
1797         LASSERT(repoqc != NULL);
1798
1799         mq->mq_exp = exp;
1800         switch (oqctl->qc_cmd) {
1801         case Q_QUOTAON:
1802                 rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
1803                 break;
1804         case Q_QUOTAOFF:
1805                 rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type);
1806                 break;
1807         case Q_SETINFO:
1808                 rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
1809                                       &oqctl->qc_dqinfo);
1810                 break;
1811         case Q_GETINFO:
1812                 rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
1813                                       &oqctl->qc_dqinfo);
1814                 break;
1815         case Q_SETQUOTA:
1816                 rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
1817                                        &oqctl->qc_dqblk);
1818                 break;
1819         case Q_GETQUOTA:
1820                 rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
1821                                        &oqctl->qc_dqblk);
1822                 break;
1823         case Q_GETOINFO:
1824                 rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
1825                                        &oqctl->qc_dqinfo);
1826                 break;
1827         case Q_GETOQUOTA:
1828                 rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
1829                                         &oqctl->qc_dqblk);
1830                 break;
1831         case LUSTRE_Q_INVALIDATE:
1832                 rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
1833                 break;
1834         case LUSTRE_Q_FINVALIDATE:
1835                 rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
1836                 break;
1837         default:
1838                 CERROR("unsupported mdt_quotactl command: %d\n",
1839                        oqctl->qc_cmd);
1840                 RETURN(-EFAULT);
1841         }
1842
1843         *repoqc = *oqctl;
1844         RETURN(rc);
1845 }
1846 #endif
1847
1848
1849 /*
1850  * OBD PING and other handlers.
1851  */
1852 static int mdt_obd_ping(struct mdt_thread_info *info)
1853 {
1854         int rc;
1855         ENTRY;
1856
1857         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
1858
1859         rc = target_handle_ping(mdt_info_req(info));
1860         if (rc < 0)
1861                 rc = err_serious(rc);
1862         RETURN(rc);
1863 }
1864
1865 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
1866 {
1867         return err_serious(-EOPNOTSUPP);
1868 }
1869
1870 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
1871 {
1872         return err_serious(-EOPNOTSUPP);
1873 }
1874
1875
1876 /*
1877  * LLOG handlers.
1878  */
1879
1880 /** clone llog ctxt from child (mdd)
1881  * This allows remote llog (replicator) access.
1882  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child
1883  * where the context was originally set up, or we can handle them directly.
1884  * I choose the latter, but that means any llog contexts set up by the child
1885  * must be accessible by the mdt.  So we clone the context into our context
1886  * list here.
1887  */
1888 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
1889                                int idx)
1890 {
1891         struct md_device  *next = mdt->mdt_child;
1892         struct llog_ctxt *ctxt;
1893         int rc;
1894
1895         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
1896                 return 0;
1897
1898         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
1899         if (rc || ctxt == NULL) {
1900                 CERROR("Can't get mdd ctxt %d\n", rc);
1901                 return rc;
1902         }
1903
1904         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
1905         if (rc)
1906                 CERROR("Can't set mdt ctxt %d\n", rc);
1907
1908         return rc;
1909 }
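/*
 * With the child's context cloned above, the llog handlers further down
 * (mdt_llog_create() and friends) can call the generic
 * llog_origin_handle_*() helpers directly on the mdt's obd_device;
 * mdt_llog_ctxt_unclone() drops the clone again via the double
 * llog_ctxt_put() noted there.
 */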
1910
1911 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
1912                                  struct mdt_device *mdt, int idx)
1913 {
1914         struct llog_ctxt *ctxt;
1915
1916         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
1917         if (ctxt == NULL)
1918                 return 0;
1919         /* Put once for the get we just did, and once for the clone */
1920         llog_ctxt_put(ctxt);
1921         llog_ctxt_put(ctxt);
1922         return 0;
1923 }
1924
1925 static int mdt_llog_create(struct mdt_thread_info *info)
1926 {
1927         int rc;
1928
1929         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
1930         rc = llog_origin_handle_create(mdt_info_req(info));
1931         return (rc < 0 ? err_serious(rc) : rc);
1932 }
1933
1934 static int mdt_llog_destroy(struct mdt_thread_info *info)
1935 {
1936         int rc;
1937
1938         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
1939         rc = llog_origin_handle_destroy(mdt_info_req(info));
1940         return (rc < 0 ? err_serious(rc) : rc);
1941 }
1942
1943 static int mdt_llog_read_header(struct mdt_thread_info *info)
1944 {
1945         int rc;
1946
1947         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
1948         rc = llog_origin_handle_read_header(mdt_info_req(info));
1949         return (rc < 0 ? err_serious(rc) : rc);
1950 }
1951
1952 static int mdt_llog_next_block(struct mdt_thread_info *info)
1953 {
1954         int rc;
1955
1956         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
1957         rc = llog_origin_handle_next_block(mdt_info_req(info));
1958         return (rc < 0 ? err_serious(rc) : rc);
1959 }
1960
1961 static int mdt_llog_prev_block(struct mdt_thread_info *info)
1962 {
1963         int rc;
1964
1965         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
1966         rc = llog_origin_handle_prev_block(mdt_info_req(info));
1967         return (rc < 0 ? err_serious(rc) : rc);
1968 }
1969
1970
1971 /*
1972  * DLM handlers.
1973  */
1974 static struct ldlm_callback_suite cbs = {
1975         .lcs_completion = ldlm_server_completion_ast,
1976         .lcs_blocking   = ldlm_server_blocking_ast,
1977         .lcs_glimpse    = NULL
1978 };
1979
1980 static int mdt_enqueue(struct mdt_thread_info *info)
1981 {
1982         struct ptlrpc_request *req;
1983         __u64 req_bits;
1984         int rc;
1985
1986         /*
1987          * info->mti_dlm_req already contains swapped and (if necessary)
1988          * converted dlm request.
1989          */
1990         LASSERT(info->mti_dlm_req != NULL);
1991
1992         req = mdt_info_req(info);
1993
1994         /*
1995          * A lock without inodebits makes no sense and will oops later in
1996          * ldlm. Let's check it now to see if we have a wrong lock from the
1997          * client or the bits got corrupted somewhere in mdt_intent_policy().
1998          */
1999         req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
2000         /* This is disabled because we need to support liblustre flock.
2001          * LASSERT(req_bits != 0);
2002          */
2003
2004         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2005                                   req, info->mti_dlm_req, &cbs);
2006         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2007         return rc ? err_serious(rc) : req->rq_status;
2008 }
2009
2010 static int mdt_convert(struct mdt_thread_info *info)
2011 {
2012         int rc;
2013         struct ptlrpc_request *req;
2014
2015         LASSERT(info->mti_dlm_req);
2016         req = mdt_info_req(info);
2017         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2018         return rc ? err_serious(rc) : req->rq_status;
2019 }
2020
2021 static int mdt_bl_callback(struct mdt_thread_info *info)
2022 {
2023         CERROR("bl callbacks should not happen on MDS\n");
2024         LBUG();
2025         return err_serious(-EOPNOTSUPP);
2026 }
2027
2028 static int mdt_cp_callback(struct mdt_thread_info *info)
2029 {
2030         CERROR("cp callbacks should not happen on MDS\n");
2031         LBUG();
2032         return err_serious(-EOPNOTSUPP);
2033 }
2034
2035 /*
2036  * sec context handlers
2037  */
2038 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2039 {
2040         int rc;
2041
2042         rc = mdt_handle_idmap(info);
2043
2044         if (unlikely(rc)) {
2045                 struct ptlrpc_request *req = mdt_info_req(info);
2046                 __u32                  opc;
2047
2048                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2049                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2050                         sptlrpc_svc_ctx_invalidate(req);
2051         }
2052
2053         OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);
2054
2055         return rc;
2056 }
2057
2058 static struct mdt_object *mdt_obj(struct lu_object *o)
2059 {
2060         LASSERT(lu_device_is_mdt(o->lo_dev));
2061         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2062 }
2063
2064 struct mdt_object *mdt_object_find(const struct lu_env *env,
2065                                    struct mdt_device *d,
2066                                    const struct lu_fid *f)
2067 {
2068         struct lu_object *o;
2069         struct mdt_object *m;
2070         ENTRY;
2071
2072         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2073         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2074         if (unlikely(IS_ERR(o)))
2075                 m = (struct mdt_object *)o;
2076         else
2077                 m = mdt_obj(o);
2078         RETURN(m);
2079 }
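/*
 * Reference handling note: on success mdt_object_find() returns an object
 * with a reference held for the caller, which must eventually be dropped
 * with mdt_object_put(); mdt_object_find_lock() and mdt_body_unpack() below
 * show the usual pattern, roughly:
 *
 *      o = mdt_object_find(env, mdt, fid);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      ...
 *      mdt_object_put(env, o);
 */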
2080
2081 /**
2082  * Asynchronous commit for mdt device.
2083  *
2084  * Pass the asynchronous commit call down the MDS stack.
2085  *
2086  * \param env environment
2087  * \param mdt the mdt device
2088  */
2089 static void mdt_device_commit_async(const struct lu_env *env,
2090                                     struct mdt_device *mdt)
2091 {
2092         struct dt_device *dt = mdt->mdt_bottom;
2093         int rc;
2094
2095         rc = dt->dd_ops->dt_commit_async(env, dt);
2096         if (unlikely(rc != 0))
2097                 CWARN("async commit start failed with rc = %d\n", rc);
2098 }
2099
2100 /**
2101  * Mark the lock as "synchronous".
2102  *
2103  * Mark the lock so that the transaction commit is deferred to unlock time.
2104  *
2105  * \param lock the lock to mark as "synchronous"
2106  *
2107  * \see mdt_is_lock_sync
2108  * \see mdt_save_lock
2109  */
2110 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2111 {
2112         lock->l_ast_data = (void*)1;
2113 }
2114
2115 /**
2116  * Check whether the lock is "synchronous" or not.
2117  *
2118  * \param lock the lock to check
2119  * \retval 1 the lock is "synchronous"
2120  * \retval 0 the lock isn't "synchronous"
2121  *
2122  * \see mdt_set_lock_sync
2123  * \see mdt_save_lock
2124  */
2125 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2126 {
2127         return lock->l_ast_data != NULL;
2128 }
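/*
 * A rough sketch of how the sync-lock mark is used further down (see
 * mdt_blocking_ast() and mdt_save_lock(); this only restates the flow
 * implemented there):
 *
 *      mdt_set_lock_sync(lock);                  COS conflict in blocking AST
 *      ...
 *      if (mdt_is_lock_sync(lock))               later, in mdt_save_lock()
 *              mdt_device_commit_async(info->mti_env, mdt);
 */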
2129
2130 /**
2131  * Blocking AST for mdt locks.
2132  *
2133  * Starts a transaction commit in case of a COS (Commit-on-Share) lock
2134  * conflict, or defers such a commit to mdt_save_lock().
2135  *
2136  * \param lock the lock which blocks a request or a cancelling lock
2137  * \param desc unused
2138  * \param data unused
2139  * \param flag indicates whether this is a cancelling or blocking callback
2140  * \retval 0
2141  * \see ldlm_blocking_ast_nocheck
2142  */
2143 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2144                      void *data, int flag)
2145 {
2146         struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
2147         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2148         int rc;
2149         ENTRY;
2150
2151         if (flag == LDLM_CB_CANCELING)
2152                 RETURN(0);
2153         lock_res_and_lock(lock);
2154         if (lock->l_blocking_ast != mdt_blocking_ast) {
2155                 unlock_res_and_lock(lock);
2156                 RETURN(0);
2157         }
2158         if (mdt_cos_is_enabled(mdt) &&
2159             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2160             lock->l_blocking_lock != NULL &&
2161             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2162                 mdt_set_lock_sync(lock);
2163         }
2164         rc = ldlm_blocking_ast_nocheck(lock);
2165
2166         /* There is no lock conflict if l_blocking_lock == NULL,
2167          * it indicates a blocking ast sent from ldlm_lock_decref_internal
2168          * when the last reference to a local lock was released */
2169         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2170                 struct lu_env env;
2171
2172                 rc = lu_env_init(&env, LCT_MD_THREAD);
2173                 if (unlikely(rc != 0))
2174                         CWARN("lu_env initialization failed with rc = %d, "
2175                               "cannot start asynchronous commit\n", rc);
2176                 else
2177                         mdt_device_commit_async(&env, mdt);
2178                 lu_env_fini(&env);
2179         }
2180         RETURN(rc);
2181 }
2182
2183 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2184                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2185 {
2186         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2187         ldlm_policy_data_t *policy = &info->mti_policy;
2188         struct ldlm_res_id *res_id = &info->mti_res_id;
2189         int rc;
2190         ENTRY;
2191
2192         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2193         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2194         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2195         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2196
2197         if (mdt_object_exists(o) < 0) {
2198                 if (locality == MDT_CROSS_LOCK) {
2199                         /* cross-ref object fix */
2200                         ibits &= ~MDS_INODELOCK_UPDATE;
2201                         ibits |= MDS_INODELOCK_LOOKUP;
2202                 } else {
2203                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2204                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2205                 }
2206                 /* No PDO lock on remote object */
2207                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2208         }
2209
2210         if (lh->mlh_type == MDT_PDO_LOCK) {
2211                 /* check for existence after the object is locked */
2212                 if (mdt_object_exists(o) == 0) {
2213                         /* Non-existent object shouldn't have PDO lock */
2214                         RETURN(-ESTALE);
2215                 } else {
2216                         /* Non-dir object shouldn't have PDO lock */
2217                         LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
2218                 }
2219         }
2220
2221         memset(policy, 0, sizeof(*policy));
2222         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2223
2224         /*
2225          * Take PDO lock on whole directory and build correct @res_id for lock
2226          * on part of directory.
2227          */
2228         if (lh->mlh_pdo_hash != 0) {
2229                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2230                 mdt_lock_pdo_mode(info, o, lh);
2231                 if (lh->mlh_pdo_mode != LCK_NL) {
2232                         /*
2233                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2234                          * is never going to be sent to client and we do not
2235                          * want it slowed down due to possible cancels.
2236                          */
2237                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2238                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2239                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2240                                           &info->mti_exp->exp_handle.h_cookie);
2241                         if (unlikely(rc))
2242                                 RETURN(rc);
2243                 }
2244
2245                 /*
2246                  * Finish initializing res_id with the name hash, marking the
2247                  * part of the directory that is being modified.
2248                  */
2249                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2250         }
2251
2252         policy->l_inodebits.bits = ibits;
2253
2254         /*
2255          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2256          * going to be sent to the client. If it is, the mdt_intent_policy()
2257          * path will fix it up and turn the FL_LOCAL flag off.
2258          */
2259         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2260                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2261                           &info->mti_exp->exp_handle.h_cookie);
2262         if (rc)
2263                 mdt_object_unlock(info, o, lh, 1);
2264         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2265                  lh->mlh_pdo_hash != 0 &&
2266                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2267                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 10);
2268         }
2269
2270         RETURN(rc);
2271 }
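/*
 * A minimal usage sketch for the locking above (error handling elided; the
 * lock handle initializers mdt_lock_pdo_init()/mdt_lock_reg_init() and the
 * MDT_LH_PARENT slot are assumed from the rest of the mdt code, not defined
 * in this hunk):
 *
 *      lh = &info->mti_lh[MDT_LH_PARENT];
 *      mdt_lock_pdo_init(lh, LCK_PW, name, namelen);
 *      rc = mdt_object_lock(info, parent, lh, MDS_INODELOCK_UPDATE,
 *                           MDT_LOCAL_LOCK);
 *      ...
 *      mdt_object_unlock(info, parent, lh, rc);
 *
 * With a non-zero mlh_pdo_hash this takes the whole-directory lock in the
 * weaker PDO mode first and then the regular lock on the hashed part of the
 * directory, as implemented above.
 */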
2272
2273 /**
2274  * Save a lock within request object.
2275  *
2276  * Either keep the lock referenced until client ACK or transaction commit
2277  * happens, or release the lock immediately, depending on the input
2278  * parameters. If COS is ON, a write lock is converted to a COS lock
2279  * before saving.
2280  *
2281  * \param info thread info object
2282  * \param h lock handle
2283  * \param mode lock mode
2284  * \param decref force immediate lock releasing
2285  */
2286 static
2287 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2288                    ldlm_mode_t mode, int decref)
2289 {
2290         ENTRY;
2291
2292         if (lustre_handle_is_used(h)) {
2293                 if (decref || !info->mti_has_trans ||
2294                     !(mode & (LCK_PW | LCK_EX))){
2295                         mdt_fid_unlock(h, mode);
2296                 } else {
2297                         struct mdt_device *mdt = info->mti_mdt;
2298                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2299                         struct ptlrpc_request *req = mdt_info_req(info);
2300                         int no_ack = 0;
2301
2302                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2303                                  h->cookie);
2304                         CDEBUG(D_HA, "request = %p reply state = %p"
2305                                " transno = "LPD64"\n",
2306                                req, req->rq_reply_state, req->rq_transno);
2307                         if (mdt_cos_is_enabled(mdt)) {
2308                                 no_ack = 1;
2309                                 ldlm_lock_downgrade(lock, LCK_COS);
2310                                 mode = LCK_COS;
2311                         }
2312                         ptlrpc_save_lock(req, h, mode, no_ack);
2313                         if (mdt_is_lock_sync(lock)) {
2314                                 CDEBUG(D_HA, "found sync-lock,"
2315                                        " async commit started\n");
2316                                 mdt_device_commit_async(info->mti_env,
2317                                                         mdt);
2318                         }
2319                         LDLM_LOCK_PUT(lock);
2320                 }
2321                 h->cookie = 0ull;
2322         }
2323
2324         EXIT;
2325 }
2326
2327 /**
2328  * Unlock mdt object.
2329  *
2330  * Immediately release the regular lock and the PDO lock, or save the
2331  * locks in the request and keep them referenced until client ACK or
2332  * transaction commit.
2333  *
2334  * \param info thread info object
2335  * \param o mdt object
2336  * \param lh mdt lock handle referencing regular and PDO locks
2337  * \param decref force immediate lock releasing
2338  */
2339 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2340                        struct mdt_lock_handle *lh, int decref)
2341 {
2342         ENTRY;
2343
2344         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2345         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2346
2347         EXIT;
2348 }
2349
2350 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2351                                         const struct lu_fid *f,
2352                                         struct mdt_lock_handle *lh,
2353                                         __u64 ibits)
2354 {
2355         struct mdt_object *o;
2356
2357         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2358         if (!IS_ERR(o)) {
2359                 int rc;
2360
2361                 rc = mdt_object_lock(info, o, lh, ibits,
2362                                      MDT_LOCAL_LOCK);
2363                 if (rc != 0) {
2364                         mdt_object_put(info->mti_env, o);
2365                         o = ERR_PTR(rc);
2366                 }
2367         }
2368         return o;
2369 }
2370
2371 void mdt_object_unlock_put(struct mdt_thread_info * info,
2372                            struct mdt_object * o,
2373                            struct mdt_lock_handle *lh,
2374                            int decref)
2375 {
2376         mdt_object_unlock(info, o, lh, decref);
2377         mdt_object_put(info->mti_env, o);
2378 }
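/*
 * A minimal sketch of how the find/lock helpers above are typically paired
 * (error handling abbreviated); lh is assumed to be one of info->mti_lh[]
 * already initialized with the desired lock mode:
 *
 *      o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      ...
 *      mdt_object_unlock_put(info, o, lh, rc);
 */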
2379
2380 static struct mdt_handler *mdt_handler_find(__u32 opc,
2381                                             struct mdt_opc_slice *supported)
2382 {
2383         struct mdt_opc_slice *s;
2384         struct mdt_handler   *h;
2385
2386         h = NULL;
2387         for (s = supported; s->mos_hs != NULL; s++) {
2388                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2389                         h = s->mos_hs + (opc - s->mos_opc_start);
2390                         if (likely(h->mh_opc != 0))
2391                                 LASSERTF(h->mh_opc == opc,
2392                                          "opcode mismatch %d != %d\n",
2393                                          h->mh_opc, opc);
2394                         else
2395                                 h = NULL; /* unsupported opc */
2396                         break;
2397                 }
2398         }
2399         return h;
2400 }
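/*
 * Illustration of the table walked above, assuming a slice array of the
 * usual shape (the names below are only an example of the layout, not a
 * definition from this file):
 *
 *      static struct mdt_opc_slice example_handlers[] = {
 *              { .mos_opc_start = MDS_GETATTR, .mos_opc_end = MDS_LAST_OPC,
 *                .mos_hs = example_mds_ops },
 *              { .mos_hs = NULL }
 *      };
 *
 * mdt_handler_find(opc, example_handlers) then returns
 * &example_mds_ops[opc - MDS_GETATTR], provided that entry's mh_opc is
 * non-zero; otherwise NULL is returned for an unsupported opcode.
 */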
2401
2402 static int mdt_lock_resname_compat(struct mdt_device *m,
2403                                    struct ldlm_request *req)
2404 {
2405         /* XXX something... later. */
2406         return 0;
2407 }
2408
2409 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2410 {
2411         /* XXX something... later. */
2412         return 0;
2413 }
2414
2415 /*
2416  * Generic code handling requests that have struct mdt_body passed in:
2417  *
2418  *  - extract mdt_body from request and save it in @info, if present;
2419  *
2420  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2421  *  @info;
2422  *
2423  *  - if HABEO_CORPUS flag is set for this request type check whether object
2424  *  actually exists on storage (lu_object_exists()).
2425  *
2426  */
2427 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2428 {
2429         const struct mdt_body    *body;
2430         struct mdt_object        *obj;
2431         const struct lu_env      *env;
2432         struct req_capsule       *pill;
2433         int                       rc;
2434         ENTRY;
2435
2436         env = info->mti_env;
2437         pill = info->mti_pill;
2438
2439         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2440         if (body == NULL)
2441                 RETURN(-EFAULT);
2442
2443         if (!(body->valid & OBD_MD_FLID))
2444                 RETURN(0);
2445
2446         if (!fid_is_sane(&body->fid1)) {
2447                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2448                 RETURN(-EINVAL);
2449         }
2450
2451         /*
2452          * Do not get size or any capa fields before we check that the request
2453          * actually contains a capa. There are some requests which do not, for
2454          * instance MDS_IS_SUBDIR.
2455          */
2456         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2457             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2458                 mdt_set_capainfo(info, 0, &body->fid1,
2459                                  req_capsule_client_get(pill, &RMF_CAPA1));
2460
2461         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2462         if (!IS_ERR(obj)) {
2463                 if ((flags & HABEO_CORPUS) &&
2464                     !mdt_object_exists(obj)) {
2465                         mdt_object_put(env, obj);
2466                         /* for capability renewal ENOENT will be handled
2467                          * in mdt_renew_capa */
2468                         if (body->valid & OBD_MD_FLOSSCAPA)
2469                                 rc = 0;
2470                         else
2471                                 rc = -ENOENT;
2472                 } else {
2473                         info->mti_object = obj;
2474                         rc = 0;
2475                 }
2476         } else
2477                 rc = PTR_ERR(obj);
2478
2479         RETURN(rc);
2480 }
2481
2482 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2483 {
2484         struct req_capsule *pill = info->mti_pill;
2485         int rc;
2486         ENTRY;
2487
2488         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2489                 rc = mdt_body_unpack(info, flags);
2490         else
2491                 rc = 0;
2492
2493         if (rc == 0 && (flags & HABEO_REFERO)) {
2494                 struct mdt_device *mdt = info->mti_mdt;
2495
2496                 /* Pack reply. */
2497
2498                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2499                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2500                                              mdt->mdt_max_mdsize);
2501                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2502                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
2503                                              mdt->mdt_max_cookiesize);
2504
2505                 rc = req_capsule_server_pack(pill);
2506         }
2507         RETURN(rc);
2508 }
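/*
 * Summary of the handler flags as used in this file (see mdt_body_unpack()
 * above and mdt_req_handle() below): HABEO_CORPUS - the request carries an
 * mdt_body whose object must exist on storage; HABEO_REFERO - the reply
 * buffers are packed here; HABEO_CLAVIS - the request carries an ldlm
 * request; MUTABOR - the operation modifies the filesystem and is refused
 * with -EROFS on read-only exports.
 */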
2509
2510 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2511 {
2512         struct md_device *next = m->mdt_child;
2513
2514         return next->md_ops->mdo_init_capa_ctxt(env, next,
2515                                                 m->mdt_opts.mo_mds_capa,
2516                                                 m->mdt_capa_timeout,
2517                                                 m->mdt_capa_alg,
2518                                                 m->mdt_capa_keys);
2519 }
2520
2521 /*
2522  * Invoke handler for this request opc. Also do necessary preprocessing
2523  * (according to handler ->mh_flags), and post-processing (setting of
2524  * ->last_{xid,committed}).
2525  */
2526 static int mdt_req_handle(struct mdt_thread_info *info,
2527                           struct mdt_handler *h, struct ptlrpc_request *req)
2528 {
2529         int   rc, serious = 0;
2530         __u32 flags;
2531
2532         ENTRY;
2533
2534         LASSERT(h->mh_act != NULL);
2535         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2536         LASSERT(current->journal_info == NULL);
2537
2538         /*
2539          * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2540          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2541          * etc., without talking to the mdt authors first. Checking the same
2542          * thing there again is useless, and returning a 0 error without packing
2543          * a reply is buggy! Handlers either pack a reply or return an error.
2544          *
2545          * We return 0 here and do not send any reply in order to emulate a
2546          * network failure. Do not send any reply if any NET related fail_id
2547          * has occurred.
2548          */
2549         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2550                 RETURN(0);
2551
2552         rc = 0;
2553         flags = h->mh_flags;
2554         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2555
2556         if (h->mh_fmt != NULL) {
2557                 req_capsule_set(info->mti_pill, h->mh_fmt);
2558                 rc = mdt_unpack_req_pack_rep(info, flags);
2559         }
2560
2561         if (rc == 0 && flags & MUTABOR &&
2562             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2563                 /* should it be rq_status? */
2564                 rc = -EROFS;
2565
2566         if (rc == 0 && flags & HABEO_CLAVIS) {
2567                 struct ldlm_request *dlm_req;
2568
2569                 LASSERT(h->mh_fmt != NULL);
2570
2571                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2572                 if (dlm_req != NULL) {
2573                         if (info->mti_mdt->mdt_opts.mo_compat_resname)
2574                                 rc = mdt_lock_resname_compat(info->mti_mdt,
2575                                                              dlm_req);
2576                         info->mti_dlm_req = dlm_req;
2577                 } else {
2578                         rc = -EFAULT;
2579                 }
2580         }
2581
2582         /* capability settings changed via /proc, need to reinitialize the ctxt */
2583         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2584                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2585                 info->mti_mdt->mdt_capa_conf = 0;
2586         }
2587
2588         if (likely(rc == 0)) {
2589                 /*
2590                  * Process the request; there can be two types of rc:
2591                  * 1) errors with msg unpack/pack, or other failures outside
2592                  * the operation itself. These are counted as serious errors;
2593                  * 2) errors during the fs operation, which should be placed
2594                  * in rq_status only.
2595                  */
2596                 rc = h->mh_act(info);
2597                 if (rc == 0 &&
2598                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2599                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2600                                   "pack reply and returned 0 error\n",
2601                                   h->mh_name);
2602                         LBUG();
2603                 }
2604                 serious = is_serious(rc);
2605                 rc = clear_serious(rc);
2606         } else
2607                 serious = 1;
2608
2609         req->rq_status = rc;
2610
2611         /*
2612          * ELDLM_* codes which are > 0 should be in rq_status only, as should
2613          * all non-serious errors.
2614          */
2615         if (rc > 0 || !serious)
2616                 rc = 0;
2617
2618         LASSERT(current->journal_info == NULL);
2619
2620         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2621             info->mti_mdt->mdt_opts.mo_compat_resname) {
2622                 struct ldlm_reply *dlmrep;
2623
2624                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2625                 if (dlmrep != NULL)
2626                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2627         }
2628
2629         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2630         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
2631                 target_committed_to_req(req);
2632
2633         if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
2634                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2635                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2636                 LBUG();
2637         }
2638
2639         target_send_reply(req, rc, info->mti_fail_id);
2640         RETURN(0);
2641 }
2642
2643 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2644 {
2645         lh->mlh_type = MDT_NUL_LOCK;
2646         lh->mlh_reg_lh.cookie = 0ull;
2647         lh->mlh_reg_mode = LCK_MINMODE;
2648         lh->mlh_pdo_lh.cookie = 0ull;
2649         lh->mlh_pdo_mode = LCK_MINMODE;
2650 }
2651
2652 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2653 {
2654         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2655         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2656 }
2657
2658 /*
2659  * Initialize fields of struct mdt_thread_info. Other fields are left in an
2660  * uninitialized state, because it is too expensive to zero out the whole
2661  * mdt_thread_info (> 1K) on each request arrival.
2662  */
2663 static void mdt_thread_info_init(struct ptlrpc_request *req,
2664                                  struct mdt_thread_info *info)
2665 {
2666         int i;
2667         struct md_capainfo *ci;
2668
2669         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2670         info->mti_pill = &req->rq_pill;
2671
2672         /* lock handle */
2673         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2674                 mdt_lock_handle_init(&info->mti_lh[i]);
2675
2676         /* mdt device: it can be NULL during CONNECT */
2677         if (req->rq_export) {
2678                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2679                 info->mti_exp = req->rq_export;
2680         } else
2681                 info->mti_mdt = NULL;
2682         info->mti_env = req->rq_svc_thread->t_env;
2683         ci = md_capainfo(info->mti_env);
2684         memset(ci, 0, sizeof *ci);
2685         if (req->rq_export) {
2686                 if (exp_connect_rmtclient(req->rq_export))
2687                         ci->mc_auth = LC_ID_CONVERT;
2688                 else if (req->rq_export->exp_connect_flags &
2689                          OBD_CONNECT_MDS_CAPA)
2690                         ci->mc_auth = LC_ID_PLAIN;
2691                 else
2692                         ci->mc_auth = LC_ID_NONE;
2693         }
2694
2695         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
2696         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
2697         info->mti_mos[0] = NULL;
2698         info->mti_mos[1] = NULL;
2699         info->mti_mos[2] = NULL;
2700         info->mti_mos[3] = NULL;
2701
2702         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
2703         info->mti_body = NULL;
2704         info->mti_object = NULL;
2705         info->mti_dlm_req = NULL;
2706         info->mti_has_trans = 0;
2707         info->mti_no_need_trans = 0;
2708         info->mti_cross_ref = 0;
2709         info->mti_opdata = 0;
2710
2711         /* Do not check for split by default. */
2712         info->mti_spec.sp_ck_split = 0;
2713         info->mti_spec.no_create = 0;
2714 }
2715
2716 static void mdt_thread_info_fini(struct mdt_thread_info *info)
2717 {
2718         int i;
2719
2720         req_capsule_fini(info->mti_pill);
2721         if (info->mti_object != NULL) {
2722                 /*
2723                  * Freeing an object may lead to an OSD level transaction;
2724                  * do not let it mess with MDT. bz19385.
2725                  */
2726                 info->mti_no_need_trans = 1;
2727                 mdt_object_put(info->mti_env, info->mti_object);
2728                 info->mti_object = NULL;
2729         }
2730         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2731                 mdt_lock_handle_fini(&info->mti_lh[i]);
2732         info->mti_env = NULL;
2733 }
2734
2735 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
2736                                        struct obd_device *obd, int *process)
2737 {
2738         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2739         case MDS_CONNECT: /* This will never get here, but for completeness. */
2740         case OST_CONNECT: /* This will never get here, but for completeness. */
2741         case MDS_DISCONNECT:
2742         case OST_DISCONNECT:
2743                *process = 1;
2744                RETURN(0);
2745
2746         case MDS_CLOSE:
2747         case MDS_DONE_WRITING:
2748         case MDS_SYNC: /* used in unmounting */
2749         case OBD_PING:
2750         case MDS_REINT:
2751         case SEQ_QUERY:
2752         case FLD_QUERY:
2753         case LDLM_ENQUEUE:
2754                 *process = target_queue_recovery_request(req, obd);
2755                 RETURN(0);
2756
2757         default:
2758                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
2759                 *process = -EAGAIN;
2760                 RETURN(0);
2761         }
2762 }
2763
2764 /*
2765  * Handle recovery. Return:
2766  *        +1: continue request processing;
2767  *       -ve: abort immediately with the given error code;
2768  *         0: send reply with error code in req->rq_status;
2769  */
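/*
 * A sketch of how the caller is expected to act on these return values
 * (this only restates the contract above; the real dispatch code is not
 * part of this hunk):
 *
 *      rc = mdt_recovery(info);
 *      if (rc == +1)
 *              rc = mdt_req_handle(info, h, req);       continue processing
 *      else if (rc < 0)
 *              return rc;                                abort immediately
 *      else
 *              return 0;                                 reply carries rq_status
 */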
2770 static int mdt_recovery(struct mdt_thread_info *info)
2771 {
2772         struct ptlrpc_request *req = mdt_info_req(info);
2773         struct obd_device *obd;
2774
2775         ENTRY;
2776
2777         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2778         case MDS_CONNECT:
2779         case SEC_CTX_INIT:
2780         case SEC_CTX_INIT_CONT:
2781         case SEC_CTX_FINI:
2782                 {
2783 #if 0
2784                         int rc;
2785
2786                         rc = mdt_handle_idmap(info);
2787                         if (rc)
2788                                 RETURN(rc);
2789                         else
2790 #endif
2791                                 RETURN(+1);
2792                 }
2793         }
2794
2795         if (unlikely(!class_connected_export(req->rq_export))) {
2796                 CERROR("operation %d on unconnected MDS from %s\n",
2797                        lustre_msg_get_opc(req->rq_reqmsg),
2798                        libcfs_id2str(req->rq_peer));
2799                 /* FIXME: For CMD cleanup, when mds_B stops, the requests
2800                  * from mds_A will get -ENOTCONN (especially ping requests),
2801                  * which triggers the mds_A deactivate timeout; then when
2802                  * mds_A cleans up, the cleanup process is suspended because
2803                  * the deactivate timeout is not zero.
2804                  */
2805                 req->rq_status = -ENOTCONN;
2806                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
2807                 RETURN(0);
2808         }
2809
2810         /* sanity check: if the xid matches, the request must be marked as a
2811          * resent or replayed */
2812         if (req_xid_is_last(req)) {
2813                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
2814                       (MSG_RESENT | MSG_REPLAY))) {
2815                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
2816                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
2817                                   lustre_msg_get_flags(req->rq_reqmsg));
2818                         LBUG();
2819                         req->rq_status = -ENOTCONN;
2820                         RETURN(-ENOTCONN);
2821                 }
2822         }
2823
2824         /* else: note that the opposite is not always true; a RESENT req
2825          * after a failover will usually not match the last_xid, since it was
2826          * likely never committed. A REPLAYed request will almost never match
2827          * the last_xid, though it could for a committed, but still retained,
2828          * open. */
2829
2830         obd = req->rq_export->exp_obd;
2831
2832         /* Recovery is in progress: decide whether to queue, process or reject. */
2833         if (unlikely(obd->obd_recovering)) {
2834                 int rc;
2835                 int should_process;
2836                 DEBUG_REQ(D_INFO, req, "Got new replay");
2837                 rc = mdt_filter_recovery_request(req, obd, &should_process);
2838                 if (rc != 0 || !should_process)
2839                         RETURN(rc);
2840                 else if (should_process < 0) {
2841                         req->rq_status = should_process;
2842                         rc = ptlrpc_error(req);
2843                         RETURN(rc);
2844                 }
2845         }
2846         RETURN(+1);
2847 }
2848
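/*
 * Verify that the client used the right protocol version for this opcode
 * family: connection and ping traffic is checked against LUSTRE_OBD_VERSION,
 * metadata and quota requests against LUSTRE_MDS_VERSION, lock requests
 * against LUSTRE_DLM_VERSION and llog requests against LUSTRE_LOG_VERSION.
 * Unknown opcodes are rejected with -ENOTSUPP.
 */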
2849 static int mdt_msg_check_version(struct lustre_msg *msg)
2850 {
2851         int rc;
2852
2853         switch (lustre_msg_get_opc(msg)) {
2854         case MDS_CONNECT:
2855         case MDS_DISCONNECT:
2856         case OBD_PING:
2857         case SEC_CTX_INIT:
2858         case SEC_CTX_INIT_CONT:
2859         case SEC_CTX_FINI:
2860                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2861                 if (rc)
2862                         CERROR("bad opc %u version %08x, expecting %08x\n",
2863                                lustre_msg_get_opc(msg),
2864                                lustre_msg_get_version(msg),
2865                                LUSTRE_OBD_VERSION);
2866                 break;
2867         case MDS_GETSTATUS:
2868         case MDS_GETATTR:
2869         case MDS_GETATTR_NAME:
2870         case MDS_STATFS:
2871         case MDS_READPAGE:
2872         case MDS_WRITEPAGE:
2873         case MDS_IS_SUBDIR:
2874         case MDS_REINT:
2875         case MDS_CLOSE:
2876         case MDS_DONE_WRITING:
2877         case MDS_PIN:
2878         case MDS_SYNC:
2879         case MDS_GETXATTR:
2880         case MDS_SETXATTR:
2881         case MDS_SET_INFO:
2882         case MDS_GET_INFO:
2883         case MDS_QUOTACHECK:
2884         case MDS_QUOTACTL:
2885         case QUOTA_DQACQ:
2886         case QUOTA_DQREL:
2887         case SEQ_QUERY:
2888         case FLD_QUERY:
2889                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2890                 if (rc)
2891                         CERROR("bad opc %u version %08x, expecting %08x\n",
2892                                lustre_msg_get_opc(msg),
2893                                lustre_msg_get_version(msg),
2894                                LUSTRE_MDS_VERSION);
2895                 break;
2896         case LDLM_ENQUEUE:
2897         case LDLM_CONVERT:
2898         case LDLM_BL_CALLBACK:
2899         case LDLM_CP_CALLBACK:
2900                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2901                 if (rc)
2902                         CERROR("bad opc %u version %08x, expecting %08x\n",
2903                                lustre_msg_get_opc(msg),
2904                                lustre_msg_get_version(msg),
2905                                LUSTRE_DLM_VERSION);
2906                 break;
2907         case OBD_LOG_CANCEL:
2908         case LLOG_ORIGIN_HANDLE_CREATE:
2909         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2910         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2911         case LLOG_ORIGIN_HANDLE_CLOSE:
2912         case LLOG_ORIGIN_HANDLE_DESTROY:
2913         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2914         case LLOG_CATINFO:
2915                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2916                 if (rc)
2917                         CERROR("bad opc %u version %08x, expecting %08x\n",
2918                                lustre_msg_get_opc(msg),
2919                                lustre_msg_get_version(msg),
2920                                LUSTRE_LOG_VERSION);
2921                 break;
2922         default:
2923                 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
2924                 rc = -ENOTSUPP;
2925         }
2926         return rc;
2927 }
2928
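/*
 * First stage of request handling: check the message version, let the
 * recovery logic decide whether the request may be processed now, then look
 * up the handler for this opcode in the supported slice and run it.
 * Requests with an unsupported opcode are answered with -ENOTSUPP.
 */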
2929 static int mdt_handle0(struct ptlrpc_request *req,
2930                        struct mdt_thread_info *info,
2931                        struct mdt_opc_slice *supported)
2932 {
2933         struct mdt_handler *h;
2934         struct lustre_msg  *msg;
2935         int                 rc;
2936
2937         ENTRY;
2938
2939         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
2940                 RETURN(0);
2941
2942         LASSERT(current->journal_info == NULL);
2943
2944         msg = req->rq_reqmsg;
2945         rc = mdt_msg_check_version(msg);
2946         if (likely(rc == 0)) {
2947                 rc = mdt_recovery(info);
2948                 if (likely(rc == +1)) {
2949                         h = mdt_handler_find(lustre_msg_get_opc(msg),
2950                                              supported);
2951                         if (likely(h != NULL)) {
2952                                 rc = mdt_req_handle(info, h, req);
2953                         } else {
2954                                 CERROR("Unsupported opcode: 0x%x\n",
2955                                        lustre_msg_get_opc(msg));
2956                                 req->rq_status = -ENOTSUPP;
2957                                 rc = ptlrpc_error(req);
2958                                 RETURN(rc);
2959                         }
2960                 }
2961         } else
2962                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
2963         RETURN(rc);
2964 }
2965
2966 /*
2967  * MDT handler function called by a ptlrpc service thread when a request arrives.
2968  *
2969  * XXX common "target" functionality should be factored into separate module
2970  * shared by mdt, ost and stand-alone services like fld.
2971  */
2972 static int mdt_handle_common(struct ptlrpc_request *req,
2973                              struct mdt_opc_slice *supported)
2974 {
2975         struct lu_env          *env;
2976         struct mdt_thread_info *info;
2977         int                     rc;
2978         ENTRY;
2979
2980         env = req->rq_svc_thread->t_env;
2981         LASSERT(env != NULL);
2982         LASSERT(env->le_ses != NULL);
2983         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
2984         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
2985         LASSERT(info != NULL);
2986
2987         mdt_thread_info_init(req, info);
2988
2989         rc = mdt_handle0(req, info, supported);
2990
2991         mdt_thread_info_fini(info);
2992         RETURN(rc);
2993 }
2994
2995 /*
2996  * This is called from the recovery code as the handler for _all_ RPC types,
2997  * including FLD and SEQ requests.
2998  */
2999 int mdt_recovery_handle(struct ptlrpc_request *req)
3000 {
3001         int rc;
3002         ENTRY;
3003
3004         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3005         case FLD_QUERY:
3006                 rc = mdt_handle_common(req, mdt_fld_handlers);
3007                 break;
3008         case SEQ_QUERY:
3009                 rc = mdt_handle_common(req, mdt_seq_handlers);
3010                 break;
3011         default:
3012                 rc = mdt_handle_common(req, mdt_regular_handlers);
3013                 break;
3014         }
3015
3016         RETURN(rc);
3017 }
3018
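/*
 * Thin per-service entry points: each ptlrpc service registered by the MDT
 * forwards its requests to mdt_handle_common() with the opcode slice that
 * particular service is allowed to handle.
 */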
3019 static int mdt_regular_handle(struct ptlrpc_request *req)
3020 {
3021         return mdt_handle_common(req, mdt_regular_handlers);
3022 }
3023
3024 static int mdt_readpage_handle(struct ptlrpc_request *req)
3025 {
3026         return mdt_handle_common(req, mdt_readpage_handlers);
3027 }
3028
3029 static int mdt_xmds_handle(struct ptlrpc_request *req)
3030 {
3031         return mdt_handle_common(req, mdt_xmds_handlers);
3032 }
3033
3034 static int mdt_mdsc_handle(struct ptlrpc_request *req)
3035 {
3036         return mdt_handle_common(req, mdt_seq_handlers);
3037 }
3038
3039 static int mdt_mdss_handle(struct ptlrpc_request *req)
3040 {
3041         return mdt_handle_common(req, mdt_seq_handlers);
3042 }
3043
3044 static int mdt_dtss_handle(struct ptlrpc_request *req)
3045 {
3046         return mdt_handle_common(req, mdt_seq_handlers);
3047 }
3048
3049 static int mdt_fld_handle(struct ptlrpc_request *req)
3050 {
3051         return mdt_handle_common(req, mdt_fld_handlers);
3052 }
3053
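/*
 * Intent codes understood by the MDT.  They index the mdt_it_flavor[] table
 * below, which maps each intent to its request format, flags and handler.
 */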
3054 enum mdt_it_code {
3055         MDT_IT_OPEN,
3056         MDT_IT_OCREAT,
3057         MDT_IT_CREATE,
3058         MDT_IT_GETATTR,
3059         MDT_IT_READDIR,
3060         MDT_IT_LOOKUP,
3061         MDT_IT_UNLINK,
3062         MDT_IT_TRUNC,
3063         MDT_IT_GETXATTR,
3064         MDT_IT_NR
3065 };
3066
3067 static int mdt_intent_getattr(enum mdt_it_code opcode,
3068                               struct mdt_thread_info *info,
3069                               struct ldlm_lock **,
3070                               int);
3071 static int mdt_intent_reint(enum mdt_it_code opcode,
3072                             struct mdt_thread_info *info,
3073                             struct ldlm_lock **,
3074                             int);
3075
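/*
 * Per-intent dispatch descriptor: it_fmt is the expected request format,
 * it_flags carries the same MUTABOR / HABEO_REFERO flags used by the regular
 * handler tables, it_act is the function that executes the intent, and
 * it_reint is the REINT_* opcode for intents served by mdt_intent_reint().
 */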
3076 static struct mdt_it_flavor {
3077         const struct req_format *it_fmt;
3078         __u32                    it_flags;
3079         int                    (*it_act)(enum mdt_it_code,
3080                                          struct mdt_thread_info *,
3081                                          struct ldlm_lock **,
3082                                          int);
3083         long                     it_reint;
3084 } mdt_it_flavor[] = {
3085         [MDT_IT_OPEN]     = {
3086                 .it_fmt   = &RQF_LDLM_INTENT,
3087                 /*.it_flags = HABEO_REFERO,*/
3088                 .it_flags = 0,
3089                 .it_act   = mdt_intent_reint,
3090                 .it_reint = REINT_OPEN
3091         },
3092         [MDT_IT_OCREAT]   = {
3093                 .it_fmt   = &RQF_LDLM_INTENT,
3094                 .it_flags = MUTABOR,
3095                 .it_act   = mdt_intent_reint,
3096                 .it_reint = REINT_OPEN
3097         },
3098         [MDT_IT_CREATE]   = {
3099                 .it_fmt   = &RQF_LDLM_INTENT,
3100                 .it_flags = MUTABOR,
3101                 .it_act   = mdt_intent_reint,
3102                 .it_reint = REINT_CREATE
3103         },
3104         [MDT_IT_GETATTR]  = {
3105                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3106                 .it_flags = HABEO_REFERO,
3107                 .it_act   = mdt_intent_getattr
3108         },
3109         [MDT_IT_READDIR]  = {
3110                 .it_fmt   = NULL,
3111                 .it_flags = 0,
3112                 .it_act   = NULL
3113         },
3114         [MDT_IT_LOOKUP]   = {
3115                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3116                 .it_flags = HABEO_REFERO,
3117                 .it_act   = mdt_intent_getattr
3118         },
3119         [MDT_IT_UNLINK]   = {
3120                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3121                 .it_flags = MUTABOR,
3122                 .it_act   = NULL,
3123                 .it_reint = REINT_UNLINK
3124         },
3125         [MDT_IT_TRUNC]    = {
3126                 .it_fmt   = NULL,
3127                 .it_flags = MUTABOR,
3128                 .it_act   = NULL
3129         },
3130         [MDT_IT_GETXATTR] = {
3131                 .it_fmt   = NULL,
3132                 .it_flags = 0,
3133                 .it_act   = NULL
3134         }
3135 };
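/*
 * Illustrative sketch only, not code from this file: an intent policy
 * dispatching through the table above would look roughly like
 *
 *      flv = &mdt_it_flavor[opc];
 *      if (flv->it_act != NULL)
 *              rc = flv->it_act(opc, info, lockp, flags);
 *      else
 *              rc = -EOPNOTSUPP;
 *
 * The actual opcode decoding and reply packing are done by the MDT intent
 * policy code.
 */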
3136
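/*
 * Hand an already granted lock back to the client in place of the lock it
 * originally asked for: drop the local reader/writer references, attach the
 * client's export and the original lock's remote handle and ASTs, add the
 * lock to the export's lock hash, and return ELDLM_LOCK_REPLACED.  If the
 * lock was already given to this client (a reconstructed reply for a resent
 * request), it is returned as-is.
 */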
3137 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3138                             struct ldlm_lock **lockp,
3139                             struct ldlm_lock *new_lock,
3140                             struct mdt_lock_handle *lh,
3141                             int flags)
3142 {
3143         struct ptlrpc_request  *req = mdt_info_req(info);
3144         struct ldlm_lock       *lock = *lockp;
3145
3146         /*
3147          * Take the lock from the handle only when a possible resend did not
3148          * already find one.
3149          */
3150         if (new_lock == NULL)
3151                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3152
3153         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3154                 lh->mlh_reg_lh.cookie = 0;
3155                 RETURN(0);
3156         }
3157
3158         LASSERTF(new_lock != NULL,
3159                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3160
3161         /*
3162          * If we've already given this lock to a client once, then we should
3163          * have no readers or writers.  Otherwise, we should have one reader
3164          * _or_ writer ref (which will be zeroed below) before returning the
3165          * lock to a client.
3166          */
3167         if (new_lock->l_export == req->rq_export) {
3168                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3169         } else {
3170                 LASSERT(new_lock->l_export == NULL);
3171                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3172         }
3173
3174         *lockp = new_lock;
3175
3176         if (new_lock->l_export == req->rq_export) {
3177                 /*
3178                  * Already gave this to the client, which means that we
3179                  * reconstructed a reply.
3180                  */
3181                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3182                         MSG_RESENT);
3183                 lh->mlh_reg_lh.cookie = 0;
3184                 RETURN(ELDLM_LOCK_REPLACED);
3185         }
3186
3187         /*
3188          * Fixup the lock to be given to the client.
3189          */
3190         lock_res_and_lock(new_lock);
3191         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3192          * a possible blocking AST. */
3193         while (new_lock->l_readers > 0) {
3194                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3195                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3196                 new_lock->l_readers--;
3197         }
3198         while (new_lock->l_writers > 0) {
3199                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3200                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3201                 new_lock->l_writers--;
3202         }
3203
3204         new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3205         new_lock->l_blocking_ast = lock->l_blocking_ast;
3206         new_lock->l_completion_ast = lock->l_completion_ast;
3207         new_lock->l_remote_handle = lock->l_remote_handle;
3208         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3209
3210         unlock_res_and_lock(new_lock);
3211
3212         cfs_hash_add(new_lock->l_export->exp_lock_hash,
3213                      &new_lock->l_remote_handle,
3214                      &new_lock->l_exp_hash);
3215
3216         LDLM_LOCK_RELEASE(new_lock);
3217         lh->mlh_reg_lh.cookie = 0;
3218
3219         RETURN(ELDLM_LOCK_REPLACED);
3220 }
3221
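/*
 * For a RESENT request, look up the lock the client already holds by the
 * remote handle sent in the DLM request.  If it is found in the export's
 * lock hash, restore its cookie and granted mode into the mdt_lock_handle
 * so the reply references the existing lock instead of a new enqueue.
 */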
3222 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3223                                     struct ldlm_lock *new_lock,
3224                                     struct ldlm_lock **old_lock,
3225                                     struct mdt_lock_handle *lh)
3226 {
3227         struct ptlrpc_request  *req = mdt_info_req(info);
3228         struct obd_export      *exp = req->rq_export;
3229         struct lustre_handle    remote_hdl;
3230         struct ldlm_request    *dlmreq;
3231         struct ldlm_lock       *lock;
3232
3233         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3234                 return;
3235
3236         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3237         remote_hdl = dlmreq->lock_handle[0];
3238
3239         lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3240         if (lock) {
3241                 if (lock != new_lock) {
3242                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3243                         lh->mlh_reg_mode = lock->l_granted_mode;
3244
3245                         LDLM_DEBUG(lock, "Restoring lock cookie");
3246                         DEBUG_REQ(D_DLMTRACE, req,
3247                                   "restoring lock cookie "LPX64,
3248                                   lh->mlh_reg_lh.cookie);
3249                         if (old_lock)
3250                                 *old_lock = LDLM_LOCK_GET(lock);