1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #ifndef EXPORT_SYMTAB
50 # define EXPORT_SYMTAB
51 #endif
52 #define DEBUG_SUBSYSTEM S_MDS
53
54 #include <linux/module.h>
55 /*
56  * struct OBD_{ALLOC,FREE}*()
57  */
58 #include <obd_support.h>
59 /* struct ptlrpc_request */
60 #include <lustre_net.h>
61 /* struct obd_export */
62 #include <lustre_export.h>
63 /* struct obd_device */
64 #include <obd.h>
65 /* lu2dt_dev() */
66 #include <dt_object.h>
67 #include <lustre_mds.h>
68 #include <lustre_mdt.h>
69 #include "mdt_internal.h"
70 #ifdef HAVE_QUOTA_SUPPORT
71 # include <lustre_quota.h>
72 #endif
73 #include <lustre_acl.h>
74 #include <lustre_param.h>
75 #include <lustre_fsfilt.h>
76
77 mdl_mode_t mdt_mdl_lock_modes[] = {
78         [LCK_MINMODE] = MDL_MINMODE,
79         [LCK_EX]      = MDL_EX,
80         [LCK_PW]      = MDL_PW,
81         [LCK_PR]      = MDL_PR,
82         [LCK_CW]      = MDL_CW,
83         [LCK_CR]      = MDL_CR,
84         [LCK_NL]      = MDL_NL,
85         [LCK_GROUP]   = MDL_GROUP
86 };
87
88 ldlm_mode_t mdt_dlm_lock_modes[] = {
89         [MDL_MINMODE] = LCK_MINMODE,
90         [MDL_EX]      = LCK_EX,
91         [MDL_PW]      = LCK_PW,
92         [MDL_PR]      = LCK_PR,
93         [MDL_CW]      = LCK_CW,
94         [MDL_CR]      = LCK_CR,
95         [MDL_NL]      = LCK_NL,
96         [MDL_GROUP]   = LCK_GROUP
97 };
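
#if 0
/*
 * A minimal sketch, not the actual definitions (the real helpers presumably
 * live in mdt_internal.h), of the conversion routines used below,
 * mdt_dlm_mode2mdl_mode() and mdt_mdl_mode2dlm_mode(): each LCK_* / MDL_*
 * value doubles as an index into the tables above, so conversion is a plain
 * table lookup.
 */
static inline mdl_mode_t example_dlm_mode2mdl_mode(ldlm_mode_t mode)
{
        return mdt_mdl_lock_modes[mode];
}

static inline ldlm_mode_t example_mdl_mode2dlm_mode(mdl_mode_t mode)
{
        return mdt_dlm_lock_modes[mode];
}
#endif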
98
99 /*
100  * Initialized in mdt_mod_init().
101  */
102 unsigned long mdt_num_threads;
103
104 /* ptlrpc request handler for MDT. All handlers are
105  * grouped into several slices - struct mdt_opc_slice,
106  * and stored in an array - mdt_handlers[].
107  */
108 struct mdt_handler {
109         /* The name of this handler. */
110         const char *mh_name;
111         /* Fail id for this handler, checked at the beginning of the handler. */
112         int         mh_fail_id;
113         /* Operation code for this handler */
114         __u32       mh_opc;
115         /* flags are listed in enum mdt_handler_flags below. */
116         __u32       mh_flags;
117         /* The actual handler function to execute. */
118         int (*mh_act)(struct mdt_thread_info *info);
119         /* Request format for this handler's requests. */
120         const struct req_format *mh_fmt;
121 };
122
123 enum mdt_handler_flags {
124         /*
125          * struct mdt_body is passed in the incoming message, and object
126          * identified by this fid exists on disk.
127          *
128          * "habeo corpus" == "I have a body"
129          */
130         HABEO_CORPUS = (1 << 0),
131         /*
132          * struct ldlm_request is passed in the incoming message.
133          *
134          * "habeo clavis" == "I have a key"
135          */
136         HABEO_CLAVIS = (1 << 1),
137         /*
138          * this request has fixed reply format, so that reply message can be
139          * packed by generic code.
140          *
141          * "habeo refero" == "I have a reply"
142          */
143         HABEO_REFERO = (1 << 2),
144         /*
145          * this request will modify something, so check whether the filesystem
146          * is readonly or not, then return -EROFS to client asap if necessary.
147          *
148          * "mutabor" == "I shall modify"
149          */
150         MUTABOR      = (1 << 3)
151 };
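
#if 0
/*
 * A hypothetical handler-table entry, purely for illustration (the real
 * tables near the end of this file are built differently, and the
 * OBD_FAIL_MDS_GETATTR_NET / RQF_MDS_GETATTR names are assumed here): a
 * GETATTR handler that expects an mdt_body in the request (HABEO_CORPUS)
 * and lets generic code pack its fixed-format reply (HABEO_REFERO).
 */
static struct mdt_handler example_getattr_handler = {
        .mh_name    = "mdt_getattr",
        .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
        .mh_opc     = MDS_GETATTR,
        .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
        .mh_act     = mdt_getattr,
        .mh_fmt     = &RQF_MDS_GETATTR
};
#endif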
152
153 struct mdt_opc_slice {
154         __u32               mos_opc_start;
155         int                 mos_opc_end;
156         struct mdt_handler *mos_hs;
157 };
158
159 static struct mdt_opc_slice mdt_regular_handlers[];
160 static struct mdt_opc_slice mdt_readpage_handlers[];
161 static struct mdt_opc_slice mdt_xmds_handlers[];
162 static struct mdt_opc_slice mdt_seq_handlers[];
163 static struct mdt_opc_slice mdt_fld_handlers[];
164
165 static struct mdt_device *mdt_dev(struct lu_device *d);
166 static int mdt_regular_handle(struct ptlrpc_request *req);
167 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
168 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
169                         struct getinfo_fid2path *fp);
170
171 static const struct lu_object_operations mdt_obj_ops;
172
173 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
174 {
175         if (!rep)
176                 return 0;
177         return (rep->lock_policy_res1 & flag);
178 }
179
180 void mdt_clear_disposition(struct mdt_thread_info *info,
181                            struct ldlm_reply *rep, int flag)
182 {
183         if (info)
184                 info->mti_opdata &= ~flag;
185         if (rep)
186                 rep->lock_policy_res1 &= ~flag;
187 }
188
189 void mdt_set_disposition(struct mdt_thread_info *info,
190                          struct ldlm_reply *rep, int flag)
191 {
192         if (info)
193                 info->mti_opdata |= flag;
194         if (rep)
195                 rep->lock_policy_res1 |= flag;
196 }
197
198 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
199 {
200         lh->mlh_pdo_hash = 0;
201         lh->mlh_reg_mode = lm;
202         lh->mlh_type = MDT_REG_LOCK;
203 }
204
205 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
206                        const char *name, int namelen)
207 {
208         lh->mlh_reg_mode = lm;
209         lh->mlh_type = MDT_PDO_LOCK;
210
211         if (name != NULL) {
212                 LASSERT(namelen > 0);
213                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
214         } else {
215                 LASSERT(namelen == 0);
216                 lh->mlh_pdo_hash = 0ull;
217         }
218 }
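
#if 0
/*
 * A short usage sketch, mirroring mdt_getattr_name_lock() below: the parent
 * gets a PR PDO lock keyed by the child's name (so operations on unrelated
 * names in the same directory can proceed in parallel), while the child gets
 * a plain PR regular lock.
 */
static void example_lock_init(struct mdt_thread_info *info,
                              const char *name, int namelen)
{
        struct mdt_lock_handle *lhp = &info->mti_lh[MDT_LH_PARENT];
        struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];

        mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
        mdt_lock_reg_init(lhc, LCK_PR);
}
#endif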
219
220 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
221                               struct mdt_lock_handle *lh)
222 {
223         mdl_mode_t mode;
224         ENTRY;
225
226         /*
227          * Any dir access needs a couple of locks:
228          *
229          * 1) on the part of the dir we are going to look up in or modify;
230          *
231          * 2) on the whole dir, to protect it from concurrent splitting and/or
232          * to flush the client's cache for readdir().
233          *
234          * So, for a given mode and object, this routine decides what lock mode
235          * to use for lock #2:
236          *
237          * 1) if the caller is going to look up in the dir, we only need to
238          * protect the dir from being split - LCK_CR;
239          *
240          * 2) if the caller is going to modify the dir, we need to protect it
241          * from being split and to flush the cache - LCK_CW;
242          *
243          * 3) if the caller is going to modify the dir and the dir seems ready
244          * for splitting, we need to protect it from any type of access
245          * (lookup/modify/split) - LCK_EX. --bzzz
246          */
247
248         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
249         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
250
251         /*
252          * Ask the underlying layer its opinion on the preferable PDO lock mode,
253          * with the access type passed as the regular lock mode:
254          *
255          * - MDL_MINMODE means that the lower layer does not want to specify a
256          * lock mode;
257          *
258          * - MDL_NL means that no PDO lock should be taken. This is used in some
259          * cases; for example, non-splittable directories need no PDO locks
260          * at all.
261          */
262         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
263                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
264
265         if (mode != MDL_MINMODE) {
266                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
267         } else {
268                 /*
269                  * The lower layer does not want to specify a locking mode. We do it
270                  * ourselves. No special protection is needed, just flush the
271                  * client's cache on modification and allow concurrent
272                  * modification.
273                  */
274                 switch (lh->mlh_reg_mode) {
275                 case LCK_EX:
276                         lh->mlh_pdo_mode = LCK_EX;
277                         break;
278                 case LCK_PR:
279                         lh->mlh_pdo_mode = LCK_CR;
280                         break;
281                 case LCK_PW:
282                         lh->mlh_pdo_mode = LCK_CW;
283                         break;
284                 default:
285                         CERROR("Unexpected lock type (0x%x)\n",
286                                (int)lh->mlh_reg_mode);
287                         LBUG();
288                 }
289         }
290
291         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
292         EXIT;
293 }
294
295 static int mdt_getstatus(struct mdt_thread_info *info)
296 {
297         struct mdt_device *mdt  = info->mti_mdt;
298         struct md_device  *next = mdt->mdt_child;
299         struct mdt_body   *repbody;
300         int                rc;
301
302         ENTRY;
303
304         rc = mdt_check_ucred(info);
305         if (rc)
306                 RETURN(err_serious(rc));
307
308         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
309                 RETURN(err_serious(-ENOMEM));
310
311         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
312         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
313         if (rc != 0)
314                 RETURN(rc);
315
316         repbody->valid |= OBD_MD_FLID;
317
318         if (mdt->mdt_opts.mo_mds_capa &&
319             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
320                 struct mdt_object  *root;
321                 struct lustre_capa *capa;
322
323                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
324                 if (IS_ERR(root))
325                         RETURN(PTR_ERR(root));
326
327                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
328                 LASSERT(capa);
329                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
330                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
331                                  0);
332                 mdt_object_put(info->mti_env, root);
333                 if (rc == 0)
334                         repbody->valid |= OBD_MD_FLMDSCAPA;
335         }
336
337         RETURN(rc);
338 }
339
340 static int mdt_statfs(struct mdt_thread_info *info)
341 {
342         struct md_device      *next  = info->mti_mdt->mdt_child;
343         struct ptlrpc_service *svc;
344         struct obd_statfs     *osfs;
345         int                    rc;
346
347         ENTRY;
348
349         svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;
350
351         /* This will trigger a watchdog timeout */
352         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
353                          (MDT_SERVICE_WATCHDOG_FACTOR *
354                           at_get(&svc->srv_at_estimate)) + 1);
355
356         rc = mdt_check_ucred(info);
357         if (rc)
358                 RETURN(err_serious(rc));
359
360         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
361                 rc = err_serious(-ENOMEM);
362         } else {
363                 osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
364                 rc = next->md_ops->mdo_statfs(info->mti_env, next,
365                                               &info->mti_u.ksfs);
366                 statfs_pack(osfs, &info->mti_u.ksfs);
367         }
368         RETURN(rc);
369 }
370
371 void mdt_pack_size2body(struct mdt_thread_info *info, struct mdt_object *o)
372 {
373         struct mdt_body *b;
374         struct lu_attr *attr = &info->mti_attr.ma_attr;
375
376         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
377
378         /* Check if Size-on-MDS is enabled. */
379         if ((mdt_conn_flags(info) & OBD_CONNECT_SOM) &&
380             S_ISREG(attr->la_mode) && mdt_sizeonmds_enabled(o)) {
381                 b->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
382                 b->size = attr->la_size;
383                 b->blocks = attr->la_blocks;
384         }
385 }
386
387 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
388                         const struct lu_attr *attr, const struct lu_fid *fid)
389 {
390         /*XXX should pack the reply body according to lu_valid*/
391         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
392                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
393                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
394                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
395
396         if (!S_ISREG(attr->la_mode))
397                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
398
399         b->atime      = attr->la_atime;
400         b->mtime      = attr->la_mtime;
401         b->ctime      = attr->la_ctime;
402         b->mode       = attr->la_mode;
403         b->size       = attr->la_size;
404         b->blocks     = attr->la_blocks;
405         b->uid        = attr->la_uid;
406         b->gid        = attr->la_gid;
407         b->flags      = attr->la_flags;
408         b->nlink      = attr->la_nlink;
409         b->rdev       = attr->la_rdev;
410
411         if (fid) {
412                 b->fid1 = *fid;
413                 b->valid |= OBD_MD_FLID;
414
415                 /* FIXME: these should be fixed when the new igif is ready. */
416                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
417                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
418                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
419
420                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
421                                 PFID(fid), b->nlink, b->mode, b->size);
422         }
423
424         if (info)
425                 mdt_body_reverse_idmap(info, b);
426 }
427
428 static inline int mdt_body_has_lov(const struct lu_attr *la,
429                                    const struct mdt_body *body)
430 {
431         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
432                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
433 }
434
435 static int mdt_getattr_internal(struct mdt_thread_info *info,
436                                 struct mdt_object *o)
437 {
438         struct md_object        *next = mdt_object_child(o);
439         const struct mdt_body   *reqbody = info->mti_body;
440         struct ptlrpc_request   *req = mdt_info_req(info);
441         struct md_attr          *ma = &info->mti_attr;
442         struct lu_attr          *la = &ma->ma_attr;
443         struct req_capsule      *pill = info->mti_pill;
444         const struct lu_env     *env = info->mti_env;
445         struct mdt_body         *repbody;
446         struct lu_buf           *buffer = &info->mti_buf;
447         int                     rc;
448         ENTRY;
449
450         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
451                 RETURN(err_serious(-ENOMEM));
452
453         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
454
455         ma->ma_valid = 0;
456
457         rc = mdt_object_exists(o);
458         if (rc < 0) {
459                 /* This object is located on a remote node. */
460                 repbody->fid1 = *mdt_object_fid(o);
461                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
462                 RETURN(0);
463         }
464
465         buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
466         buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);
467
468         /* If it is a dir object and the client requires the MEA, then get the MEA. */
469         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
470             reqbody->valid & OBD_MD_MEA) {
471                 /* Assumption: MDT_MD size is enough for lmv size. */
472                 ma->ma_lmv = buffer->lb_buf;
473                 ma->ma_lmv_size = buffer->lb_len;
474                 ma->ma_need = MA_LMV | MA_INODE;
475         } else {
476                 ma->ma_lmm = buffer->lb_buf;
477                 ma->ma_lmm_size = buffer->lb_len;
478                 ma->ma_need = MA_LOV | MA_INODE;
479         }
480
481         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
482             reqbody->valid & OBD_MD_FLDIREA  &&
483             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
484                 /* get default stripe info for this dir. */
485                 ma->ma_need |= MA_LOV_DEF;
486         }
487         rc = mo_attr_get(env, next, ma);
488         if (unlikely(rc)) {
489                 CERROR("getattr error for "DFID": %d\n",
490                         PFID(mdt_object_fid(o)), rc);
491                 RETURN(rc);
492         }
493
494         if (likely(ma->ma_valid & MA_INODE))
495                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
496         else
497                 RETURN(-EFAULT);
498
499         if (mdt_body_has_lov(la, reqbody)) {
500                 if (ma->ma_valid & MA_LOV) {
501                         LASSERT(ma->ma_lmm_size);
502                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
503                         repbody->eadatasize = ma->ma_lmm_size;
504                         if (S_ISDIR(la->la_mode))
505                                 repbody->valid |= OBD_MD_FLDIREA;
506                         else
507                                 repbody->valid |= OBD_MD_FLEASIZE;
508                 }
509                 if (ma->ma_valid & MA_LMV) {
510                         LASSERT(S_ISDIR(la->la_mode));
511                         repbody->eadatasize = ma->ma_lmv_size;
512                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
513                 }
514                 if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
515                         repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
516                 }
517         } else if (S_ISLNK(la->la_mode) &&
518                    reqbody->valid & OBD_MD_LINKNAME) {
519                 buffer->lb_buf = ma->ma_lmm;
520                 buffer->lb_len = reqbody->eadatasize;
521                 rc = mo_readlink(env, next, buffer);
522                 if (unlikely(rc <= 0)) {
523                         CERROR("readlink failed: %d\n", rc);
524                         rc = -EFAULT;
525                 } else {
526                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
527                                  rc -= 2;
528                         repbody->valid |= OBD_MD_LINKNAME;
529                         repbody->eadatasize = rc;
530                         /* NULL terminate */
531                         ((char*)ma->ma_lmm)[rc - 1] = 0;
532                         CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
533                                (char*)ma->ma_lmm, rc);
534                         rc = 0;
535                 }
536         }
537
538         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
539                 repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
540                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
541                 repbody->valid |= OBD_MD_FLMODEASIZE;
542                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
543                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
544                        repbody->max_cookiesize);
545         }
546
547         if (exp_connect_rmtclient(info->mti_exp) &&
548             reqbody->valid & OBD_MD_FLRMTPERM) {
549                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
550
551                 /* mdt_getattr_lock only */
552                 rc = mdt_pack_remote_perm(info, o, buf);
553                 if (rc) {
554                         repbody->valid &= ~OBD_MD_FLRMTPERM;
555                         repbody->aclsize = 0;
556                         RETURN(rc);
557                 } else {
558                         repbody->valid |= OBD_MD_FLRMTPERM;
559                         repbody->aclsize = sizeof(struct mdt_remote_perm);
560                 }
561         }
562 #ifdef CONFIG_FS_POSIX_ACL
563         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
564                  (reqbody->valid & OBD_MD_FLACL)) {
565                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
566                 buffer->lb_len = req_capsule_get_size(pill,
567                                                       &RMF_ACL, RCL_SERVER);
568                 if (buffer->lb_len > 0) {
569                         rc = mo_xattr_get(env, next, buffer,
570                                           XATTR_NAME_ACL_ACCESS);
571                         if (rc < 0) {
572                                 if (rc == -ENODATA) {
573                                         repbody->aclsize = 0;
574                                         repbody->valid |= OBD_MD_FLACL;
575                                         rc = 0;
576                                 } else if (rc == -EOPNOTSUPP) {
577                                         rc = 0;
578                                 } else {
579                                         CERROR("got acl size: %d\n", rc);
580                                 }
581                         } else {
582                                 repbody->aclsize = rc;
583                                 repbody->valid |= OBD_MD_FLACL;
584                                 rc = 0;
585                         }
586                 }
587         }
588 #endif
589
590         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
591             info->mti_mdt->mdt_opts.mo_mds_capa &&
592             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
593                 struct lustre_capa *capa;
594
595                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
596                 LASSERT(capa);
597                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
598                 rc = mo_capa_get(env, next, capa, 0);
599                 if (rc)
600                         RETURN(rc);
601                 repbody->valid |= OBD_MD_FLMDSCAPA;
602         }
603         RETURN(rc);
604 }
605
606 static int mdt_renew_capa(struct mdt_thread_info *info)
607 {
608         struct mdt_object  *obj = info->mti_object;
609         struct mdt_body    *body;
610         struct lustre_capa *capa, *c;
611         int rc;
612         ENTRY;
613
614         /* If the object doesn't exist, or the server has disabled capabilities,
615          * return directly; the client will see that the OBD_MD_FLOSSCAPA flag
616          * is not set in body->valid.
617          */
618         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
619             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
620                 RETURN(0);
621
622         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
623         LASSERT(body != NULL);
624
625         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
626         LASSERT(c);
627
628         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
629         LASSERT(capa);
630
631         *capa = *c;
632         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
633         if (rc == 0)
634                 body->valid |= OBD_MD_FLOSSCAPA;
635         RETURN(rc);
636 }
637
638 static int mdt_getattr(struct mdt_thread_info *info)
639 {
640         struct mdt_object       *obj = info->mti_object;
641         struct req_capsule      *pill = info->mti_pill;
642         struct mdt_body         *reqbody;
643         struct mdt_body         *repbody;
644         mode_t                   mode;
645         int                      md_size;
646         int rc;
647         ENTRY;
648
649         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
650         LASSERT(reqbody);
651
652         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
653                 rc = req_capsule_server_pack(pill);
654                 if (unlikely(rc))
655                         RETURN(err_serious(rc));
656                 rc = mdt_renew_capa(info);
657                 GOTO(out_shrink, rc);
658         }
659
660         LASSERT(obj != NULL);
661         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
662
663         mode = lu_object_attr(&obj->mot_obj.mo_lu);
664         if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
665             (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
666                 md_size = reqbody->eadatasize;
667         else
668                 md_size = info->mti_mdt->mdt_max_mdsize;
669
670         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);
671
672         rc = req_capsule_server_pack(pill);
673         if (unlikely(rc != 0))
674                 RETURN(err_serious(rc));
675
676         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
677         LASSERT(repbody != NULL);
678         repbody->eadatasize = 0;
679         repbody->aclsize = 0;
680
681         if (reqbody->valid & OBD_MD_FLRMTPERM)
682                 rc = mdt_init_ucred(info, reqbody);
683         else
684                 rc = mdt_check_ucred(info);
685         if (unlikely(rc))
686                 GOTO(out_shrink, rc);
687
688         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
689         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
690
691         /*
692          * Don't check capability at all, because rename might getattr for
693          * remote obj, and at that time no capability is available.
694          */
695         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
696         rc = mdt_getattr_internal(info, obj);
697         if (reqbody->valid & OBD_MD_FLRMTPERM)
698                 mdt_exit_ucred(info);
699         EXIT;
700 out_shrink:
701         mdt_shrink_reply(info);
702         return rc;
703 }
704
705 static int mdt_is_subdir(struct mdt_thread_info *info)
706 {
707         struct mdt_object     *o = info->mti_object;
708         struct req_capsule    *pill = info->mti_pill;
709         const struct mdt_body *body = info->mti_body;
710         struct mdt_body       *repbody;
711         int                    rc;
712         ENTRY;
713
714         LASSERT(o != NULL);
715
716         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
717
718         /*
719          * We save the last checked parent fid in @repbody->fid1 for the
720          * remote directory case.
721          */
722         LASSERT(fid_is_sane(&body->fid2));
723         LASSERT(mdt_object_exists(o) > 0);
724         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
725                            &body->fid2, &repbody->fid1);
726         if (rc == 0 || rc == -EREMOTE)
727                 repbody->valid |= OBD_MD_FLID;
728
729         RETURN(rc);
730 }
731
732 static int mdt_raw_lookup(struct mdt_thread_info *info,
733                           struct mdt_object *parent,
734                           const struct lu_name *lname,
735                           struct ldlm_reply *ldlm_rep)
736 {
737         struct md_object *next = mdt_object_child(info->mti_object);
738         const struct mdt_body *reqbody = info->mti_body;
739         struct lu_fid *child_fid = &info->mti_tmp_fid1;
740         struct mdt_body *repbody;
741         int rc;
742         ENTRY;
743
744         if (reqbody->valid != OBD_MD_FLID)
745                 RETURN(0);
746
747         LASSERT(!info->mti_cross_ref);
748
749         /* Only get the fid of this object by name. */
750         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
751                         &info->mti_spec);
752 #if 0
753         /* XXX is raw_lookup possible as intent operation? */
754         if (rc != 0) {
755                 if (rc == -ENOENT)
756                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
757                 RETURN(rc);
758         } else
759                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
760
761         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
762 #endif
763         if (rc == 0) {
764                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
765                 repbody->fid1 = *child_fid;
766                 repbody->valid = OBD_MD_FLID;
767         }
768         RETURN(1);
769 }
770
771 /*
772  * The UPDATE lock should be taken against the parent and released before exit;
773  * the child_bits lock should be taken against the child and returned back:
774  *            (1) a normal request should release the child lock;
775  *            (2) an intent request will grant the lock to the client.
776  */
777 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
778                                  struct mdt_lock_handle *lhc,
779                                  __u64 child_bits,
780                                  struct ldlm_reply *ldlm_rep)
781 {
782         struct ptlrpc_request  *req       = mdt_info_req(info);
783         struct mdt_body        *reqbody   = NULL;
784         struct mdt_object      *parent    = info->mti_object;
785         struct mdt_object      *child;
786         struct md_object       *next      = mdt_object_child(parent);
787         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
788         struct lu_name         *lname     = NULL;
789         const char             *name      = NULL;
790         int                     namelen   = 0;
791         struct mdt_lock_handle *lhp;
792         struct ldlm_lock       *lock;
793         struct ldlm_res_id     *res_id;
794         int                     is_resent;
795         int                     rc;
796
797         ENTRY;
798
799         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
800         LASSERT(ergo(is_resent,
801                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
802
803         LASSERT(parent != NULL);
804         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
805         if (name == NULL)
806                 RETURN(err_serious(-EFAULT));
807
808         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
809                                        RCL_CLIENT) - 1;
810         if (!info->mti_cross_ref) {
811                 /*
812                  * XXX: the check for "namelen == 0" is for getattr by fid
813                  * (OBD_CONNECT_ATTRFID); otherwise an empty name is not allowed,
814                  * that is, the name must contain at least one character and
815                  * the terminating '\0'.
816                  */
817                 if (namelen == 0) {
818                         reqbody = req_capsule_client_get(info->mti_pill,
819                                                          &RMF_MDT_BODY);
820                         LASSERT(fid_is_sane(&reqbody->fid2));
821                         name = NULL;
822
823                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
824                                "ldlm_rep = %p\n",
825                                PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
826                                ldlm_rep);
827                 } else {
828                         lname = mdt_name(info->mti_env, (char *)name, namelen);
829                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
830                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
831                                name, ldlm_rep);
832                 }
833         }
834         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
835
836         rc = mdt_object_exists(parent);
837         if (unlikely(rc == 0)) {
838                 LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
839                                 &parent->mot_obj.mo_lu,
840                                 "Parent doesn't exist!\n");
841                 RETURN(-ESTALE);
842         } else if (!info->mti_cross_ref) {
843                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
844                          PFID(mdt_object_fid(parent)));
845         }
846         if (lname) {
847                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
848                 if (rc != 0) {
849                         if (rc > 0)
850                                 rc = 0;
851                         RETURN(rc);
852                 }
853         }
854
855         if (info->mti_cross_ref) {
856                 /* Only getattr on the child. Parent is on another node. */
857                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
858                 child = parent;
859                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
860                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
861
862                 if (is_resent) {
863                         /* Do not take lock for resent case. */
864                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
865                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
866                                  lhc->mlh_reg_lh.cookie);
867                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
868                                                 &lock->l_resource->lr_name));
869                         LDLM_LOCK_PUT(lock);
870                         rc = 0;
871                 } else {
872                         mdt_lock_handle_init(lhc);
873                         mdt_lock_reg_init(lhc, LCK_PR);
874
875                         /*
876                          * The object's name is on another MDS; no lookup lock is
877                          * needed here, but an update lock is.
878                          */
879                         child_bits &= ~MDS_INODELOCK_LOOKUP;
880                         child_bits |= MDS_INODELOCK_UPDATE;
881
882                         rc = mdt_object_lock(info, child, lhc, child_bits,
883                                              MDT_LOCAL_LOCK);
884                 }
885                 if (rc == 0) {
886                         /* Finally, we can get attr for child. */
887                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
888                                          BYPASS_CAPA);
889                         rc = mdt_getattr_internal(info, child);
890                         if (unlikely(rc != 0))
891                                 mdt_object_unlock(info, child, lhc, 1);
892                 }
893                 RETURN(rc);
894         }
895
896         /* step 1: lock parent */
897         lhp = &info->mti_lh[MDT_LH_PARENT];
898         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
899         rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
900                              MDT_LOCAL_LOCK);
901
902         if (unlikely(rc != 0))
903                 RETURN(rc);
904
905         if (lname) {
906                 /* step 2: lookup child's fid by name */
907                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
908                                 &info->mti_spec);
909
910                 if (rc != 0) {
911                         if (rc == -ENOENT)
912                                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
913                         GOTO(out_parent, rc);
914                 } else
915                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
916         } else {
917                 *child_fid = reqbody->fid2;
918                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
919         }
920
921         /*
922          * step 3: find the child object by fid and lock it,
923          *         regardless of whether it is local or remote.
924          */
925         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
926
927         if (unlikely(IS_ERR(child)))
928                 GOTO(out_parent, rc = PTR_ERR(child));
929         if (is_resent) {
930                 /* Do not take lock for resent case. */
931                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
932                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
933                          lhc->mlh_reg_lh.cookie);
934
935                 res_id = &lock->l_resource->lr_name;
936                 if (!fid_res_name_eq(mdt_object_fid(child),
937                                     &lock->l_resource->lr_name)) {
938                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
939                                                  &lock->l_resource->lr_name),
940                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
941                                  (unsigned long)res_id->name[0],
942                                  (unsigned long)res_id->name[1],
943                                  (unsigned long)res_id->name[2],
944                                  PFID(mdt_object_fid(parent)));
945                           CWARN("Although resent, still did not get the child "
946                                 "lock, parent: "DFID" child: "DFID"\n",
947                                 PFID(mdt_object_fid(parent)),
948                                 PFID(mdt_object_fid(child)));
949                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
950                           LDLM_LOCK_PUT(lock);
951                           GOTO(relock, 0);
952                 }
953                 LDLM_LOCK_PUT(lock);
954                 rc = 0;
955         } else {
956                 struct md_attr *ma;
957 relock:
958                 ma = &info->mti_attr;
959
960                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
961                 mdt_lock_handle_init(lhc);
962                 mdt_lock_reg_init(lhc, LCK_PR);
963
964                 if (mdt_object_exists(child) == 0) {
965                         LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
966                                         &child->mot_obj.mo_lu,
967                                         "Object doesn't exist!\n");
968                         GOTO(out_child, rc = -ESTALE);
969                 }
970
971                 ma->ma_valid = 0;
972                 ma->ma_need = MA_INODE;
973                 rc = mo_attr_get(info->mti_env, next, ma);
974                 if (unlikely(rc != 0))
975                         GOTO(out_child, rc);
976
977                 /* If the file has not been changed for some time, we return
978                  * not only a LOOKUP lock, but also an UPDATE lock, and this
979                  * might save us an RPC on a later STAT. For directories, it
980                  * also lets negative dentries start working for this dir. */
981                 if (ma->ma_valid & MA_INODE &&
982                     ma->ma_attr.la_valid & LA_CTIME &&
983                     info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
984                     ma->ma_attr.la_ctime < cfs_time_current_sec())
985                         child_bits |= MDS_INODELOCK_UPDATE;
986
987                 rc = mdt_object_lock(info, child, lhc, child_bits,
988                                      MDT_CROSS_LOCK);
989
990                 if (unlikely(rc != 0))
991                         GOTO(out_child, rc);
992         }
993
994         /* finally, we can get attr for child. */
995         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
996         rc = mdt_getattr_internal(info, child);
997         if (unlikely(rc != 0)) {
998                 mdt_object_unlock(info, child, lhc, 1);
999         } else {
1000                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1001                 if (lock) {
1002                         struct mdt_body *repbody;
1003
1004                         /* Debugging code. */
1005                         res_id = &lock->l_resource->lr_name;
1006                         LDLM_DEBUG(lock, "Returning lock to client");
1007                         LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1008                                                  &lock->l_resource->lr_name),
1009                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1010                                  (unsigned long)res_id->name[0],
1011                                  (unsigned long)res_id->name[1],
1012                                  (unsigned long)res_id->name[2],
1013                                  PFID(mdt_object_fid(child)));
1014                         /*
1015                          * Pack Size-on-MDS inode attributes to the body if
1016                          * update lock is given.
1017                          */
1018                         repbody = req_capsule_server_get(info->mti_pill,
1019                                                          &RMF_MDT_BODY);
1020                         if (lock->l_policy_data.l_inodebits.bits &
1021                             MDS_INODELOCK_UPDATE)
1022                                 mdt_pack_size2body(info, child);
1023                         LDLM_LOCK_PUT(lock);
1024                 }
1025         }
1026         EXIT;
1027 out_child:
1028         mdt_object_put(info->mti_env, child);
1029 out_parent:
1030         mdt_object_unlock(info, parent, lhp, 1);
1031         return rc;
1032 }
1033
1034 /* normal handler: should release the child lock */
1035 static int mdt_getattr_name(struct mdt_thread_info *info)
1036 {
1037         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1038         struct mdt_body        *reqbody;
1039         struct mdt_body        *repbody;
1040         int rc;
1041         ENTRY;
1042
1043         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1044         LASSERT(reqbody != NULL);
1045         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1046         LASSERT(repbody != NULL);
1047
1048         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1049         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1050         repbody->eadatasize = 0;
1051         repbody->aclsize = 0;
1052
1053         rc = mdt_init_ucred(info, reqbody);
1054         if (unlikely(rc))
1055                 GOTO(out_shrink, rc);
1056
1057         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1058         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1059                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1060                 lhc->mlh_reg_lh.cookie = 0;
1061         }
1062         mdt_exit_ucred(info);
1063         EXIT;
1064 out_shrink:
1065         mdt_shrink_reply(info);
1066         return rc;
1067 }
1068
1069 static const struct lu_device_operations mdt_lu_ops;
1070
1071 static int lu_device_is_mdt(struct lu_device *d)
1072 {
1073         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1074 }
1075
1076 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1077                          void *karg, void *uarg);
1078
1079 static int mdt_set_info(struct mdt_thread_info *info)
1080 {
1081         struct ptlrpc_request *req = mdt_info_req(info);
1082         char *key;
1083         void *val;
1084         int keylen, vallen, rc = 0;
1085         ENTRY;
1086
1087         rc = req_capsule_server_pack(info->mti_pill);
1088         if (rc)
1089                 RETURN(rc);
1090
1091         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1092         if (key == NULL) {
1093                 DEBUG_REQ(D_HA, req, "no set_info key");
1094                 RETURN(-EFAULT);
1095         }
1096
1097         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1098                                       RCL_CLIENT);
1099
1100         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1101         if (val == NULL) {
1102                 DEBUG_REQ(D_HA, req, "no set_info val");
1103                 RETURN(-EFAULT);
1104         }
1105
1106         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1107                                       RCL_CLIENT);
1108
1109         /* Swab any part of val you need to here */
1110         if (KEY_IS(KEY_READ_ONLY)) {
1111                 req->rq_status = 0;
1112                 lustre_msg_set_status(req->rq_repmsg, 0);
1113
1114                 spin_lock(&req->rq_export->exp_lock);
1115                 if (*(__u32 *)val)
1116                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1117                 else
1118                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1119                 spin_unlock(&req->rq_export->exp_lock);
1120
1121         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1122                 struct changelog_setinfo *cs =
1123                         (struct changelog_setinfo *)val;
1124                 if (vallen != sizeof(*cs)) {
1125                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1126                         RETURN(-EINVAL);
1127                 }
1128                 if (ptlrpc_req_need_swab(req)) {
1129                         __swab64s(&cs->cs_recno);
1130                         __swab32s(&cs->cs_id);
1131                 }
1132
1133                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1134                                    vallen, val, NULL);
1135                 lustre_msg_set_status(req->rq_repmsg, rc);
1136
1137         } else {
1138                 RETURN(-EINVAL);
1139         }
1140         RETURN(0);
1141 }
1142
1143 static int mdt_connect(struct mdt_thread_info *info)
1144 {
1145         int rc;
1146         struct ptlrpc_request *req;
1147
1148         req = mdt_info_req(info);
1149         rc = target_handle_connect(req);
1150         if (rc == 0) {
1151                 LASSERT(req->rq_export != NULL);
1152                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1153                 rc = mdt_init_sec_level(info);
1154                 if (rc == 0)
1155                         rc = mdt_init_idmap(info);
1156                 if (rc != 0)
1157                         obd_disconnect(class_export_get(req->rq_export));
1158         } else {
1159                 rc = err_serious(rc);
1160         }
1161         return rc;
1162 }
1163
1164 static int mdt_disconnect(struct mdt_thread_info *info)
1165 {
1166         int rc;
1167         ENTRY;
1168
1169         rc = target_handle_disconnect(mdt_info_req(info));
1170         if (rc)
1171                 rc = err_serious(rc);
1172         RETURN(rc);
1173 }
1174
1175 static int mdt_sendpage(struct mdt_thread_info *info,
1176                         struct lu_rdpg *rdpg)
1177 {
1178         struct ptlrpc_request   *req = mdt_info_req(info);
1179         struct ptlrpc_bulk_desc *desc;
1180         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1181         int                      tmpcount;
1182         int                      tmpsize;
1183         int                      timeout;
1184         int                      i;
1185         int                      rc;
1186         ENTRY;
1187
1188         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1189                                     MDS_BULK_PORTAL);
1190         if (desc == NULL)
1191                 RETURN(-ENOMEM);
1192
1193         for (i = 0, tmpcount = rdpg->rp_count;
1194                 i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
1195                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1196                 ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
1197         }
1198
1199         LASSERT(desc->bd_nob == rdpg->rp_count);
1200         rc = sptlrpc_svc_wrap_bulk(req, desc);
1201         if (rc)
1202                 GOTO(free_desc, rc);
1203
1204         rc = ptlrpc_start_bulk_transfer(desc);
1205         if (rc)
1206                 GOTO(free_desc, rc);
1207
1208         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1209                 GOTO(abort_bulk, rc = 0);
1210
1211         timeout = (int) req->rq_deadline - cfs_time_current_sec();
1212         if (timeout < 0)
1213                 CERROR("Req deadline already passed %lu (now: %lu)\n",
1214                        req->rq_deadline, cfs_time_current_sec());
1215         *lwi = LWI_TIMEOUT(cfs_time_seconds(max(timeout, 1)), NULL, NULL);
1216         rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc), lwi);
1217         LASSERT (rc == 0 || rc == -ETIMEDOUT);
1218
1219         if (rc == 0) {
1220                 if (desc->bd_success &&
1221                     desc->bd_nob_transferred == rdpg->rp_count)
1222                         GOTO(free_desc, rc);
1223
1224                 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
1225         }
1226
1227         DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
1228                   (rc == -ETIMEDOUT) ? "timeout" : "network error",
1229                   desc->bd_nob_transferred, rdpg->rp_count,
1230                   req->rq_export->exp_client_uuid.uuid,
1231                   req->rq_export->exp_connection->c_remote_uuid.uuid);
1232
1233         class_fail_export(req->rq_export);
1234
1235         EXIT;
1236 abort_bulk:
1237         ptlrpc_abort_bulk(desc);
1238 free_desc:
1239         ptlrpc_free_bulk(desc);
1240         return rc;
1241 }
1242
1243 #ifdef HAVE_SPLIT_SUPPORT
1244 /*
1245  * Retrieve dir entries from the page and insert them into the slave object.
1246  * This really belongs in the osd layer, but since it will not be in the final
1247  * product, just do it here rather than defining more moo APIs for it.
1248  */
1249 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1250                               int size)
1251 {
1252         struct mdt_object *object = info->mti_object;
1253         struct lu_fid *lf = &info->mti_tmp_fid2;
1254         struct md_attr *ma = &info->mti_attr;
1255         struct lu_dirpage *dp;
1256         struct lu_dirent *ent;
1257         int rc = 0, offset = 0;
1258         ENTRY;
1259
1260         /* Make sure we have at least one entry. */
1261         if (size == 0)
1262                 RETURN(-EINVAL);
1263
1264         /*
1265          * Disable transactions for these name inserts, since the page write
1266          * would otherwise involve many separate transactions.
1267          */
1268         info->mti_no_need_trans = 1;
1269         /*
1270          * When writing a dir page, there is no need to update the parent's
1271          * ctime, and no permission check is done for name_insert.
1272          */
1273         ma->ma_attr.la_ctime = 0;
1274         ma->ma_attr.la_valid = LA_MODE;
1275         ma->ma_valid = MA_INODE;
1276
1277         cfs_kmap(page);
1278         dp = page_address(page);
1279         offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
1280
1281         for (ent = lu_dirent_start(dp); ent != NULL;
1282              ent = lu_dirent_next(ent)) {
1283                 struct lu_name *lname;
1284                 char *name;
1285
1286                 if (le16_to_cpu(ent->lde_namelen) == 0)
1287                         continue;
1288
1289                 fid_le_to_cpu(lf, &ent->lde_fid);
1290                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1291                         ma->ma_attr.la_mode = S_IFDIR;
1292                 else
1293                         ma->ma_attr.la_mode = 0;
1294                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1295                 if (name == NULL)
1296                         GOTO(out, rc = -ENOMEM);
1297
1298                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1299                 lname = mdt_name(info->mti_env, name,
1300                                  le16_to_cpu(ent->lde_namelen));
1301                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1302                 rc = mdo_name_insert(info->mti_env,
1303                                      md_object_next(&object->mot_obj),
1304                                      lname, lf, ma);
1305                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1306                 if (rc) {
1307                         CERROR("Can't insert %*.*s, rc %d\n",
1308                                le16_to_cpu(ent->lde_namelen),
1309                                le16_to_cpu(ent->lde_namelen),
1310                                ent->lde_name, rc);
1311                         GOTO(out, rc);
1312                 }
1313
1314                 offset += lu_dirent_size(ent);
1315                 if (offset >= size)
1316                         break;
1317         }
1318         EXIT;
1319 out:
1320         cfs_kunmap(page);
1321         return rc;
1322 }
1323
1324 static int mdt_bulk_timeout(void *data)
1325 {
1326         ENTRY;
1327
1328         CERROR("mdt bulk transfer timeout\n");
1329
1330         RETURN(1);
1331 }
1332
1333 static int mdt_writepage(struct mdt_thread_info *info)
1334 {
1335         struct ptlrpc_request   *req = mdt_info_req(info);
1336         struct mdt_body         *reqbody;
1337         struct l_wait_info      *lwi;
1338         struct ptlrpc_bulk_desc *desc;
1339         struct page             *page;
1340         int                rc;
1341         ENTRY;
1342
1343
1344         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1345         if (reqbody == NULL)
1346                 RETURN(err_serious(-EFAULT));
1347
1348         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1349         if (desc == NULL)
1350                 RETURN(err_serious(-ENOMEM));
1351
1352         /* allocate the page for the desc */
1353         page = cfs_alloc_page(CFS_ALLOC_STD);
1354         if (page == NULL)
1355                 GOTO(desc_cleanup, rc = -ENOMEM);
1356
1357         CDEBUG(D_INFO, "Received page offset %d size %d \n",
1358                (int)reqbody->size, (int)reqbody->nlink);
1359
1360         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1361                               (int)reqbody->nlink);
1362
1363         rc = sptlrpc_svc_prep_bulk(req, desc);
1364         if (rc != 0)
1365                 GOTO(cleanup_page, rc);
1366         /*
1367          * Check if client was evicted while we were doing i/o before touching
1368          * network.
1369          */
1370         OBD_ALLOC_PTR(lwi);
1371         if (!lwi)
1372                 GOTO(cleanup_page, rc = -ENOMEM);
1373
1374         if (desc->bd_export->exp_failed)
1375                 rc = -ENOTCONN;
1376         else
1377                 rc = ptlrpc_start_bulk_transfer (desc);
1378         if (rc == 0) {
1379                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ,
1380                                             mdt_bulk_timeout, desc);
1381                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1382                                   desc->bd_export->exp_failed, lwi);
1383                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1384                 if (rc == -ETIMEDOUT) {
1385                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1386                         ptlrpc_abort_bulk(desc);
1387                 } else if (desc->bd_export->exp_failed) {
1388                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1389                         rc = -ENOTCONN;
1390                         ptlrpc_abort_bulk(desc);
1391                 } else if (!desc->bd_success ||
1392                            desc->bd_nob_transferred != desc->bd_nob) {
1393                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1394                                   desc->bd_success ?
1395                                   "truncated" : "network error on",
1396                                   desc->bd_nob_transferred, desc->bd_nob);
1397                         /* XXX should this be a different errno? */
1398                         rc = -ETIMEDOUT;
1399                 }
1400         } else {
1401                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1402         }
1403         if (rc)
1404                 GOTO(cleanup_lwi, rc);
1405         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1406
1407 cleanup_lwi:
1408         OBD_FREE_PTR(lwi);
1409 cleanup_page:
1410         cfs_free_page(page);
1411 desc_cleanup:
1412         ptlrpc_free_bulk(desc);
1413         RETURN(rc);
1414 }
1415 #endif
1416
1417 static int mdt_readpage(struct mdt_thread_info *info)
1418 {
1419         struct mdt_object *object = info->mti_object;
1420         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1421         struct mdt_body   *reqbody;
1422         struct mdt_body   *repbody;
1423         int                rc;
1424         int                i;
1425         ENTRY;
1426
1427         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1428                 RETURN(err_serious(-ENOMEM));
1429
1430         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1431         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1432         if (reqbody == NULL || repbody == NULL)
1433                 RETURN(err_serious(-EFAULT));
1434
1435         /*
1436          * Prepare @rdpg before calling the lower layers and the transfer
1437          * itself. Here reqbody->size contains the offset at which to start
1438          * reading and reqbody->nlink contains the number of bytes to read.
1439          */
1440         rdpg->rp_hash = reqbody->size;
1441         if (rdpg->rp_hash != reqbody->size) {
1442                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1443                        rdpg->rp_hash, reqbody->size);
1444                 RETURN(-EFAULT);
1445         }
1446
1447         rdpg->rp_attrs = reqbody->mode;
1448         rdpg->rp_count  = reqbody->nlink;
1449         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT;
1450         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1451         if (rdpg->rp_pages == NULL)
1452                 RETURN(-ENOMEM);
1453
1454         for (i = 0; i < rdpg->rp_npages; ++i) {
1455                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1456                 if (rdpg->rp_pages[i] == NULL)
1457                         GOTO(free_rdpg, rc = -ENOMEM);
1458         }
1459
1460         /* call lower layers to fill allocated pages with directory data */
1461         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1462         if (rc)
1463                 GOTO(free_rdpg, rc);
1464
1465         /* send pages to client */
1466         rc = mdt_sendpage(info, rdpg);
1467
1468         EXIT;
1469 free_rdpg:
1470
1471         for (i = 0; i < rdpg->rp_npages; i++)
1472                 if (rdpg->rp_pages[i] != NULL)
1473                         cfs_free_page(rdpg->rp_pages[i]);
1474         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1475
1476         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1477                 RETURN(0);
1478
1479         return rc;
1480 }
1481
1482 static int mdt_reint_internal(struct mdt_thread_info *info,
1483                               struct mdt_lock_handle *lhc,
1484                               __u32 op)
1485 {
1486         struct req_capsule      *pill = info->mti_pill;
1487         struct mdt_device       *mdt = info->mti_mdt;
1488         struct mdt_body         *repbody;
1489         int                      rc = 0;
1490         ENTRY;
1491
1492         /* pack reply */
1493         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1494                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1495                                      mdt->mdt_max_mdsize);
1496         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1497                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1498                                      mdt->mdt_max_cookiesize);
1499
1500         rc = req_capsule_server_pack(pill);
1501         if (rc != 0) {
1502                 CERROR("Can't pack response, rc %d\n", rc);
1503                 RETURN(err_serious(rc));
1504         }
1505
1506         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1507                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1508                 LASSERT(repbody);
1509                 repbody->eadatasize = 0;
1510                 repbody->aclsize = 0;
1511         }
1512
1513         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
1514                 GOTO(out_shrink, rc = err_serious(-EFAULT));
1515
1516         rc = mdt_reint_unpack(info, op);
1517         if (rc != 0) {
1518                 CERROR("Can't unpack reint, rc %d\n", rc);
1519                 GOTO(out_shrink, rc = err_serious(rc));
1520         }
1521
1522         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1523
1524         /* For replay no cookie / lmm is needed; client already has them. */
1525         if (info->mti_spec.no_create == 1)  {
1526                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1527                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1528
1529                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1530                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1531                                              0);
1532         }
1533
1534         rc = mdt_init_ucred_reint(info);
1535         if (rc)
1536                 GOTO(out_shrink, rc);
1537
1538         rc = mdt_fix_attr_ucred(info, op);
1539         if (rc != 0)
1540                 GOTO(out_ucred, rc = err_serious(rc));
1541
1542         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1543                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1544                 GOTO(out_ucred, rc);
1545         }
1546         rc = mdt_reint_rec(info, lhc);
1547         EXIT;
1548 out_ucred:
1549         mdt_exit_ucred(info);
1550 out_shrink:
1551         mdt_shrink_reply(info);
1552         return rc;
1553 }
1554
1555 static long mdt_reint_opcode(struct mdt_thread_info *info,
1556                              const struct req_format **fmt)
1557 {
1558         struct mdt_rec_reint *rec;
1559         long opc;
1560
1561         opc = err_serious(-EFAULT);
1562         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1563         if (rec != NULL) {
1564                 opc = rec->rr_opcode;
1565                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1566                 if (opc < REINT_MAX && fmt[opc] != NULL)
1567                         req_capsule_extend(info->mti_pill, fmt[opc]);
1568                 else {
1569                         CERROR("Unsupported opc: %ld\n", opc);
1570                         opc = err_serious(opc);
1571                 }
1572         }
1573         return opc;
1574 }
1575
1576 static int mdt_reint(struct mdt_thread_info *info)
1577 {
1578         long opc;
1579         int  rc;
1580
1581         static const struct req_format *reint_fmts[REINT_MAX] = {
1582                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1583                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1584                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1585                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1586                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1587                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1588                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1589         };
1590
1591         ENTRY;
1592
1593         opc = mdt_reint_opcode(info, reint_fmts);
1594         if (opc >= 0) {
1595                 /*
1596                  * No lock possible here from client to pass it to reint code
1597                  * path.
1598                  */
1599                 rc = mdt_reint_internal(info, NULL, opc);
1600         } else {
1601                 rc = opc;
1602         }
1603
1604         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1605         RETURN(rc);
1606 }
1607
1608 /* this should sync the whole device */
1609 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1610 {
1611         struct dt_device *dt = mdt->mdt_bottom;
1612         int rc;
1613         ENTRY;
1614
1615         rc = dt->dd_ops->dt_sync(env, dt);
1616         RETURN(rc);
1617 }
1618
1619 /* this should sync this object */
1620 static int mdt_object_sync(struct mdt_thread_info *info)
1621 {
1622         struct md_object *next;
1623         int rc;
1624         ENTRY;
1625
1626         if (!mdt_object_exists(info->mti_object)) {
1627                 CWARN("Non-existent object "DFID"!\n",
1628                       PFID(mdt_object_fid(info->mti_object)));
1629                 RETURN(-ESTALE);
1630         }
1631         next = mdt_object_child(info->mti_object);
1632         rc = mo_object_sync(info->mti_env, next);
1633
1634         RETURN(rc);
1635 }
1636
1637 static int mdt_sync(struct mdt_thread_info *info)
1638 {
1639         struct req_capsule *pill = info->mti_pill;
1640         struct mdt_body *body;
1641         int rc;
1642         ENTRY;
1643
1644         /* The fid may be zero, so we call req_capsule_set() manually. */
1645         req_capsule_set(pill, &RQF_MDS_SYNC);
1646
1647         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1648         if (body == NULL)
1649                 RETURN(err_serious(-EINVAL));
1650
1651         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1652                 RETURN(err_serious(-ENOMEM));
1653
1654         if (fid_seq(&body->fid1) == 0) {
1655                 /* sync the whole device */
1656                 rc = req_capsule_server_pack(pill);
1657                 if (rc == 0)
1658                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1659                 else
1660                         rc = err_serious(rc);
1661         } else {
1662                 /* sync an object */
1663                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1664                 if (rc == 0) {
1665                         rc = mdt_object_sync(info);
1666                         if (rc == 0) {
1667                                 struct md_object *next;
1668                                 const struct lu_fid *fid;
1669                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1670
1671                                 next = mdt_object_child(info->mti_object);
1672                                 info->mti_attr.ma_need = MA_INODE;
1673                                 info->mti_attr.ma_valid = 0;
1674                                 rc = mo_attr_get(info->mti_env, next,
1675                                                  &info->mti_attr);
1676                                 if (rc == 0) {
1677                                         body = req_capsule_server_get(pill,
1678                                                                 &RMF_MDT_BODY);
1679                                         fid = mdt_object_fid(info->mti_object);
1680                                         mdt_pack_attr2body(info, body, la, fid);
1681                                 }
1682                         }
1683                 } else
1684                         rc = err_serious(rc);
1685         }
1686         RETURN(rc);
1687 }
1688
1689 #ifdef HAVE_QUOTA_SUPPORT
1690 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
1691 {
1692         struct obd_quotactl *oqctl;
1693         struct req_capsule *pill = info->mti_pill;
1694         struct obd_export *exp = info->mti_exp;
1695         struct md_device *next = info->mti_mdt->mdt_child;
1696         int rc;
1697         ENTRY;
1698
1699         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1700         if (oqctl == NULL)
1701                 RETURN(-EPROTO);
1702
1703         /* remote client has no permission for quotacheck */
1704         if (unlikely(exp_connect_rmtclient(exp)))
1705                 RETURN(-EPERM);
1706
1707         rc = req_capsule_server_pack(pill);
1708         if (rc)
1709                 RETURN(rc);
1710
1711         rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next, exp,
1712                                                oqctl->qc_type);
1713         RETURN(rc);
1714 }
1715
1716 static int mdt_quotactl_handle(struct mdt_thread_info *info)
1717 {
1718         struct obd_quotactl *oqctl, *repoqc;
1719         struct req_capsule *pill = info->mti_pill;
1720         struct obd_export *exp = info->mti_exp;
1721         struct md_device *next = info->mti_mdt->mdt_child;
1722         const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
1723         int id, rc;
1724         ENTRY;
1725
1726         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1727         if (oqctl == NULL)
1728                 RETURN(-EPROTO);
1729
1730         id = oqctl->qc_id;
1731         if (exp_connect_rmtclient(exp)) {
1732                 struct ptlrpc_request *req = mdt_info_req(info);
1733                 struct mdt_export_data *med = mdt_req2med(req);
1734                 struct lustre_idmap_table *idmap = med->med_idmap;
1735
1736                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1737                              oqctl->qc_cmd != Q_GETINFO))
1738                         RETURN(-EPERM);
1739
1740
1741                 if (oqctl->qc_type == USRQUOTA)
1742                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1743                                                      oqctl->qc_id);
1744                 else if (oqctl->qc_type == GRPQUOTA)
1745                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1746                                                      oqctl->qc_id);
1747                 else
1748                         RETURN(-EINVAL);
1749
1750                 if (id == CFS_IDMAP_NOTFOUND) {
1751                         CDEBUG(D_QUOTA, "no mapping for id %u\n",
1752                                oqctl->qc_id);
1753                         RETURN(-EACCES);
1754                 }
1755         }
1756
1757         rc = req_capsule_server_pack(pill);
1758         if (rc)
1759                 RETURN(rc);
1760
1761         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
1762         LASSERT(repoqc != NULL);
1763
1764         switch (oqctl->qc_cmd) {
1765         case Q_QUOTAON:
1766                 rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
1767                 break;
1768         case Q_QUOTAOFF:
1769                 rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type);
1770                 break;
1771         case Q_SETINFO:
1772                 rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
1773                                       &oqctl->qc_dqinfo);
1774                 break;
1775         case Q_GETINFO:
1776                 rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
1777                                       &oqctl->qc_dqinfo);
1778                 break;
1779         case Q_SETQUOTA:
1780                 rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
1781                                        &oqctl->qc_dqblk);
1782                 break;
1783         case Q_GETQUOTA:
1784                 rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
1785                                        &oqctl->qc_dqblk);
1786                 break;
1787         case Q_GETOINFO:
1788                 rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
1789                                        &oqctl->qc_dqinfo);
1790                 break;
1791         case Q_GETOQUOTA:
1792                 rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
1793                                         &oqctl->qc_dqblk);
1794                 break;
1795         case LUSTRE_Q_INVALIDATE:
1796                 rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
1797                 break;
1798         case LUSTRE_Q_FINVALIDATE:
1799                 rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
1800                 break;
1801         default:
1802                 CERROR("unsupported mdt_quotactl command: %d\n",
1803                        oqctl->qc_cmd);
1804                 RETURN(-EFAULT);
1805         }
1806
1807         *repoqc = *oqctl;
1808         RETURN(rc);
1809 }
1810 #endif
1811
1812
1813 /*
1814  * OBD PING and other handlers.
1815  */
1816 static int mdt_obd_ping(struct mdt_thread_info *info)
1817 {
1818         int rc;
1819         ENTRY;
1820
1821         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
1822
1823         rc = target_handle_ping(mdt_info_req(info));
1824         if (rc < 0)
1825                 rc = err_serious(rc);
1826         RETURN(rc);
1827 }
1828
1829 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
1830 {
1831         return err_serious(-EOPNOTSUPP);
1832 }
1833
1834 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
1835 {
1836         return err_serious(-EOPNOTSUPP);
1837 }
1838
1839
1840 /*
1841  * LLOG handlers.
1842  */
1843
1844 /** Clone llog ctxt from child (mdd).
1845  * This allows remote llog (replicator) access.
1846  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child,
1847  * where the context was originally set up, or we can handle them directly.
1848  * We choose the latter, but that means any llog contexts set up by the
1849  * child must be accessible by the mdt.  So we clone the context into our
1850  * context list here.
1851  */
1852 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
1853                                int idx)
1854 {
1855         struct md_device  *next = mdt->mdt_child;
1856         struct llog_ctxt *ctxt;
1857         int rc;
1858
1859         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
1860                 return 0;
1861
1862         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
1863         if (rc || ctxt == NULL) {
1864                 CERROR("Can't get mdd ctxt %d\n", rc);
1865                 return rc;
1866         }
1867
1868         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
1869         if (rc)
1870                 CERROR("Can't set mdt ctxt %d\n", rc);
1871
1872         return rc;
1873 }
1874
1875 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
1876                                  struct mdt_device *mdt, int idx)
1877 {
1878         struct llog_ctxt *ctxt;
1879
1880         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
1881         if (ctxt == NULL)
1882                 return 0;
1883         /* Put once for the get we just did, and once for the clone */
1884         llog_ctxt_put(ctxt);
1885         llog_ctxt_put(ctxt);
1886         return 0;
1887 }
1888
1889 static int mdt_llog_create(struct mdt_thread_info *info)
1890 {
1891         int rc;
1892
1893         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
1894         rc = llog_origin_handle_create(mdt_info_req(info));
1895         return (rc < 0 ? err_serious(rc) : rc);
1896 }
1897
1898 static int mdt_llog_destroy(struct mdt_thread_info *info)
1899 {
1900         int rc;
1901
1902         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
1903         rc = llog_origin_handle_destroy(mdt_info_req(info));
1904         return (rc < 0 ? err_serious(rc) : rc);
1905 }
1906
1907 static int mdt_llog_read_header(struct mdt_thread_info *info)
1908 {
1909         int rc;
1910
1911         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
1912         rc = llog_origin_handle_read_header(mdt_info_req(info));
1913         return (rc < 0 ? err_serious(rc) : rc);
1914 }
1915
1916 static int mdt_llog_next_block(struct mdt_thread_info *info)
1917 {
1918         int rc;
1919
1920         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
1921         rc = llog_origin_handle_next_block(mdt_info_req(info));
1922         return (rc < 0 ? err_serious(rc) : rc);
1923 }
1924
1925 static int mdt_llog_prev_block(struct mdt_thread_info *info)
1926 {
1927         int rc;
1928
1929         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
1930         rc = llog_origin_handle_prev_block(mdt_info_req(info));
1931         return (rc < 0 ? err_serious(rc) : rc);
1932 }
1933
1934
1935 /*
1936  * DLM handlers.
1937  */
1938 static struct ldlm_callback_suite cbs = {
1939         .lcs_completion = ldlm_server_completion_ast,
1940         .lcs_blocking   = ldlm_server_blocking_ast,
1941         .lcs_glimpse    = NULL
1942 };
1943
1944 static int mdt_enqueue(struct mdt_thread_info *info)
1945 {
1946         struct ptlrpc_request *req;
1947         __u64 req_bits;
1948         int rc;
1949
1950         /*
1951          * info->mti_dlm_req already contains swapped and (if necessary)
1952          * converted dlm request.
1953          */
1954         LASSERT(info->mti_dlm_req != NULL);
1955
1956         req = mdt_info_req(info);
1957
1958         /*
1959          * A lock without inodebits makes no sense and will oops later in
1960          * ldlm. Check it now to see whether we got a wrong lock from the
1961          * client or the bits got corrupted somewhere in mdt_intent_policy().
1962          */
1963         req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1964         /* This is disabled because we need to support liblustre flock.
1965          * LASSERT(req_bits != 0);
1966          */
1967
1968         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
1969                                   req, info->mti_dlm_req, &cbs);
1970         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
1971         return rc ? err_serious(rc) : req->rq_status;
1972 }
1973
1974 static int mdt_convert(struct mdt_thread_info *info)
1975 {
1976         int rc;
1977         struct ptlrpc_request *req;
1978
1979         LASSERT(info->mti_dlm_req);
1980         req = mdt_info_req(info);
1981         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
1982         return rc ? err_serious(rc) : req->rq_status;
1983 }
1984
1985 static int mdt_bl_callback(struct mdt_thread_info *info)
1986 {
1987         CERROR("bl callbacks should not happen on MDS\n");
1988         LBUG();
1989         return err_serious(-EOPNOTSUPP);
1990 }
1991
1992 static int mdt_cp_callback(struct mdt_thread_info *info)
1993 {
1994         CERROR("cp callbacks should not happen on MDS\n");
1995         LBUG();
1996         return err_serious(-EOPNOTSUPP);
1997 }
1998
1999 /*
2000  * sec context handlers
2001  */
2002 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2003 {
2004         int rc;
2005
2006         rc = mdt_handle_idmap(info);
2007
2008         if (unlikely(rc)) {
2009                 struct ptlrpc_request *req = mdt_info_req(info);
2010                 __u32                  opc;
2011
2012                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2013                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2014                         sptlrpc_svc_ctx_invalidate(req);
2015         }
2016
2017         OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);
2018
2019         return rc;
2020 }
2021
2022 static struct mdt_object *mdt_obj(struct lu_object *o)
2023 {
2024         LASSERT(lu_device_is_mdt(o->lo_dev));
2025         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2026 }
2027
2028 struct mdt_object *mdt_object_find(const struct lu_env *env,
2029                                    struct mdt_device *d,
2030                                    const struct lu_fid *f)
2031 {
2032         struct lu_object *o;
2033         struct mdt_object *m;
2034         ENTRY;
2035
2036         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2037         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2038         if (unlikely(IS_ERR(o)))
2039                 m = (struct mdt_object *)o;
2040         else
2041                 m = mdt_obj(o);
2042         RETURN(m);
2043 }
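/*
 * Illustrative sketch only (not part of the original source and not
 * compiled): the usual caller pattern for mdt_object_find() together with
 * mdt_object_put(), assuming a thread info with valid mti_env/mti_mdt and
 * a fid supplied by the caller.
 */
#if 0
static int example_find_and_put(struct mdt_thread_info *info,
                                const struct lu_fid *fid)
{
        struct mdt_object *o;

        o = mdt_object_find(info->mti_env, info->mti_mdt, fid);
        if (IS_ERR(o))
                return PTR_ERR(o);      /* lookup failed */

        /* ... use the object here, e.g. mdt_object_exists(o) ... */

        mdt_object_put(info->mti_env, o);
        return 0;
}
#endif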
2044
2045 /**
2046  * Asynchronous commit for mdt device.
2047  *
2048  * Pass the asynchronous commit call down the MDS stack.
2049  *
2050  * \param env environment
2051  * \param mdt the mdt device
2052  */
2053 static void mdt_device_commit_async(const struct lu_env *env,
2054                                     struct mdt_device *mdt)
2055 {
2056         struct dt_device *dt = mdt->mdt_bottom;
2057         int rc;
2058
2059         rc = dt->dd_ops->dt_commit_async(env, dt);
2060         if (unlikely(rc != 0))
2061                 CWARN("async commit start failed with rc = %d\n", rc);
2062 }
2063
2064 /**
2065  * Mark the lock as "synchronous".
2066  *
2067  * Mark the lock to defer the transaction commit to unlock time.
2068  *
2069  * \param lock the lock to mark as "synchronous"
2070  *
2071  * \see mdt_is_lock_sync
2072  * \see mdt_save_lock
2073  */
2074 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2075 {
2076         lock->l_ast_data = (void*)1;
2077 }
2078
2079 /**
2080  * Check whether the lock is "synchronous" or not.
2081  *
2082  * \param lock the lock to check
2083  * \retval 1 the lock is "synchronous"
2084  * \retval 0 the lock isn't "synchronous"
2085  *
2086  * \see mdt_set_lock_sync
2087  * \see mdt_save_lock
2088  */
2089 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2090 {
2091         return lock->l_ast_data != NULL;
2092 }
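/*
 * Illustrative sketch only (not compiled): the intended flow of the "sync"
 * marker above. In this file the marker is set from the blocking AST on a
 * COS conflict and checked in mdt_save_lock(); the sequence below just
 * spells that out for a lock reference the caller already holds.
 */
#if 0
static void example_sync_marker_flow(struct ldlm_lock *lock)
{
        mdt_set_lock_sync(lock);        /* e.g. on a COS lock conflict */

        if (mdt_is_lock_sync(lock)) {
                /* the commit is deferred and started asynchronously when
                 * the lock is finally saved or released */
        }
}
#endif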
2093
2094 /**
2095  * Blocking AST for mdt locks.
2096  *
2097  * Starts a transaction commit in case of a COS lock conflict, or
2098  * defers such a commit to mdt_save_lock().
2099  *
2100  * \param lock the lock which blocks a request or a cancelling lock
2101  * \param desc unused
2102  * \param data unused
2103  * \param flag indicates whether this is a cancelling or a blocking callback
2104  * \retval 0
2105  * \see ldlm_blocking_ast_nocheck
2106  */
2107 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2108                      void *data, int flag)
2109 {
2110         struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
2111         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2112         int rc;
2113         ENTRY;
2114
2115         if (flag == LDLM_CB_CANCELING)
2116                 RETURN(0);
2117         lock_res_and_lock(lock);
2118         if (lock->l_blocking_ast != mdt_blocking_ast) {
2119                 unlock_res_and_lock(lock);
2120                 RETURN(0);
2121         }
2122         if (mdt_cos_is_enabled(mdt) &&
2123             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2124             lock->l_blocking_lock != NULL &&
2125             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2126                 mdt_set_lock_sync(lock);
2127         }
2128         rc = ldlm_blocking_ast_nocheck(lock);
2129
2130         /* There is no lock conflict if l_blocking_lock == NULL; it
2131          * indicates a blocking AST sent from ldlm_lock_decref_internal()
2132          * when the last reference to a local lock was released. */
2133         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2134                 struct lu_env env;
2135
2136                 rc = lu_env_init(&env, LCT_MD_THREAD);
2137                 if (likely(rc == 0)) {
2138                         mdt_device_commit_async(&env, mdt);
2139                         lu_env_fini(&env);
2140                 } else
2141                         CWARN("lu_env initialization failed with rc = %d, "
2142                               "cannot start asynchronous commit\n", rc);
2143         }
2144         RETURN(rc);
2145 }
2146
2147 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2148                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2149 {
2150         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2151         ldlm_policy_data_t *policy = &info->mti_policy;
2152         struct ldlm_res_id *res_id = &info->mti_res_id;
2153         int rc;
2154         ENTRY;
2155
2156         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2157         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2158         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2159         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2160
2161         if (mdt_object_exists(o) < 0) {
2162                 if (locality == MDT_CROSS_LOCK) {
2163                         /* cross-ref object fix */
2164                         ibits &= ~MDS_INODELOCK_UPDATE;
2165                         ibits |= MDS_INODELOCK_LOOKUP;
2166                 } else {
2167                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2168                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2169                 }
2170                 /* No PDO lock on remote object */
2171                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2172         }
2173
2174         if (lh->mlh_type == MDT_PDO_LOCK) {
2175                 /* check for exists after object is locked */
2176                 if (mdt_object_exists(o) == 0) {
2177                         /* Non-existent object shouldn't have PDO lock */
2178                         RETURN(-ESTALE);
2179                 } else {
2180                         /* Non-dir object shouldn't have PDO lock */
2181                         LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
2182                 }
2183         }
2184
2185         memset(policy, 0, sizeof(*policy));
2186         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2187
2188         /*
2189          * Take PDO lock on whole directory and build correct @res_id for lock
2190          * on part of directory.
2191          */
2192         if (lh->mlh_pdo_hash != 0) {
2193                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2194                 mdt_lock_pdo_mode(info, o, lh);
2195                 if (lh->mlh_pdo_mode != LCK_NL) {
2196                         /*
2197                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2198                          * is never going to be sent to client and we do not
2199                          * want it slowed down due to possible cancels.
2200                          */
2201                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2202                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2203                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2204                                           &info->mti_exp->exp_handle.h_cookie);
2205                         if (unlikely(rc))
2206                                 RETURN(rc);
2207                 }
2208
2209                 /*
2210                  * Finish initializing @res_id with the name hash, marking the
2211                  * part of the directory which is being modified.
2212                  */
2213                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2214         }
2215
2216         policy->l_inodebits.bits = ibits;
2217
2218         /*
2219          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2220          * going to be sent to the client. If it is, the mdt_intent_policy()
2221          * path will fix it up and turn the FL_LOCAL flag off.
2222          */
2223         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2224                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2225                           &info->mti_exp->exp_handle.h_cookie);
2226         if (rc)
2227                 GOTO(out, rc);
2228
2229 out:
2230         if (rc)
2231                 mdt_object_unlock(info, o, lh, 1);
2232
2233
2234         RETURN(rc);
2235 }
2236
2237 /**
2238  * Save a lock within request object.
2239  *
2240  * Keep the lock referenced until whether client ACK or transaction
2241  * commit happens or release the lock immediately depending on input
2242  * parameters. If COS is ON, a write lock is converted to COS lock
2243  * before saving.
2244  *
2245  * \param info thead info object
2246  * \param h lock handle
2247  * \param mode lock mode
2248  * \param decref force immediate lock releasing
2249  */
2250 static
2251 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2252                    ldlm_mode_t mode, int decref)
2253 {
2254         ENTRY;
2255
2256         if (lustre_handle_is_used(h)) {
2257                 if (decref || !info->mti_has_trans ||
2258                     !(mode & (LCK_PW | LCK_EX))){
2259                         mdt_fid_unlock(h, mode);
2260                 } else {
2261                         struct mdt_device *mdt = info->mti_mdt;
2262                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2263                         struct ptlrpc_request *req = mdt_info_req(info);
2264                         int no_ack = 0;
2265
2266                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2267                                  h->cookie);
2268                         CDEBUG(D_HA, "request = %p reply state = %p"
2269                                " transno = "LPD64"\n",
2270                                req, req->rq_reply_state, req->rq_transno);
2271                         if (mdt_cos_is_enabled(mdt)) {
2272                                 no_ack = 1;
2273                                 ldlm_lock_downgrade(lock, LCK_COS);
2274                                 mode = LCK_COS;
2275                         }
2276                         ptlrpc_save_lock(req, h, mode, no_ack);
2277                         if (mdt_is_lock_sync(lock)) {
2278                                 CDEBUG(D_HA, "found sync-lock,"
2279                                        " async commit started\n");
2280                                 mdt_device_commit_async(info->mti_env,
2281                                                         mdt);
2282                         }
2283                         LDLM_LOCK_PUT(lock);
2284                 }
2285                 h->cookie = 0ull;
2286         }
2287
2288         EXIT;
2289 }
2290
2291 /**
2292  * Unlock mdt object.
2293  *
2294  * Immeditely release the regular lock and the PDO lock or save the
2295  * lock in reqeuest and keep them referenced until client ACK or
2296  * transaction commit.
2297  *
2298  * \param info thread info object
2299  * \param o mdt object
2300  * \param lh mdt lock handle referencing regular and PDO locks
2301  * \param decref force immediate lock releasing
2302  */
2303 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2304                        struct mdt_lock_handle *lh, int decref)
2305 {
2306         ENTRY;
2307
2308         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2309         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2310
2311         EXIT;
2312 }
2313
2314 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2315                                         const struct lu_fid *f,
2316                                         struct mdt_lock_handle *lh,
2317                                         __u64 ibits)
2318 {
2319         struct mdt_object *o;
2320
2321         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2322         if (!IS_ERR(o)) {
2323                 int rc;
2324
2325                 rc = mdt_object_lock(info, o, lh, ibits,
2326                                      MDT_LOCAL_LOCK);
2327                 if (rc != 0) {
2328                         mdt_object_put(info->mti_env, o);
2329                         o = ERR_PTR(rc);
2330                 }
2331         }
2332         return o;
2333 }
2334
2335 void mdt_object_unlock_put(struct mdt_thread_info * info,
2336                            struct mdt_object * o,
2337                            struct mdt_lock_handle *lh,
2338                            int decref)
2339 {
2340         mdt_object_unlock(info, o, lh, decref);
2341         mdt_object_put(info->mti_env, o);
2342 }
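/*
 * Illustrative sketch only (not compiled): a typical lock-protected access
 * pattern built from the two helpers above, assuming a thread info, a fid
 * and a lock handle whose mode/type have already been set up by the caller.
 */
#if 0
static int example_locked_access(struct mdt_thread_info *info,
                                 const struct lu_fid *fid,
                                 struct mdt_lock_handle *lh)
{
        struct mdt_object *o;

        o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
        if (IS_ERR(o))
                return PTR_ERR(o);

        /* ... operate on the locked object here ... */

        /* decref == 1: drop the locks now instead of saving them */
        mdt_object_unlock_put(info, o, lh, 1);
        return 0;
}
#endif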
2343
2344 static struct mdt_handler *mdt_handler_find(__u32 opc,
2345                                             struct mdt_opc_slice *supported)
2346 {
2347         struct mdt_opc_slice *s;
2348         struct mdt_handler   *h;
2349
2350         h = NULL;
2351         for (s = supported; s->mos_hs != NULL; s++) {
2352                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2353                         h = s->mos_hs + (opc - s->mos_opc_start);
2354                         if (likely(h->mh_opc != 0))
2355                                 LASSERTF(h->mh_opc == opc,
2356                                          "opcode mismatch %d != %d\n",
2357                                          h->mh_opc, opc);
2358                         else
2359                                 h = NULL; /* unsupported opc */
2360                         break;
2361                 }
2362         }
2363         return h;
2364 }
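/*
 * Illustrative sketch only (not compiled): how the slice lookup above is
 * organized. The opcode range and table below are made up for the example;
 * the real slice tables are defined elsewhere in this file.
 */
#if 0
static struct mdt_handler example_hs[2];        /* indexed by opc - mos_opc_start */

static struct mdt_opc_slice example_slices[] = {
        {
                .mos_opc_start = 100,           /* first opcode of the range */
                .mos_opc_end   = 102,           /* one past the last opcode  */
                .mos_hs        = example_hs
        },
        {
                .mos_hs        = NULL           /* terminates the array      */
        }
};

/*
 * mdt_handler_find(101, example_slices) would return &example_hs[1] when
 * example_hs[1].mh_opc == 101, and NULL for opcodes outside [100, 102) or
 * for entries whose mh_opc is left at 0.
 */
#endif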
2365
2366 static int mdt_lock_resname_compat(struct mdt_device *m,
2367                                    struct ldlm_request *req)
2368 {
2369         /* XXX something... later. */
2370         return 0;
2371 }
2372
2373 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2374 {
2375         /* XXX something... later. */
2376         return 0;
2377 }
2378
2379 /*
2380  * Generic code handling requests that have struct mdt_body passed in:
2381  *
2382  *  - extract mdt_body from request and save it in @info, if present;
2383  *
2384  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2385  *  @info;
2386  *
2387  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2388  *  the object actually exists on storage (lu_object_exists()).
2389  *
2390  */
2391 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2392 {
2393         const struct mdt_body    *body;
2394         struct mdt_object        *obj;
2395         const struct lu_env      *env;
2396         struct req_capsule       *pill;
2397         int                       rc;
2398         ENTRY;
2399
2400         env = info->mti_env;
2401         pill = info->mti_pill;
2402
2403         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2404         if (body == NULL)
2405                 RETURN(-EFAULT);
2406
2407         if (!(body->valid & OBD_MD_FLID))
2408                 RETURN(0);
2409
2410         if (!fid_is_sane(&body->fid1)) {
2411                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2412                 RETURN(-EINVAL);
2413         }
2414
2415         /*
2416          * Do not get the size or any capa fields before we check that the
2417          * request actually contains a capa. There are some requests which do
2418          * not, for instance MDS_IS_SUBDIR.
2419          */
2420         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2421             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2422                 mdt_set_capainfo(info, 0, &body->fid1,
2423                                  req_capsule_client_get(pill, &RMF_CAPA1));
2424
2425         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2426         if (!IS_ERR(obj)) {
2427                 if ((flags & HABEO_CORPUS) &&
2428                     !mdt_object_exists(obj)) {
2429                         mdt_object_put(env, obj);
2430                         /* For capability renewal, ENOENT will be handled
2431                          * in mdt_renew_capa(). */
2432                         if (body->valid & OBD_MD_FLOSSCAPA)
2433                                 rc = 0;
2434                         else
2435                                 rc = -ENOENT;
2436                 } else {
2437                         info->mti_object = obj;
2438                         rc = 0;
2439                 }
2440         } else
2441                 rc = PTR_ERR(obj);
2442
2443         RETURN(rc);
2444 }
2445
2446 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2447 {
2448         struct req_capsule *pill = info->mti_pill;
2449         int rc;
2450         ENTRY;
2451
2452         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2453                 rc = mdt_body_unpack(info, flags);
2454         else
2455                 rc = 0;
2456
2457         if (rc == 0 && (flags & HABEO_REFERO)) {
2458                 struct mdt_device *mdt = info->mti_mdt;
2459
2460                 /* Pack reply. */
2461
2462                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2463                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2464                                              mdt->mdt_max_mdsize);
2465                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2466                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
2467                                              mdt->mdt_max_cookiesize);
2468
2469                 rc = req_capsule_server_pack(pill);
2470         }
2471         RETURN(rc);
2472 }
2473
2474 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2475 {
2476         struct md_device *next = m->mdt_child;
2477
2478         return next->md_ops->mdo_init_capa_ctxt(env, next,
2479                                                 m->mdt_opts.mo_mds_capa,
2480                                                 m->mdt_capa_timeout,
2481                                                 m->mdt_capa_alg,
2482                                                 m->mdt_capa_keys);
2483 }
2484
2485 /*
2486  * Invoke handler for this request opc. Also do necessary preprocessing
2487  * (according to handler ->mh_flags), and post-processing (setting of
2488  * ->last_{xid,committed}).
2489  */
2490 static int mdt_req_handle(struct mdt_thread_info *info,
2491                           struct mdt_handler *h, struct ptlrpc_request *req)
2492 {
2493         int   rc, serious = 0;
2494         __u32 flags;
2495
2496         ENTRY;
2497
2498         LASSERT(h->mh_act != NULL);
2499         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2500         LASSERT(current->journal_info == NULL);
2501
2502         /*
2503          * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2504          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2505          * etc., without talking to the mdt authors first. Checking the same
2506          * thing there again is useless, and returning a 0 error without
2507          * packing a reply is buggy! Handlers either pack a reply or return
2508          * an error.
2509          *
2510          * We return 0 here and do not send any reply in order to emulate a
2511          * network failure. Do not send any reply in case any of the NET
2512          * related fail_ids has occurred.
2512          */
2513         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2514                 RETURN(0);
2515
2516         rc = 0;
2517         flags = h->mh_flags;
2518         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2519
2520         if (h->mh_fmt != NULL) {
2521                 req_capsule_set(info->mti_pill, h->mh_fmt);
2522                 rc = mdt_unpack_req_pack_rep(info, flags);
2523         }
2524
2525         if (rc == 0 && flags & MUTABOR &&
2526             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2527                 /* should it be rq_status? */
2528                 rc = -EROFS;
2529
2530         if (rc == 0 && flags & HABEO_CLAVIS) {
2531                 struct ldlm_request *dlm_req;
2532
2533                 LASSERT(h->mh_fmt != NULL);
2534
2535                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2536                 if (dlm_req != NULL) {
2537                         if (info->mti_mdt->mdt_opts.mo_compat_resname)
2538                                 rc = mdt_lock_resname_compat(info->mti_mdt,
2539                                                              dlm_req);
2540                         info->mti_dlm_req = dlm_req;
2541                 } else {
2542                         rc = -EFAULT;
2543                 }
2544         }
2545
2546         /* Capability setting changed via /proc, reinitialize the ctxt. */
2547         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2548                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2549                 info->mti_mdt->mdt_capa_conf = 0;
2550         }
2551
2552         if (likely(rc == 0)) {
2553                 /*
2554                  * Process request, there can be two types of rc:
2555                  * 1) errors with msg unpack/pack, other failures outside the
2556                  * operation itself. This is counted as serious errors;
2557                  * 2) errors during fs operation, should be placed in rq_status
2558                  * only
2559                  */
2560                 rc = h->mh_act(info);
2561                 if (rc == 0 &&
2562                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2563                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2564                                   "pack reply and returned 0 error\n",
2565                                   h->mh_name);
2566                         LBUG();
2567                 }
2568                 serious = is_serious(rc);
2569                 rc = clear_serious(rc);
2570         } else
2571                 serious = 1;
2572
2573         req->rq_status = rc;
2574
2575         /*
2576          * ELDLM_* codes which > 0 should be in rq_status only as well as
2577          * all non-serious errors.
2578          */
2579         if (rc > 0 || !serious)
2580                 rc = 0;
2581
2582         LASSERT(current->journal_info == NULL);
2583
2584         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2585             info->mti_mdt->mdt_opts.mo_compat_resname) {
2586                 struct ldlm_reply *dlmrep;
2587
2588                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2589                 if (dlmrep != NULL)
2590                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2591         }
2592
2593         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2594         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
2595                 target_committed_to_req(req);
2596
2597         if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
2598                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2599                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2600                 LBUG();
2601         }
2602
2603         target_send_reply(req, rc, info->mti_fail_id);
2604         RETURN(0);
2605 }
2606
2607 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2608 {
2609         lh->mlh_type = MDT_NUL_LOCK;
2610         lh->mlh_reg_lh.cookie = 0ull;
2611         lh->mlh_reg_mode = LCK_MINMODE;
2612         lh->mlh_pdo_lh.cookie = 0ull;
2613         lh->mlh_pdo_mode = LCK_MINMODE;
2614 }
2615
2616 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2617 {
2618         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2619         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2620 }
2621
2622 /*
2623  * Initialize fields of struct mdt_thread_info. Other fields are left in an
2624  * uninitialized state, because it is too expensive to zero out the whole
2625  * mdt_thread_info (> 1K) on each request arrival.
2626  */
2627 static void mdt_thread_info_init(struct ptlrpc_request *req,
2628                                  struct mdt_thread_info *info)
2629 {
2630         int i;
2631         struct md_capainfo *ci;
2632
2633         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2634         info->mti_pill = &req->rq_pill;
2635
2636         /* lock handle */
2637         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2638                 mdt_lock_handle_init(&info->mti_lh[i]);
2639
2640         /* mdt device: it can be NULL during CONNECT */
2641         if (req->rq_export) {
2642                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2643                 info->mti_exp = req->rq_export;
2644         } else
2645                 info->mti_mdt = NULL;
2646         info->mti_env = req->rq_svc_thread->t_env;
2647         ci = md_capainfo(info->mti_env);
2648         memset(ci, 0, sizeof *ci);
2649         if (req->rq_export) {
2650                 if (exp_connect_rmtclient(req->rq_export))
2651                         ci->mc_auth = LC_ID_CONVERT;
2652                 else if (req->rq_export->exp_connect_flags &
2653                          OBD_CONNECT_MDS_CAPA)
2654                         ci->mc_auth = LC_ID_PLAIN;
2655                 else
2656                         ci->mc_auth = LC_ID_NONE;
2657         }
2658
2659         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
2660         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
2661         info->mti_mos[0] = NULL;
2662         info->mti_mos[1] = NULL;
2663         info->mti_mos[2] = NULL;
2664         info->mti_mos[3] = NULL;
2665
2666         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
2667         info->mti_body = NULL;
2668         info->mti_object = NULL;
2669         info->mti_dlm_req = NULL;
2670         info->mti_has_trans = 0;
2671         info->mti_no_need_trans = 0;
2672         info->mti_cross_ref = 0;
2673         info->mti_opdata = 0;
2674
2675         /* Do not check for split by default. */
2676         info->mti_spec.sp_ck_split = 0;
2677         info->mti_spec.no_create = 0;
2678 }
2679
2680 static void mdt_thread_info_fini(struct mdt_thread_info *info)
2681 {
2682         int i;
2683
2684         req_capsule_fini(info->mti_pill);
2685         if (info->mti_object != NULL) {
2686                 /*
2687                  * Freeing an object may lead to an OSD-level transaction; do
2688                  * not let it mess with the MDT. bz19385.
2689                  */
2690                 info->mti_no_need_trans = 1;
2691                 mdt_object_put(info->mti_env, info->mti_object);
2692                 info->mti_object = NULL;
2693         }
2694         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2695                 mdt_lock_handle_fini(&info->mti_lh[i]);
2696         info->mti_env = NULL;
2697 }
2698
2699 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
2700                                        struct obd_device *obd, int *process)
2701 {
2702         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2703         case MDS_CONNECT: /* This will never get here, but for completeness. */
2704         case OST_CONNECT: /* This will never get here, but for completeness. */
2705         case MDS_DISCONNECT:
2706         case OST_DISCONNECT:
2707                *process = 1;
2708                RETURN(0);
2709
2710         case MDS_CLOSE:
2711         case MDS_DONE_WRITING:
2712         case MDS_SYNC: /* used in unmounting */
2713         case OBD_PING:
2714         case MDS_REINT:
2715         case SEQ_QUERY:
2716         case FLD_QUERY:
2717         case LDLM_ENQUEUE:
2718                 *process = target_queue_recovery_request(req, obd);
2719                 RETURN(0);
2720
2721         default:
2722                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
2723                 *process = -EAGAIN;
2724                 RETURN(0);
2725         }
2726 }
2727
2728 /*
2729  * Handle recovery. Return:
2730  *        +1: continue request processing;
2731  *       -ve: abort immediately with the given error code;
2732  *         0: send reply with error code in req->rq_status;
2733  */
2734 static int mdt_recovery(struct mdt_thread_info *info)
2735 {
2736         struct ptlrpc_request *req = mdt_info_req(info);
2737         int recovering;
2738         struct obd_device *obd;
2739
2740         ENTRY;
2741
2742         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2743         case MDS_CONNECT:
2744         case SEC_CTX_INIT:
2745         case SEC_CTX_INIT_CONT:
2746         case SEC_CTX_FINI:
2747                 {
2748 #if 0
2749                         int rc;
2750
2751                         rc = mdt_handle_idmap(info);
2752                         if (rc)
2753                                 RETURN(rc);
2754                         else
2755 #endif
2756                                 RETURN(+1);
2757                 }
2758         }
2759
2760         if (unlikely(req->rq_export == NULL)) {
2761                 CERROR("operation %d on unconnected MDS from %s\n",
2762                        lustre_msg_get_opc(req->rq_reqmsg),
2763                        libcfs_id2str(req->rq_peer));
2764                 /* FIXME: for CMD cleanup, when mds_B stops, requests from
2765                  * mds_A will get -ENOTCONN (especially ping requests), which
2766                  * will cause mds_A's deactivate timeout; then, when mds_A
2767                  * cleans up, the cleanup process will be suspended since the
2768                  * deactivate timeout is not zero.
2769                  */
2770                 req->rq_status = -ENOTCONN;
2771                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
2772                 RETURN(0);
2773         }
2774
2775         /* sanity check: if the xid matches, the request must be marked as a
2776          * resent or replayed */
2777         if (req_xid_is_last(req)) {
2778                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
2779                       (MSG_RESENT | MSG_REPLAY))) {
2780                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
2781                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
2782                                   lustre_msg_get_flags(req->rq_reqmsg));
2783                         LBUG();
2784                         req->rq_status = -ENOTCONN;
2785                         RETURN(-ENOTCONN);
2786                 }
2787         }
2788
2789         /* else: note the opposite is not always true; a RESENT req after a
2790          * failover will usually not match the last_xid, since it was likely
2791          * never committed. A REPLAYed request will almost never match the
2792          * last xid, however it could for a committed, but still retained,
2793          * open. */
2794
2795         obd = req->rq_export->exp_obd;
2796
2797         /* Check for aborted recovery... */
2798         spin_lock_bh(&obd->obd_processing_task_lock);
2799         recovering = obd->obd_recovering;
2800         spin_unlock_bh(&obd->obd_processing_task_lock);
2801         if (unlikely(recovering)) {
2802                 int rc;
2803                 int should_process;
2804                 DEBUG_REQ(D_INFO, req, "Got new replay");
2805                 rc = mdt_filter_recovery_request(req, obd, &should_process);
2806                 if (rc != 0 || !should_process)
2807                         RETURN(rc);
2808                 else if (should_process < 0) {
2809                         req->rq_status = should_process;
2810                         rc = ptlrpc_error(req);
2811                         RETURN(rc);
2812                 }
2813         }
2814         RETURN(+1);
2815 }
2816
2817 static int mdt_msg_check_version(struct lustre_msg *msg)
2818 {
2819         int rc;
2820
2821         switch (lustre_msg_get_opc(msg)) {
2822         case MDS_CONNECT:
2823         case MDS_DISCONNECT:
2824         case MDS_SET_INFO:
2825         case OBD_PING:
2826         case SEC_CTX_INIT:
2827         case SEC_CTX_INIT_CONT:
2828         case SEC_CTX_FINI:
2829                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2830                 if (rc)
2831                         CERROR("bad opc %u version %08x, expecting %08x\n",
2832                                lustre_msg_get_opc(msg),
2833                                lustre_msg_get_version(msg),
2834                                LUSTRE_OBD_VERSION);
2835                 break;
2836         case MDS_GETSTATUS:
2837         case MDS_GETATTR:
2838         case MDS_GETATTR_NAME:
2839         case MDS_STATFS:
2840         case MDS_READPAGE:
2841         case MDS_WRITEPAGE:
2842         case MDS_IS_SUBDIR:
2843         case MDS_REINT:
2844         case MDS_CLOSE:
2845         case MDS_DONE_WRITING:
2846         case MDS_PIN:
2847         case MDS_SYNC:
2848         case MDS_GETXATTR:
2849         case MDS_SETXATTR:
2850         case MDS_GET_INFO:
2851         case MDS_QUOTACHECK:
2852         case MDS_QUOTACTL:
2853         case QUOTA_DQACQ:
2854         case QUOTA_DQREL:
2855         case SEQ_QUERY:
2856         case FLD_QUERY:
2857                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2858                 if (rc)
2859                         CERROR("bad opc %u version %08x, expecting %08x\n",
2860                                lustre_msg_get_opc(msg),
2861                                lustre_msg_get_version(msg),
2862                                LUSTRE_MDS_VERSION);
2863                 break;
2864         case LDLM_ENQUEUE:
2865         case LDLM_CONVERT:
2866         case LDLM_BL_CALLBACK:
2867         case LDLM_CP_CALLBACK:
2868                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2869                 if (rc)
2870                         CERROR("bad opc %u version %08x, expecting %08x\n",
2871                                lustre_msg_get_opc(msg),
2872                                lustre_msg_get_version(msg),
2873                                LUSTRE_DLM_VERSION);
2874                 break;
2875         case OBD_LOG_CANCEL:
2876         case LLOG_ORIGIN_HANDLE_CREATE:
2877         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2878         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2879         case LLOG_ORIGIN_HANDLE_CLOSE:
2880         case LLOG_ORIGIN_HANDLE_DESTROY:
2881         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2882         case LLOG_CATINFO:
2883                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2884                 if (rc)
2885                         CERROR("bad opc %u version %08x, expecting %08x\n",
2886                                lustre_msg_get_opc(msg),
2887                                lustre_msg_get_version(msg),
2888                                LUSTRE_LOG_VERSION);
2889                 break;
2890         default:
2891                 CERROR("MDS unknown opcode %u\n", lustre_msg_get_opc(msg));
2892                 rc = -ENOTSUPP;
2893         }
2894         return rc;
2895 }
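/*
 * Illustrative note: callers are expected to drop a request whose message
 * version does not match before dispatching it; see mdt_handle0() below.
 * Under the grouping above, supporting a new MDS opcode would mean adding
 * its case label to the LUSTRE_MDS_VERSION block, roughly:
 *
 *   case MDS_NEW_OPCODE:     (hypothetical opcode, for illustration only)
 *           rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
 *           ...
 */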
2896
2897 static int mdt_handle0(struct ptlrpc_request *req,
2898                        struct mdt_thread_info *info,
2899                        struct mdt_opc_slice *supported)
2900 {
2901         struct mdt_handler *h;
2902         struct lustre_msg  *msg;
2903         int                 rc;
2904
2905         ENTRY;
2906
2907         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
2908                 RETURN(0);
2909
2910         LASSERT(current->journal_info == NULL);
2911
2912         msg = req->rq_reqmsg;
2913         rc = mdt_msg_check_version(msg);
2914         if (likely(rc == 0)) {
2915                 rc = mdt_recovery(info);
2916                 if (likely(rc == +1)) {
2917                         h = mdt_handler_find(lustre_msg_get_opc(msg),
2918                                              supported);
2919                         if (likely(h != NULL)) {
2920                                 rc = mdt_req_handle(info, h, req);
2921                         } else {
2922                                 CERROR("Unsupported opcode: 0x%x\n",
2923                                        lustre_msg_get_opc(msg));
2924                                 req->rq_status = -ENOTSUPP;
2925                                 rc = ptlrpc_error(req);
2926                                 RETURN(rc);
2927                         }
2928                 }
2929         } else
2930                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
2931         RETURN(rc);
2932 }
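/*
 * Request flow implemented by mdt_handle0(), in outline:
 *
 *   mdt_msg_check_version(msg)         - reject version mismatches
 *   mdt_recovery(info)                 - filter/queue requests in recovery
 *   mdt_handler_find(opc, supported)   - look up the per-opcode handler
 *   mdt_req_handle(info, h, req)       - execute it
 *
 * An opcode with no handler in the supported slice is answered with
 * -ENOTSUPP via ptlrpc_error().
 */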
2933
2934 /*
2935  * MDT handler function called by ptlrpc service thread when request comes.
2936  *
2937  * XXX common "target" functionality should be factored into separate module
2938  * shared by mdt, ost and stand-alone services like fld.
2939  */
2940 static int mdt_handle_common(struct ptlrpc_request *req,
2941                              struct mdt_opc_slice *supported)
2942 {
2943         struct lu_env          *env;
2944         struct mdt_thread_info *info;
2945         int                     rc;
2946         ENTRY;
2947
2948         env = req->rq_svc_thread->t_env;
2949         LASSERT(env != NULL);
2950         LASSERT(env->le_ses != NULL);
2951         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
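        /* Per-thread MDT state lives in the service thread's lu_env under
         * mdt_thread_key; fetch that slot instead of allocating one per
         * request. */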
2952         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
2953         LASSERT(info != NULL);
2954
2955         mdt_thread_info_init(req, info);
2956
2957         rc = mdt_handle0(req, info, supported);
2958
2959         mdt_thread_info_fini(info);
2960         RETURN(rc);
2961 }
2962
2963 /*
2964  * This is called from recovery code as handler of _all_ RPC types, FLD and SEQ
2965  * as well.
2966  */
2967 int mdt_recovery_handle(struct ptlrpc_request *req)
2968 {
2969         int rc;
2970         ENTRY;
2971
2972         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2973         case FLD_QUERY:
2974                 rc = mdt_handle_common(req, mdt_fld_handlers);
2975                 break;
2976         case SEQ_QUERY:
2977                 rc = mdt_handle_common(req, mdt_seq_handlers);
2978                 break;
2979         default:
2980                 rc = mdt_handle_common(req, mdt_regular_handlers);
2981                 break;
2982         }
2983
2984         RETURN(rc);
2985 }
2986
2987 static int mdt_regular_handle(struct ptlrpc_request *req)
2988 {
2989         return mdt_handle_common(req, mdt_regular_handlers);
2990 }
2991
2992 static int mdt_readpage_handle(struct ptlrpc_request *req)
2993 {
2994         return mdt_handle_common(req, mdt_readpage_handlers);
2995 }
2996
2997 static int mdt_xmds_handle(struct ptlrpc_request *req)
2998 {
2999         return mdt_handle_common(req, mdt_xmds_handlers);
3000 }
3001
3002 static int mdt_mdsc_handle(struct ptlrpc_request *req)
3003 {
3004         return mdt_handle_common(req, mdt_seq_handlers);
3005 }
3006
3007 static int mdt_mdss_handle(struct ptlrpc_request *req)
3008 {
3009         return mdt_handle_common(req, mdt_seq_handlers);
3010 }
3011
3012 static int mdt_dtss_handle(struct ptlrpc_request *req)
3013 {
3014         return mdt_handle_common(req, mdt_seq_handlers);
3015 }
3016
3017 static int mdt_fld_handle(struct ptlrpc_request *req)
3018 {
3019         return mdt_handle_common(req, mdt_fld_handlers);
3020 }
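/*
 * The thin wrappers above give each ptlrpc service its own request-handler
 * entry point while sharing mdt_handle_common().  A minimal sketch of the
 * presumed wiring when a service is set up (arguments elided, since the
 * exact ptlrpc_init_svc() signature is not shown here):
 *
 *   svc = ptlrpc_init_svc(..., mdt_regular_handle, ...);
 */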
3021
3022 enum mdt_it_code {
3023         MDT_IT_OPEN,
3024         MDT_IT_OCREAT,
3025         MDT_IT_CREATE,
3026         MDT_IT_GETATTR,
3027         MDT_IT_READDIR,
3028         MDT_IT_LOOKUP,
3029         MDT_IT_UNLINK,
3030         MDT_IT_TRUNC,
3031         MDT_IT_GETXATTR,
3032         MDT_IT_NR
3033 };
3034
3035 static int mdt_intent_getattr(enum mdt_it_code opcode,
3036                               struct mdt_thread_info *info,
3037                               struct ldlm_lock **lockp,
3038                               int flags);
3039 static int mdt_intent_reint(enum mdt_it_code opcode,
3040                             struct mdt_thread_info *info,
3041                             struct ldlm_lock **lockp,
3042                             int flags);
3043
3044 static struct mdt_it_flavor {
3045         const struct req_format *it_fmt;
3046         __u32                    it_flags;
3047         int                    (*it_act)(enum mdt_it_code,
3048                                          struct mdt_thread_info *,
3049                                          struct ldlm_lock **,
3050                                          int);
3051         long                     it_reint;
3052 } mdt_it_flavor[] = {
3053         [MDT_IT_OPEN]     = {
3054                 .it_fmt   = &RQF_LDLM_INTENT,
3055                 /*.it_flags = HABEO_REFERO,*/
3056                 .it_flags = 0,
3057                 .it_act   = mdt_intent_reint,
3058                 .it_reint = REINT_OPEN
3059         },
3060         [MDT_IT_OCREAT]   = {
3061                 .it_fmt   = &RQF_LDLM_INTENT,
3062                 .it_flags = MUTABOR,
3063                 .it_act   = mdt_intent_reint,
3064                 .it_reint = REINT_OPEN
3065         },
3066         [MDT_IT_CREATE]   = {
3067                 .it_fmt   = &RQF_LDLM_INTENT,
3068                 .it_flags = MUTABOR,
3069                 .it_act   = mdt_intent_reint,
3070                 .it_reint = REINT_CREATE
3071         },
3072         [MDT_IT_GETATTR]  = {
3073                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3074                 .it_flags = HABEO_REFERO,
3075                 .it_act   = mdt_intent_getattr
3076         },
3077         [MDT_IT_READDIR]  = {
3078                 .it_fmt   = NULL,
3079                 .it_flags = 0,
3080                 .it_act   = NULL
3081         },
3082         [MDT_IT_LOOKUP]   = {
3083                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3084                 .it_flags = HABEO_REFERO,
3085                 .it_act   = mdt_intent_getattr
3086         },
3087         [MDT_IT_UNLINK]   = {
3088                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3089                 .it_flags = MUTABOR,
3090                 .it_act   = NULL,
3091                 .it_reint = REINT_UNLINK
3092         },
3093         [MDT_IT_TRUNC]    = {
3094                 .it_fmt   = NULL,
3095                 .it_flags = MUTABOR,
3096                 .it_act   = NULL
3097         },
3098         [MDT_IT_GETXATTR] = {
3099                 .it_fmt   = NULL,
3100                 .it_flags = 0,
3101                 .it_act   = NULL
3102         }
3103 };
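/*
 * Illustrative sketch only: the intent dispatch code is expected to index
 * this table by the decoded intent code and, when the entry provides a
 * request format and an action callback, use them roughly as follows
 * (variable names hypothetical):
 *
 *   struct mdt_it_flavor *flv = &mdt_it_flavor[opc];
 *
 *   if (flv->it_fmt != NULL)
 *           req_capsule_extend(info->mti_pill, flv->it_fmt);
 *   if (flv->it_act != NULL)
 *           rc = flv->it_act(opc, info, lockp, flags);
 *   else
 *           rc = -EOPNOTSUPP;
 */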
3104
3105 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3106                             struct ldlm_lock **lockp,
3107                             struct ldlm_lock *new_lock,
3108                             struct mdt_lock_handle *lh,
3109                             int flags)
3110 {
3111         struct ptlrpc_request  *req = mdt_info_req(info);
3112         struct ldlm_lock       *lock = *lockp;
3113
3114         /*
3115          * Get a new lock only when a possible resent request did not find
3116          * an existing lock.
3117          */
3118         if (new_lock == NULL)
3119                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3120
3121         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3122                 lh->mlh_reg_lh.cookie = 0;
3123                 RETURN(0);
3124         }
3125
3126         LASSERTF(new_lock != NULL,
3127                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3128
3129         /*
3130          * If we've already given this lock to a client once, then we should
3131          * have no readers or writers.  Otherwise, we should have one reader
3132          * _or_ writer ref (which will be zeroed below) before returning the
3133          * lock to a client.
3134          */
3135         if (new_lock->l_export == req->rq_export) {
3136                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3137         } else {
3138                 LASSERT(new_lock->l_export == NULL);
3139                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3140         }
3141
3142         *lockp = new_lock;
3143
3144         if (new_lock->l_export == req->rq_export) {
3145                 /*
3146                  * Already gave this to the client, which means that we
3147                  * reconstructed a reply.
3148                  */
3149                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3150                         MSG_RESENT);
3151                 lh->mlh_reg_lh.cookie = 0;
3152                 RETURN(ELDLM_LOCK_REPLACED);
3153         }
3154
3155         /*
3156          * Fixup the lock to be given to the client.
3157          */
3158         lock_res_and_lock(new_lock);
3159         /* Zero new_lock->l_readers and new_lock->l_writers without
3160          * triggering a possible blocking AST. */
3161         while (new_lock->l_readers > 0) {
3162                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3163                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3164                 new_lock->l_readers--;
3165         }
3166         while (new_lock->l_writers > 0) {
3167                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3168                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3169                 new_lock->l_writers--;
3170         }
3171
3172         new_lock->l_export = class_export_lock_get(req->rq_export);
3173         new_lock->l_blocking_ast = lock->l_blocking_ast;
3174         new_lock->l_completion_ast = lock->l_completion_ast;
3175         new_lock->l_remote_handle = lock->l_remote_handle;
3176         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3177
3178         unlock_res_and_lock(new_lock);
3179
3180         lustre_hash_add(new_lock->l_export->exp_lock_hash,
3181                         &new_lock->l_remote_handle,
3182                         &new_lock->l_exp_hash);
3183
3184         LDLM_LOCK_RELEASE(new_lock);
3185         lh->mlh_reg_lh.cookie = 0;
3186
3187         RETURN(ELDLM_LOCK_REPLACED);
3188 }
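/*
 * Note on the contract above (descriptive only): when ELDLM_LOCK_REPLACED is
 * returned, *lockp refers to the lock that will be handed to the client,
 * either the previously granted lock for a reconstructed (resent) reply or
 * new_lock with its local reader/writer references cleared, and
 * lh->mlh_reg_lh.cookie has been zeroed so the caller must not release the
 * registered lock handle again.
 */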
3189
3190 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3191                                     struct ldlm_lock *new_lock,
3192                                     struct ldlm_lock **old_lock,
3193                                     struct mdt_lock_handle *lh)
3194 {
3195         struct ptlrpc_request  *req = mdt_info_req(info);
3196         struct obd_export      *exp = req->rq_export;
3197         struct lustre_handle    remote_hdl;
3198         struct ldlm_request    *dlmreq;
3199         struct ldlm_lock       *lock;
3200
3201         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3202                 return;
3203
3204         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3205         remote_hdl = dlmreq->lock_handle[0];
3206
3207         lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3208         if (lock) {
3209                 if (lock != new_lock) {
3210                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3211                         lh->mlh_reg_mode = lock->l_granted_mode;
3212
3213                         LDLM_DEBUG(lock, "Restoring lock cookie");
3214                         DEBUG_REQ(D_DLMTRACE, req,
3215                                   "restoring lock cookie "LPX64,
3216                                   lh->mlh_reg_lh.cookie);
3217                         if (old_lock)
3218                                 *old_lock = LDLM_LOCK_GET(lock);
3219                         lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
3220                         return;
3221                 }
3222
3223                 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
3224         }
3225
3226         /*
3227          * If the xid matches, then we know this is a resent request, and
3228          * we allow it.  (It is probably an OPEN, for which we do not send
3229          * a lock.)
3230          */
3230         if (req_xid_is_last(req))
3231                 return;
3232
3233         /*
3234          * This remote handle isn't enqueued, so we never received or processed
3235          * this request.  Clear MSG_RESENT, because it can be handled like any
3236          * normal request now.
3237          */
3238         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3239
3240         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3241                   remote_hdl.cookie);
3242 }
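/*
 * Illustrative sketch only (hypothetical caller based on the interface
 * above): an intent handler that may be servicing a resent request would
 * typically recover the original lock before enqueuing a new one and pass
 * it on to mdt_intent_lock_replace(), e.g.:
 *
 *   struct ldlm_lock *old_lock = NULL;
 *
 *   mdt_intent_fixup_resent(info, *lockp, &old_lock, lhc);
 *   ...
 *   rc = mdt_intent_lock_replace(info, lockp, old_lock, lhc, flags);
 */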
3243
3244 static int mdt_intent_getattr(enum mdt_it_code opcode,
3245                               struct mdt_thread_info *info,
3246                               struct ldlm_lock **lockp,
3247                               int flags)
3248 {
3249         struct md