1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #ifndef EXPORT_SYMTAB
50 # define EXPORT_SYMTAB
51 #endif
52 #define DEBUG_SUBSYSTEM S_MDS
53
54 #include <linux/module.h>
55 /*
56  * struct OBD_{ALLOC,FREE}*()
57  */
58 #include <obd_support.h>
59 /* struct ptlrpc_request */
60 #include <lustre_net.h>
61 /* struct obd_export */
62 #include <lustre_export.h>
63 /* struct obd_device */
64 #include <obd.h>
65 /* lu2dt_dev() */
66 #include <dt_object.h>
67 #include <lustre_mds.h>
68 #include <lustre_mdt.h>
69 #include "mdt_internal.h"
70 #ifdef HAVE_QUOTA_SUPPORT
71 # include <lustre_quota.h>
72 #endif
73 #include <lustre_acl.h>
74 #include <lustre_param.h>
75 #include <lustre_fsfilt.h>
76
77 mdl_mode_t mdt_mdl_lock_modes[] = {
78         [LCK_MINMODE] = MDL_MINMODE,
79         [LCK_EX]      = MDL_EX,
80         [LCK_PW]      = MDL_PW,
81         [LCK_PR]      = MDL_PR,
82         [LCK_CW]      = MDL_CW,
83         [LCK_CR]      = MDL_CR,
84         [LCK_NL]      = MDL_NL,
85         [LCK_GROUP]   = MDL_GROUP
86 };
87
88 ldlm_mode_t mdt_dlm_lock_modes[] = {
89         [MDL_MINMODE] = LCK_MINMODE,
90         [MDL_EX]      = LCK_EX,
91         [MDL_PW]      = LCK_PW,
92         [MDL_PR]      = LCK_PR,
93         [MDL_CW]      = LCK_CW,
94         [MDL_CR]      = LCK_CR,
95         [MDL_NL]      = LCK_NL,
96         [MDL_GROUP]   = LCK_GROUP
97 };
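
/*
 * The two tables above are inverse mappings between ldlm and mdl lock modes,
 * so a mode can be converted in either direction with a single array lookup,
 * e.g. mdt_mdl_lock_modes[LCK_PR] == MDL_PR and
 * mdt_dlm_lock_modes[MDL_PR] == LCK_PR.
 */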
98
99 /*
100  * Initialized in mdt_mod_init().
101  */
102 unsigned long mdt_num_threads;
103
104 /* ptlrpc request handlers for MDT. All handlers are
105  * grouped into several slices - struct mdt_opc_slice,
106  * and stored in an array - mdt_handlers[].
107  */
108 struct mdt_handler {
109         /* The name of this handler. */
110         const char *mh_name;
111         /* Fail id for this handler, checked at the start of the handler. */
112         int         mh_fail_id;
113         /* Operation code for this handler */
114         __u32       mh_opc;
115         /* flags are listed in enum mdt_handler_flags below. */
116         __u32       mh_flags;
117         /* The actual handler function to execute. */
118         int (*mh_act)(struct mdt_thread_info *info);
119         /* Request format for this request. */
120         const struct req_format *mh_fmt;
121 };
122
123 enum mdt_handler_flags {
124         /*
125          * struct mdt_body is passed in the incoming message, and object
126          * identified by this fid exists on disk.
127          *
128          * "habeo corpus" == "I have a body"
129          */
130         HABEO_CORPUS = (1 << 0),
131         /*
132          * struct ldlm_request is passed in the incoming message.
133          *
134          * "habeo clavis" == "I have a key"
135          */
136         HABEO_CLAVIS = (1 << 1),
137         /*
138          * this request has a fixed reply format, so the reply message can be
139          * packed by generic code.
140          *
141          * "habeo refero" == "I have a reply"
142          */
143         HABEO_REFERO = (1 << 2),
144         /*
145          * this request will modify something, so check whether the filesystem
146          * is read-only and, if so, return -EROFS to the client right away.
147          *
148          * "mutabor" == "I shall modify"
149          */
150         MUTABOR      = (1 << 3)
151 };
152
153 struct mdt_opc_slice {
154         __u32               mos_opc_start;
155         int                 mos_opc_end;
156         struct mdt_handler *mos_hs;
157 };
158
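/*
 * For illustration, a handler entry and the slice that groups it could look
 * roughly like this (hypothetical values; the real tables are defined later
 * in this file):
 *
 *     static struct mdt_handler mdt_mds_ops[] = {
 *             {
 *                     .mh_name    = "mdt_getattr",
 *                     .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
 *                     .mh_opc     = MDS_GETATTR,
 *                     .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
 *                     .mh_act     = mdt_getattr,
 *                     .mh_fmt     = &RQF_MDS_GETATTR
 *             },
 *             ...
 *     };
 *
 *     static struct mdt_opc_slice mdt_regular_handlers[] = {
 *             { .mos_opc_start = MDS_GETATTR,
 *               .mos_opc_end   = MDS_LAST_OPC,
 *               .mos_hs        = mdt_mds_ops },
 *             ...
 *     };
 */
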
159 static struct mdt_opc_slice mdt_regular_handlers[];
160 static struct mdt_opc_slice mdt_readpage_handlers[];
161 static struct mdt_opc_slice mdt_xmds_handlers[];
162 static struct mdt_opc_slice mdt_seq_handlers[];
163 static struct mdt_opc_slice mdt_fld_handlers[];
164
165 static struct mdt_device *mdt_dev(struct lu_device *d);
166 static int mdt_regular_handle(struct ptlrpc_request *req);
167 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
168
169 static const struct lu_object_operations mdt_obj_ops;
170
171 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
172 {
173         if (!rep)
174                 return 0;
175         return (rep->lock_policy_res1 & flag);
176 }
177
178 void mdt_clear_disposition(struct mdt_thread_info *info,
179                            struct ldlm_reply *rep, int flag)
180 {
181         if (info)
182                 info->mti_opdata &= ~flag;
183         if (rep)
184                 rep->lock_policy_res1 &= ~flag;
185 }
186
187 void mdt_set_disposition(struct mdt_thread_info *info,
188                          struct ldlm_reply *rep, int flag)
189 {
190         if (info)
191                 info->mti_opdata |= flag;
192         if (rep)
193                 rep->lock_policy_res1 |= flag;
194 }
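
/*
 * These helpers record intent dispositions both in the per-thread info and in
 * the ldlm reply returned to the client, e.g.
 * mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD) marks the lookup as
 * executed for this request.
 */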
195
196 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
197 {
198         lh->mlh_pdo_hash = 0;
199         lh->mlh_reg_mode = lm;
200         lh->mlh_type = MDT_REG_LOCK;
201 }
202
203 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
204                        const char *name, int namelen)
205 {
206         lh->mlh_reg_mode = lm;
207         lh->mlh_type = MDT_PDO_LOCK;
208
209         if (name != NULL) {
210                 LASSERT(namelen > 0);
211                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
212         } else {
213                 LASSERT(namelen == 0);
214                 lh->mlh_pdo_hash = 0ull;
215         }
216 }
217
218 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
219                               struct mdt_lock_handle *lh)
220 {
221         mdl_mode_t mode;
222         ENTRY;
223
224         /*
225          * Any directory access needs a couple of locks:
226          *
227          * 1) on the part of the directory we will look up in or modify;
228          *
229          * 2) on the whole directory, to protect it from concurrent splitting
230          * and/or to flush the client's cache for readdir().
231          *
232          * So, for a given mode and object this routine decides what lock mode
233          * to use for lock #2:
234          *
235          * 1) if the caller will look up in the directory, we only need to
236          * protect it from being split - LCK_CR;
237          *
238          * 2) if the caller will modify the directory, we need to protect it
239          * from being split and to flush the cache - LCK_CW;
240          *
241          * 3) if the caller will modify the directory and it seems ready for
242          * splitting, we need to protect it from any type of access
243          * (lookup/modify/split) - LCK_EX --bzzz
244          */
245
246         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
247         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
248
249         /*
250          * Ask the underlying layer its opinion about the preferred PDO lock
251          * mode, passing the access type as the regular lock mode:
252          *
253          * - MDL_MINMODE means that the lower layer does not want to specify
254          * a lock mode;
255          *
256          * - MDL_NL means that no PDO lock should be taken. This is used in some
257          * cases; for example, non-splittable directories need no PDO locks
258          * at all.
259          */
260         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
261                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
262
263         if (mode != MDL_MINMODE) {
264                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
265         } else {
266                 /*
267                  * The lower layer does not want to specify a locking mode. We
268                  * choose it ourselves. No special protection is needed; just
269                  * flush the client's cache on modification and allow concurrent
270                  * modification.
271                  */
272                 switch (lh->mlh_reg_mode) {
273                 case LCK_EX:
274                         lh->mlh_pdo_mode = LCK_EX;
275                         break;
276                 case LCK_PR:
277                         lh->mlh_pdo_mode = LCK_CR;
278                         break;
279                 case LCK_PW:
280                         lh->mlh_pdo_mode = LCK_CW;
281                         break;
282                 default:
283                         CERROR("Unexpected lock mode (0x%x)\n",
284                                (int)lh->mlh_reg_mode);
285                         LBUG();
286                 }
287         }
288
289         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
290         EXIT;
291 }
292
293 static int mdt_getstatus(struct mdt_thread_info *info)
294 {
295         struct mdt_device *mdt  = info->mti_mdt;
296         struct md_device  *next = mdt->mdt_child;
297         struct mdt_body   *repbody;
298         int                rc;
299
300         ENTRY;
301
302         rc = mdt_check_ucred(info);
303         if (rc)
304                 RETURN(err_serious(rc));
305
306         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
307                 RETURN(err_serious(-ENOMEM));
308
309         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
310         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
311         if (rc != 0)
312                 RETURN(rc);
313
314         repbody->valid |= OBD_MD_FLID;
315
316         if (mdt->mdt_opts.mo_mds_capa &&
317             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
318                 struct mdt_object  *root;
319                 struct lustre_capa *capa;
320
321                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
322                 if (IS_ERR(root))
323                         RETURN(PTR_ERR(root));
324
325                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
326                 LASSERT(capa);
327                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
328                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
329                                  0);
330                 mdt_object_put(info->mti_env, root);
331                 if (rc == 0)
332                         repbody->valid |= OBD_MD_FLMDSCAPA;
333         }
334
335         RETURN(rc);
336 }
337
338 static int mdt_statfs(struct mdt_thread_info *info)
339 {
340         struct md_device      *next  = info->mti_mdt->mdt_child;
341         struct ptlrpc_service *svc;
342         struct obd_statfs     *osfs;
343         int                    rc;
344
345         ENTRY;
346
347         svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;
348
349         /* This will trigger a watchdog timeout */
350         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
351                          (MDT_SERVICE_WATCHDOG_FACTOR *
352                           at_get(&svc->srv_at_estimate) / 1000) + 1);
353
354         rc = mdt_check_ucred(info);
355         if (rc)
356                 RETURN(err_serious(rc));
357
358         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
359                 rc = err_serious(-ENOMEM);
360         } else {
361                 osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
362                 rc = next->md_ops->mdo_statfs(info->mti_env, next,
363                                               &info->mti_u.ksfs);
364                 statfs_pack(osfs, &info->mti_u.ksfs);
365         }
366         RETURN(rc);
367 }
368
369 void mdt_pack_size2body(struct mdt_thread_info *info, struct mdt_object *o)
370 {
371         struct mdt_body *b;
372         struct lu_attr *attr = &info->mti_attr.ma_attr;
373
374         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
375
376         /* Check if Size-on-MDS is enabled. */
377         if ((mdt_conn_flags(info) & OBD_CONNECT_SOM) &&
378             S_ISREG(attr->la_mode) && mdt_sizeonmds_enabled(o)) {
379                 b->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
380                 b->size = attr->la_size;
381                 b->blocks = attr->la_blocks;
382         }
383 }
384
385 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
386                         const struct lu_attr *attr, const struct lu_fid *fid)
387 {
388         /* XXX: should pack the reply body according to lu_valid */
389         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
390                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
391                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
392                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
393
394         if (!S_ISREG(attr->la_mode))
395                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
396
397         b->atime      = attr->la_atime;
398         b->mtime      = attr->la_mtime;
399         b->ctime      = attr->la_ctime;
400         b->mode       = attr->la_mode;
401         b->size       = attr->la_size;
402         b->blocks     = attr->la_blocks;
403         b->uid        = attr->la_uid;
404         b->gid        = attr->la_gid;
405         b->flags      = attr->la_flags;
406         b->nlink      = attr->la_nlink;
407         b->rdev       = attr->la_rdev;
408
409         if (fid) {
410                 b->fid1 = *fid;
411                 b->valid |= OBD_MD_FLID;
412
413                 /* FIXME: these should be fixed when the new igif is ready. */
414                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
415                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
416                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
417
418                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
419                                 PFID(fid), b->nlink, b->mode, b->size);
420         }
421
422         if (info)
423                 mdt_body_reverse_idmap(info, b);
424 }
425
426 static inline int mdt_body_has_lov(const struct lu_attr *la,
427                                    const struct mdt_body *body)
428 {
429         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
430                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
431 }
432
433 static int mdt_getattr_internal(struct mdt_thread_info *info,
434                                 struct mdt_object *o)
435 {
436         struct md_object        *next = mdt_object_child(o);
437         const struct mdt_body   *reqbody = info->mti_body;
438         struct ptlrpc_request   *req = mdt_info_req(info);
439         struct md_attr          *ma = &info->mti_attr;
440         struct lu_attr          *la = &ma->ma_attr;
441         struct req_capsule      *pill = info->mti_pill;
442         const struct lu_env     *env = info->mti_env;
443         struct mdt_body         *repbody;
444         struct lu_buf           *buffer = &info->mti_buf;
445         int                     rc;
446         ENTRY;
447
448         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
449                 RETURN(err_serious(-ENOMEM));
450
451         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
452
453         ma->ma_valid = 0;
454
455         rc = mdt_object_exists(o);
456         if (rc < 0) {
457                 /* This object is located on a remote node. */
458                 repbody->fid1 = *mdt_object_fid(o);
459                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
460                 RETURN(0);
461         }
462
463         buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
464         buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);
465
466         /* If it is a dir object and the client wants the MEA, return it. */
467         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
468             reqbody->valid & OBD_MD_MEA) {
469                 /* Assumption: MDT_MD size is enough for lmv size. */
470                 ma->ma_lmv = buffer->lb_buf;
471                 ma->ma_lmv_size = buffer->lb_len;
472                 ma->ma_need = MA_LMV | MA_INODE;
473         } else {
474                 ma->ma_lmm = buffer->lb_buf;
475                 ma->ma_lmm_size = buffer->lb_len;
476                 ma->ma_need = MA_LOV | MA_INODE;
477         }
478
479         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
480             reqbody->valid & OBD_MD_FLDIREA  &&
481             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
482                 /* get default stripe info for this dir. */
483                 ma->ma_need |= MA_LOV_DEF;
484         }
485         rc = mo_attr_get(env, next, ma);
486         if (unlikely(rc)) {
487                 CERROR("getattr error for "DFID": %d\n",
488                         PFID(mdt_object_fid(o)), rc);
489                 RETURN(rc);
490         }
491
492         if (likely(ma->ma_valid & MA_INODE))
493                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
494         else
495                 RETURN(-EFAULT);
496
497         if (mdt_body_has_lov(la, reqbody)) {
498                 if (ma->ma_valid & MA_LOV) {
499                         LASSERT(ma->ma_lmm_size);
500                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
501                         repbody->eadatasize = ma->ma_lmm_size;
502                         if (S_ISDIR(la->la_mode))
503                                 repbody->valid |= OBD_MD_FLDIREA;
504                         else
505                                 repbody->valid |= OBD_MD_FLEASIZE;
506                 }
507                 if (ma->ma_valid & MA_LMV) {
508                         LASSERT(S_ISDIR(la->la_mode));
509                         repbody->eadatasize = ma->ma_lmv_size;
510                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
511                 }
512                 if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
513                         repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
514                 }
515         } else if (S_ISLNK(la->la_mode) &&
516                    reqbody->valid & OBD_MD_LINKNAME) {
517                 buffer->lb_buf = ma->ma_lmm;
518                 buffer->lb_len = reqbody->eadatasize;
519                 rc = mo_readlink(env, next, buffer);
520                 if (unlikely(rc <= 0)) {
521                         CERROR("readlink failed: %d\n", rc);
522                         rc = -EFAULT;
523                 } else {
524                         repbody->valid |= OBD_MD_LINKNAME;
525                         repbody->eadatasize = rc;
526                         /* NULL terminate */
527                         ((char*)ma->ma_lmm)[rc - 1] = 0;
528                         CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
529                                (char*)ma->ma_lmm, rc);
530                         rc = 0;
531                 }
532         }
533
534         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
535                 repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
536                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
537                 repbody->valid |= OBD_MD_FLMODEASIZE;
538                 CDEBUG(D_INODE, "Changing max_mdsize and max_cookiesize "
539                        "to %d:%d\n", repbody->max_mdsize,
540                        repbody->max_cookiesize);
541         }
542
543         if (exp_connect_rmtclient(info->mti_exp) &&
544             reqbody->valid & OBD_MD_FLRMTPERM) {
545                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
546
547                 /* mdt_getattr_lock only */
548                 rc = mdt_pack_remote_perm(info, o, buf);
549                 if (rc) {
550                         repbody->valid &= ~OBD_MD_FLRMTPERM;
551                         repbody->aclsize = 0;
552                         RETURN(rc);
553                 } else {
554                         repbody->valid |= OBD_MD_FLRMTPERM;
555                         repbody->aclsize = sizeof(struct mdt_remote_perm);
556                 }
557         }
558 #ifdef CONFIG_FS_POSIX_ACL
559         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
560                  (reqbody->valid & OBD_MD_FLACL)) {
561                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
562                 buffer->lb_len = req_capsule_get_size(pill,
563                                                       &RMF_ACL, RCL_SERVER);
564                 if (buffer->lb_len > 0) {
565                         rc = mo_xattr_get(env, next, buffer,
566                                           XATTR_NAME_ACL_ACCESS);
567                         if (rc < 0) {
568                                 if (rc == -ENODATA) {
569                                         repbody->aclsize = 0;
570                                         repbody->valid |= OBD_MD_FLACL;
571                                         rc = 0;
572                                 } else if (rc == -EOPNOTSUPP) {
573                                         rc = 0;
574                                 } else {
575                                         CERROR("got acl size: %d\n", rc);
576                                 }
577                         } else {
578                                 repbody->aclsize = rc;
579                                 repbody->valid |= OBD_MD_FLACL;
580                                 rc = 0;
581                         }
582                 }
583         }
584 #endif
585
586         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
587             info->mti_mdt->mdt_opts.mo_mds_capa &&
588             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
589                 struct lustre_capa *capa;
590
591                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
592                 LASSERT(capa);
593                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
594                 rc = mo_capa_get(env, next, capa, 0);
595                 if (rc)
596                         RETURN(rc);
597                 repbody->valid |= OBD_MD_FLMDSCAPA;
598         }
599         RETURN(rc);
600 }
601
602 static int mdt_renew_capa(struct mdt_thread_info *info)
603 {
604         struct mdt_object  *obj = info->mti_object;
605         struct mdt_body    *body;
606         struct lustre_capa *capa, *c;
607         int rc;
608         ENTRY;
609
610         /* If the object doesn't exist, or the server has capabilities
611          * disabled, return directly; the client will see that the
612          * OBD_MD_FLOSSCAPA flag is not set in body->valid.
613          */
614         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
615             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
616                 RETURN(0);
617
618         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
619         LASSERT(body != NULL);
620
621         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
622         LASSERT(c);
623
624         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
625         LASSERT(capa);
626
627         *capa = *c;
628         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
629         if (rc == 0)
630                 body->valid |= OBD_MD_FLOSSCAPA;
631         RETURN(rc);
632 }
633
634 static int mdt_getattr(struct mdt_thread_info *info)
635 {
636         struct mdt_object       *obj = info->mti_object;
637         struct req_capsule      *pill = info->mti_pill;
638         struct mdt_body         *reqbody;
639         struct mdt_body         *repbody;
640         mode_t                   mode;
641         int                      md_size;
642         int rc;
643         ENTRY;
644
645         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
646         LASSERT(reqbody);
647
648         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
649                 rc = req_capsule_server_pack(pill);
650                 if (unlikely(rc))
651                         RETURN(err_serious(rc));
652                 rc = mdt_renew_capa(info);
653                 GOTO(out_shrink, rc);
654         }
655
656         LASSERT(obj != NULL);
657         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
658
659         mode = lu_object_attr(&obj->mot_obj.mo_lu);
660         if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
661             (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
662                 md_size = reqbody->eadatasize;
663         else
664                 md_size = info->mti_mdt->mdt_max_mdsize;
665
666         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);
667
668         rc = req_capsule_server_pack(pill);
669         if (unlikely(rc != 0))
670                 RETURN(err_serious(rc));
671
672         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
673         LASSERT(repbody != NULL);
674         repbody->eadatasize = 0;
675         repbody->aclsize = 0;
676
677         if (reqbody->valid & OBD_MD_FLRMTPERM)
678                 rc = mdt_init_ucred(info, reqbody);
679         else
680                 rc = mdt_check_ucred(info);
681         if (unlikely(rc))
682                 GOTO(out_shrink, rc);
683
684         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
685         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
686
687         /*
688          * Don't check the capability at all, because rename may do a getattr
689          * on a remote object, and at that time no capability is available.
690          */
691         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
692         rc = mdt_getattr_internal(info, obj);
693         if (reqbody->valid & OBD_MD_FLRMTPERM)
694                 mdt_exit_ucred(info);
695         EXIT;
696 out_shrink:
697         mdt_shrink_reply(info);
698         return rc;
699 }
700
701 static int mdt_is_subdir(struct mdt_thread_info *info)
702 {
703         struct mdt_object     *o = info->mti_object;
704         struct req_capsule    *pill = info->mti_pill;
705         const struct mdt_body *body = info->mti_body;
706         struct mdt_body       *repbody;
707         int                    rc;
708         ENTRY;
709
710         LASSERT(o != NULL);
711
712         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
713
714         /*
715          * We save the last checked parent fid in @repbody->fid1 for the
716          * remote directory case.
717          */
718         LASSERT(fid_is_sane(&body->fid2));
719         LASSERT(mdt_object_exists(o) > 0);
720         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
721                            &body->fid2, &repbody->fid1);
722         if (rc == 0 || rc == -EREMOTE)
723                 repbody->valid |= OBD_MD_FLID;
724
725         RETURN(rc);
726 }
727
728 static int mdt_raw_lookup(struct mdt_thread_info *info,
729                           struct mdt_object *parent,
730                           const struct lu_name *lname,
731                           struct ldlm_reply *ldlm_rep)
732 {
733         struct md_object *next = mdt_object_child(info->mti_object);
734         const struct mdt_body *reqbody = info->mti_body;
735         struct lu_fid *child_fid = &info->mti_tmp_fid1;
736         struct mdt_body *repbody;
737         int rc;
738         ENTRY;
739
740         if (reqbody->valid != OBD_MD_FLID)
741                 RETURN(0);
742
743         LASSERT(!info->mti_cross_ref);
744
745         /* Only look up the fid of this object by name. */
746         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
747                         &info->mti_spec);
748 #if 0
749         /* XXX is raw_lookup possible as intent operation? */
750         if (rc != 0) {
751                 if (rc == -ENOENT)
752                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
753                 RETURN(rc);
754         } else
755                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
756
757         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
758 #endif
759         if (rc == 0) {
760                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
761                 repbody->fid1 = *child_fid;
762                 repbody->valid = OBD_MD_FLID;
763         }
764         RETURN(1);
765 }
766
767 /*
768  * The UPDATE lock should be taken on the parent and released before exit;
769  * the child_bits lock should be taken on the child and returned back:
770  *            (1) a normal request should release the child lock;
771  *            (2) an intent request will grant the lock to the client.
772  */
773 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
774                                  struct mdt_lock_handle *lhc,
775                                  __u64 child_bits,
776                                  struct ldlm_reply *ldlm_rep)
777 {
778         struct ptlrpc_request  *req       = mdt_info_req(info);
779         struct mdt_body        *reqbody   = NULL;
780         struct mdt_object      *parent    = info->mti_object;
781         struct mdt_object      *child;
782         struct md_object       *next      = mdt_object_child(parent);
783         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
784         struct lu_name         *lname     = NULL;
785         const char             *name      = NULL;
786         int                     namelen   = 0;
787         struct mdt_lock_handle *lhp;
788         struct ldlm_lock       *lock;
789         struct ldlm_res_id     *res_id;
790         int                     is_resent;
791         int                     rc;
792
793         ENTRY;
794
795         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
796         LASSERT(ergo(is_resent,
797                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
798
799         LASSERT(parent != NULL);
800         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
801         if (name == NULL)
802                 RETURN(err_serious(-EFAULT));
803
804         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
805                                        RCL_CLIENT) - 1;
806         if (!info->mti_cross_ref) {
807                 /*
808                  * XXX: the "namelen == 0" check is for getattr by fid
809                  * (OBD_CONNECT_ATTRFID); otherwise an empty name is not
810                  * allowed, i.e. the name must contain at least one character
811                  * plus the terminating '\0'.
812                  */
813                 if (namelen == 0) {
814                         reqbody = req_capsule_client_get(info->mti_pill,
815                                                          &RMF_MDT_BODY);
816                         LASSERT(fid_is_sane(&reqbody->fid2));
817                         name = NULL;
818
819                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
820                                "ldlm_rep = %p\n",
821                                PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
822                                ldlm_rep);
823                 } else {
824                         lname = mdt_name(info->mti_env, (char *)name, namelen);
825                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
826                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
827                                name, ldlm_rep);
828                 }
829         }
830         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
831
832         rc = mdt_object_exists(parent);
833         if (unlikely(rc == 0)) {
834                 LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
835                                 &parent->mot_obj.mo_lu,
836                                 "Parent doesn't exist!\n");
837                 RETURN(-ESTALE);
838         } else if (!info->mti_cross_ref) {
839                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
840                          PFID(mdt_object_fid(parent)));
841         }
842         if (lname) {
843                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
844                 if (rc != 0) {
845                         if (rc > 0)
846                                 rc = 0;
847                         RETURN(rc);
848                 }
849         }
850
851         if (info->mti_cross_ref) {
852                 /* Only getattr on the child. Parent is on another node. */
853                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
854                 child = parent;
855                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
856                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
857
858                 if (is_resent) {
859                         /* Do not take lock for resent case. */
860                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
861                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
862                                  lhc->mlh_reg_lh.cookie);
863                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
864                                                 &lock->l_resource->lr_name));
865                         LDLM_LOCK_PUT(lock);
866                         rc = 0;
867                 } else {
868                         mdt_lock_handle_init(lhc);
869                         mdt_lock_reg_init(lhc, LCK_PR);
870
871                         /*
872                          * The object's name is on another MDS; no LOOKUP lock
873                          * is needed here, but an UPDATE lock is.
874                          */
875                         child_bits &= ~MDS_INODELOCK_LOOKUP;
876                         child_bits |= MDS_INODELOCK_UPDATE;
877
878                         rc = mdt_object_lock(info, child, lhc, child_bits,
879                                              MDT_LOCAL_LOCK);
880                 }
881                 if (rc == 0) {
882                         /* Finally, we can get attr for child. */
883                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
884                                          BYPASS_CAPA);
885                         rc = mdt_getattr_internal(info, child);
886                         if (unlikely(rc != 0))
887                                 mdt_object_unlock(info, child, lhc, 1);
888                 }
889                 RETURN(rc);
890         }
891
892         /* step 1: lock parent */
893         lhp = &info->mti_lh[MDT_LH_PARENT];
894         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
895         rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
896                              MDT_LOCAL_LOCK);
897
898         if (unlikely(rc != 0))
899                 RETURN(rc);
900
901         if (lname) {
902                 /* step 2: lookup child's fid by name */
903                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
904                                 &info->mti_spec);
905
906                 if (rc != 0) {
907                         if (rc == -ENOENT)
908                                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
909                         GOTO(out_parent, rc);
910                 } else
911                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
912         } else {
913                 *child_fid = reqbody->fid2;
914                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
915         }
916
917         /*
918          * step 3: find the child object by fid and lock it,
919          *         regardless of whether it is local or remote.
920          */
921         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
922
923         if (unlikely(IS_ERR(child)))
924                 GOTO(out_parent, rc = PTR_ERR(child));
925         if (is_resent) {
926                 /* Do not take lock for resent case. */
927                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
928                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
929                          lhc->mlh_reg_lh.cookie);
930
931                 res_id = &lock->l_resource->lr_name;
932                 if (!fid_res_name_eq(mdt_object_fid(child),
933                                     &lock->l_resource->lr_name)) {
934                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
935                                                  &lock->l_resource->lr_name),
936                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
937                                  (unsigned long)res_id->name[0],
938                                  (unsigned long)res_id->name[1],
939                                  (unsigned long)res_id->name[2],
940                                  PFID(mdt_object_fid(parent)));
941                           CWARN("Resent, but still did not get the child lock, "
942                                 "parent:"DFID" child:"DFID"\n",
943                                 PFID(mdt_object_fid(parent)),
944                                 PFID(mdt_object_fid(child)));
945                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
946                           LDLM_LOCK_PUT(lock);
947                           GOTO(relock, 0);
948                 }
949                 LDLM_LOCK_PUT(lock);
950                 rc = 0;
951         } else {
952                 struct md_attr *ma;
953 relock:
954                 ma = &info->mti_attr;
955
956                 mdt_lock_handle_init(lhc);
957                 mdt_lock_reg_init(lhc, LCK_PR);
958
959                 if (mdt_object_exists(child) == 0) {
960                         LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
961                                         &child->mot_obj.mo_lu,
962                                         "Object doesn't exist!\n");
963                         GOTO(out_child, rc = -ESTALE);
964                 }
965
966                 ma->ma_valid = 0;
967                 ma->ma_need = MA_INODE;
968                 rc = mo_attr_get(info->mti_env, next, ma);
969                 if (unlikely(rc != 0))
970                         GOTO(out_child, rc);
971
972                 /* If the file has not been changed for some time, we return
973                  * not only a LOOKUP lock, but also an UPDATE lock, which
974                  * might save us an RPC on a later STAT. For directories, it
975                  * also lets negative dentries start working for this dir. */
976                 if (ma->ma_valid & MA_INODE &&
977                     ma->ma_attr.la_valid & LA_CTIME &&
978                     info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
979                     ma->ma_attr.la_ctime < cfs_time_current_sec())
980                         child_bits |= MDS_INODELOCK_UPDATE;
981
982                 rc = mdt_object_lock(info, child, lhc, child_bits,
983                                      MDT_CROSS_LOCK);
984
985                 if (unlikely(rc != 0))
986                         GOTO(out_child, rc);
987         }
988
989         /* finally, we can get attr for child. */
990         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
991         rc = mdt_getattr_internal(info, child);
992         if (unlikely(rc != 0)) {
993                 mdt_object_unlock(info, child, lhc, 1);
994         } else {
995                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
996                 if (lock) {
997                         struct mdt_body *repbody;
998
999                         /* Debugging code. */
1000                         res_id = &lock->l_resource->lr_name;
1001                         LDLM_DEBUG(lock, "Returning lock to client\n");
1002                         LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1003                                                  &lock->l_resource->lr_name),
1004                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1005                                  (unsigned long)res_id->name[0],
1006                                  (unsigned long)res_id->name[1],
1007                                  (unsigned long)res_id->name[2],
1008                                  PFID(mdt_object_fid(child)));
1009                         /*
1010                          * Pack Size-on-MDS inode attributes into the body if
1011                          * the UPDATE lock is granted.
1012                          */
1013                         repbody = req_capsule_server_get(info->mti_pill,
1014                                                          &RMF_MDT_BODY);
1015                         if (lock->l_policy_data.l_inodebits.bits &
1016                             MDS_INODELOCK_UPDATE)
1017                                 mdt_pack_size2body(info, child);
1018                         LDLM_LOCK_PUT(lock);
1019                 }
1020         }
1021         EXIT;
1022 out_child:
1023         mdt_object_put(info->mti_env, child);
1024 out_parent:
1025         mdt_object_unlock(info, parent, lhp, 1);
1026         return rc;
1027 }
1028
1029 /* normal handler: should release the child lock */
1030 static int mdt_getattr_name(struct mdt_thread_info *info)
1031 {
1032         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1033         struct mdt_body        *reqbody;
1034         struct mdt_body        *repbody;
1035         int rc;
1036         ENTRY;
1037
1038         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1039         LASSERT(reqbody != NULL);
1040         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1041         LASSERT(repbody != NULL);
1042
1043         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1044         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1045         repbody->eadatasize = 0;
1046         repbody->aclsize = 0;
1047
1048         rc = mdt_init_ucred(info, reqbody);
1049         if (unlikely(rc))
1050                 GOTO(out_shrink, rc);
1051
1052         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1053         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1054                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1055                 lhc->mlh_reg_lh.cookie = 0;
1056         }
1057         mdt_exit_ucred(info);
1058         EXIT;
1059 out_shrink:
1060         mdt_shrink_reply(info);
1061         return rc;
1062 }
1063
1064 static const struct lu_device_operations mdt_lu_ops;
1065
1066 static int lu_device_is_mdt(struct lu_device *d)
1067 {
1068         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1069 }
1070
1071 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1072                          void *karg, void *uarg);
1073
1074 static int mdt_set_info(struct mdt_thread_info *info)
1075 {
1076         struct ptlrpc_request *req = mdt_info_req(info);
1077         char *key;
1078         void *val;
1079         int keylen, vallen, rc = 0;
1080         ENTRY;
1081
1082         rc = req_capsule_server_pack(info->mti_pill);
1083         if (rc)
1084                 RETURN(rc);
1085
1086         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1087         if (key == NULL) {
1088                 DEBUG_REQ(D_HA, req, "no set_info key");
1089                 RETURN(-EFAULT);
1090         }
1091
1092         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1093                                       RCL_CLIENT);
1094
1095         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1096         if (val == NULL) {
1097                 DEBUG_REQ(D_HA, req, "no set_info val");
1098                 RETURN(-EFAULT);
1099         }
1100
1101         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1102                                       RCL_CLIENT);
1103
1104         if (KEY_IS(KEY_READ_ONLY)) {
1105                 req->rq_status = 0;
1106                 lustre_msg_set_status(req->rq_repmsg, 0);
1107
1108                 spin_lock(&req->rq_export->exp_lock);
1109                 if (*(__u32 *)val)
1110                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1111                 else
1112                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1113                 spin_unlock(&req->rq_export->exp_lock);
1114
1115         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1116                 if (lustre_msg_swabbed(req->rq_reqmsg)) {
1117                         struct changelog_setinfo *cs =
1118                                 (struct changelog_setinfo *)val;
1119                         __swab64s(&cs->cs_recno);
1120                         __swab32s(&cs->cs_id);
1121                 }
1122
1123                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1124                                    vallen, val, NULL);
1125                 lustre_msg_set_status(req->rq_repmsg, rc);
1126
1127         } else {
1128                 RETURN(-EINVAL);
1129         }
1130         RETURN(0);
1131 }
1132
1133 static int mdt_connect(struct mdt_thread_info *info)
1134 {
1135         int rc;
1136         struct ptlrpc_request *req;
1137
1138         req = mdt_info_req(info);
1139         rc = target_handle_connect(req);
1140         if (rc == 0) {
1141                 LASSERT(req->rq_export != NULL);
1142                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1143                 rc = mdt_init_sec_level(info);
1144                 if (rc == 0)
1145                         rc = mdt_init_idmap(info);
1146                 if (rc != 0)
1147                         obd_disconnect(class_export_get(req->rq_export));
1148         } else {
1149                 rc = err_serious(rc);
1150         }
1151         return rc;
1152 }
1153
1154 static int mdt_disconnect(struct mdt_thread_info *info)
1155 {
1156         int rc;
1157         ENTRY;
1158
1159         rc = target_handle_disconnect(mdt_info_req(info));
1160         if (rc)
1161                 rc = err_serious(rc);
1162         RETURN(rc);
1163 }
1164
1165 static int mdt_sendpage(struct mdt_thread_info *info,
1166                         struct lu_rdpg *rdpg)
1167 {
1168         struct ptlrpc_request   *req = mdt_info_req(info);
1169         struct ptlrpc_bulk_desc *desc;
1170         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1171         int                      tmpcount;
1172         int                      tmpsize;
1173         int                      timeout;
1174         int                      i;
1175         int                      rc;
1176         ENTRY;
1177
1178         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1179                                     MDS_BULK_PORTAL);
1180         if (desc == NULL)
1181                 RETURN(-ENOMEM);
1182
1183         for (i = 0, tmpcount = rdpg->rp_count;
1184                 i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
1185                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1186                 ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
1187         }
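
        /*
         * The loop above splits rp_count bytes across the preallocated pages,
         * e.g. rp_count == 6000 with 4096-byte pages yields two bulk pages of
         * 4096 and 1904 bytes.
         */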
1188
1189         LASSERT(desc->bd_nob == rdpg->rp_count);
1190         rc = sptlrpc_svc_wrap_bulk(req, desc);
1191         if (rc)
1192                 GOTO(free_desc, rc);
1193
1194         rc = ptlrpc_start_bulk_transfer(desc);
1195         if (rc)
1196                 GOTO(free_desc, rc);
1197
1198         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1199                 GOTO(abort_bulk, rc = 0);
1200
1201         timeout = (int) req->rq_deadline - cfs_time_current_sec();
1202         if (timeout < 0)
1203                 CERROR("Req deadline already passed %lu (now: %lu)\n",
1204                        req->rq_deadline, cfs_time_current_sec());
1205         *lwi = LWI_TIMEOUT(cfs_time_seconds(max(timeout, 1)), NULL, NULL);
1206         rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc), lwi);
1207         LASSERT (rc == 0 || rc == -ETIMEDOUT);
1208
1209         if (rc == 0) {
1210                 if (desc->bd_success &&
1211                     desc->bd_nob_transferred == rdpg->rp_count)
1212                         GOTO(free_desc, rc);
1213
1214                 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
1215         }
1216
1217         DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
1218                   (rc == -ETIMEDOUT) ? "timeout" : "network error",
1219                   desc->bd_nob_transferred, rdpg->rp_count,
1220                   req->rq_export->exp_client_uuid.uuid,
1221                   req->rq_export->exp_connection->c_remote_uuid.uuid);
1222
1223         class_fail_export(req->rq_export);
1224
1225         EXIT;
1226 abort_bulk:
1227         ptlrpc_abort_bulk(desc);
1228 free_desc:
1229         ptlrpc_free_bulk(desc);
1230         return rc;
1231 }
1232
1233 #ifdef HAVE_SPLIT_SUPPORT
1234 /*
1235  * Retrieve directory entries from the page and insert them into the slave
1236  * object. Strictly this belongs in the osd layer, but since it will not be
1237  * in the final product, just do it here rather than defining more moo APIs.
1238  */
1239 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1240                               int size)
1241 {
1242         struct mdt_object *object = info->mti_object;
1243         struct lu_fid *lf = &info->mti_tmp_fid2;
1244         struct md_attr *ma = &info->mti_attr;
1245         struct lu_dirpage *dp;
1246         struct lu_dirent *ent;
1247         int rc = 0, offset = 0;
1248         ENTRY;
1249
1250         /* Make sure we have at least one entry. */
1251         if (size == 0)
1252                 RETURN(-EINVAL);
1253
1254         /*
1255          * Disable transactions for these name inserts, since this
1256          * operation would otherwise include many transactions.
1257          */
1258         info->mti_no_need_trans = 1;
1259         /*
1260          * When writing a dir page there is no need to update the parent's
1261          * ctime, and no permission check is done for name_insert.
1262          */
1263         ma->ma_attr.la_ctime = 0;
1264         ma->ma_attr.la_valid = LA_MODE;
1265         ma->ma_valid = MA_INODE;
1266
1267         cfs_kmap(page);
1268         dp = page_address(page);
1269         offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
1270
1271         for (ent = lu_dirent_start(dp); ent != NULL;
1272              ent = lu_dirent_next(ent)) {
1273                 struct lu_name *lname;
1274                 char *name;
1275
1276                 if (le16_to_cpu(ent->lde_namelen) == 0)
1277                         continue;
1278
1279                 fid_le_to_cpu(lf, &ent->lde_fid);
1280                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1281                         ma->ma_attr.la_mode = S_IFDIR;
1282                 else
1283                         ma->ma_attr.la_mode = 0;
1284                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1285                 if (name == NULL)
1286                         GOTO(out, rc = -ENOMEM);
1287
1288                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1289                 lname = mdt_name(info->mti_env, name,
1290                                  le16_to_cpu(ent->lde_namelen));
1291                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1292                 rc = mdo_name_insert(info->mti_env,
1293                                      md_object_next(&object->mot_obj),
1294                                      lname, lf, ma);
1295                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1296                 if (rc) {
1297                         CERROR("Can't insert %*.*s, rc %d\n",
1298                                le16_to_cpu(ent->lde_namelen),
1299                                le16_to_cpu(ent->lde_namelen),
1300                                ent->lde_name, rc);
1301                         GOTO(out, rc);
1302                 }
1303
1304                 offset += lu_dirent_size(ent);
1305                 if (offset >= size)
1306                         break;
1307         }
1308         EXIT;
1309 out:
1310         cfs_kunmap(page);
1311         return rc;
1312 }
1313
1314 static int mdt_bulk_timeout(void *data)
1315 {
1316         ENTRY;
1317
1318         CERROR("mdt bulk transfer timeout\n");
1319
1320         RETURN(1);
1321 }
1322
1323 static int mdt_writepage(struct mdt_thread_info *info)
1324 {
1325         struct ptlrpc_request   *req = mdt_info_req(info);
1326         struct mdt_body         *reqbody;
1327         struct l_wait_info      *lwi;
1328         struct ptlrpc_bulk_desc *desc;
1329         struct page             *page;
1330         int                rc;
1331         ENTRY;
1332
1333
1334         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1335         if (reqbody == NULL)
1336                 RETURN(err_serious(-EFAULT));
1337
1338         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1339         if (desc == NULL)
1340                 RETURN(err_serious(-ENOMEM));
1341
1342         /* allocate the page for the desc */
1343         page = cfs_alloc_page(CFS_ALLOC_STD);
1344         if (page == NULL)
1345                 GOTO(desc_cleanup, rc = -ENOMEM);
1346
1347         CDEBUG(D_INFO, "Received page offset %d size %d \n",
1348                (int)reqbody->size, (int)reqbody->nlink);
1349
1350         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1351                               (int)reqbody->nlink);
1352
1353         rc = sptlrpc_svc_prep_bulk(req, desc);
1354         if (rc != 0)
1355                 GOTO(cleanup_page, rc);
1356         /*
1357          * Check if the client was evicted while we were doing I/O, before
1358          * touching the network.
1359          */
1360         OBD_ALLOC_PTR(lwi);
1361         if (!lwi)
1362                 GOTO(cleanup_page, rc = -ENOMEM);
1363
1364         if (desc->bd_export->exp_failed)
1365                 rc = -ENOTCONN;
1366         else
1367                 rc = ptlrpc_start_bulk_transfer (desc);
1368         if (rc == 0) {
1369                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ,
1370                                             mdt_bulk_timeout, desc);
1371                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1372                                   desc->bd_export->exp_failed, lwi);
1373                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1374                 if (rc == -ETIMEDOUT) {
1375                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1376                         ptlrpc_abort_bulk(desc);
1377                 } else if (desc->bd_export->exp_failed) {
1378                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1379                         rc = -ENOTCONN;
1380                         ptlrpc_abort_bulk(desc);
1381                 } else if (!desc->bd_success ||
1382                            desc->bd_nob_transferred != desc->bd_nob) {
1383                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1384                                   desc->bd_success ?
1385                                   "truncated" : "network error on",
1386                                   desc->bd_nob_transferred, desc->bd_nob);
1387                         /* XXX should this be a different errno? */
1388                         rc = -ETIMEDOUT;
1389                 }
1390         } else {
1391                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1392         }
1393         if (rc)
1394                 GOTO(cleanup_lwi, rc);
1395         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1396
1397 cleanup_lwi:
1398         OBD_FREE_PTR(lwi);
1399 cleanup_page:
1400         cfs_free_page(page);
1401 desc_cleanup:
1402         ptlrpc_free_bulk(desc);
1403         RETURN(rc);
1404 }
1405 #endif
1406
1407 static int mdt_readpage(struct mdt_thread_info *info)
1408 {
1409         struct mdt_object *object = info->mti_object;
1410         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1411         struct mdt_body   *reqbody;
1412         struct mdt_body   *repbody;
1413         int                rc;
1414         int                i;
1415         ENTRY;
1416
1417         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1418                 RETURN(err_serious(-ENOMEM));
1419
1420         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1421         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1422         if (reqbody == NULL || repbody == NULL)
1423                 RETURN(err_serious(-EFAULT));
1424
1425         /*
1426          * Prepare @rdpg before calling the lower layers and the transfer
1427          * itself. Here reqbody->size contains the offset at which to start
1428          * reading and reqbody->nlink contains the number of bytes to read.
1429          */
1430         rdpg->rp_hash = reqbody->size;
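        /* the check below guards against rp_hash being narrower than the
         * client-supplied 64-bit value and silently truncating it */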
1431         if (rdpg->rp_hash != reqbody->size) {
1432                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1433                        rdpg->rp_hash, reqbody->size);
1434                 RETURN(-EFAULT);
1435         }
1436
1437         rdpg->rp_attrs = reqbody->mode;
1438         rdpg->rp_count  = reqbody->nlink;
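        /* round the requested byte count up to a whole number of pages */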
1439         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT;
1440         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1441         if (rdpg->rp_pages == NULL)
1442                 RETURN(-ENOMEM);
1443
1444         for (i = 0; i < rdpg->rp_npages; ++i) {
1445                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1446                 if (rdpg->rp_pages[i] == NULL)
1447                         GOTO(free_rdpg, rc = -ENOMEM);
1448         }
1449
1450         /* call lower layers to fill allocated pages with directory data */
1451         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1452         if (rc)
1453                 GOTO(free_rdpg, rc);
1454
1455         /* send pages to client */
1456         rc = mdt_sendpage(info, rdpg);
1457
1458         EXIT;
1459 free_rdpg:
1460
1461         for (i = 0; i < rdpg->rp_npages; i++)
1462                 if (rdpg->rp_pages[i] != NULL)
1463                         cfs_free_page(rdpg->rp_pages[i]);
1464         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1465
1466         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1467                 RETURN(0);
1468
1469         return rc;
1470 }
1471
1472 static int mdt_reint_internal(struct mdt_thread_info *info,
1473                               struct mdt_lock_handle *lhc,
1474                               __u32 op)
1475 {
1476         struct req_capsule      *pill = info->mti_pill;
1477         struct mdt_device       *mdt = info->mti_mdt;
1478         struct mdt_body         *repbody;
1479         int                      rc = 0;
1480         ENTRY;
1481
1482         /* pack reply */
1483         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1484                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1485                                      mdt->mdt_max_mdsize);
1486         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1487                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1488                                      mdt->mdt_max_cookiesize);
1489
1490         rc = req_capsule_server_pack(pill);
1491         if (rc != 0) {
1492                 CERROR("Can't pack response, rc %d\n", rc);
1493                 RETURN(err_serious(rc));
1494         }
1495
1496         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1497                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1498                 LASSERT(repbody);
1499                 repbody->eadatasize = 0;
1500                 repbody->aclsize = 0;
1501         }
1502
1503         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
1504                 GOTO(out_shrink, rc = err_serious(-EFAULT));
1505
1506         rc = mdt_reint_unpack(info, op);
1507         if (rc != 0) {
1508                 CERROR("Can't unpack reint, rc %d\n", rc);
1509                 GOTO(out_shrink, rc = err_serious(rc));
1510         }
1511
1512         /* for replay no cookie / lmm is needed, because the client already has them */
1513         if (info->mti_spec.no_create == 1)  {
1514                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1515                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1516
1517                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1518                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1519                                              0);
1520         }
1521
1522         rc = mdt_init_ucred_reint(info);
1523         if (rc)
1524                 GOTO(out_shrink, rc);
1525
1526         rc = mdt_fix_attr_ucred(info, op);
1527         if (rc != 0)
1528                 GOTO(out_ucred, rc = err_serious(rc));
1529
1530         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1531                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1532                 GOTO(out_ucred, rc);
1533         }
1534         rc = mdt_reint_rec(info, lhc);
1535         EXIT;
1536 out_ucred:
1537         mdt_exit_ucred(info);
1538 out_shrink:
1539         mdt_shrink_reply(info);
1540         return rc;
1541 }
1542
1543 static long mdt_reint_opcode(struct mdt_thread_info *info,
1544                              const struct req_format **fmt)
1545 {
1546         struct mdt_rec_reint *rec;
1547         long opc;
1548
1549         opc = err_serious(-EFAULT);
1550         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1551         if (rec != NULL) {
1552                 opc = rec->rr_opcode;
1553                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1554                 if (opc < REINT_MAX && fmt[opc] != NULL)
1555                         req_capsule_extend(info->mti_pill, fmt[opc]);
1556                 else {
1557                         CERROR("Unsupported opc: %ld\n", opc);
1558                         opc = err_serious(opc);
1559                 }
1560         }
1561         return opc;
1562 }
1563
1564 static int mdt_reint(struct mdt_thread_info *info)
1565 {
1566         long opc;
1567         int  rc;
1568
1569         static const struct req_format *reint_fmts[REINT_MAX] = {
1570                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1571                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1572                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1573                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1574                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1575                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1576                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1577         };
1578
1579         ENTRY;
1580
1581         opc = mdt_reint_opcode(info, reint_fmts);
1582         if (opc >= 0) {
1583                 /*
1584                  * No lock can be passed in from the client here, so hand
1585                  * NULL to the reint code path.
1586                  */
1587                 rc = mdt_reint_internal(info, NULL, opc);
1588         } else {
1589                 rc = opc;
1590         }
1591
1592         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1593         RETURN(rc);
1594 }
1595
1596 /* this should sync the whole device */
1597 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1598 {
1599         struct dt_device *dt = mdt->mdt_bottom;
1600         int rc;
1601         ENTRY;
1602
1603         rc = dt->dd_ops->dt_sync(env, dt);
1604         RETURN(rc);
1605 }
1606
1607 /* this should sync this object */
1608 static int mdt_object_sync(struct mdt_thread_info *info)
1609 {
1610         struct md_object *next;
1611         int rc;
1612         ENTRY;
1613
1614         if (!mdt_object_exists(info->mti_object)) {
1615                 CWARN("Non-existent object "DFID"!\n",
1616                       PFID(mdt_object_fid(info->mti_object)));
1617                 RETURN(-ESTALE);
1618         }
1619         next = mdt_object_child(info->mti_object);
1620         rc = mo_object_sync(info->mti_env, next);
1621
1622         RETURN(rc);
1623 }
1624
1625 static int mdt_sync(struct mdt_thread_info *info)
1626 {
1627         struct req_capsule *pill = info->mti_pill;
1628         struct mdt_body *body;
1629         int rc;
1630         ENTRY;
1631
1632         /* The fid may be zero, so we call req_capsule_set() manually */
1633         req_capsule_set(pill, &RQF_MDS_SYNC);
1634
1635         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1636         if (body == NULL)
1637                 RETURN(err_serious(-EINVAL));
1638
1639         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1640                 RETURN(err_serious(-ENOMEM));
1641
1642         if (fid_seq(&body->fid1) == 0) {
1643                 /* sync the whole device */
1644                 rc = req_capsule_server_pack(pill);
1645                 if (rc == 0)
1646                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1647                 else
1648                         rc = err_serious(rc);
1649         } else {
1650                 /* sync an object */
1651                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1652                 if (rc == 0) {
1653                         rc = mdt_object_sync(info);
1654                         if (rc == 0) {
1655                                 struct md_object *next;
1656                                 const struct lu_fid *fid;
1657                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1658
1659                                 next = mdt_object_child(info->mti_object);
1660                                 info->mti_attr.ma_need = MA_INODE;
1661                                 info->mti_attr.ma_valid = 0;
1662                                 rc = mo_attr_get(info->mti_env, next,
1663                                                  &info->mti_attr);
1664                                 if (rc == 0) {
1665                                         body = req_capsule_server_get(pill,
1666                                                                 &RMF_MDT_BODY);
1667                                         fid = mdt_object_fid(info->mti_object);
1668                                         mdt_pack_attr2body(info, body, la, fid);
1669                                 }
1670                         }
1671                 } else
1672                         rc = err_serious(rc);
1673         }
1674         RETURN(rc);
1675 }
1676
1677 #ifdef HAVE_QUOTA_SUPPORT
1678 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
1679 {
1680         struct obd_quotactl *oqctl;
1681         struct req_capsule *pill = info->mti_pill;
1682         struct obd_export *exp = info->mti_exp;
1683         struct md_device *next = info->mti_mdt->mdt_child;
1684         int rc;
1685         ENTRY;
1686
1687         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_QUOTACHECK_NET))
1688                 RETURN(0);
1689
1690         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1691         if (oqctl == NULL)
1692                 RETURN(-EPROTO);
1693
1694         /* remote client has no permission for quotacheck */
1695         if (unlikely(exp_connect_rmtclient(exp)))
1696                 RETURN(-EPERM);
1697
1698         rc = req_capsule_server_pack(pill);
1699         if (rc)
1700                 RETURN(rc);
1701
1702         rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next, exp,
1703                                                oqctl->qc_type);
1704         RETURN(rc);
1705 }
1706
1707 static int mdt_quotactl_handle(struct mdt_thread_info *info)
1708 {
1709         struct obd_quotactl *oqctl, *repoqc;
1710         struct req_capsule *pill = info->mti_pill;
1711         struct obd_export *exp = info->mti_exp;
1712         struct md_device *next = info->mti_mdt->mdt_child;
1713         const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
1714         int id, rc;
1715         ENTRY;
1716
1717         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_QUOTACTL_NET))
1718                 RETURN(0);
1719
1720         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1721         if (oqctl == NULL)
1722                 RETURN(-EPROTO);
1723
1724         id = oqctl->qc_id;
1725         if (exp_connect_rmtclient(exp)) {
1726                 struct ptlrpc_request *req = mdt_info_req(info);
1727                 struct mdt_export_data *med = mdt_req2med(req);
1728                 struct lustre_idmap_table *idmap = med->med_idmap;
1729
1730                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1731                              oqctl->qc_cmd != Q_GETINFO))
1732                         RETURN(-EPERM);
1733
1734
1735                 if (oqctl->qc_type == USRQUOTA)
1736                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1737                                                      oqctl->qc_id);
1738                 else if (oqctl->qc_type == GRPQUOTA)
1739                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1740                                                      oqctl->qc_id);
1741                 else
1742                         RETURN(-EINVAL);
1743
1744                 if (id == CFS_IDMAP_NOTFOUND) {
1745                         CDEBUG(D_QUOTA, "no mapping for id %u\n",
1746                                oqctl->qc_id);
1747                         RETURN(-EACCES);
1748                 }
1749         }
1750
1751         rc = req_capsule_server_pack(pill);
1752         if (rc)
1753                 RETURN(rc);
1754
1755         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
1756         LASSERT(repoqc != NULL);
1757
1758         switch (oqctl->qc_cmd) {
1759         case Q_QUOTAON:
1760                 rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
1761                 break;
1762         case Q_QUOTAOFF:
1763                 rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type);
1764                 break;
1765         case Q_SETINFO:
1766                 rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
1767                                       &oqctl->qc_dqinfo);
1768                 break;
1769         case Q_GETINFO:
1770                 rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
1771                                       &oqctl->qc_dqinfo);
1772                 break;
1773         case Q_SETQUOTA:
1774                 rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
1775                                        &oqctl->qc_dqblk);
1776                 break;
1777         case Q_GETQUOTA:
1778                 rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
1779                                        &oqctl->qc_dqblk);
1780                 break;
1781         case Q_GETOINFO:
1782                 rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
1783                                        &oqctl->qc_dqinfo);
1784                 break;
1785         case Q_GETOQUOTA:
1786                 rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
1787                                         &oqctl->qc_dqblk);
1788                 break;
1789         case LUSTRE_Q_INVALIDATE:
1790                 rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
1791                 break;
1792         case LUSTRE_Q_FINVALIDATE:
1793                 rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
1794                 break;
1795         default:
1796                 CERROR("unsupported mdt_quotactl command: %d\n",
1797                        oqctl->qc_cmd);
1798                 RETURN(-EFAULT);
1799         }
1800
1801         *repoqc = *oqctl;
1802         RETURN(rc);
1803 }
1804 #endif
1805
1806
1807 /*
1808  * OBD PING and other handlers.
1809  */
1810 static int mdt_obd_ping(struct mdt_thread_info *info)
1811 {
1812         int rc;
1813         ENTRY;
1814
1815         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
1816
1817         rc = target_handle_ping(mdt_info_req(info));
1818         if (rc < 0)
1819                 rc = err_serious(rc);
1820         RETURN(rc);
1821 }
1822
1823 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
1824 {
1825         return err_serious(-EOPNOTSUPP);
1826 }
1827
1828 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
1829 {
1830         return err_serious(-EOPNOTSUPP);
1831 }
1832
1833
1834 /*
1835  * LLOG handlers.
1836  */
1837
1838 /** Clone llog ctxt from child (mdd).
1839  * This allows remote llog (replicator) access.
1840  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child,
1841  * where the context was originally set up, or we can handle them directly.
1842  * We choose the latter, but that means any llog contexts
1843  * set up by the child must be accessible by the mdt.  So we clone the
1844  * context into our context list here.
1845  */
1846 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
1847                                int idx)
1848 {
1849         struct md_device  *next = mdt->mdt_child;
1850         struct llog_ctxt *ctxt;
1851         int rc;
1852
1853         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
1854                 return 0;
1855
1856         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
1857         if (rc || ctxt == NULL) {
1858                 CERROR("Can't get mdd ctxt %d\n", rc);
1859                 return rc;
1860         }
1861
1862         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
1863         if (rc)
1864                 CERROR("Can't set mdt ctxt %d\n", rc);
1865
1866         return rc;
1867 }
1868
1869 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
1870                                  struct mdt_device *mdt, int idx)
1871 {
1872         struct llog_ctxt *ctxt;
1873
1874         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
1875         if (ctxt == NULL)
1876                 return 0;
1877         /* Put once for the get we just did, and once for the clone */
1878         llog_ctxt_put(ctxt);
1879         llog_ctxt_put(ctxt);
1880         return 0;
1881 }
1882
1883 static int mdt_llog_create(struct mdt_thread_info *info)
1884 {
1885         int rc;
1886
1887         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
1888         rc = llog_origin_handle_create(mdt_info_req(info));
1889         return (rc < 0 ? err_serious(rc) : rc);
1890 }
1891
1892 static int mdt_llog_destroy(struct mdt_thread_info *info)
1893 {
1894         int rc;
1895
1896         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
1897         rc = llog_origin_handle_destroy(mdt_info_req(info));
1898         return (rc < 0 ? err_serious(rc) : rc);
1899 }
1900
1901 static int mdt_llog_read_header(struct mdt_thread_info *info)
1902 {
1903         int rc;
1904
1905         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
1906         rc = llog_origin_handle_read_header(mdt_info_req(info));
1907         return (rc < 0 ? err_serious(rc) : rc);
1908 }
1909
1910 static int mdt_llog_next_block(struct mdt_thread_info *info)
1911 {
1912         int rc;
1913
1914         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
1915         rc = llog_origin_handle_next_block(mdt_info_req(info));
1916         return (rc < 0 ? err_serious(rc) : rc);
1917 }
1918
1919 static int mdt_llog_prev_block(struct mdt_thread_info *info)
1920 {
1921         int rc;
1922
1923         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
1924         rc = llog_origin_handle_prev_block(mdt_info_req(info));
1925         return (rc < 0 ? err_serious(rc) : rc);
1926 }
1927
1928
1929 /*
1930  * DLM handlers.
1931  */
1932 static struct ldlm_callback_suite cbs = {
1933         .lcs_completion = ldlm_server_completion_ast,
1934         .lcs_blocking   = ldlm_server_blocking_ast,
1935         .lcs_glimpse    = NULL
1936 };
1937
1938 static int mdt_enqueue(struct mdt_thread_info *info)
1939 {
1940         struct ptlrpc_request *req;
1941         __u64 req_bits;
1942         int rc;
1943
1944         /*
1945          * info->mti_dlm_req already contains swapped and (if necessary)
1946          * converted dlm request.
1947          */
1948         LASSERT(info->mti_dlm_req != NULL);
1949
1950         req = mdt_info_req(info);
1951
1952         /*
1953          * A lock without inodebits makes no sense and will oops later in
1954          * ldlm. Let's check it now to see if we got a wrong lock from the
1955          * client or the bits got corrupted somewhere in mdt_intent_policy().
1956          */
1957         req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1958         /* This is disabled because we need to support liblustre flock.
1959          * LASSERT(req_bits != 0);
1960          */
1961
1962         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
1963                                   req, info->mti_dlm_req, &cbs);
1964         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
1965         return rc ? err_serious(rc) : req->rq_status;
1966 }
1967
1968 static int mdt_convert(struct mdt_thread_info *info)
1969 {
1970         int rc;
1971         struct ptlrpc_request *req;
1972
1973         LASSERT(info->mti_dlm_req);
1974         req = mdt_info_req(info);
1975         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
1976         return rc ? err_serious(rc) : req->rq_status;
1977 }
1978
1979 static int mdt_bl_callback(struct mdt_thread_info *info)
1980 {
1981         CERROR("bl callbacks should not happen on MDS\n");
1982         LBUG();
1983         return err_serious(-EOPNOTSUPP);
1984 }
1985
1986 static int mdt_cp_callback(struct mdt_thread_info *info)
1987 {
1988         CERROR("cp callbacks should not happen on MDS\n");
1989         LBUG();
1990         return err_serious(-EOPNOTSUPP);
1991 }
1992
1993 /*
1994  * sec context handlers
1995  */
1996 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
1997 {
1998         int rc;
1999
2000         rc = mdt_handle_idmap(info);
2001
2002         if (unlikely(rc)) {
2003                 struct ptlrpc_request *req = mdt_info_req(info);
2004                 __u32                  opc;
2005
2006                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2007                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2008                         sptlrpc_svc_ctx_invalidate(req);
2009         }
2010
2011         OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);
2012
2013         return rc;
2014 }
2015
2016 static struct mdt_object *mdt_obj(struct lu_object *o)
2017 {
2018         LASSERT(lu_device_is_mdt(o->lo_dev));
2019         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2020 }
2021
2022 struct mdt_object *mdt_object_find(const struct lu_env *env,
2023                                    struct mdt_device *d,
2024                                    const struct lu_fid *f)
2025 {
2026         struct lu_object *o;
2027         struct mdt_object *m;
2028         ENTRY;
2029
2030         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2031         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
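        /* lu_object_find() returns ERR_PTR() on failure; pass it through */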
2032         if (unlikely(IS_ERR(o)))
2033                 m = (struct mdt_object *)o;
2034         else
2035                 m = mdt_obj(o);
2036         RETURN(m);
2037 }
2038
2039 /**
2040  * Asynchronous commit for mdt device.
2041  *
2042  * Pass the asynchronous commit call down the MDS stack.
2043  *
2044  * \param env environment
2045  * \param mdt the mdt device
2046  */
2047 static void mdt_device_commit_async(const struct lu_env *env,
2048                                     struct mdt_device *mdt)
2049 {
2050         struct dt_device *dt = mdt->mdt_bottom;
2051         int rc;
2052
2053         rc = dt->dd_ops->dt_commit_async(env, dt);
2054         if (unlikely(rc != 0))
2055                 CWARN("async commit start failed with rc = %d\n", rc);
2056 }
2057
2058 /**
2059  * Mark the lock as "synchronous".
2060  *
2061  * Mark the lock to defer the transaction commit to unlock time.
2062  *
2063  * \param lock the lock to mark as "synchronous"
2064  *
2065  * \see mdt_is_lock_sync
2066  * \see mdt_save_lock
2067  */
2068 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2069 {
2070         lock->l_ast_data = (void*)1;
2071 }
2072
2073 /**
2074  * Check whether the lock is "synchronous" or not.
2075  *
2076  * \param lock the lock to check
2077  * \retval 1 the lock is "synchronous"
2078  * \retval 0 the lock isn't "synchronous"
2079  *
2080  * \see mdt_set_lock_sync
2081  * \see mdt_save_lock
2082  */
2083 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2084 {
2085         return lock->l_ast_data != NULL;
2086 }
2087
2088 /**
2089  * Blocking AST for mdt locks.
2090  *
2091  * Starts a transaction commit in case of a COS lock conflict, or
2092  * defers such a commit to mdt_save_lock().
2093  *
2094  * \param lock the lock which blocks a request or is being cancelled
2095  * \param desc unused
2096  * \param data unused
2097  * \param flag indicates whether this is a cancelling or blocking callback
2098  * \retval 0
2099  * \see ldlm_blocking_ast_nocheck
2100  */
2101 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2102                      void *data, int flag)
2103 {
2104         struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
2105         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2106         int rc;
2107         ENTRY;
2108
2109         if (flag == LDLM_CB_CANCELING)
2110                 RETURN(0);
2111         lock_res_and_lock(lock);
2112         if (lock->l_blocking_ast != mdt_blocking_ast) {
2113                 unlock_res_and_lock(lock);
2114                 RETURN(0);
2115         }
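        /* Under commit-on-share, a PW/EX lock conflicting with a lock from a
         * different client (different client cookie) is marked "synchronous"
         * so the transaction commit is deferred to unlock time. */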
2116         if (mdt_cos_is_enabled(mdt) &&
2117             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2118             lock->l_blocking_lock != NULL &&
2119             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2120                 mdt_set_lock_sync(lock);
2121         }
2122         rc = ldlm_blocking_ast_nocheck(lock);
2123
2124         /* There is no lock conflict if l_blocking_lock == NULL; it
2125          * indicates a blocking ast sent from ldlm_lock_decref_internal
2126          * when the last reference to a local lock was released. */
2127         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2128                 struct lu_env env;
2129
2130                 rc = lu_env_init(&env, LCT_MD_THREAD);
2131                 if (unlikely(rc != 0))
2132                         CWARN("lu_env initialization failed with rc = %d, "
2133                               "cannot start asynchronous commit\n", rc);
2134                 else
2135                         mdt_device_commit_async(&env, mdt);
2136                 lu_env_fini(&env);
2137         }
2138         RETURN(rc);
2139 }
2140
2141 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2142                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2143 {
2144         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2145         ldlm_policy_data_t *policy = &info->mti_policy;
2146         struct ldlm_res_id *res_id = &info->mti_res_id;
2147         int rc;
2148         ENTRY;
2149
2150         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2151         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2152         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2153         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2154
2155         if (mdt_object_exists(o) < 0) {
2156                 if (locality == MDT_CROSS_LOCK) {
2157                         /* cross-ref object fix */
2158                         ibits &= ~MDS_INODELOCK_UPDATE;
2159                         ibits |= MDS_INODELOCK_LOOKUP;
2160                 } else {
2161                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2162                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2163                 }
2164                 /* No PDO lock on remote object */
2165                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2166         }
2167
2168         if (lh->mlh_type == MDT_PDO_LOCK) {
2169                 /* check for existence after the object is locked */
2170                 if (mdt_object_exists(o) == 0) {
2171                         /* Non-existent object shouldn't have PDO lock */
2172                         RETURN(-ESTALE);
2173                 } else {
2174                         /* Non-dir object shouldn't have PDO lock */
2175                         LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
2176                 }
2177         }
2178
2179         memset(policy, 0, sizeof(*policy));
2180         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2181
2182         /*
2183          * Take PDO lock on whole directory and build correct @res_id for lock
2184          * on part of directory.
2185          */
2186         if (lh->mlh_pdo_hash != 0) {
2187                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2188                 mdt_lock_pdo_mode(info, o, lh);
2189                 if (lh->mlh_pdo_mode != LCK_NL) {
2190                         /*
2191                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2192                          * is never going to be sent to client and we do not
2193                          * want it slowed down due to possible cancels.
2194                          */
2195                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2196                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2197                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2198                                           &info->mti_exp->exp_handle.h_cookie);
2199                         if (unlikely(rc))
2200                                 RETURN(rc);
2201                 }
2202
2203                 /*
2204                  * Finish initializing res_id with the name hash, marking the
2205                  * part of the directory which is being modified.
2206                  */
2207                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2208         }
2209
2210         policy->l_inodebits.bits = ibits;
2211
2212         /*
2213          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2214          * going to be sent to the client. If it is, the mdt_intent_policy()
2215          * path will fix it up and turn the FL_LOCAL flag off.
2216          */
2217         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2218                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2219                           &info->mti_exp->exp_handle.h_cookie);
2220         if (rc)
2221                 GOTO(out, rc);
2222
2223 out:
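        /* on error, release whatever was taken above (PDO and/or regular lock) */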
2224         if (rc)
2225                 mdt_object_unlock(info, o, lh, 1);
2226
2227
2228         RETURN(rc);
2229 }
2230
2231 /**
2232  * Save a lock within the request object.
2233  *
2234  * Depending on the input parameters, either keep the lock referenced
2235  * until the client ACK or the transaction commit happens, or release
2236  * the lock immediately. If COS is ON, a write lock is converted to a
2237  * COS lock before saving.
2238  *
2239  * \param info thread info object
2240  * \param h lock handle
2241  * \param mode lock mode
2242  * \param decref force immediate lock releasing
2243  */
2244 static
2245 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2246                    ldlm_mode_t mode, int decref)
2247 {
2248         ENTRY;
2249
2250         if (lustre_handle_is_used(h)) {
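                /* drop the lock right away if the caller forces it, if no
                 * transaction was started, or if it is not a PW/EX lock that
                 * has to be kept until commit or client ACK */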
2251                 if (decref || !info->mti_has_trans ||
2252                     !(mode & (LCK_PW | LCK_EX))){
2253                         mdt_fid_unlock(h, mode);
2254                 } else {
2255                         struct mdt_device *mdt = info->mti_mdt;
2256                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2257                         struct ptlrpc_request *req = mdt_info_req(info);
2258                         int no_ack = 0;
2259
2260                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2261                                  h->cookie);
2262                         CDEBUG(D_HA, "request = %p reply state = %p"
2263                                " transno = "LPD64"\n",
2264                                req, req->rq_reply_state, req->rq_transno);
2265                         if (mdt_cos_is_enabled(mdt)) {
2266                                 no_ack = 1;
2267                                 ldlm_lock_downgrade(lock, LCK_COS);
2268                                 mode = LCK_COS;
2269                         }
2270                         ptlrpc_save_lock(req, h, mode, no_ack);
2271                         if (mdt_is_lock_sync(lock)) {
2272                                 CDEBUG(D_HA, "found sync-lock,"
2273                                        " async commit started\n");
2274                                 mdt_device_commit_async(info->mti_env,
2275                                                         mdt);
2276                         }
2277                         LDLM_LOCK_PUT(lock);
2278                 }
2279                 h->cookie = 0ull;
2280         }
2281
2282         EXIT;
2283 }
2284
2285 /**
2286  * Unlock mdt object.
2287  *
2288  * Immediately release the regular lock and the PDO lock, or save the
2289  * locks in the request and keep them referenced until client ACK or
2290  * transaction commit.
2291  *
2292  * \param info thread info object
2293  * \param o mdt object
2294  * \param lh mdt lock handle referencing regular and PDO locks
2295  * \param decref force immediate lock releasing
2296  */
2297 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2298                        struct mdt_lock_handle *lh, int decref)
2299 {
2300         ENTRY;
2301
2302         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2303         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2304
2305         EXIT;
2306 }
2307
2308 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2309                                         const struct lu_fid *f,
2310                                         struct mdt_lock_handle *lh,
2311                                         __u64 ibits)
2312 {
2313         struct mdt_object *o;
2314
2315         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2316         if (!IS_ERR(o)) {
2317                 int rc;
2318
2319                 rc = mdt_object_lock(info, o, lh, ibits,
2320                                      MDT_LOCAL_LOCK);
2321                 if (rc != 0) {
2322                         mdt_object_put(info->mti_env, o);
2323                         o = ERR_PTR(rc);
2324                 }
2325         }
2326         return o;
2327 }
2328
2329 void mdt_object_unlock_put(struct mdt_thread_info * info,
2330                            struct mdt_object * o,
2331                            struct mdt_lock_handle *lh,
2332                            int decref)
2333 {
2334         mdt_object_unlock(info, o, lh, decref);
2335         mdt_object_put(info->mti_env, o);
2336 }
2337
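/*
 * Minimal usage sketch: a caller typically pairs mdt_object_find_lock()
 * with mdt_object_unlock_put(), assuming the mdt_lock_reg_init() helper
 * and the MDT_LH_PARENT handle slot declared in mdt_internal.h:
 *
 *      struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_PARENT];
 *      struct mdt_object *o;
 *
 *      mdt_lock_reg_init(lhc, LCK_PR);
 *      o = mdt_object_find_lock(info, fid, lhc, MDS_INODELOCK_LOOKUP);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      ...operate on the locked object...
 *      mdt_object_unlock_put(info, o, lhc, rc);
 */
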
2338 static struct mdt_handler *mdt_handler_find(__u32 opc,
2339                                             struct mdt_opc_slice *supported)
2340 {
2341         struct mdt_opc_slice *s;
2342         struct mdt_handler   *h;
2343
2344         h = NULL;
2345         for (s = supported; s->mos_hs != NULL; s++) {
2346                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2347                         h = s->mos_hs + (opc - s->mos_opc_start);
2348                         if (likely(h->mh_opc != 0))
2349                                 LASSERTF(h->mh_opc == opc,
2350                                          "opcode mismatch %d != %d\n",
2351                                          h->mh_opc, opc);
2352                         else
2353                                 h = NULL; /* unsupported opc */
2354                         break;
2355                 }
2356         }
2357         return h;
2358 }
2359
2360 static int mdt_lock_resname_compat(struct mdt_device *m,
2361                                    struct ldlm_request *req)
2362 {
2363         /* XXX something... later. */
2364         return 0;
2365 }
2366
2367 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2368 {
2369         /* XXX something... later. */
2370         return 0;
2371 }
2372
2373 /*
2374  * Generic code handling requests that have struct mdt_body passed in:
2375  *
2376  *  - extract mdt_body from request and save it in @info, if present;
2377  *
2378  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2379  *  @info;
2380  *
2381  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2382  *  the object actually exists on storage (lu_object_exists()).
2383  *
2384  */
2385 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2386 {
2387         const struct mdt_body    *body;
2388         struct mdt_object        *obj;
2389         const struct lu_env      *env;
2390         struct req_capsule       *pill;
2391         int                       rc;
2392         ENTRY;
2393
2394         env = info->mti_env;
2395         pill = info->mti_pill;
2396
2397         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2398         if (body == NULL)
2399                 RETURN(-EFAULT);
2400
2401         if (!(body->valid & OBD_MD_FLID))
2402                 RETURN(0);
2403
2404         if (!fid_is_sane(&body->fid1)) {
2405                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2406                 RETURN(-EINVAL);
2407         }
2408
2409         /*
2410          * Do not get the size or any capa fields before we check that the
2411          * request actually contains a capa. There are some requests which
2412          * do not, for instance MDS_IS_SUBDIR.
2413          */
2414         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2415             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2416                 mdt_set_capainfo(info, 0, &body->fid1,
2417                                  req_capsule_client_get(pill, &RMF_CAPA1));
2418
2419         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2420         if (!IS_ERR(obj)) {
2421                 if ((flags & HABEO_CORPUS) &&
2422                     !mdt_object_exists(obj)) {
2423                         mdt_object_put(env, obj);
2424                         /* for capability renew ENOENT will be handled in
2425                          * mdt_renew_capa */
2426                         if (body->valid & OBD_MD_FLOSSCAPA)
2427                                 rc = 0;
2428                         else
2429                                 rc = -ENOENT;
2430                 } else {
2431                         info->mti_object = obj;
2432                         rc = 0;
2433                 }
2434         } else
2435                 rc = PTR_ERR(obj);
2436
2437         RETURN(rc);
2438 }
2439
2440 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2441 {
2442         struct req_capsule *pill = info->mti_pill;
2443         int rc;
2444         ENTRY;
2445
2446         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2447                 rc = mdt_body_unpack(info, flags);
2448         else
2449                 rc = 0;
2450
2451         if (rc == 0 && (flags & HABEO_REFERO)) {
2452                 struct mdt_device *mdt = info->mti_mdt;
2453
2454                 /* Pack reply. */
2455
2456                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2457                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2458                                              mdt->mdt_max_mdsize);
2459                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2460                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
2461                                              mdt->mdt_max_cookiesize);
2462
2463                 rc = req_capsule_server_pack(pill);
2464         }
2465         RETURN(rc);
2466 }
2467
2468 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2469 {
2470         struct md_device *next = m->mdt_child;
2471
2472         return next->md_ops->mdo_init_capa_ctxt(env, next,
2473                                                 m->mdt_opts.mo_mds_capa,
2474                                                 m->mdt_capa_timeout,
2475                                                 m->mdt_capa_alg,
2476                                                 m->mdt_capa_keys);
2477 }
2478
2479 /*
2480  * Invoke handler for this request opc. Also do necessary preprocessing
2481  * (according to handler ->mh_flags), and post-processing (setting of
2482  * ->last_{xid,committed}).
2483  */
2484 static int mdt_req_handle(struct mdt_thread_info *info,
2485                           struct mdt_handler *h, struct ptlrpc_request *req)
2486 {
2487         int   rc, serious = 0;
2488         __u32 flags;
2489
2490         ENTRY;
2491
2492         LASSERT(h->mh_act != NULL);
2493         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2494         LASSERT(current->journal_info == NULL);
2495
2496         /*
2497          * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2498          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2499          * etc., without talking to the mdt authors first. Checking the same
2500          * thing there again is useless, and returning a 0 error without packing
2501          * a reply is buggy! Handlers either pack a reply or return an error.
2502          *
2503          * We return 0 here and do not send any reply in order to emulate a
2504          * network failure. Do not send any reply in case any of the NET
2505          * related fail_ids has occurred.
2506          */
2507         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2508                 RETURN(0);
2509
2510         rc = 0;
2511         flags = h->mh_flags;
2512         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2513
2514         if (h->mh_fmt != NULL) {
2515                 req_capsule_set(info->mti_pill, h->mh_fmt);
2516                 rc = mdt_unpack_req_pack_rep(info, flags);
2517         }
2518
2519         if (rc == 0 && flags & MUTABOR &&
2520             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2521                 /* should it be rq_status? */
2522                 rc = -EROFS;
2523
2524         if (rc == 0 && flags & HABEO_CLAVIS) {
2525                 struct ldlm_request *dlm_req;
2526
2527                 LASSERT(h->mh_fmt != NULL);
2528
2529                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2530                 if (dlm_req != NULL) {
2531                         if (info->mti_mdt->mdt_opts.mo_compat_resname)
2532                                 rc = mdt_lock_resname_compat(info->mti_mdt,
2533                                                              dlm_req);
2534                         info->mti_dlm_req = dlm_req;
2535                 } else {
2536                         rc = -EFAULT;
2537                 }
2538         }
2539
2540         /* capability setting changed via /proc, need to reinitialize the ctxt */
2541         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2542                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2543                 info->mti_mdt->mdt_capa_conf = 0;
2544         }
2545
2546         if (likely(rc == 0)) {
2547                 /*
2548                  * Process request, there can be two types of rc:
2549                  * 1) errors with msg unpack/pack, other failures outside the
2550                  * operation itself. This is counted as serious errors;
2551                  * 2) errors during fs operation, should be placed in rq_status
2552                  * only
2553                  */
2554                 rc = h->mh_act(info);
2555                 if (rc == 0 &&
2556                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2557                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2558                                   "pack reply and returned 0 error\n",
2559                                   h->mh_name);
2560                         LBUG();
2561                 }
2562                 serious = is_serious(rc);
2563                 rc = clear_serious(rc);
2564         } else
2565                 serious = 1;
2566
2567         req->rq_status = rc;
2568
2569         /*
2570          * ELDLM_* codes which > 0 should be in rq_status only as well as
2571          * all non-serious errors.
2572          */
2573         if (rc > 0 || !serious)
2574                 rc = 0;
2575
2576         LASSERT(current->journal_info == NULL);
2577
2578         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2579             info->mti_mdt->mdt_opts.mo_compat_resname) {
2580                 struct ldlm_reply *dlmrep;
2581
2582                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2583                 if (dlmrep != NULL)
2584                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2585         }
2586
2587         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2588         if (likely(rc == 0 && h->mh_opc != MDS_DISCONNECT))
2589                 target_committed_to_req(req);
2590
2591         if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
2592                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2593                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2594                 LBUG();
2595         }
2596
2597         target_send_reply(req, rc, info->mti_fail_id);
2598         RETURN(0);
2599 }
2600
2601 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2602 {
2603         lh->mlh_type = MDT_NUL_LOCK;
2604         lh->mlh_reg_lh.cookie = 0ull;
2605         lh->mlh_reg_mode = LCK_MINMODE;
2606         lh->mlh_pdo_lh.cookie = 0ull;
2607         lh->mlh_pdo_mode = LCK_MINMODE;
2608 }
2609
2610 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2611 {
2612         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2613         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2614 }
2615
2616 /*
2617  * Initialize fields of struct mdt_thread_info. Other fields are left in
2618  * uninitialized state, because it's too expensive to zero out whole
2619  * mdt_thread_info (> 1K) on each request arrival.
2620  */
2621 static void mdt_thread_info_init(struct ptlrpc_request *req,
2622                                  struct mdt_thread_info *info)
2623 {
2624         int i;
2625         struct md_capainfo *ci;
2626
2627         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2628         info->mti_pill = &req->rq_pill;
2629
2630         /* lock handle */
2631         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2632                 mdt_lock_handle_init(&info->mti_lh[i]);
2633
2634         /* mdt device: it can be NULL during CONNECT */
2635         if (req->rq_export) {
2636                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2637                 info->mti_exp = req->rq_export;
2638         } else
2639                 info->mti_mdt = NULL;
2640         info->mti_env = req->rq_svc_thread->t_env;
2641         ci = md_capainfo(info->mti_env);
2642         memset(ci, 0, sizeof *ci);
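        /* select the capability id mode from the export: remote clients need
         * id conversion, clients with OBD_CONNECT_MDS_CAPA use plain ids,
         * everyone else none */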
2643         if (req->rq_export) {
2644                 if (exp_connect_rmtclient(req->rq_export))
2645                         ci->mc_auth = LC_ID_CONVERT;
2646                 else if (req->rq_export->exp_connect_flags &
2647                          OBD_CONNECT_MDS_CAPA)
2648                         ci->mc_auth = LC_ID_PLAIN;
2649                 else
2650                         ci->mc_auth = LC_ID_NONE;
2651         }
2652
2653         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
2654         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
2655
2656         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
2657         info->mti_body = NULL;
2658         info->mti_object = NULL;
2659         info->mti_dlm_req = NULL;
2660         info->mti_has_trans = 0;
2661         info->mti_no_need_trans = 0;
2662         info->mti_cross_ref = 0;
2663         info->mti_opdata = 0;
2664
2665         /* Do not check for split by default. */
2666         info->mti_spec.sp_ck_split = 0;
2667         info->mti_spec.no_create = 0;
2668 }
2669
2670 static void mdt_thread_info_fini(struct mdt_thread_info *info)
2671 {
2672         int i;
2673
2674         req_capsule_fini(info->mti_pill);
2675         if (info->mti_object != NULL) {
2676                 mdt_object_put(info->mti_env, info->mti_object);
2677                 info->mti_object = NULL;
2678         }
2679         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2680                 mdt_lock_handle_fini(&info->mti_lh[i]);
2681         info->mti_env = NULL;
2682 }
2683
2684 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
2685                                        struct obd_device *obd, int *process)
2686 {
2687         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2688         case MDS_CONNECT: /* This will never get here, but for completeness. */
2689         case OST_CONNECT: /* This will never get here, but for completeness. */
2690         case MDS_DISCONNECT:
2691         case OST_DISCONNECT:
2692                *process = 1;
2693                RETURN(0);
2694
2695         case MDS_CLOSE:
2696         case MDS_DONE_WRITING:
2697         case MDS_SYNC: /* used in unmounting */
2698         case OBD_PING:
2699         case MDS_REINT:
2700         case SEQ_QUERY:
2701         case FLD_QUERY:
2702         case LDLM_ENQUEUE:
2703                 *process = target_queue_recovery_request(req, obd);
2704                 RETURN(0);
2705
2706         default:
2707                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
2708                 *process = -EAGAIN;
2709                 RETURN(0);
2710         }
2711 }
2712
2713 /*
2714  * Handle recovery. Return:
2715  *        +1: continue request processing;
2716  *       -ve: abort immediately with the given error code;
2717  *         0: send reply with error code in req->rq_status;
2718  */
2719 static int mdt_recovery(struct mdt_thread_info *info)
2720 {
2721         struct ptlrpc_request *req = mdt_info_req(info);
2722         int recovering;
2723         struct obd_device *obd;
2724
2725         ENTRY;
2726
2727         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2728         case MDS_CONNECT:
2729         case SEC_CTX_INIT:
2730         case SEC_CTX_INIT_CONT:
2731         case SEC_CTX_FINI:
2732                 {
2733 #if 0
2734                         int rc;
2735
2736                         rc = mdt_handle_idmap(info);
2737                         if (rc)
2738                                 RETURN(rc);
2739                         else
2740 #endif
2741                                 RETURN(+1);
2742                 }
2743         }
2744
2745         if (unlikely(req->rq_export == NULL)) {
2746                 CERROR("operation %d on unconnected MDS from %s\n",
2747                        lustre_msg_get_opc(req->rq_reqmsg),
2748                        libcfs_id2str(req->rq_peer));
2749                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
2750                  * mds_A will get -ENOTCONN (especially ping requests),
2751                  * which makes mds_A hit its deactivate timeout; then, when
2752                  * mds_A cleans up, the cleanup process is suspended because
2753                  * the deactivate timeout is not zero.
2754                  */
2755                 req->rq_status = -ENOTCONN;
2756                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
2757                 RETURN(0);
2758         }
2759
2760         /* sanity check: if the xid matches, the request must be marked as
2761          * resent or replayed */
2762         if (req_xid_is_last(req)) {
2763                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
2764                       (MSG_RESENT | MSG_REPLAY))) {
2765                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
2766                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
2767                                   lustre_msg_get_flags(req->rq_reqmsg));
2768                         LBUG();
2769                         req->rq_status = -ENOTCONN;
2770                         RETURN(-ENOTCONN);
2771                 }
2772         }
2773
2774         /* else: note the opposite is not always true; a RESENT req after a
2775          * failover will usually not match the last_xid, since it was likely
2776          * never committed. A REPLAYed request will almost never match the
2777          * last xid, however it could for a committed, but still retained,
2778          * open. */
2779
2780         obd = req->rq_export->exp_obd;
2781
2782         /* Check for aborted recovery... */
2783         spin_lock_bh(&obd->obd_processing_task_lock);
2784         recovering = obd->obd_recovering;
2785         spin_unlock_bh(&obd->obd_processing_task_lock);
2786         if (unlikely(recovering)) {
2787                 int rc;
2788                 int should_process;
2789                 DEBUG_REQ(D_INFO, req, "Got new replay");
2790                 rc = mdt_filter_recovery_request(req, obd, &should_process);
2791                 if (rc != 0 || !should_process)
2792                         RETURN(rc);
2793                 else if (should_process < 0) {
2794                         req->rq_status = should_process;
2795                         rc = ptlrpc_error(req);
2796                         RETURN(rc);
2797                 }
2798         }
2799         RETURN(+1);
2800 }
2801
2802 static int mdt_msg_check_version(struct lustre_msg *msg)
2803 {
2804         int rc;
2805
2806         switch (lustre_msg_get_opc(msg)) {
2807         case MDS_CONNECT:
2808         case MDS_DISCONNECT:
2809         case OBD_PING:
2810         case SEC_CTX_INIT:
2811         case SEC_CTX_INIT_CONT:
2812         case SEC_CTX_FINI:
2813                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2814                 if (rc)
2815                         CERROR("bad opc %u version %08x, expecting %08x\n",
2816                                lustre_msg_get_opc(msg),
2817                                lustre_msg_get_version(msg),
2818                                LUSTRE_OBD_VERSION);
2819                 break;
2820         case MDS_GETSTATUS:
2821         case MDS_GETATTR:
2822         case MDS_GETATTR_NAME:
2823         case MDS_STATFS:
2824         case MDS_READPAGE:
2825         case MDS_WRITEPAGE:
2826         case MDS_IS_SUBDIR:
2827         case MDS_REINT:
2828         case MDS_CLOSE:
2829         case MDS_DONE_WRITING:
2830         case MDS_PIN:
2831         case MDS_SYNC:
2832         case MDS_GETXATTR:
2833         case MDS_SETXATTR:
2834         case MDS_SET_INFO:
2835         case MDS_QUOTACHECK:
2836         case MDS_QUOTACTL:
2837         case QUOTA_DQACQ:
2838         case QUOTA_DQREL:
2839         case SEQ_QUERY:
2840         case FLD_QUERY:
2841                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2842                 if (rc)
2843                         CERROR("bad opc %u version %08x, expecting %08x\n",
2844                                lustre_msg_get_opc(msg),
2845                                lustre_msg_get_version(msg),
2846                                LUSTRE_MDS_VERSION);
2847                 break;
2848         case LDLM_ENQUEUE:
2849         case LDLM_CONVERT:
2850         case LDLM_BL_CALLBACK:
2851         case LDLM_CP_CALLBACK:
2852                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2853                 if (rc)
2854                         CERROR("bad opc %u version %08x, expecting %08x\n",
2855                                lustre_msg_get_opc(msg),
2856                                lustre_msg_get_version(msg),
2857                                LUSTRE_DLM_VERSION);
2858                 break;
2859         case OBD_LOG_CANCEL:
2860         case LLOG_ORIGIN_HANDLE_CREATE:
2861         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2862         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2863         case LLOG_ORIGIN_HANDLE_CLOSE:
2864         case LLOG_ORIGIN_HANDLE_DESTROY:
2865         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2866         case LLOG_CATINFO:
2867                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2868                 if (rc)
2869                         CERROR("bad opc %u version %08x, expecting %08x\n",
2870                                lustre_msg_get_opc(msg),
2871                                lustre_msg_get_version(msg),
2872                                LUSTRE_LOG_VERSION);
2873                 break;
2874         default:
2875                 CERROR("MDS unknown opcode %u\n", lustre_msg_get_opc(msg));
2876                 rc = -ENOTSUPP;
2877         }
2878         return rc;
2879 }
2880
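/*
 * Dispatch a single request: check the message version, let the recovery
 * logic decide whether the request can be processed now, flag bulk
 * read/write requests, then find and run the matching handler from the
 * supported slice.
 */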
2881 static int mdt_handle0(struct ptlrpc_request *req,
2882                        struct mdt_thread_info *info,
2883                        struct mdt_opc_slice *supported)
2884 {
2885         struct mdt_handler *h;
2886         struct lustre_msg  *msg;
2887         int                 rc;
2888
2889         ENTRY;
2890
2891         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
2892                 RETURN(0);
2893
2894         LASSERT(current->journal_info == NULL);
2895
2896         msg = req->rq_reqmsg;
2897         rc = mdt_msg_check_version(msg);
2898         if (likely(rc == 0)) {
2899                 rc = mdt_recovery(info);
2900                 if (likely(rc == +1)) {
2901                         switch (lustre_msg_get_opc(msg)) {
2902                         case MDS_READPAGE:
2903                                 req->rq_bulk_read = 1;
2904                                 break;
2905                         case MDS_WRITEPAGE:
2906                                 req->rq_bulk_write = 1;
2907                                 break;
2908                         }
2909
2910                         h = mdt_handler_find(lustre_msg_get_opc(msg),
2911                                              supported);
2912                         if (likely(h != NULL)) {
2913                                 rc = mdt_req_handle(info, h, req);
2914                         } else {
2915                                 CERROR("Unsupported opc: 0x%x\n",
2916                                        lustre_msg_get_opc(msg));
2917                                 req->rq_status = -ENOTSUPP;
2918                                 rc = ptlrpc_error(req);
2919                                 RETURN(rc);
2920                         }
2921                 }
2922         } else
2923                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
2924         RETURN(rc);
2925 }
2926
2927 /*
2928  * MDT handler function called by ptlrpc service thread when request comes.
2929  *
2930  * XXX common "target" functionality should be factored into separate module
2931  * shared by mdt, ost and stand-alone services like fld.
2932  */
2933 static int mdt_handle_common(struct ptlrpc_request *req,
2934                              struct mdt_opc_slice *supported)
2935 {
2936         struct lu_env          *env;
2937         struct mdt_thread_info *info;
2938         int                     rc;
2939         ENTRY;
2940
2941         env = req->rq_svc_thread->t_env;
2942         LASSERT(env != NULL);
2943         LASSERT(env->le_ses != NULL);
2944         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
2945         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
2946         LASSERT(info != NULL);
2947
2948         mdt_thread_info_init(req, info);
2949
2950         rc = mdt_handle0(req, info, supported);
2951
2952         mdt_thread_info_fini(info);
2953         RETURN(rc);
2954 }
2955
2956 /*
2957  * This is called from recovery code as handler of _all_ RPC types, FLD and SEQ
2958  * as well.
2959  */
2960 int mdt_recovery_handle(struct ptlrpc_request *req)
2961 {
2962         int rc;
2963         ENTRY;
2964
2965         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2966         case FLD_QUERY:
2967                 rc = mdt_handle_common(req, mdt_fld_handlers);
2968                 break;
2969         case SEQ_QUERY:
2970                 rc = mdt_handle_common(req, mdt_seq_handlers);
2971                 break;
2972         default:
2973                 rc = mdt_handle_common(req, mdt_regular_handlers);
2974                 break;
2975         }
2976
2977         RETURN(rc);
2978 }
2979
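/*
 * Thin request-handler wrappers; each one only selects the handler slice
 * that mdt_handle_common() dispatches against.
 */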
2980 static int mdt_regular_handle(struct ptlrpc_request *req)
2981 {
2982         return mdt_handle_common(req, mdt_regular_handlers);
2983 }
2984
2985 static int mdt_readpage_handle(struct ptlrpc_request *req)
2986 {
2987         return mdt_handle_common(req, mdt_readpage_handlers);
2988 }
2989
2990 static int mdt_xmds_handle(struct ptlrpc_request *req)
2991 {
2992         return mdt_handle_common(req, mdt_xmds_handlers);
2993 }
2994
2995 static int mdt_mdsc_handle(struct ptlrpc_request *req)
2996 {
2997         return mdt_handle_common(req, mdt_seq_handlers);
2998 }
2999
3000 static int mdt_mdss_handle(struct ptlrpc_request *req)
3001 {
3002         return mdt_handle_common(req, mdt_seq_handlers);
3003 }
3004
3005 static int mdt_dtss_handle(struct ptlrpc_request *req)
3006 {
3007         return mdt_handle_common(req, mdt_seq_handlers);
3008 }
3009
3010 static int mdt_fld_handle(struct ptlrpc_request *req)
3011 {
3012         return mdt_handle_common(req, mdt_fld_handlers);
3013 }
3014
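/*
 * Intent codes recognized by the MDT intent policy; each one indexes an
 * entry in mdt_it_flavor[] below.
 */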
3015 enum mdt_it_code {
3016         MDT_IT_OPEN,
3017         MDT_IT_OCREAT,
3018         MDT_IT_CREATE,
3019         MDT_IT_GETATTR,
3020         MDT_IT_READDIR,
3021         MDT_IT_LOOKUP,
3022         MDT_IT_UNLINK,
3023         MDT_IT_TRUNC,
3024         MDT_IT_GETXATTR,
3025         MDT_IT_NR
3026 };
3027
3028 static int mdt_intent_getattr(enum mdt_it_code opcode,
3029                               struct mdt_thread_info *info,
3030                               struct ldlm_lock **,
3031                               int);
3032 static int mdt_intent_reint(enum mdt_it_code opcode,
3033                             struct mdt_thread_info *info,
3034                             struct ldlm_lock **,
3035                             int);
3036
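/*
 * Per-intent descriptor: the expected request format, handling flags
 * (e.g. MUTABOR for modifying intents), the function that acts on the
 * intent, and the reint opcode for intents executed as reint operations.
 */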
3037 static struct mdt_it_flavor {
3038         const struct req_format *it_fmt;
3039         __u32                    it_flags;
3040         int                    (*it_act)(enum mdt_it_code,
3041                                          struct mdt_thread_info *,
3042                                          struct ldlm_lock **,
3043                                          int);
3044         long                     it_reint;
3045 } mdt_it_flavor[] = {
3046         [MDT_IT_OPEN]     = {
3047                 .it_fmt   = &RQF_LDLM_INTENT,
3048                 /*.it_flags = HABEO_REFERO,*/
3049                 .it_flags = 0,
3050                 .it_act   = mdt_intent_reint,
3051                 .it_reint = REINT_OPEN
3052         },
3053         [MDT_IT_OCREAT]   = {
3054                 .it_fmt   = &RQF_LDLM_INTENT,
3055                 .it_flags = MUTABOR,
3056                 .it_act   = mdt_intent_reint,
3057                 .it_reint = REINT_OPEN
3058         },
3059         [MDT_IT_CREATE]   = {
3060                 .it_fmt   = &RQF_LDLM_INTENT,
3061                 .it_flags = MUTABOR,
3062                 .it_act   = mdt_intent_reint,
3063                 .it_reint = REINT_CREATE
3064         },
3065         [MDT_IT_GETATTR]  = {
3066                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3067                 .it_flags = HABEO_REFERO,
3068                 .it_act   = mdt_intent_getattr
3069         },
3070         [MDT_IT_READDIR]  = {
3071                 .it_fmt   = NULL,
3072                 .it_flags = 0,
3073                 .it_act   = NULL
3074         },
3075         [MDT_IT_LOOKUP]   = {
3076                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3077                 .it_flags = HABEO_REFERO,
3078                 .it_act   = mdt_intent_getattr
3079         },
3080         [MDT_IT_UNLINK]   = {
3081                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3082                 .it_flags = MUTABOR,
3083                 .it_act   = NULL,
3084                 .it_reint = REINT_UNLINK
3085         },
3086         [MDT_IT_TRUNC]    = {
3087                 .it_fmt   = NULL,
3088                 .it_flags = MUTABOR,
3089                 .it_act   = NULL
3090         },
3091         [MDT_IT_GETXATTR] = {
3092                 .it_fmt   = NULL,
3093                 .it_flags = 0,
3094                 .it_act   = NULL
3095         }
3096 };
3097
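/*
 * Give an already-granted MDT-side lock back to the client in place of the
 * lock it enqueued: drop the local reader/writer references, bind the lock
 * to the client export and its remote handle, and report
 * ELDLM_LOCK_REPLACED so the ldlm layer returns this lock instead.
 */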
3098 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3099                             struct ldlm_lock **lockp,
3100                             struct ldlm_lock *new_lock,
3101                             struct mdt_lock_handle *lh,
3102                             int flags)
3103 {
3104         struct ptlrpc_request  *req = mdt_info_req(info);
3105         struct ldlm_lock       *lock = *lockp;
3106
3107         /*
3108          * Get a new lock only in cases where a possible resend did not find
3109          * an existing lock.
3110          */
3111         if (new_lock == NULL)
3112                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3113
3114         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3115                 lh->mlh_reg_lh.cookie = 0;
3116                 RETURN(0);
3117         }
3118
3119         LASSERTF(new_lock != NULL,
3120                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3121
3122         /*
3123          * If we've already given this lock to a client once, then we should
3124          * have no readers or writers.  Otherwise, we should have one reader
3125          * _or_ writer ref (which will be zeroed below) before returning the
3126          * lock to a client.
3127          */
3128         if (new_lock->l_export == req->rq_export) {
3129                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3130         } else {
3131                 LASSERT(new_lock->l_export == NULL);
3132                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3133         }
3134
3135         *lockp = new_lock;
3136
3137         if (new_lock->l_export == req->rq_export) {
3138                 /*
3139                  * Already gave this to the client, which means that we
3140                  * reconstructed a reply.
3141                  */
3142                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3143                         MSG_RESENT);
3144                 lh->mlh_reg_lh.cookie = 0;
3145                 RETURN(ELDLM_LOCK_REPLACED);
3146         }
3147
3148         /*
3149          * Fixup the lock to be given to the client.
3150          */
3151         lock_res_and_lock(new_lock);
3152         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3153          * a possible blocking AST. */
3154         while (new_lock->l_readers > 0) {
3155                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3156                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3157                 new_lock->l_readers--;
3158         }
3159         while (new_lock->l_writers > 0) {
3160                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3161                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3162                 new_lock->l_writers--;
3163         }
3164
3165         new_lock->l_export = class_export_get(req->rq_export);
3166         new_lock->l_blocking_ast = lock->l_blocking_ast;
3167         new_lock->l_completion_ast = lock->l_completion_ast;
3168         new_lock->l_remote_handle = lock->l_remote_handle;
3169         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3170
3171         unlock_res_and_lock(new_lock);
3172
3173         lustre_hash_add(new_lock->l_export->exp_lock_hash,
3174                         &new_lock->l_remote_handle,
3175                         &new_lock->l_exp_hash);
3176
3177         LDLM_LOCK_RELEASE(new_lock);
3178         lh->mlh_reg_lh.cookie = 0;
3179
3180         RETURN(ELDLM_LOCK_REPLACED);
3181 }
3182
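/*
 * For a RESENT request, look up the lock granted to the original send via
 * the client's remote lock handle in the export lock hash.  If found, reuse
 * its cookie and granted mode; if not found and the xid is not the last one
 * seen, clear MSG_RESENT so the request is processed as a new one.
 */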
3183 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3184                                     struct ldlm_lock *new_lock,
3185                                     struct ldlm_lock **old_lock,
3186                                     struct mdt_lock_handle *lh)
3187 {
3188         struct ptlrpc_request  *req = mdt_info_req(info);
3189         struct obd_export      *exp = req->rq_export;
3190         struct lustre_handle    remote_hdl;
3191         struct ldlm_request    *dlmreq;
3192         struct ldlm_lock       *lock;
3193
3194         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3195                 return;
3196
3197         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3198         remote_hdl = dlmreq->lock_handle[0];
3199
3200         lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3201         if (lock) {
3202                 if (lock != new_lock) {
3203                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3204                         lh->mlh_reg_mode = lock->l_granted_mode;
3205
3206                         LDLM_DEBUG(lock, "Restoring lock cookie");
3207                         DEBUG_REQ(D_DLMTRACE, req,
3208                                   "restoring lock cookie "LPX64,
3209                                   lh->mlh_reg_lh.cookie);
3210                         if (old_lock)
3211                                 *old_lock = LDLM_LOCK_GET(lock);
3212                         lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
3213                         return;
3214                 }
3215
3216                 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
3217         }
3218
3219         /*
3220          * If the xid matches, then we know this is a resent request, and allow
3221          * it.  (It's probably an OPEN, for which we don't send a lock.)
3222          */
3223         if (req_xid_is_last(req))
3224                 return;
3225
3226         /*
3227          * This remote handle isn't enqueued, so we never received or processed
3228          * this request.  Clear MSG_RESENT, because it can be handled like any
3229          * normal request now.
3230          */
3231         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3232
3233         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3234                   remote_hdl.cookie);
3235 }
3236
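/*
 * Intent policy action for MDT_IT_GETATTR and MDT_IT_LOOKUP: resolve the
 * intent on behalf of the client and return (via *lockp) the lock that
 * should be granted, reusing a lock from a resent request when possible
 * (see mdt_intent_fixup_resent() and mdt_intent_lock_replace()).
 */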
3237 static int mdt_intent_getattr(enum mdt_it_code opcode,
3238                               struct mdt_thread_info *info,
3239                               struct ldlm_lock **lockp,
3240                               int flags)
3241 {
3242         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3243         struct ldlm_lock       *new_lock = NULL;
3244         __u64                   child_bits;
3245         struct ldlm_reply      *ldlm_rep;
3246         struct ptlrpc_request  *req;
3247         struct mdt_body        *reqbody;
3248         struct mdt_body        *repbody;
3249