1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #ifndef EXPORT_SYMTAB
50 # define EXPORT_SYMTAB
51 #endif
52 #define DEBUG_SUBSYSTEM S_MDS
53
54 #include <linux/module.h>
55 /*
56  * struct OBD_{ALLOC,FREE}*()
57  */
58 #include <obd_support.h>
59 /* struct ptlrpc_request */
60 #include <lustre_net.h>
61 /* struct obd_export */
62 #include <lustre_export.h>
63 /* struct obd_device */
64 #include <obd.h>
65 /* lu2dt_dev() */
66 #include <dt_object.h>
67 #include <lustre_mds.h>
68 #include <lustre_mdt.h>
69 #include "mdt_internal.h"
70 #ifdef HAVE_QUOTA_SUPPORT
71 # include <lustre_quota.h>
72 #endif
73 #include <lustre_acl.h>
74 #include <lustre_param.h>
75
76 mdl_mode_t mdt_mdl_lock_modes[] = {
77         [LCK_MINMODE] = MDL_MINMODE,
78         [LCK_EX]      = MDL_EX,
79         [LCK_PW]      = MDL_PW,
80         [LCK_PR]      = MDL_PR,
81         [LCK_CW]      = MDL_CW,
82         [LCK_CR]      = MDL_CR,
83         [LCK_NL]      = MDL_NL,
84         [LCK_GROUP]   = MDL_GROUP
85 };
86
87 ldlm_mode_t mdt_dlm_lock_modes[] = {
88         [MDL_MINMODE] = LCK_MINMODE,
89         [MDL_EX]      = LCK_EX,
90         [MDL_PW]      = LCK_PW,
91         [MDL_PR]      = LCK_PR,
92         [MDL_CW]      = LCK_CW,
93         [MDL_CR]      = LCK_CR,
94         [MDL_NL]      = LCK_NL,
95         [MDL_GROUP]   = LCK_GROUP
96 };
97
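/*
 * The two tables above translate between LDLM lock modes (LCK_*) and
 * meta-data lock modes (MDL_*).  The conversion helpers used later in this
 * file, mdt_dlm_mode2mdl_mode() and mdt_mdl_mode2dlm_mode() (presumably
 * provided by mdt_internal.h), are most likely little more than lookups in
 * these tables, roughly along the lines of the disabled sketch below.
 */
#if 0 /* illustrative sketch only, not the actual helpers */
static inline mdl_mode_t mdt_dlm_mode2mdl_mode(ldlm_mode_t mode)
{
        /* LCK_* values index directly into the designated initializers above */
        return mdt_mdl_lock_modes[mode];
}

static inline ldlm_mode_t mdt_mdl_mode2dlm_mode(mdl_mode_t mode)
{
        return mdt_dlm_lock_modes[mode];
}
#endif
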
98 /*
99  * Initialized in mdt_mod_init().
100  */
101 unsigned long mdt_num_threads;
102
103 /* ptlrpc request handlers for MDT. All handlers are
104  * grouped into several slices - struct mdt_opc_slice,
105  * and stored in an array - mdt_handlers[].
106  */
107 struct mdt_handler {
108         /* The name of this handler. */
109         const char *mh_name;
110         /* Fail id for this handler, checked at the beginning of this handler */
111         int         mh_fail_id;
112         /* Operation code for this handler */
113         __u32       mh_opc;
114         /* flags are listed in enum mdt_handler_flags below. */
115         __u32       mh_flags;
116         /* The actual handler function to execute. */
117         int (*mh_act)(struct mdt_thread_info *info);
118         /* Request format for this request. */
119         const struct req_format *mh_fmt;
120 };
121
122 enum mdt_handler_flags {
123         /*
124          * struct mdt_body is passed in the incoming message, and object
125          * identified by this fid exists on disk.
126          *
127          * "habeo corpus" == "I have a body"
128          */
129         HABEO_CORPUS = (1 << 0),
130         /*
131          * struct ldlm_request is passed in the incoming message.
132          *
133          * "habeo clavis" == "I have a key"
134          */
135         HABEO_CLAVIS = (1 << 1),
136         /*
137          * this request has fixed reply format, so that reply message can be
138          * packed by generic code.
139          *
140          * "habeo refero" == "I have a reply"
141          */
142         HABEO_REFERO = (1 << 2),
143         /*
144          * this request will modify something, so check whether the filesystem
145          * is readonly or not, then return -EROFS to client asap if necessary.
146          *
147          * "mutabor" == "I shall modify"
148          */
149         MUTABOR      = (1 << 3)
150 };
151
152 struct mdt_opc_slice {
153         __u32               mos_opc_start;
154         int                 mos_opc_end;
155         struct mdt_handler *mos_hs;
156 };
157
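/*
 * The concrete handler tables are defined later in this file and are only
 * forward-declared below.  As a hedged illustration (the opcode, fail id and
 * request format names are the usual Lustre ones, but this particular entry
 * and table are hypothetical), a slice of regular MDS handlers could look
 * like the disabled sketch below.
 */
#if 0 /* illustrative sketch only */
static struct mdt_handler mdt_mds_ops_sketch[] = {
        {
                .mh_name    = "mds_getattr",
                .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
                .mh_opc     = MDS_GETATTR,
                .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
                .mh_act     = mdt_getattr,
                .mh_fmt     = &RQF_MDS_GETATTR
        },
};

static struct mdt_opc_slice mdt_regular_handlers_sketch[] = {
        {
                .mos_opc_start = MDS_GETATTR,
                .mos_opc_end   = MDS_LAST_OPC,
                .mos_hs        = mdt_mds_ops_sketch
        },
        {
                .mos_hs        = NULL
        }
};
#endif
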
158 static struct mdt_opc_slice mdt_regular_handlers[];
159 static struct mdt_opc_slice mdt_readpage_handlers[];
160 static struct mdt_opc_slice mdt_xmds_handlers[];
161 static struct mdt_opc_slice mdt_seq_handlers[];
162 static struct mdt_opc_slice mdt_fld_handlers[];
163
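/*
 * Dispatch presumably works by scanning the slices of the table attached to
 * a given service: the slice whose [mos_opc_start, mos_opc_end) range
 * contains the request opcode supplies the handler for that opcode.
 * Assuming a NULL-terminated table, such a lookup can be sketched as below
 * (the helper name is illustrative, not necessarily the one used here).
 */
#if 0 /* illustrative sketch only */
static struct mdt_handler *mdt_find_handler_sketch(__u32 opc,
                                                   struct mdt_opc_slice *s)
{
        for (; s->mos_hs != NULL; s++) {
                if (s->mos_opc_start <= opc && opc < s->mos_opc_end)
                        return &s->mos_hs[opc - s->mos_opc_start];
        }
        return NULL;
}
#endif
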
164 static struct mdt_device *mdt_dev(struct lu_device *d);
165 static int mdt_regular_handle(struct ptlrpc_request *req);
166 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
167
168 static const struct lu_object_operations mdt_obj_ops;
169
170 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
171 {
172         if (!rep)
173                 return 0;
174         return (rep->lock_policy_res1 & flag);
175 }
176
177 void mdt_clear_disposition(struct mdt_thread_info *info,
178                            struct ldlm_reply *rep, int flag)
179 {
180         if (info)
181                 info->mti_opdata &= ~flag;
182         if (rep)
183                 rep->lock_policy_res1 &= ~flag;
184 }
185
186 void mdt_set_disposition(struct mdt_thread_info *info,
187                          struct ldlm_reply *rep, int flag)
188 {
189         if (info)
190                 info->mti_opdata |= flag;
191         if (rep)
192                 rep->lock_policy_res1 |= flag;
193 }
194
195 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
196 {
197         lh->mlh_pdo_hash = 0;
198         lh->mlh_reg_mode = lm;
199         lh->mlh_type = MDT_REG_LOCK;
200 }
201
202 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
203                        const char *name, int namelen)
204 {
205         lh->mlh_reg_mode = lm;
206         lh->mlh_type = MDT_PDO_LOCK;
207
208         if (name != NULL) {
209                 LASSERT(namelen > 0);
210                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
211         } else {
212                 LASSERT(namelen == 0);
213                 lh->mlh_pdo_hash = 0ull;
214         }
215 }
216
217 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
218                               struct mdt_lock_handle *lh)
219 {
220         mdl_mode_t mode;
221         ENTRY;
222
223         /*
224          * Any dir access needs a couple of locks:
225          *
226          * 1) on the part of the dir we are going to look up or modify;
227          *
228          * 2) on the whole dir to protect it from concurrent splitting and/or
229          * to flush the client's cache for readdir().
230          *
231          * So, for a given mode and object, this routine decides what lock
232          * mode to use for lock #2:
233          *
234          * 1) if the caller is going to look up in the dir, then we only need
235          * to protect the dir from being split - LCK_CR;
236          *
237          * 2) if the caller is going to modify the dir, then we need to
238          * protect it from being split and to flush the cache - LCK_CW;
239          *
240          * 3) if the caller is going to modify the dir and that dir seems
241          * ready for splitting, then we need to protect it from any type of
242          * access (lookup/modify/split) - LCK_EX --bzzz
243          */
244
245         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
246         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
247
248         /*
249          * Ask the underlying layer its opinion about the preferable PDO lock
250          * mode, passing the access type as a regular lock mode:
251          *
252          * - MDL_MINMODE means that the lower layer does not want to specify
253          * a lock mode;
254          *
255          * - MDL_NL means that no PDO lock should be taken. This is used in
256          * some cases, e.g. for non-splittable directories there is no need
257          * to use PDO locks at all.
258          */
259         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
260                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
261
262         if (mode != MDL_MINMODE) {
263                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
264         } else {
265                 /*
266                  * The lower layer does not want to specify a locking mode.
267                  * We do it ourselves. No special protection is needed, just
268                  * flush the client's cache on modification and allow
269                  * concurrent modification.
270                  */
271                 switch (lh->mlh_reg_mode) {
272                 case LCK_EX:
273                         lh->mlh_pdo_mode = LCK_EX;
274                         break;
275                 case LCK_PR:
276                         lh->mlh_pdo_mode = LCK_CR;
277                         break;
278                 case LCK_PW:
279                         lh->mlh_pdo_mode = LCK_CW;
280                         break;
281                 default:
282                         CERROR("Unexpected lock mode (0x%x)\n",
283                                (int)lh->mlh_reg_mode);
284                         LBUG();
285                 }
286         }
287
288         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
289         EXIT;
290 }
291
292 static int mdt_getstatus(struct mdt_thread_info *info)
293 {
294         struct mdt_device *mdt  = info->mti_mdt;
295         struct md_device  *next = mdt->mdt_child;
296         struct mdt_body   *repbody;
297         int                rc;
298
299         ENTRY;
300
301         rc = mdt_check_ucred(info);
302         if (rc)
303                 RETURN(err_serious(rc));
304
305         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
306                 RETURN(err_serious(-ENOMEM));
307
308         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
309         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
310         if (rc != 0)
311                 RETURN(rc);
312
313         repbody->valid |= OBD_MD_FLID;
314
315         if (mdt->mdt_opts.mo_mds_capa &&
316             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
317                 struct mdt_object  *root;
318                 struct lustre_capa *capa;
319
320                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
321                 if (IS_ERR(root))
322                         RETURN(PTR_ERR(root));
323
324                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
325                 LASSERT(capa);
326                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
327                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
328                                  0);
329                 mdt_object_put(info->mti_env, root);
330                 if (rc == 0)
331                         repbody->valid |= OBD_MD_FLMDSCAPA;
332         }
333
334         RETURN(rc);
335 }
336
337 static int mdt_statfs(struct mdt_thread_info *info)
338 {
339         struct md_device      *next  = info->mti_mdt->mdt_child;
340         struct ptlrpc_service *svc;
341         struct obd_statfs     *osfs;
342         int                    rc;
343
344         ENTRY;
345
346         svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;
347
348         /* This will trigger a watchdog timeout */
349         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
350                          (MDT_SERVICE_WATCHDOG_FACTOR *
351                           at_get(&svc->srv_at_estimate) / 1000) + 1);
352
353         rc = mdt_check_ucred(info);
354         if (rc)
355                 RETURN(err_serious(rc));
356
357         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
358                 rc = err_serious(-ENOMEM);
359         } else {
360                 osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
361                 rc = next->md_ops->mdo_statfs(info->mti_env, next,
362                                               &info->mti_u.ksfs);
363                 statfs_pack(osfs, &info->mti_u.ksfs);
364         }
365         RETURN(rc);
366 }
367
368 void mdt_pack_size2body(struct mdt_thread_info *info, struct mdt_object *o)
369 {
370         struct mdt_body *b;
371         struct lu_attr *attr = &info->mti_attr.ma_attr;
372
373         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
374
375         /* Check if Size-on-MDS is enabled. */
376         if ((mdt_conn_flags(info) & OBD_CONNECT_SOM) &&
377             S_ISREG(attr->la_mode) && mdt_sizeonmds_enabled(o)) {
378                 b->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
379                 b->size = attr->la_size;
380                 b->blocks = attr->la_blocks;
381         }
382 }
383
384 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
385                         const struct lu_attr *attr, const struct lu_fid *fid)
386 {
387         /* XXX: should pack the reply body according to lu_valid */
388         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
389                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
390                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
391                     OBD_MD_FLATIME | OBD_MD_FLMTIME;
392
393         if (!S_ISREG(attr->la_mode))
394                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
395
396         b->atime      = attr->la_atime;
397         b->mtime      = attr->la_mtime;
398         b->ctime      = attr->la_ctime;
399         b->mode       = attr->la_mode;
400         b->size       = attr->la_size;
401         b->blocks     = attr->la_blocks;
402         b->uid        = attr->la_uid;
403         b->gid        = attr->la_gid;
404         b->flags      = attr->la_flags;
405         b->nlink      = attr->la_nlink;
406         b->rdev       = attr->la_rdev;
407
408         if (fid) {
409                 b->fid1 = *fid;
410                 b->valid |= OBD_MD_FLID;
411
412                 /* FIXME: these should be fixed when the new igif is ready. */
413                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
414                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
415                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
416
417                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
418                                 PFID(fid), b->nlink, b->mode, b->size);
419         }
420
421         if (info)
422                 mdt_body_reverse_idmap(info, b);
423 }
424
425 static inline int mdt_body_has_lov(const struct lu_attr *la,
426                                    const struct mdt_body *body)
427 {
428         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
429                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
430 }
431
432 static int mdt_getattr_internal(struct mdt_thread_info *info,
433                                 struct mdt_object *o)
434 {
435         struct md_object        *next = mdt_object_child(o);
436         const struct mdt_body   *reqbody = info->mti_body;
437         struct ptlrpc_request   *req = mdt_info_req(info);
438         struct md_attr          *ma = &info->mti_attr;
439         struct lu_attr          *la = &ma->ma_attr;
440         struct req_capsule      *pill = info->mti_pill;
441         const struct lu_env     *env = info->mti_env;
442         struct mdt_body         *repbody;
443         struct lu_buf           *buffer = &info->mti_buf;
444         int                     rc;
445         ENTRY;
446
447         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
448                 RETURN(err_serious(-ENOMEM));
449
450         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
451
452         ma->ma_valid = 0;
453
454         rc = mdt_object_exists(o);
455         if (rc < 0) {
456                 /* This object is located on a remote node. */
457                 repbody->fid1 = *mdt_object_fid(o);
458                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
459                 RETURN(0);
460         }
461
462         buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
463         buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);
464
465         /* If it is a dir object and the client requires the MEA, return the MEA */
466         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
467             reqbody->valid & OBD_MD_MEA) {
468                 /* Assumption: MDT_MD size is enough for lmv size. */
469                 ma->ma_lmv = buffer->lb_buf;
470                 ma->ma_lmv_size = buffer->lb_len;
471                 ma->ma_need = MA_LMV | MA_INODE;
472         } else {
473                 ma->ma_lmm = buffer->lb_buf;
474                 ma->ma_lmm_size = buffer->lb_len;
475                 ma->ma_need = MA_LOV | MA_INODE;
476         }
477
478         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
479             reqbody->valid & OBD_MD_FLDIREA  &&
480             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
481                 /* get default stripe info for this dir. */
482                 ma->ma_need |= MA_LOV_DEF;
483         }
484         rc = mo_attr_get(env, next, ma);
485         if (unlikely(rc)) {
486                 CERROR("getattr error for "DFID": %d\n",
487                         PFID(mdt_object_fid(o)), rc);
488                 RETURN(rc);
489         }
490
491         if (likely(ma->ma_valid & MA_INODE))
492                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
493         else
494                 RETURN(-EFAULT);
495
496         if (mdt_body_has_lov(la, reqbody)) {
497                 if (ma->ma_valid & MA_LOV) {
498                         LASSERT(ma->ma_lmm_size);
499                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
500                         repbody->eadatasize = ma->ma_lmm_size;
501                         if (S_ISDIR(la->la_mode))
502                                 repbody->valid |= OBD_MD_FLDIREA;
503                         else
504                                 repbody->valid |= OBD_MD_FLEASIZE;
505                 }
506                 if (ma->ma_valid & MA_LMV) {
507                         LASSERT(S_ISDIR(la->la_mode));
508                         repbody->eadatasize = ma->ma_lmv_size;
509                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
510                 }
511                 if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
512                         repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
513                 }
514         } else if (S_ISLNK(la->la_mode) &&
515                    reqbody->valid & OBD_MD_LINKNAME) {
516                 buffer->lb_buf = ma->ma_lmm;
517                 buffer->lb_len = reqbody->eadatasize;
518                 rc = mo_readlink(env, next, buffer);
519                 if (unlikely(rc <= 0)) {
520                         CERROR("readlink failed: %d\n", rc);
521                         rc = -EFAULT;
522                 } else {
523                         repbody->valid |= OBD_MD_LINKNAME;
524                         repbody->eadatasize = rc;
525                         /* NULL terminate */
526                         ((char*)ma->ma_lmm)[rc - 1] = 0;
527                         CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
528                                (char*)ma->ma_lmm, rc);
529                         rc = 0;
530                 }
531         }
532
533         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
534                 repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
535                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
536                 repbody->valid |= OBD_MD_FLMODEASIZE;
537                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
538                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
539                        repbody->max_cookiesize);
540         }
541
542         if (exp_connect_rmtclient(info->mti_exp) &&
543             reqbody->valid & OBD_MD_FLRMTPERM) {
544                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
545
546                 /* mdt_getattr_lock only */
547                 rc = mdt_pack_remote_perm(info, o, buf);
548                 if (rc) {
549                         repbody->valid &= ~OBD_MD_FLRMTPERM;
550                         repbody->aclsize = 0;
551                         RETURN(rc);
552                 } else {
553                         repbody->valid |= OBD_MD_FLRMTPERM;
554                         repbody->aclsize = sizeof(struct mdt_remote_perm);
555                 }
556         }
557 #ifdef CONFIG_FS_POSIX_ACL
558         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
559                  (reqbody->valid & OBD_MD_FLACL)) {
560                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
561                 buffer->lb_len = req_capsule_get_size(pill,
562                                                       &RMF_ACL, RCL_SERVER);
563                 if (buffer->lb_len > 0) {
564                         rc = mo_xattr_get(env, next, buffer,
565                                           XATTR_NAME_ACL_ACCESS);
566                         if (rc < 0) {
567                                 if (rc == -ENODATA) {
568                                         repbody->aclsize = 0;
569                                         repbody->valid |= OBD_MD_FLACL;
570                                         rc = 0;
571                                 } else if (rc == -EOPNOTSUPP) {
572                                         rc = 0;
573                                 } else {
574                                         CERROR("got acl size: %d\n", rc);
575                                 }
576                         } else {
577                                 repbody->aclsize = rc;
578                                 repbody->valid |= OBD_MD_FLACL;
579                                 rc = 0;
580                         }
581                 }
582         }
583 #endif
584
585         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
586             info->mti_mdt->mdt_opts.mo_mds_capa &&
587             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
588                 struct lustre_capa *capa;
589
590                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
591                 LASSERT(capa);
592                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
593                 rc = mo_capa_get(env, next, capa, 0);
594                 if (rc)
595                         RETURN(rc);
596                 repbody->valid |= OBD_MD_FLMDSCAPA;
597         }
598         RETURN(rc);
599 }
600
601 static int mdt_renew_capa(struct mdt_thread_info *info)
602 {
603         struct mdt_object  *obj = info->mti_object;
604         struct mdt_body    *body;
605         struct lustre_capa *capa, *c;
606         int rc;
607         ENTRY;
608
609         /* If the object doesn't exist, or the server has disabled
610          * capabilities, return directly; the client will find the
611          * OBD_MD_FLOSSCAPA flag not set in body->valid.
612          */
613         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
614             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
615                 RETURN(0);
616
617         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
618         LASSERT(body != NULL);
619
620         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
621         LASSERT(c);
622
623         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
624         LASSERT(capa);
625
626         *capa = *c;
627         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
628         if (rc == 0)
629                 body->valid |= OBD_MD_FLOSSCAPA;
630         RETURN(rc);
631 }
632
633 static int mdt_getattr(struct mdt_thread_info *info)
634 {
635         struct mdt_object       *obj = info->mti_object;
636         struct req_capsule      *pill = info->mti_pill;
637         struct mdt_body         *reqbody;
638         struct mdt_body         *repbody;
639         mode_t                   mode;
640         int                      md_size;
641         int rc;
642         ENTRY;
643
644         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
645         LASSERT(reqbody);
646
647         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
648                 rc = req_capsule_server_pack(pill);
649                 if (unlikely(rc))
650                         RETURN(err_serious(rc));
651                 rc = mdt_renew_capa(info);
652                 GOTO(out_shrink, rc);
653         }
654
655         LASSERT(obj != NULL);
656         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
657
658         mode = lu_object_attr(&obj->mot_obj.mo_lu);
659         if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
660             (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
661                 md_size = reqbody->eadatasize;
662         else
663                 md_size = info->mti_mdt->mdt_max_mdsize;
664
665         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);
666
667         rc = req_capsule_server_pack(pill);
668         if (unlikely(rc != 0))
669                 RETURN(err_serious(rc));
670
671         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
672         LASSERT(repbody != NULL);
673         repbody->eadatasize = 0;
674         repbody->aclsize = 0;
675
676         if (reqbody->valid & OBD_MD_FLRMTPERM)
677                 rc = mdt_init_ucred(info, reqbody);
678         else
679                 rc = mdt_check_ucred(info);
680         if (unlikely(rc))
681                 GOTO(out_shrink, rc);
682
683         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
684         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
685
686         /*
687          * Don't check the capability at all, because rename might do a getattr
688          * on a remote object, and at that time no capability is available.
689          */
690         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
691         rc = mdt_getattr_internal(info, obj);
692         if (reqbody->valid & OBD_MD_FLRMTPERM)
693                 mdt_exit_ucred(info);
694         EXIT;
695 out_shrink:
696         mdt_shrink_reply(info);
697         return rc;
698 }
699
700 static int mdt_is_subdir(struct mdt_thread_info *info)
701 {
702         struct mdt_object     *o = info->mti_object;
703         struct req_capsule    *pill = info->mti_pill;
704         const struct mdt_body *body = info->mti_body;
705         struct mdt_body       *repbody;
706         int                    rc;
707         ENTRY;
708
709         LASSERT(o != NULL);
710
711         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
712
713         /*
714          * We save the last checked parent fid in @repbody->fid1 for the
715          * remote directory case.
716          */
717         LASSERT(fid_is_sane(&body->fid2));
718         LASSERT(mdt_object_exists(o) > 0);
719         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
720                            &body->fid2, &repbody->fid1);
721         if (rc == 0 || rc == -EREMOTE)
722                 repbody->valid |= OBD_MD_FLID;
723
724         RETURN(rc);
725 }
726
727 static int mdt_raw_lookup(struct mdt_thread_info *info,
728                           struct mdt_object *parent,
729                           const struct lu_name *lname,
730                           struct ldlm_reply *ldlm_rep)
731 {
732         struct md_object *next = mdt_object_child(info->mti_object);
733         const struct mdt_body *reqbody = info->mti_body;
734         struct lu_fid *child_fid = &info->mti_tmp_fid1;
735         struct mdt_body *repbody;
736         int rc;
737         ENTRY;
738
739         if (reqbody->valid != OBD_MD_FLID)
740                 RETURN(0);
741
742         LASSERT(!info->mti_cross_ref);
743
744         /* Only look up the fid of this object by name */
745         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
746                         &info->mti_spec);
747 #if 0
748         /* XXX is raw_lookup possible as intent operation? */
749         if (rc != 0) {
750                 if (rc == -ENOENT)
751                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
752                 RETURN(rc);
753         } else
754                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
755
756         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
757 #endif
758         if (rc == 0) {
759                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
760                 repbody->fid1 = *child_fid;
761                 repbody->valid = OBD_MD_FLID;
762         }
763         RETURN(1);
764 }
765
766 /*
767  * UPDATE lock should be taken against parent, and be release before exit;
768  * child_bits lock should be taken against child, and be returned back:
769  *            (1)normal request should release the child lock;
770  *            (2)intent request will grant the lock to client.
771  */
772 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
773                                  struct mdt_lock_handle *lhc,
774                                  __u64 child_bits,
775                                  struct ldlm_reply *ldlm_rep)
776 {
777         struct ptlrpc_request  *req       = mdt_info_req(info);
778         struct mdt_body        *reqbody   = NULL;
779         struct mdt_object      *parent    = info->mti_object;
780         struct mdt_object      *child;
781         struct md_object       *next      = mdt_object_child(parent);
782         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
783         struct lu_name         *lname     = NULL;
784         const char             *name      = NULL;
785         int                     namelen   = 0;
786         struct mdt_lock_handle *lhp;
787         struct ldlm_lock       *lock;
788         struct ldlm_res_id     *res_id;
789         int                     is_resent;
790         int                     rc;
791
792         ENTRY;
793
794         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
795         LASSERT(ergo(is_resent,
796                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
797
798         LASSERT(parent != NULL);
799         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
800         if (name == NULL)
801                 RETURN(err_serious(-EFAULT));
802
803         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
804                                        RCL_CLIENT) - 1;
805         if (!info->mti_cross_ref) {
806                 /*
807                  * XXX: The "namelen == 0" check is for getattr by fid
808                  * (OBD_CONNECT_ATTRFID); otherwise an empty name is not
809                  * allowed, that is, the name must contain at least one
810                  * character plus the terminating '\0'.
811                  */
812                 if (namelen == 0) {
813                         reqbody = req_capsule_client_get(info->mti_pill,
814                                                          &RMF_MDT_BODY);
815                         LASSERT(fid_is_sane(&reqbody->fid2));
816                         name = NULL;
817
818                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
819                                "ldlm_rep = %p\n",
820                                PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
821                                ldlm_rep);
822                 } else {
823                         lname = mdt_name(info->mti_env, (char *)name, namelen);
824                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
825                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
826                                name, ldlm_rep);
827                 }
828         }
829         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
830
831         rc = mdt_object_exists(parent);
832         if (unlikely(rc == 0)) {
833                 LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
834                                 &parent->mot_obj.mo_lu,
835                                 "Parent doesn't exist!\n");
836                 RETURN(-ESTALE);
837         } else if (!info->mti_cross_ref) {
838                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
839                          PFID(mdt_object_fid(parent)));
840         }
841         if (lname) {
842                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
843                 if (rc != 0) {
844                         if (rc > 0)
845                                 rc = 0;
846                         RETURN(rc);
847                 }
848         }
849
850         if (info->mti_cross_ref) {
851                 /* Only getattr on the child. Parent is on another node. */
852                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
853                 child = parent;
854                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
855                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
856
857                 if (is_resent) {
858                         /* Do not take lock for resent case. */
859                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
860                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
861                                  lhc->mlh_reg_lh.cookie);
862                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
863                                                 &lock->l_resource->lr_name));
864                         LDLM_LOCK_PUT(lock);
865                         rc = 0;
866                 } else {
867                         mdt_lock_handle_init(lhc);
868                         mdt_lock_reg_init(lhc, LCK_PR);
869
870                         /*
871                          * The object's name is on another MDS; no lookup
872                          * lock is needed here, but an update lock is.
873                          */
874                         child_bits &= ~MDS_INODELOCK_LOOKUP;
875                         child_bits |= MDS_INODELOCK_UPDATE;
876
877                         rc = mdt_object_lock(info, child, lhc, child_bits,
878                                              MDT_LOCAL_LOCK);
879                 }
880                 if (rc == 0) {
881                         /* Finally, we can get attr for child. */
882                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
883                                          BYPASS_CAPA);
884                         rc = mdt_getattr_internal(info, child);
885                         if (unlikely(rc != 0))
886                                 mdt_object_unlock(info, child, lhc, 1);
887                 }
888                 RETURN(rc);
889         }
890
891         /* step 1: lock parent */
892         lhp = &info->mti_lh[MDT_LH_PARENT];
893         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
894         rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
895                              MDT_LOCAL_LOCK);
896
897         if (unlikely(rc != 0))
898                 RETURN(rc);
899
900         if (lname) {
901                 /* step 2: lookup child's fid by name */
902                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
903                                 &info->mti_spec);
904
905                 if (rc != 0) {
906                         if (rc == -ENOENT)
907                                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
908                         GOTO(out_parent, rc);
909                 } else
910                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
911         } else {
912                 *child_fid = reqbody->fid2;
913                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
914         }
915
916         /*
917          * step 3: find the child object by fid & lock it,
918          *         regardless of whether it is local or remote.
919          */
920         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
921
922         if (unlikely(IS_ERR(child)))
923                 GOTO(out_parent, rc = PTR_ERR(child));
924         if (is_resent) {
925                 /* Do not take lock for resent case. */
926                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
927                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
928                          lhc->mlh_reg_lh.cookie);
929
930                 res_id = &lock->l_resource->lr_name;
931                 if (!fid_res_name_eq(mdt_object_fid(child),
932                                     &lock->l_resource->lr_name)) {
933                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
934                                                  &lock->l_resource->lr_name),
935                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
936                                  (unsigned long)res_id->name[0],
937                                  (unsigned long)res_id->name[1],
938                                  (unsigned long)res_id->name[2],
939                                  PFID(mdt_object_fid(parent)));
940                           CWARN("Resent request, but the child lock was still "
941                                 "not obtained; parent: "DFID" child: "DFID"\n",
942                                 PFID(mdt_object_fid(parent)),
943                                 PFID(mdt_object_fid(child)));
944                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
945                           LDLM_LOCK_PUT(lock);
946                           GOTO(relock, 0);
947                 }
948                 LDLM_LOCK_PUT(lock);
949                 rc = 0;
950         } else {
951                 struct md_attr *ma;
952 relock:
953                 ma = &info->mti_attr;
954
955                 mdt_lock_handle_init(lhc);
956                 mdt_lock_reg_init(lhc, LCK_PR);
957
958                 if (mdt_object_exists(child) == 0) {
959                         LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
960                                         &child->mot_obj.mo_lu,
961                                         "Object doesn't exist!\n");
962                         GOTO(out_child, rc = -ESTALE);
963                 }
964
965                 ma->ma_valid = 0;
966                 ma->ma_need = MA_INODE;
967                 rc = mo_attr_get(info->mti_env, next, ma);
968                 if (unlikely(rc != 0))
969                         GOTO(out_child, rc);
970
971                 /* If the file has not been changed for some time, we return
972                  * not only a LOOKUP lock, but also an UPDATE lock, and this
973                  * might save us an RPC on a later STAT. For directories, it
974                  * also lets negative dentries start working for this dir. */
975                 if (ma->ma_valid & MA_INODE &&
976                     ma->ma_attr.la_valid & LA_CTIME &&
977                     info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
978                     ma->ma_attr.la_ctime < cfs_time_current_sec())
979                         child_bits |= MDS_INODELOCK_UPDATE;
980
981                 rc = mdt_object_lock(info, child, lhc, child_bits,
982                                      MDT_CROSS_LOCK);
983
984                 if (unlikely(rc != 0))
985                         GOTO(out_child, rc);
986         }
987
988         /* finally, we can get attr for child. */
989         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
990         rc = mdt_getattr_internal(info, child);
991         if (unlikely(rc != 0)) {
992                 mdt_object_unlock(info, child, lhc, 1);
993         } else {
994                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
995                 if (lock) {
996                         struct mdt_body *repbody;
997
998                         /* Debugging code. */
999                         res_id = &lock->l_resource->lr_name;
1000                         LDLM_DEBUG(lock, "Returning lock to client\n");
1001                         LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1002                                                  &lock->l_resource->lr_name),
1003                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1004                                  (unsigned long)res_id->name[0],
1005                                  (unsigned long)res_id->name[1],
1006                                  (unsigned long)res_id->name[2],
1007                                  PFID(mdt_object_fid(child)));
1008                         /*
1009                          * Pack Size-on-MDS inode attributes to the body if
1010                          * update lock is given.
1011                          */
1012                         repbody = req_capsule_server_get(info->mti_pill,
1013                                                          &RMF_MDT_BODY);
1014                         if (lock->l_policy_data.l_inodebits.bits &
1015                             MDS_INODELOCK_UPDATE)
1016                                 mdt_pack_size2body(info, child);
1017                         LDLM_LOCK_PUT(lock);
1018                 }
1019         }
1020         EXIT;
1021 out_child:
1022         mdt_object_put(info->mti_env, child);
1023 out_parent:
1024         mdt_object_unlock(info, parent, lhp, 1);
1025         return rc;
1026 }
1027
1028 /* normal handler: should release the child lock */
1029 static int mdt_getattr_name(struct mdt_thread_info *info)
1030 {
1031         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1032         struct mdt_body        *reqbody;
1033         struct mdt_body        *repbody;
1034         int rc;
1035         ENTRY;
1036
1037         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1038         LASSERT(reqbody != NULL);
1039         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1040         LASSERT(repbody != NULL);
1041
1042         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1043         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1044         repbody->eadatasize = 0;
1045         repbody->aclsize = 0;
1046
1047         rc = mdt_init_ucred(info, reqbody);
1048         if (unlikely(rc))
1049                 GOTO(out_shrink, rc);
1050
1051         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1052         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1053                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1054                 lhc->mlh_reg_lh.cookie = 0;
1055         }
1056         mdt_exit_ucred(info);
1057         EXIT;
1058 out_shrink:
1059         mdt_shrink_reply(info);
1060         return rc;
1061 }
1062
1063 static const struct lu_device_operations mdt_lu_ops;
1064
1065 static int lu_device_is_mdt(struct lu_device *d)
1066 {
1067         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1068 }
1069
1070 static int mdt_set_info(struct mdt_thread_info *info)
1071 {
1072         struct ptlrpc_request *req = mdt_info_req(info);
1073         char *key;
1074         __u32 *val;
1075         int keylen, rc = 0;
1076         ENTRY;
1077
1078         rc = req_capsule_server_pack(info->mti_pill);
1079         if (rc)
1080                 RETURN(rc);
1081
1082         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1083         if (key == NULL) {
1084                 DEBUG_REQ(D_HA, req, "no set_info key");
1085                 RETURN(-EFAULT);
1086         }
1087
1088         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1089                                       RCL_CLIENT);
1090
1091         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1092         if (val == NULL) {
1093                 DEBUG_REQ(D_HA, req, "no set_info val");
1094                 RETURN(-EFAULT);
1095         }
1096
1097         if (!KEY_IS(KEY_READ_ONLY))
1098                 RETURN(-EINVAL);
1099
1100         req->rq_status = 0;
1101         lustre_msg_set_status(req->rq_repmsg, 0);
1102
1103         spin_lock(&req->rq_export->exp_lock);
1104         if (*val)
1105                 req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1106         else
1107                 req->rq_export->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
1108         spin_unlock(&req->rq_export->exp_lock);
1109
1110         RETURN(0);
1111 }
1112
1113 static int mdt_connect(struct mdt_thread_info *info)
1114 {
1115         int rc;
1116         struct ptlrpc_request *req;
1117
1118         req = mdt_info_req(info);
1119         rc = target_handle_connect(req);
1120         if (rc == 0) {
1121                 LASSERT(req->rq_export != NULL);
1122                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1123                 rc = mdt_init_sec_level(info);
1124                 if (rc == 0)
1125                         rc = mdt_init_idmap(info);
1126                 if (rc != 0)
1127                         obd_disconnect(class_export_get(req->rq_export));
1128         } else {
1129                 rc = err_serious(rc);
1130         }
1131         return rc;
1132 }
1133
1134 static int mdt_disconnect(struct mdt_thread_info *info)
1135 {
1136         int rc;
1137         ENTRY;
1138
1139         rc = target_handle_disconnect(mdt_info_req(info));
1140         if (rc)
1141                 rc = err_serious(rc);
1142         RETURN(rc);
1143 }
1144
1145 static int mdt_sendpage(struct mdt_thread_info *info,
1146                         struct lu_rdpg *rdpg)
1147 {
1148         struct ptlrpc_request   *req = mdt_info_req(info);
1149         struct ptlrpc_bulk_desc *desc;
1150         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1151         int                      tmpcount;
1152         int                      tmpsize;
1153         int                      timeout;
1154         int                      i;
1155         int                      rc;
1156         ENTRY;
1157
1158         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1159                                     MDS_BULK_PORTAL);
1160         if (desc == NULL)
1161                 RETURN(-ENOMEM);
1162
1163         for (i = 0, tmpcount = rdpg->rp_count;
1164                 i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
1165                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1166                 ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
1167         }
1168
1169         LASSERT(desc->bd_nob == rdpg->rp_count);
1170         rc = ptlrpc_start_bulk_transfer(desc);
1171         if (rc)
1172                 GOTO(free_desc, rc);
1173
1174         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1175                 GOTO(abort_bulk, rc = 0);
1176
1177         timeout = (int) req->rq_deadline - cfs_time_current_sec();
1178         if (timeout < 0)
1179                 CERROR("Req deadline already passed %lu (now: %lu)\n",
1180                        req->rq_deadline, cfs_time_current_sec());
1181         *lwi = LWI_TIMEOUT(max(timeout, 1) * HZ, NULL, NULL);
1182         rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), lwi);
1183         LASSERT (rc == 0 || rc == -ETIMEDOUT);
1184
1185         if (rc == 0) {
1186                 if (desc->bd_success &&
1187                     desc->bd_nob_transferred == rdpg->rp_count)
1188                         GOTO(free_desc, rc);
1189
1190                 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
1191         }
1192
1193         DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
1194                   (rc == -ETIMEDOUT) ? "timeout" : "network error",
1195                   desc->bd_nob_transferred, rdpg->rp_count,
1196                   req->rq_export->exp_client_uuid.uuid,
1197                   req->rq_export->exp_connection->c_remote_uuid.uuid);
1198
1199         class_fail_export(req->rq_export);
1200
1201         EXIT;
1202 abort_bulk:
1203         ptlrpc_abort_bulk(desc);
1204 free_desc:
1205         ptlrpc_free_bulk(desc);
1206         return rc;
1207 }
1208
1209 #ifdef HAVE_SPLIT_SUPPORT
1210 /*
1211  * Retrieve dir entries from the page and insert them into the slave object.
1212  * Actually this should be done in the osd layer, but since it will not be in
1213  * the final product, just do it here and do not define more moo APIs for this.
1214  */
1215 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1216                               int size)
1217 {
1218         struct mdt_object *object = info->mti_object;
1219         struct lu_fid *lf = &info->mti_tmp_fid2;
1220         struct md_attr *ma = &info->mti_attr;
1221         struct lu_dirpage *dp;
1222         struct lu_dirent *ent;
1223         int rc = 0, offset = 0;
1224         ENTRY;
1225
1226         /* Make sure we have at least one entry. */
1227         if (size == 0)
1228                 RETURN(-EINVAL);
1229
1230         /*
1231          * Disable trans for these name inserts, since writing a single page
1232          * would otherwise involve many separate transactions.
1233          */
1234         info->mti_no_need_trans = 1;
1235         /*
1236          * When writing a dir page, there is no need to update the parent's
1237          * ctime, and no permission check is done for name_insert.
1238          */
1239         ma->ma_attr.la_ctime = 0;
1240         ma->ma_attr.la_valid = LA_MODE;
1241         ma->ma_valid = MA_INODE;
1242
1243         cfs_kmap(page);
1244         dp = page_address(page);
1245         offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
1246
1247         for (ent = lu_dirent_start(dp); ent != NULL;
1248              ent = lu_dirent_next(ent)) {
1249                 struct lu_name *lname;
1250                 char *name;
1251
1252                 if (le16_to_cpu(ent->lde_namelen) == 0)
1253                         continue;
1254
1255                 fid_le_to_cpu(lf, &ent->lde_fid);
1256                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1257                         ma->ma_attr.la_mode = S_IFDIR;
1258                 else
1259                         ma->ma_attr.la_mode = 0;
1260                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1261                 if (name == NULL)
1262                         GOTO(out, rc = -ENOMEM);
1263
1264                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1265                 lname = mdt_name(info->mti_env, name,
1266                                  le16_to_cpu(ent->lde_namelen));
1267                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1268                 rc = mdo_name_insert(info->mti_env,
1269                                      md_object_next(&object->mot_obj),
1270                                      lname, lf, ma);
1271                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1272                 if (rc) {
1273                         CERROR("Can't insert %*.*s, rc %d\n",
1274                                le16_to_cpu(ent->lde_namelen),
1275                                le16_to_cpu(ent->lde_namelen),
1276                                ent->lde_name, rc);
1277                         GOTO(out, rc);
1278                 }
1279
1280                 offset += lu_dirent_size(ent);
1281                 if (offset >= size)
1282                         break;
1283         }
1284         EXIT;
1285 out:
1286         cfs_kunmap(page);
1287         return rc;
1288 }
1289
1290 static int mdt_bulk_timeout(void *data)
1291 {
1292         ENTRY;
1293
1294         CERROR("mdt bulk transfer timeout\n");
1295
1296         RETURN(1);
1297 }
1298
1299 static int mdt_writepage(struct mdt_thread_info *info)
1300 {
1301         struct ptlrpc_request   *req = mdt_info_req(info);
1302         struct mdt_body         *reqbody;
1303         struct l_wait_info      *lwi;
1304         struct ptlrpc_bulk_desc *desc;
1305         struct page             *page;
1306         int                rc;
1307         ENTRY;
1308
1309
1310         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1311         if (reqbody == NULL)
1312                 RETURN(err_serious(-EFAULT));
1313
1314         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1315         if (desc == NULL)
1316                 RETURN(err_serious(-ENOMEM));
1317
1318         /* allocate the page for the desc */
1319         page = cfs_alloc_page(CFS_ALLOC_STD);
1320         if (page == NULL)
1321                 GOTO(desc_cleanup, rc = -ENOMEM);
1322
1323         CDEBUG(D_INFO, "Received page offset %d size %d \n",
1324                (int)reqbody->size, (int)reqbody->nlink);
1325
1326         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1327                               (int)reqbody->nlink);
1328
1329         /*
1330          * Check if client was evicted while we were doing i/o before touching
1331          * network.
1332          */
1333         OBD_ALLOC_PTR(lwi);
1334         if (!lwi)
1335                 GOTO(cleanup_page, rc = -ENOMEM);
1336
1337         if (desc->bd_export->exp_failed)
1338                 rc = -ENOTCONN;
1339         else
1340                 rc = ptlrpc_start_bulk_transfer (desc);
1341         if (rc == 0) {
1342                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ,
1343                                             mdt_bulk_timeout, desc);
1344                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1345                                   desc->bd_export->exp_failed, lwi);
1346                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1347                 if (rc == -ETIMEDOUT) {
1348                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1349                         ptlrpc_abort_bulk(desc);
1350                 } else if (desc->bd_export->exp_failed) {
1351                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1352                         rc = -ENOTCONN;
1353                         ptlrpc_abort_bulk(desc);
1354                 } else if (!desc->bd_success ||
1355                            desc->bd_nob_transferred != desc->bd_nob) {
1356                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1357                                   desc->bd_success ?
1358                                   "truncated" : "network error on",
1359                                   desc->bd_nob_transferred, desc->bd_nob);
1360                         /* XXX should this be a different errno? */
1361                         rc = -ETIMEDOUT;
1362                 }
1363         } else {
1364                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1365         }
1366         if (rc)
1367                 GOTO(cleanup_lwi, rc);
1368         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1369
1370 cleanup_lwi:
1371         OBD_FREE_PTR(lwi);
1372 cleanup_page:
1373         cfs_free_page(page);
1374 desc_cleanup:
1375         ptlrpc_free_bulk(desc);
1376         RETURN(rc);
1377 }
1378 #endif
1379
1380 static int mdt_readpage(struct mdt_thread_info *info)
1381 {
1382         struct mdt_object *object = info->mti_object;
1383         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1384         struct mdt_body   *reqbody;
1385         struct mdt_body   *repbody;
1386         int                rc;
1387         int                i;
1388         ENTRY;
1389
1390         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1391                 RETURN(err_serious(-ENOMEM));
1392
1393         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1394         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1395         if (reqbody == NULL || repbody == NULL)
1396                 RETURN(err_serious(-EFAULT));
1397
1398         /*
1399          * Prepare @rdpg before calling the lower layers and the transfer itself.
1400          * Here reqbody->size contains the offset at which to start reading and
1401          * reqbody->nlink contains the number of bytes to read.
1402          */
1403         rdpg->rp_hash = reqbody->size;
1404         if (rdpg->rp_hash != reqbody->size) {
1405                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1406                        rdpg->rp_hash, reqbody->size);
1407                 RETURN(-EFAULT);
1408         }
1409         rdpg->rp_count  = reqbody->nlink;
1410         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT;
1411         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1412         if (rdpg->rp_pages == NULL)
1413                 RETURN(-ENOMEM);
1414
1415         for (i = 0; i < rdpg->rp_npages; ++i) {
1416                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1417                 if (rdpg->rp_pages[i] == NULL)
1418                         GOTO(free_rdpg, rc = -ENOMEM);
1419         }
1420
1421         /* call lower layers to fill allocated pages with directory data */
1422         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1423         if (rc)
1424                 GOTO(free_rdpg, rc);
1425
1426         /* send pages to client */
1427         rc = mdt_sendpage(info, rdpg);
1428
1429         EXIT;
1430 free_rdpg:
1431
1432         for (i = 0; i < rdpg->rp_npages; i++)
1433                 if (rdpg->rp_pages[i] != NULL)
1434                         cfs_free_page(rdpg->rp_pages[i]);
1435         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1436
1437         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1438                 RETURN(0);
1439
1440         return rc;
1441 }
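
/*
 * Worked example for the rdpg setup in mdt_readpage() above (illustrative
 * only; it assumes a 4096-byte CFS_PAGE_SIZE, which is platform-dependent):
 *
 *     reqbody->size  = 0x1000  ->  rdpg->rp_hash  = 0x1000 (start offset)
 *     reqbody->nlink = 9000    ->  rdpg->rp_count = 9000 bytes to read
 *     rdpg->rp_npages = (9000 + 4096 - 1) >> 12 = 3 pages, which are filled
 *     by mo_readpage() and shipped to the client by mdt_sendpage().
 */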
1442
1443 static int mdt_reint_internal(struct mdt_thread_info *info,
1444                               struct mdt_lock_handle *lhc,
1445                               __u32 op)
1446 {
1447         struct req_capsule      *pill = info->mti_pill;
1448         struct mdt_device       *mdt = info->mti_mdt;
1449         struct mdt_body         *repbody;
1450         int                      rc = 0;
1451         ENTRY;
1452
1453         /* pack reply */
1454         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1455                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1456                                      mdt->mdt_max_mdsize);
1457         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1458                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1459                                      mdt->mdt_max_cookiesize);
1460
1461         rc = req_capsule_server_pack(pill);
1462         if (rc != 0) {
1463                 CERROR("Can't pack response, rc %d\n", rc);
1464                 RETURN(err_serious(rc));
1465         }
1466
1467         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1468                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1469                 LASSERT(repbody);
1470                 repbody->eadatasize = 0;
1471                 repbody->aclsize = 0;
1472         }
1473
1474         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
1475                 GOTO(out_shrink, rc = err_serious(-EFAULT));
1476
1477         rc = mdt_reint_unpack(info, op);
1478         if (rc != 0) {
1479                 CERROR("Can't unpack reint, rc %d\n", rc);
1480                 GOTO(out_shrink, rc = err_serious(rc));
1481         }
1482
1483         /* for replay no cookie / lmm is needed, because the client already has them */
1484         if (info->mti_spec.no_create == 1)  {
1485                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1486                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1487
1488                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1489                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1490                                              0);
1491         }
1492
1493         rc = mdt_init_ucred_reint(info);
1494         if (rc)
1495                 GOTO(out_shrink, rc);
1496
1497         rc = mdt_fix_attr_ucred(info, op);
1498         if (rc != 0)
1499                 GOTO(out_ucred, rc = err_serious(rc));
1500
1501         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1502                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1503                 GOTO(out_ucred, rc);
1504         }
1505         rc = mdt_reint_rec(info, lhc);
1506         EXIT;
1507 out_ucred:
1508         mdt_exit_ucred(info);
1509 out_shrink:
1510         mdt_shrink_reply(info);
1511         return rc;
1512 }
1513
1514 static long mdt_reint_opcode(struct mdt_thread_info *info,
1515                              const struct req_format **fmt)
1516 {
1517         struct mdt_rec_reint *rec;
1518         long opc;
1519
1520         opc = err_serious(-EFAULT);
1521         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1522         if (rec != NULL) {
1523                 opc = rec->rr_opcode;
1524                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1525                 if (opc < REINT_MAX && fmt[opc] != NULL)
1526                         req_capsule_extend(info->mti_pill, fmt[opc]);
1527                 else {
1528                         CERROR("Unsupported opc: %ld\n", opc);
1529                         opc = err_serious(opc);
1530                 }
1531         }
1532         return opc;
1533 }
1534
1535 static int mdt_reint(struct mdt_thread_info *info)
1536 {
1537         long opc;
1538         int  rc;
1539
1540         static const struct req_format *reint_fmts[REINT_MAX] = {
1541                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1542                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1543                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1544                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1545                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1546                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1547                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1548         };
1549
1550         ENTRY;
1551
1552         opc = mdt_reint_opcode(info, reint_fmts);
1553         if (opc >= 0) {
1554                 /*
1555                  * No lock possible here from client to pass it to reint code
1556                  * path.
1557                  */
1558                 rc = mdt_reint_internal(info, NULL, opc);
1559         } else {
1560                 rc = opc;
1561         }
1562
1563         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1564         RETURN(rc);
1565 }
1566
1567 /* this should sync the whole device */
1568 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1569 {
1570         struct dt_device *dt = mdt->mdt_bottom;
1571         int rc;
1572         ENTRY;
1573
1574         rc = dt->dd_ops->dt_sync(env, dt);
1575         RETURN(rc);
1576 }
1577
1578 /* this should sync this object */
1579 static int mdt_object_sync(struct mdt_thread_info *info)
1580 {
1581         struct md_object *next;
1582         int rc;
1583         ENTRY;
1584
1585         if (!mdt_object_exists(info->mti_object)) {
1586                 CWARN("Non-existent object "DFID"!\n",
1587                       PFID(mdt_object_fid(info->mti_object)));
1588                 RETURN(-ESTALE);
1589         }
1590         next = mdt_object_child(info->mti_object);
1591         rc = mo_object_sync(info->mti_env, next);
1592
1593         RETURN(rc);
1594 }
1595
1596 static int mdt_sync(struct mdt_thread_info *info)
1597 {
1598         struct req_capsule *pill = info->mti_pill;
1599         struct mdt_body *body;
1600         int rc;
1601         ENTRY;
1602
1603         /* The fid may be zero, so we call req_capsule_set() manually */
1604         req_capsule_set(pill, &RQF_MDS_SYNC);
1605
1606         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1607         if (body == NULL)
1608                 RETURN(err_serious(-EINVAL));
1609
1610         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1611                 RETURN(err_serious(-ENOMEM));
1612
1613         if (fid_seq(&body->fid1) == 0) {
1614                 /* sync the whole device */
1615                 rc = req_capsule_server_pack(pill);
1616                 if (rc == 0)
1617                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1618                 else
1619                         rc = err_serious(rc);
1620         } else {
1621                 /* sync an object */
1622                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1623                 if (rc == 0) {
1624                         rc = mdt_object_sync(info);
1625                         if (rc == 0) {
1626                                 struct md_object *next;
1627                                 const struct lu_fid *fid;
1628                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1629
1630                                 next = mdt_object_child(info->mti_object);
1631                                 info->mti_attr.ma_need = MA_INODE;
1632                                 info->mti_attr.ma_valid = 0;
1633                                 rc = mo_attr_get(info->mti_env, next,
1634                                                  &info->mti_attr);
1635                                 if (rc == 0) {
1636                                         body = req_capsule_server_get(pill,
1637                                                                 &RMF_MDT_BODY);
1638                                         fid = mdt_object_fid(info->mti_object);
1639                                         mdt_pack_attr2body(info, body, la, fid);
1640                                 }
1641                         }
1642                 } else
1643                         rc = err_serious(rc);
1644         }
1645         RETURN(rc);
1646 }
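
/*
 * Request-dispatch sketch for mdt_sync() above (illustrative only):
 *
 *     body->fid1 with fid_seq() == 0    ->  sync the whole device via
 *                                           mdt_device_sync();
 *     body->fid1 with a valid object    ->  sync just that object via
 *                                           mdt_object_sync() and return its
 *                                           refreshed attributes in the reply.
 */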
1647
1648 #ifdef HAVE_QUOTA_SUPPORT
1649 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
1650 {
1651         struct obd_quotactl *oqctl;
1652         struct req_capsule *pill = info->mti_pill;
1653         struct obd_export *exp = info->mti_exp;
1654         struct md_device *next = info->mti_mdt->mdt_child;
1655         int rc;
1656         ENTRY;
1657
1658         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_QUOTACHECK_NET))
1659                 RETURN(0);
1660
1661         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1662         if (oqctl == NULL)
1663                 RETURN(-EPROTO);
1664
1665         /* remote client has no permission for quotacheck */
1666         if (unlikely(exp_connect_rmtclient(exp)))
1667                 RETURN(-EPERM);
1668
1669         rc = req_capsule_server_pack(pill);
1670         if (rc)
1671                 RETURN(rc);
1672
1673         rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next, exp,
1674                                                oqctl->qc_type);
1675         RETURN(rc);
1676 }
1677
1678 static int mdt_quotactl_handle(struct mdt_thread_info *info)
1679 {
1680         struct obd_quotactl *oqctl, *repoqc;
1681         struct req_capsule *pill = info->mti_pill;
1682         struct obd_export *exp = info->mti_exp;
1683         struct md_device *next = info->mti_mdt->mdt_child;
1684         const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
1685         int id, rc;
1686         ENTRY;
1687
1688         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_QUOTACTL_NET))
1689                 RETURN(0);
1690
1691         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1692         if (oqctl == NULL)
1693                 RETURN(-EPROTO);
1694
1695         id = oqctl->qc_id;
1696         if (exp_connect_rmtclient(exp)) {
1697                 struct ptlrpc_request *req = mdt_info_req(info);
1698                 struct mdt_export_data *med = mdt_req2med(req);
1699                 struct lustre_idmap_table *idmap = med->med_idmap;
1700
1701                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1702                              oqctl->qc_cmd != Q_GETINFO))
1703                         RETURN(-EPERM);
1704
1705
1706                 if (oqctl->qc_type == USRQUOTA)
1707                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1708                                                      oqctl->qc_id);
1709                 else if (oqctl->qc_type == GRPQUOTA)
1710                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1711                                                      oqctl->qc_id);
1712                 else
1713                         RETURN(-EINVAL);
1714
1715                 if (id == CFS_IDMAP_NOTFOUND) {
1716                         CDEBUG(D_QUOTA, "no mapping for id %u\n",
1717                                oqctl->qc_id);
1718                         RETURN(-EACCES);
1719                 }
1720         }
1721
1722         rc = req_capsule_server_pack(pill);
1723         if (rc)
1724                 RETURN(rc);
1725
1726         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
1727         LASSERT(repoqc != NULL);
1728
1729         switch (oqctl->qc_cmd) {
1730         case Q_QUOTAON:
1731                 rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type, id);
1732                 break;
1733         case Q_QUOTAOFF:
1734                 rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type, id);
1735                 break;
1736         case Q_SETINFO:
1737                 rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
1738                                       &oqctl->qc_dqinfo);
1739                 break;
1740         case Q_GETINFO:
1741                 rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
1742                                       &oqctl->qc_dqinfo);
1743                 break;
1744         case Q_SETQUOTA:
1745                 rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
1746                                        &oqctl->qc_dqblk);
1747                 break;
1748         case Q_GETQUOTA:
1749                 rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
1750                                        &oqctl->qc_dqblk);
1751                 break;
1752         case Q_GETOINFO:
1753                 rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
1754                                        &oqctl->qc_dqinfo);
1755                 break;
1756         case Q_GETOQUOTA:
1757                 rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
1758                                         &oqctl->qc_dqblk);
1759                 break;
1760         case LUSTRE_Q_INVALIDATE:
1761                 rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
1762                 break;
1763         case LUSTRE_Q_FINVALIDATE:
1764                 rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
1765                 break;
1766         default:
1767                 CERROR("unsupported mdt_quotactl command: %d\n",
1768                        oqctl->qc_cmd);
1769                 RETURN(-EFAULT);
1770         }
1771
1772         *repoqc = *oqctl;
1773         RETURN(rc);
1774 }
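
/*
 * Id-remapping sketch for mdt_quotactl_handle() above (illustrative only;
 * the uid value 500 is hypothetical): a remote client issuing Q_GETQUOTA
 * for USRQUOTA id 500 first has that id translated through the per-export
 * idmap table,
 *
 *     id = lustre_idmap_lookup_uid(NULL, med->med_idmap, 0, 500);
 *
 * and the resulting local id is what is passed down to mqo->mqo_getquota();
 * CFS_IDMAP_NOTFOUND yields -EACCES, and remote clients may only use the
 * Q_GETQUOTA/Q_GETINFO commands.
 */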
1775 #endif
1776
1777 /*
1778  * OBD PING and other handlers.
1779  */
1780 static int mdt_obd_ping(struct mdt_thread_info *info)
1781 {
1782         int rc;
1783         ENTRY;
1784
1785         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
1786
1787         rc = target_handle_ping(mdt_info_req(info));
1788         if (rc < 0)
1789                 rc = err_serious(rc);
1790         RETURN(rc);
1791 }
1792
1793 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
1794 {
1795         return err_serious(-EOPNOTSUPP);
1796 }
1797
1798 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
1799 {
1800         return err_serious(-EOPNOTSUPP);
1801 }
1802
1803
1804 /*
1805  * DLM handlers.
1806  */
1807 static struct ldlm_callback_suite cbs = {
1808         .lcs_completion = ldlm_server_completion_ast,
1809         .lcs_blocking   = ldlm_server_blocking_ast,
1810         .lcs_glimpse    = NULL
1811 };
1812
1813 static int mdt_enqueue(struct mdt_thread_info *info)
1814 {
1815         struct ptlrpc_request *req;
1816         __u64 req_bits;
1817         int rc;
1818
1819         /*
1820          * info->mti_dlm_req already contains swapped and (if necessary)
1821          * converted dlm request.
1822          */
1823         LASSERT(info->mti_dlm_req != NULL);
1824
1825         req = mdt_info_req(info);
1826
1827         /*
1828          * Lock without inodebits makes no sense and will oops later in
1829          * ldlm. Let's check it now to see if we have wrong lock from client or
1830          * bits get corrupted somewhere in mdt_intent_policy().
1831          */
1832         req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1833         /* This is disabled because we need to support liblustre flock.
1834          * LASSERT(req_bits != 0);
1835          */
1836
1837         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
1838                                   req, info->mti_dlm_req, &cbs);
1839         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
1840         return rc ? err_serious(rc) : req->rq_status;
1841 }
1842
1843 static int mdt_convert(struct mdt_thread_info *info)
1844 {
1845         int rc;
1846         struct ptlrpc_request *req;
1847
1848         LASSERT(info->mti_dlm_req);
1849         req = mdt_info_req(info);
1850         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
1851         return rc ? err_serious(rc) : req->rq_status;
1852 }
1853
1854 static int mdt_bl_callback(struct mdt_thread_info *info)
1855 {
1856         CERROR("bl callbacks should not happen on MDS\n");
1857         LBUG();
1858         return err_serious(-EOPNOTSUPP);
1859 }
1860
1861 static int mdt_cp_callback(struct mdt_thread_info *info)
1862 {
1863         CERROR("cp callbacks should not happen on MDS\n");
1864         LBUG();
1865         return err_serious(-EOPNOTSUPP);
1866 }
1867
1868 /*
1869  * sec context handlers
1870  */
1871 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
1872 {
1873         int rc;
1874
1875         rc = mdt_handle_idmap(info);
1876
1877         if (unlikely(rc)) {
1878                 struct ptlrpc_request *req = mdt_info_req(info);
1879                 __u32                  opc;
1880
1881                 opc = lustre_msg_get_opc(req->rq_reqmsg);
1882                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
1883                         sptlrpc_svc_ctx_invalidate(req);
1884         }
1885
1886         OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);
1887
1888         return rc;
1889 }
1890
1891 static struct mdt_object *mdt_obj(struct lu_object *o)
1892 {
1893         LASSERT(lu_device_is_mdt(o->lo_dev));
1894         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
1895 }
1896
1897 struct mdt_object *mdt_object_find(const struct lu_env *env,
1898                                    struct mdt_device *d,
1899                                    const struct lu_fid *f)
1900 {
1901         struct lu_object *o;
1902         struct mdt_object *m;
1903         ENTRY;
1904
1905         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
1906         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
1907         if (unlikely(IS_ERR(o)))
1908                 m = (struct mdt_object *)o;
1909         else
1910                 m = mdt_obj(o);
1911         RETURN(m);
1912 }
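
/*
 * Typical usage of mdt_object_find() (a minimal sketch; the fid variable is
 * hypothetical): look the object up, check the result with IS_ERR(), and
 * drop the reference with mdt_object_put() when done. mdt_object_find_lock()
 * below follows the same pattern:
 *
 *     struct mdt_object *o;
 *
 *     o = mdt_object_find(info->mti_env, info->mti_mdt, fid);
 *     if (IS_ERR(o))
 *             return PTR_ERR(o);
 *     ...
 *     mdt_object_put(info->mti_env, o);
 */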
1913
1914 /**
1915  * Asynchronous commit for mdt device.
1916  *
1917  * Pass the asynchronous commit call down the MDS stack.
1918  *
1919  * \param env environment
1920  * \param mdt the mdt device
1921  */
1922 static void mdt_device_commit_async(const struct lu_env *env,
1923                                     struct mdt_device *mdt)
1924 {
1925         struct dt_device *dt = mdt->mdt_bottom;
1926         int rc;
1927
1928         rc = dt->dd_ops->dt_commit_async(env, dt);
1929         if (unlikely(rc != 0))
1930                 CWARN("async commit start failed with rc = %d\n", rc);
1931 }
1932
1933 /**
1934  * Mark the lock as "synchronous".
1935  *
1936  * Mark the lock to defer transaction commit to the unlock time.
1937  *
1938  * \param lock the lock to mark as "synchronous"
1939  *
1940  * \see mdt_is_lock_sync
1941  * \see mdt_save_lock
1942  */
1943 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
1944 {
1945         lock->l_ast_data = (void*)1;
1946 }
1947
1948 /**
1949  * Check whether the lock is "synchronous" or not.
1950  *
1951  * \param lock the lock to check
1952  * \retval 1 the lock is "synchronous"
1953  * \retval 0 the lock isn't "synchronous"
1954  *
1955  * \see mdt_set_lock_sync
1956  * \see mdt_save_lock
1957  */
1958 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
1959 {
1960         return lock->l_ast_data != NULL;
1961 }
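
/*
 * Usage sketch for the pair above (illustrative only, paraphrasing the real
 * conditions): mdt_blocking_ast() below marks a PW/EX lock as "synchronous"
 * when, with COS enabled, it is blocked by a lock from a different client,
 * and mdt_save_lock() later checks the mark to start an asynchronous commit:
 *
 *     if (mdt_cos_is_enabled(mdt) && conflict_is_from_other_client)
 *             mdt_set_lock_sync(lock);
 *     ...
 *     if (mdt_is_lock_sync(lock))
 *             mdt_device_commit_async(info->mti_env, mdt);
 */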
1962
1963 /**
1964  * Blocking AST for mdt locks.
1965  *
1966  * Starts a transaction commit in case of a COS lock conflict or
1967  * defers such a commit to mdt_save_lock().
1968  *
1969  * \param lock the lock which blocks a request or a cancelling lock
1970  * \param desc unused
1971  * \param data unused
1972  * \param flag indicates whether this is a cancelling or a blocking callback
1973  * \retval 0
1974  * \see ldlm_blocking_ast_nocheck
1975  */
1976 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
1977                      void *data, int flag)
1978 {
1979         struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
1980         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
1981         int rc;
1982         ENTRY;
1983
1984         if (flag == LDLM_CB_CANCELING)
1985                 RETURN(0);
1986         lock_res_and_lock(lock);
1987         if (lock->l_blocking_ast != mdt_blocking_ast) {
1988                 unlock_res_and_lock(lock);
1989                 RETURN(0);
1990         }
1991         if (mdt_cos_is_enabled(mdt) &&
1992             lock->l_req_mode & (LCK_PW | LCK_EX) &&
1993             lock->l_blocking_lock != NULL &&
1994             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
1995                 mdt_set_lock_sync(lock);
1996         }
1997         rc = ldlm_blocking_ast_nocheck(lock);
1998
1999         /* There is no lock conflict if l_blocking_lock == NULL;
2000          * it indicates a blocking AST sent from ldlm_lock_decref_internal()
2001          * when the last reference to a local lock was released. */
2002         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2003                 struct lu_env env;
2004
2005                 rc = lu_env_init(&env, LCT_MD_THREAD);
2006                 if (unlikely(rc != 0))
2007                         CWARN("lu_env initialization failed with rc = %d, "
2008                               "cannot start asynchronous commit\n", rc);
2009                 else
2010                         mdt_device_commit_async(&env, mdt);
2011                 lu_env_fini(&env);
2012         }
2013         RETURN(rc);
2014 }
2015
2016 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2017                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2018 {
2019         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2020         ldlm_policy_data_t *policy = &info->mti_policy;
2021         struct ldlm_res_id *res_id = &info->mti_res_id;
2022         int rc;
2023         ENTRY;
2024
2025         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2026         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2027         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2028         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2029
2030         if (mdt_object_exists(o) < 0) {
2031                 if (locality == MDT_CROSS_LOCK) {
2032                         /* cross-ref object fix */
2033                         ibits &= ~MDS_INODELOCK_UPDATE;
2034                         ibits |= MDS_INODELOCK_LOOKUP;
2035                 } else {
2036                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2037                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2038                 }
2039                 /* No PDO lock on remote object */
2040                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2041         }
2042
2043         if (lh->mlh_type == MDT_PDO_LOCK) {
2044                 /* check for existence after the object is locked */
2045                 if (mdt_object_exists(o) == 0) {
2046                         /* Non-existent object shouldn't have PDO lock */
2047                         RETURN(-ESTALE);
2048                 } else {
2049                         /* Non-dir object shouldn't have PDO lock */
2050                         LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
2051                 }
2052         }
2053
2054         memset(policy, 0, sizeof(*policy));
2055         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2056
2057         /*
2058          * Take PDO lock on whole directory and build correct @res_id for lock
2059          * on part of directory.
2060          */
2061         if (lh->mlh_pdo_hash != 0) {
2062                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2063                 mdt_lock_pdo_mode(info, o, lh);
2064                 if (lh->mlh_pdo_mode != LCK_NL) {
2065                         /*
2066                          * Do not use LDLM_FL_LOCAL_ONLY for the parallel lock;
2067                          * it is never going to be sent to the client and we do
2068                          * not want it slowed down due to possible cancels.
2069                          */
2070                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2071                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2072                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2073                                           &info->mti_exp->exp_handle.h_cookie);
2074                         if (unlikely(rc))
2075                                 RETURN(rc);
2076                 }
2077
2078                 /*
2079                  * Finish initializing res_id with the name hash, marking the
2080                  * part of the directory that is being modified.
2081                  */
2082                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2083         }
2084
2085         policy->l_inodebits.bits = ibits;
2086
2087         /*
2088          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2089          * going to be sent to the client. If it is, the mdt_intent_policy()
2090          * path will fix it up and turn the FL_LOCAL flag off.
2091          */
2092         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2093                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2094                           &info->mti_exp->exp_handle.h_cookie);
2095         if (rc)
2096                 GOTO(out, rc);
2097
2098 out:
2099         if (rc)
2100                 mdt_object_unlock(info, o, lh, 1);
2101
2102
2103         RETURN(rc);
2104 }
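
/*
 * PDO resource-naming sketch for mdt_object_lock() above (illustrative
 * only): with a non-zero mlh_pdo_hash the directory is first locked as a
 * whole in the PDO mode, and the regular lock resource is then narrowed to
 * the hash bucket of the name being modified:
 *
 *     res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
 *
 * so that operations on different names within the same directory map to
 * different resources and can proceed in parallel.
 */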
2105
2106 /**
2107  * Save a lock within request object.
2108  *
2109  * Keep the lock referenced until either client ACK or transaction
2110  * commit happens, or release the lock immediately, depending on the
2111  * input parameters. If COS is ON, a write lock is converted to a COS
2112  * lock before saving.
2113  *
2114  * \param info thread info object
2115  * \param h lock handle
2116  * \param mode lock mode
2117  * \param decref force immediate lock releasing
2118  */
2119 static
2120 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2121                    ldlm_mode_t mode, int decref)
2122 {
2123         ENTRY;
2124
2125         if (lustre_handle_is_used(h)) {
2126                 if (decref || !info->mti_has_trans ||
2127                     !(mode & (LCK_PW | LCK_EX))){
2128                         mdt_fid_unlock(h, mode);
2129                 } else {
2130                         struct mdt_device *mdt = info->mti_mdt;
2131                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2132                         struct ptlrpc_request *req = mdt_info_req(info);
2133                         int no_ack = 0;
2134
2135                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2136                                  h->cookie);
2137                         CDEBUG(D_HA, "request = %p reply state = %p"
2138                                " transno = "LPD64"\n",
2139                                req, req->rq_reply_state, req->rq_transno);
2140                         if (mdt_cos_is_enabled(mdt)) {
2141                                 no_ack = 1;
2142                                 ldlm_lock_downgrade(lock, LCK_COS);
2143                                 mode = LCK_COS;
2144                         }
2145                         ptlrpc_save_lock(req, h, mode, no_ack);
2146                         if (mdt_is_lock_sync(lock)) {
2147                                 CDEBUG(D_HA, "found sync-lock,"
2148                                        " async commit started\n");
2149                                 mdt_device_commit_async(info->mti_env,
2150                                                         mdt);
2151                         }
2152                         LDLM_LOCK_PUT(lock);
2153                 }
2154                 h->cookie = 0ull;
2155         }
2156
2157         EXIT;
2158 }
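
/*
 * Behaviour sketch for mdt_save_lock() above (illustrative only):
 *
 *     decref set, no transaction, or a non-PW/EX mode
 *         -> mdt_fid_unlock() releases the lock immediately;
 *     PW/EX lock with a transaction, COS enabled
 *         -> the lock is downgraded to LCK_COS and kept referenced through
 *            ptlrpc_save_lock() until transaction commit (no_ack = 1);
 *     PW/EX lock with a transaction, COS disabled
 *         -> the lock is kept referenced through ptlrpc_save_lock() until
 *            the client ACKs the reply.
 */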
2159
2160 /**
2161  * Unlock mdt object.
2162  *
2163  * Immediately release the regular lock and the PDO lock, or save the
2164  * locks in the request and keep them referenced until client ACK or
2165  * transaction commit.
2166  *
2167  * \param info thread info object
2168  * \param o mdt object
2169  * \param lh mdt lock handle referencing regular and PDO locks
2170  * \param decref force immediate lock releasing
2171  */
2172 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2173                        struct mdt_lock_handle *lh, int decref)
2174 {
2175         ENTRY;
2176
2177         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2178         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2179
2180         EXIT;
2181 }
2182
2183 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2184                                         const struct lu_fid *f,
2185                                         struct mdt_lock_handle *lh,
2186                                         __u64 ibits)
2187 {
2188         struct mdt_object *o;
2189
2190         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2191         if (!IS_ERR(o)) {
2192                 int rc;
2193
2194                 rc = mdt_object_lock(info, o, lh, ibits,
2195                                      MDT_LOCAL_LOCK);
2196                 if (rc != 0) {
2197                         mdt_object_put(info->mti_env, o);
2198                         o = ERR_PTR(rc);
2199                 }
2200         }
2201         return o;
2202 }
2203
2204 void mdt_object_unlock_put(struct mdt_thread_info * info,
2205                            struct mdt_object * o,
2206                            struct mdt_lock_handle *lh,
2207                            int decref)
2208 {
2209         mdt_object_unlock(info, o, lh, decref);
2210         mdt_object_put(info->mti_env, o);
2211 }
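
/*
 * Combined usage sketch (a minimal example with error handling trimmed; the
 * fid value is hypothetical and mdt_lock_reg_init()/MDT_LH_PARENT are
 * assumed to come from mdt_internal.h): mdt_object_find_lock() and
 * mdt_object_unlock_put() above are meant to be used as a pair:
 *
 *     struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
 *     struct mdt_object *o;
 *
 *     mdt_lock_reg_init(lh, LCK_PW);
 *     o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
 *     if (IS_ERR(o))
 *             return PTR_ERR(o);
 *     ...
 *     mdt_object_unlock_put(info, o, lh, 1);  (decref = 1: drop locks now)
 */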
2212
2213 static struct mdt_handler *mdt_handler_find(__u32 opc,
2214                                             struct mdt_opc_slice *supported)
2215 {
2216         struct mdt_opc_slice *s;
2217         struct mdt_handler   *h;
2218
2219         h = NULL;
2220         for (s = supported; s->mos_hs != NULL; s++) {
2221                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2222                         h = s->mos_hs + (opc - s->mos_opc_start);
2223                         if (likely(h->mh_opc != 0))
2224                                 LASSERT(h->mh_opc == opc);
2225                         else
2226                                 h = NULL; /* unsupported opc */
2227                         break;
2228                 }
2229         }
2230         return h;
2231 }
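
/*
 * Lookup arithmetic for mdt_handler_find() above (illustrative only, with
 * made-up opcode numbers): if a slice covers [mos_opc_start = 400,
 * mos_opc_end = 500) and opc == 412, the handler is mos_hs[412 - 400], i.e.
 * mos_hs[12]; an entry whose mh_opc is 0 marks the opcode as unsupported
 * and NULL is returned to the caller.
 */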
2232
2233 static int mdt_lock_resname_compat(struct mdt_device *m,
2234                                    struct ldlm_request *req)
2235 {
2236         /* XXX something... later. */
2237         return 0;
2238 }
2239
2240 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2241 {
2242         /* XXX something... later. */
2243         return 0;
2244 }
2245
2246 /*
2247  * Generic code handling requests that have struct mdt_body passed in:
2248  *
2249  *  - extract mdt_body from request and save it in @info, if present;
2250  *
2251  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2252  *  @info;
2253  *
2254  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2255  *  the object actually exists on storage (lu_object_exists()).
2256  *
2257  */
2258 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2259 {
2260         const struct mdt_body    *body;
2261         struct mdt_object        *obj;
2262         const struct lu_env      *env;
2263         struct req_capsule       *pill;
2264         int                       rc;
2265         ENTRY;
2266
2267         env = info->mti_env;
2268         pill = info->mti_pill;
2269
2270         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2271         if (body == NULL)
2272                 RETURN(-EFAULT);
2273
2274         if (!(body->valid & OBD_MD_FLID))
2275                 RETURN(0);
2276
2277         if (!fid_is_sane(&body->fid1)) {
2278                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2279                 RETURN(-EINVAL);
2280         }
2281
2282         /*
2283          * Do not get size or any capa fields before we check that the request
2284          * actually contains a capa. There are some requests which do not, for
2285          * instance MDS_IS_SUBDIR.
2286          */
2287         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2288             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2289                 mdt_set_capainfo(info, 0, &body->fid1,
2290                                  req_capsule_client_get(pill, &RMF_CAPA1));
2291
2292         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2293         if (!IS_ERR(obj)) {
2294                 if ((flags & HABEO_CORPUS) &&
2295                     !mdt_object_exists(obj)) {
2296                         mdt_object_put(env, obj);
2297                         /* for capability renewal, ENOENT will be handled in
2298                          * mdt_renew_capa */
2299                         if (body->valid & OBD_MD_FLOSSCAPA)
2300                                 rc = 0;
2301                         else
2302                                 rc = -ENOENT;
2303                 } else {
2304                         info->mti_object = obj;
2305                         rc = 0;
2306                 }
2307         } else
2308                 rc = PTR_ERR(obj);
2309
2310         RETURN(rc);
2311 }
2312
2313 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2314 {
2315         struct req_capsule *pill = info->mti_pill;
2316         int rc;
2317         ENTRY;
2318
2319         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2320                 rc = mdt_body_unpack(info, flags);
2321         else
2322                 rc = 0;
2323
2324         if (rc == 0 && (flags & HABEO_REFERO)) {
2325                 struct mdt_device *mdt = info->mti_mdt;
2326
2327                 /* Pack reply. */
2328                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2329                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2330                                              mdt->mdt_max_mdsize);
2331                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2332                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
2333                                              mdt->mdt_max_cookiesize);
2334
2335                 rc = req_capsule_server_pack(pill);
2336         }
2337         RETURN(rc);
2338 }
2339
2340 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2341 {
2342         struct md_device *next = m->mdt_child;
2343
2344         return next->md_ops->mdo_init_capa_ctxt(env, next,
2345                                                 m->mdt_opts.mo_mds_capa,
2346                                                 m->mdt_capa_timeout,
2347                                                 m->mdt_capa_alg,
2348                                                 m->mdt_capa_keys);
2349 }
2350
2351 /*
2352  * Invoke handler for this request opc. Also do necessary preprocessing
2353  * (according to handler ->mh_flags), and post-processing (setting of
2354  * ->last_{xid,committed}).
2355  */
2356 static int mdt_req_handle(struct mdt_thread_info *info,
2357                           struct mdt_handler *h, struct ptlrpc_request *req)
2358 {
2359         int   rc, serious = 0;
2360         __u32 flags;
2361
2362         ENTRY;
2363
2364         LASSERT(h->mh_act != NULL);
2365         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2366         LASSERT(current->journal_info == NULL);
2367
2368         /*
2369          * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2370          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2371          * etc., without talking to the mdt authors first. Checking the same
2372          * thing there again is useless, and returning a 0 error without
2373          * packing a reply is buggy! Handlers either pack a reply or return an error.
2374          *
2375          * We return 0 here and do not send any reply in order to emulate a
2376          * network failure. Do not send any reply in case any of the NET-related
2377          * fail_ids has occurred.
2378          */
2379         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2380                 RETURN(0);
2381
2382         rc = 0;
2383         flags = h->mh_flags;
2384         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2385
2386         if (h->mh_fmt != NULL) {
2387                 req_capsule_set(info->mti_pill, h->mh_fmt);
2388                 rc = mdt_unpack_req_pack_rep(info, flags);
2389         }
2390
2391         if (rc == 0 && flags & MUTABOR &&
2392             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2393                 /* should it be rq_status? */
2394                 rc = -EROFS;
2395
2396         if (rc == 0 && flags & HABEO_CLAVIS) {
2397                 struct ldlm_request *dlm_req;
2398
2399                 LASSERT(h->mh_fmt != NULL);
2400
2401                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2402                 if (dlm_req != NULL) {
2403                         if (info->mti_mdt->mdt_opts.mo_compat_resname)
2404                                 rc = mdt_lock_resname_compat(info->mti_mdt,
2405                                                              dlm_req);
2406                         info->mti_dlm_req = dlm_req;
2407                 } else {
2408                         rc = -EFAULT;
2409                 }
2410         }
2411
2412         /* capability setting changed via /proc, need to reinitialize the ctxt */
2413         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2414                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2415                 info->mti_mdt->mdt_capa_conf = 0;
2416         }
2417
2418         if (likely(rc == 0)) {
2419                 /*
2420                  * Process request, there can be two types of rc:
2421                  * 1) errors with msg unpack/pack, other failures outside the
2422                  * operation itself. This is counted as serious errors;
2423                  * 2) errors during fs operation, should be placed in rq_status
2424                  * only
2425                  */
2426                 rc = h->mh_act(info);
2427                 if (rc == 0 &&
2428                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2429                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2430                                   "pack reply and returned 0 error\n",
2431                                   h->mh_name);
2432                         LBUG();
2433                 }
2434                 serious = is_serious(rc);
2435                 rc = clear_serious(rc);
2436         } else
2437                 serious = 1;
2438
2439         req->rq_status = rc;
2440
2441         /*
2442          * ELDLM_* codes which are > 0 should go in rq_status only, as should
2443          * all non-serious errors.
2444          */
2445         if (rc > 0 || !serious)
2446                 rc = 0;
2447
2448         LASSERT(current->journal_info == NULL);
2449
2450         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2451             info->mti_mdt->mdt_opts.mo_compat_resname) {
2452                 struct ldlm_reply *dlmrep;
2453
2454                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2455                 if (dlmrep != NULL)
2456                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2457         }
2458
2459         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2460         if (likely(rc == 0 && h->mh_opc != MDS_DISCONNECT))
2461                 target_committed_to_req(req);
2462
2463         if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
2464                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2465                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2466                 LBUG();
2467         }
2468
2469         target_send_reply(req, rc, info->mti_fail_id);
2470         RETURN(0);
2471 }
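
/*
 * Error-routing sketch for mdt_req_handle() above (illustrative only):
 * handler return values are split into "serious" errors (wrapped with
 * err_serious(), e.g. unpack failures) and ordinary per-operation errors:
 *
 *     rc = h->mh_act(info);       e.g. err_serious(-EFAULT) or plain -ENOENT
 *     serious = is_serious(rc);
 *     rc = clear_serious(rc);
 *     req->rq_status = rc;        always reported back to the client
 *     if (rc > 0 || !serious)
 *             rc = 0;             only serious errors are propagated to
 *                                 target_send_reply()
 */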
2472
2473 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2474 {
2475         lh->mlh_type = MDT_NUL_LOCK;
2476         lh->mlh_reg_lh.cookie = 0ull;
2477         lh->mlh_reg_mode = LCK_MINMODE;
2478         lh->mlh_pdo_lh.cookie = 0ull;
2479         lh->mlh_pdo_mode = LCK_MINMODE;
2480 }
2481
2482 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2483 {
2484         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2485         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2486 }
2487
2488 /*
2489  * Initialize fields of struct mdt_thread_info. Other fields are left in an
2490  * uninitialized state, because it's too expensive to zero out the whole
2491  * mdt_thread_info (> 1K) on each request arrival.
2492  */
2493 static void mdt_thread_info_init(struct ptlrpc_request *req,
2494                                  struct mdt_thread_info *info)
2495 {
2496         int i;
2497         struct md_capainfo *ci;
2498
2499         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2500         info->mti_pill = &req->rq_pill;
2501
2502         /* lock handle */
2503         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2504                 mdt_lock_handle_init(&info->mti_lh[i]);
2505
2506         /* mdt device: it can be NULL while CONNECT */
2507         if (req->rq_export) {
2508                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2509                 info->mti_exp = req->rq_export;
2510         } else
2511                 info->mti_mdt = NULL;
2512         info->mti_env = req->rq_svc_thread->t_env;
2513         ci = md_capainfo(info->mti_env);
2514         memset(ci, 0, sizeof *ci);
2515         if (req->rq_export) {
2516                 if (exp_connect_rmtclient(req->rq_export))
2517                         ci->mc_auth = LC_ID_CONVERT;
2518                 else if (req->rq_export->exp_connect_flags &
2519                          OBD_CONNECT_MDS_CAPA)
2520                         ci->mc_auth = LC_ID_PLAIN;
2521                 else
2522                         ci->mc_auth = LC_ID_NONE;
2523         }
2524
2525         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
2526         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
2527
2528         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
2529         info->mti_body = NULL;
2530         info->mti_object = NULL;
2531         info->mti_dlm_req = NULL;
2532         info->mti_has_trans = 0;
2533         info->mti_no_need_trans = 0;
2534         info->mti_cross_ref = 0;
2535         info->mti_opdata = 0;
2536
2537         /* Do not check for split by default. */
2538         info->mti_spec.sp_ck_split = 0;
2539 }
2540
2541 static void mdt_thread_info_fini(struct mdt_thread_info *info)
2542 {
2543         int i;
2544
2545         req_capsule_fini(info->mti_pill);
2546         if (info->mti_object != NULL) {
2547                 mdt_object_put(info->mti_env, info->mti_object);
2548                 info->mti_object = NULL;
2549         }
2550         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2551                 mdt_lock_handle_fini(&info->mti_lh[i]);
2552         info->mti_env = NULL;
2553 }
2554
2555 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
2556                                        struct obd_device *obd, int *process)
2557 {
2558         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2559         case MDS_CONNECT: /* This will never get here, but for completeness. */
2560         case OST_CONNECT: /* This will never get here, but for completeness. */
2561         case MDS_DISCONNECT:
2562         case OST_DISCONNECT:
2563                *process = 1;
2564                RETURN(0);
2565
2566         case MDS_CLOSE:
2567         case MDS_DONE_WRITING:
2568         case MDS_SYNC: /* used in unmounting */
2569         case OBD_PING:
2570         case MDS_REINT:
2571         case SEQ_QUERY:
2572         case FLD_QUERY:
2573         case LDLM_ENQUEUE:
2574                 *process = target_queue_recovery_request(req, obd);
2575                 RETURN(0);
2576
2577         default:
2578                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
2579                 *process = -EAGAIN;
2580                 RETURN(0);
2581         }
2582 }
2583
2584 /*
2585  * Handle recovery. Return:
2586  *        +1: continue request processing;
2587  *       -ve: abort immediately with the given error code;
2588  *         0: send reply with error code in req->rq_status;
2589  */
2590 static int mdt_recovery(struct mdt_thread_info *info)
2591 {
2592         struct ptlrpc_request *req = mdt_info_req(info);
2593         int recovering;
2594         struct obd_device *obd;
2595
2596         ENTRY;
2597
2598         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2599         case MDS_CONNECT:
2600         case SEC_CTX_INIT:
2601         case SEC_CTX_INIT_CONT:
2602         case SEC_CTX_FINI:
2603                 {
2604 #if 0
2605                         int rc;
2606
2607                         rc = mdt_handle_idmap(info);
2608                         if (rc)
2609                                 RETURN(rc);
2610                         else
2611 #endif
2612                                 RETURN(+1);
2613                 }
2614         }
2615
2616         if (unlikely(req->rq_export == NULL)) {
2617                 CERROR("operation %d on unconnected MDS from %s\n",
2618                        lustre_msg_get_opc(req->rq_reqmsg),
2619                        libcfs_id2str(req->rq_peer));
2620                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
2621                  * mds_A will get -ENOTCONN (especially ping requests), which
2622                  * causes mds_A's deactivate timeout to trigger; then, when
2623                  * mds_A cleans up, the cleanup process will be suspended
2624                  * because the deactivate timeout is not zero.
2625                  */
2626                 req->rq_status = -ENOTCONN;
2627                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
2628                 RETURN(0);
2629         }
2630
2631         /* sanity check: if the xid matches, the request must be marked as a
2632          * resent or replayed */
2633         if (req_xid_is_last(req)) {
2634                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
2635                       (MSG_RESENT | MSG_REPLAY))) {
2636                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
2637                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
2638                                   lustre_msg_get_flags(req->rq_reqmsg));
2639                         LBUG();
2640                         req->rq_status = -ENOTCONN;
2641                         RETURN(-ENOTCONN);
2642                 }
2643         }
2644
2645         /* else: note the opposite is not always true; a RESENT req after a
2646          * failover will usually not match the last_xid, since it was likely
2647          * never committed. A REPLAYed request will almost never match the
2648          * last xid, however it could for a committed, but still retained,
2649          * open. */
2650
2651         obd = req->rq_export->exp_obd;
2652
2653         /* Check for aborted recovery... */
2654         spin_lock_bh(&obd->obd_processing_task_lock);
2655         recovering = obd->obd_recovering;
2656         spin_unlock_bh(&obd->obd_processing_task_lock);
2657         if (unlikely(recovering)) {
2658                 int rc;
2659                 int should_process;
2660                 DEBUG_REQ(D_INFO, req, "Got new replay");
2661                 rc = mdt_filter_recovery_request(req, obd, &should_process);
2662                 if (rc != 0 || !should_process)
2663                         RETURN(rc);
2664                 else if (should_process < 0) {
2665                         req->rq_status = should_process;
2666                         rc = ptlrpc_error(req);
2667                         RETURN(rc);
2668                 }
2669         }
2670         RETURN(+1);
2671 }
2672
2673 static int mdt_msg_check_version(struct lustre_msg *msg)
2674 {
2675         int rc;
2676
2677         switch (lustre_msg_get_opc(msg)) {
2678         case MDS_CONNECT:
2679         case MDS_DISCONNECT:
2680         case OBD_PING:
2681         case SEC_CTX_INIT:
2682         case SEC_CTX_INIT_CONT:
2683         case SEC_CTX_FINI:
2684                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2685                 if (rc)
2686                         CERROR("bad opc %u version %08x, expecting %08x\n",
2687                                lustre_msg_get_opc(msg),
2688                                lustre_msg_get_version(msg),
2689                                LUSTRE_OBD_VERSION);
2690                 break;
2691         case MDS_GETSTATUS:
2692         case MDS_GETATTR:
2693         case MDS_GETATTR_NAME:
2694         case MDS_STATFS:
2695         case MDS_READPAGE:
2696         case MDS_WRITEPAGE:
2697         case MDS_IS_SUBDIR:
2698         case MDS_REINT:
2699         case MDS_CLOSE:
2700         case MDS_DONE_WRITING:
2701         case MDS_PIN:
2702         case MDS_SYNC:
2703         case MDS_GETXATTR:
2704         case MDS_SETXATTR:
2705         case MDS_SET_INFO:
2706         case MDS_QUOTACHECK:
2707         case MDS_QUOTACTL:
2708         case QUOTA_DQACQ:
2709         case QUOTA_DQREL:
2710         case SEQ_QUERY:
2711         case FLD_QUERY:
2712                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2713                 if (rc)
2714                         CERROR("bad opc %u version %08x, expecting %08x\n",
2715                                lustre_msg_get_opc(msg),
2716                                lustre_msg_get_version(msg),
2717                                LUSTRE_MDS_VERSION);
2718                 break;
2719         case LDLM_ENQUEUE:
2720         case LDLM_CONVERT:
2721         case LDLM_BL_CALLBACK:
2722         case LDLM_CP_CALLBACK:
2723                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2724                 if (rc)
2725                         CERROR("bad opc %u version %08x, expecting %08x\n",
2726                                lustre_msg_get_opc(msg),
2727                                lustre_msg_get_version(msg),
2728                                LUSTRE_DLM_VERSION);
2729                 break;
2730         case OBD_LOG_CANCEL:
2731         case LLOG_ORIGIN_HANDLE_CREATE:
2732         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2733         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2734         case LLOG_ORIGIN_HANDLE_CLOSE:
2735         case LLOG_ORIGIN_HANDLE_DESTROY:
2736         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2737         case LLOG_CATINFO:
2738                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2739                 if (rc)
2740                         CERROR("bad opc %u version %08x, expecting %08x\n",
2741                                lustre_msg_get_opc(msg),
2742                                lustre_msg_get_version(msg),
2743                                LUSTRE_LOG_VERSION);
2744                 break;
2745         default:
2746                 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
2747                 rc = -ENOTSUPP;
2748         }
2749         return rc;
2750 }
2751
2752 static int mdt_handle0(struct ptlrpc_request *req,
2753                        struct mdt_thread_info *info,
2754                        struct mdt_opc_slice *supported)
2755 {
2756         struct mdt_handler *h;
2757         struct lustre_msg  *msg;
2758         int                 rc;
2759
2760         ENTRY;
2761
2762         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
2763                 RETURN(0);
2764
2765         LASSERT(current->journal_info == NULL);
2766
2767         msg = req->rq_reqmsg;
2768         rc = mdt_msg_check_version(msg);
2769         if (likely(rc == 0)) {
2770                 rc = mdt_recovery(info);
2771                 if (likely(rc == +1)) {
2772                         h = mdt_handler_find(lustre_msg_get_opc(msg),
2773                                              supported);
2774                         if (likely(h != NULL)) {
2775                                 rc = mdt_req_handle(info, h, req);
2776                         } else {
2777                                 CERROR("Unsupported opc: 0x%x\n", lustre_msg_get_opc(msg));
2778                                 req->rq_status = -ENOTSUPP;
2779                                 rc = ptlrpc_error(req);
2780                                 RETURN(rc);
2781                         }
2782                 }
2783         } else
2784                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
2785         RETURN(rc);
2786 }
2787
2788 /*
2789  * MDT handler function called by ptlrpc service thread when request comes.
2790  *
2791  * XXX common "target" functionality should be factored into separate module
2792  * shared by mdt, ost and stand-alone services like fld.
2793  */
2794 static int mdt_handle_common(struct ptlrpc_request *req,
2795                              struct mdt_opc_slice *supported)
2796 {
2797         struct lu_env          *env;
2798         struct mdt_thread_info *info;
2799         int                     rc;
2800         ENTRY;
2801
2802         env = req->rq_svc_thread->t_env;
2803         LASSERT(env != NULL);
2804         LASSERT(env->le_ses != NULL);
2805         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
2806         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
2807         LASSERT(info != NULL);
2808
2809         mdt_thread_info_init(req, info);
2810
2811         rc = mdt_handle0(req, info, supported);
2812
2813         mdt_thread_info_fini(info);
2814         RETURN(rc);
2815 }
2816
2817 /*
2818  * This is called from recovery code as handler of _all_ RPC types, FLD and SEQ
2819  * as well.
2820  */
2821 int mdt_recovery_handle(struct ptlrpc_request *req)
2822 {
2823         int rc;
2824         ENTRY;
2825
2826         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2827         case FLD_QUERY:
2828                 rc = mdt_handle_common(req, mdt_fld_handlers);
2829                 break;
2830         case SEQ_QUERY:
2831                 rc = mdt_handle_common(req, mdt_seq_handlers);
2832                 break;
2833         default:
2834                 rc = mdt_handle_common(req, mdt_regular_handlers);
2835                 break;
2836         }
2837
2838         RETURN(rc);
2839 }
2840
2841 static int mdt_regular_handle(struct ptlrpc_request *req)
2842 {
2843         return mdt_handle_common(req, mdt_regular_handlers);
2844 }
2845
2846 static int mdt_readpage_handle(struct ptlrpc_request *req)
2847 {
2848         return mdt_handle_common(req, mdt_readpage_handlers);
2849 }
2850
2851 static int mdt_xmds_handle(struct ptlrpc_request *req)
2852 {
2853         return mdt_handle_common(req, mdt_xmds_handlers);
2854 }
2855
2856 static int mdt_mdsc_handle(struct ptlrpc_request *req)
2857 {
2858         return mdt_handle_common(req, mdt_seq_handlers);
2859 }
2860
2861 static int mdt_mdss_handle(struct ptlrpc_request *req)
2862 {
2863         return mdt_handle_common(req, mdt_seq_handlers);
2864 }
2865
2866 static int mdt_dtss_handle(struct ptlrpc_request *req)
2867 {
2868         return mdt_handle_common(req, mdt_seq_handlers);
2869 }
2870
2871 static int mdt_fld_handle(struct ptlrpc_request *req)
2872 {
2873         return mdt_handle_common(req, mdt_fld_handlers);
2874 }
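
/*
 * Note (descriptive comment): each thin wrapper above binds one ptlrpc
 * service to the opcode slice it serves (regular, readpage, xmds, seq or
 * fld handlers).  They are passed as request handlers to
 * ptlrpc_init_svc_conf() in mdt_start_ptlrpc_service() below.
 */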
2875
2876 enum mdt_it_code {
2877         MDT_IT_OPEN,
2878         MDT_IT_OCREAT,
2879         MDT_IT_CREATE,
2880         MDT_IT_GETATTR,
2881         MDT_IT_READDIR,
2882         MDT_IT_LOOKUP,
2883         MDT_IT_UNLINK,
2884         MDT_IT_TRUNC,
2885         MDT_IT_GETXATTR,
2886         MDT_IT_NR
2887 };
2888
2889 static int mdt_intent_getattr(enum mdt_it_code opcode,
2890                               struct mdt_thread_info *info,
2891                               struct ldlm_lock **,
2892                               int);
2893 static int mdt_intent_reint(enum mdt_it_code opcode,
2894                             struct mdt_thread_info *info,
2895                             struct ldlm_lock **,
2896                             int);
2897
2898 static struct mdt_it_flavor {
2899         const struct req_format *it_fmt;
2900         __u32                    it_flags;
2901         int                    (*it_act)(enum mdt_it_code,
2902                                          struct mdt_thread_info *,
2903                                          struct ldlm_lock **,
2904                                          int);
2905         long                     it_reint;
2906 } mdt_it_flavor[] = {
2907         [MDT_IT_OPEN]     = {
2908                 .it_fmt   = &RQF_LDLM_INTENT,
2909                 /*.it_flags = HABEO_REFERO,*/
2910                 .it_flags = 0,
2911                 .it_act   = mdt_intent_reint,
2912                 .it_reint = REINT_OPEN
2913         },
2914         [MDT_IT_OCREAT]   = {
2915                 .it_fmt   = &RQF_LDLM_INTENT,
2916                 .it_flags = MUTABOR,
2917                 .it_act   = mdt_intent_reint,
2918                 .it_reint = REINT_OPEN
2919         },
2920         [MDT_IT_CREATE]   = {
2921                 .it_fmt   = &RQF_LDLM_INTENT,
2922                 .it_flags = MUTABOR,
2923                 .it_act   = mdt_intent_reint,
2924                 .it_reint = REINT_CREATE
2925         },
2926         [MDT_IT_GETATTR]  = {
2927                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
2928                 .it_flags = HABEO_REFERO,
2929                 .it_act   = mdt_intent_getattr
2930         },
2931         [MDT_IT_READDIR]  = {
2932                 .it_fmt   = NULL,
2933                 .it_flags = 0,
2934                 .it_act   = NULL
2935         },
2936         [MDT_IT_LOOKUP]   = {
2937                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
2938                 .it_flags = HABEO_REFERO,
2939                 .it_act   = mdt_intent_getattr
2940         },
2941         [MDT_IT_UNLINK]   = {
2942                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
2943                 .it_flags = MUTABOR,
2944                 .it_act   = NULL,
2945                 .it_reint = REINT_UNLINK
2946         },
2947         [MDT_IT_TRUNC]    = {
2948                 .it_fmt   = NULL,
2949                 .it_flags = MUTABOR,
2950                 .it_act   = NULL
2951         },
2952         [MDT_IT_GETXATTR] = {
2953                 .it_fmt   = NULL,
2954                 .it_flags = 0,
2955                 .it_act   = NULL
2956         }
2957 };
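
/*
 * Illustrative sketch (not a new code path): an intent request is dispatched
 * through the table above roughly the way mdt_intent_opc() below does it:
 *
 *      opc = mdt_intent_code(it->opc);
 *      flv = &mdt_it_flavor[opc];
 *      if (flv->it_fmt != NULL)
 *              req_capsule_extend(pill, flv->it_fmt);
 *      rc = flv->it_act(opc, info, lockp, flags);
 *
 * Entries with a NULL ->it_act (READDIR, UNLINK, TRUNC, GETXATTR) are not
 * served through the intent path and end up returning -EOPNOTSUPP.
 */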
2958
2959 int mdt_intent_lock_replace(struct mdt_thread_info *info,
2960                             struct ldlm_lock **lockp,
2961                             struct ldlm_lock *new_lock,
2962                             struct mdt_lock_handle *lh,
2963                             int flags)
2964 {
2965         struct ptlrpc_request  *req = mdt_info_req(info);
2966         struct ldlm_lock       *lock = *lockp;
2967
2968         /*
2969          * Get a new lock only when a possible resend did not find an
2970          * existing lock.
2971          */
2972         if (new_lock == NULL)
2973                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
2974
2975         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
2976                 lh->mlh_reg_lh.cookie = 0;
2977                 RETURN(0);
2978         }
2979
2980         LASSERTF(new_lock != NULL,
2981                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
2982
2983         /*
2984          * If we've already given this lock to a client once, then we should
2985          * have no readers or writers.  Otherwise, we should have one reader
2986          * _or_ writer ref (which will be zeroed below) before returning the
2987          * lock to a client.
2988          */
2989         if (new_lock->l_export == req->rq_export) {
2990                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2991         } else {
2992                 LASSERT(new_lock->l_export == NULL);
2993                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2994         }
2995
2996         *lockp = new_lock;
2997
2998         if (new_lock->l_export == req->rq_export) {
2999                 /*
3000                  * Already gave this to the client, which means that we
3001                  * reconstructed a reply.
3002                  */
3003                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3004                         MSG_RESENT);
3005                 lh->mlh_reg_lh.cookie = 0;
3006                 RETURN(ELDLM_LOCK_REPLACED);
3007         }
3008
3009         /*
3010          * Fixup the lock to be given to the client.
3011          */
3012         lock_res_and_lock(new_lock);
3013         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3014          * possible blocking AST. */
3015         while (new_lock->l_readers > 0) {
3016                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3017                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3018                 new_lock->l_readers--;
3019         }
3020         while (new_lock->l_writers > 0) {
3021                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3022                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3023                 new_lock->l_writers--;
3024         }
3025
3026         new_lock->l_export = class_export_get(req->rq_export);
3027         new_lock->l_blocking_ast = lock->l_blocking_ast;
3028         new_lock->l_completion_ast = lock->l_completion_ast;
3029         new_lock->l_remote_handle = lock->l_remote_handle;
3030         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3031
3032         unlock_res_and_lock(new_lock);
3033
3034         lustre_hash_add(new_lock->l_export->exp_lock_hash,
3035                         &new_lock->l_remote_handle,
3036                         &new_lock->l_exp_hash);
3037
3038         LDLM_LOCK_RELEASE(new_lock);
3039         lh->mlh_reg_lh.cookie = 0;
3040
3041         RETURN(ELDLM_LOCK_REPLACED);
3042 }
3043
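/*
 * Descriptive summary: for a MSG_RESENT request, look up the lock already
 * granted for the client's remote handle.  If one is found, restore its
 * cookie and mode into the passed mdt_lock_handle (optionally returning the
 * lock via old_lock).  If no such lock exists but the request xid matches
 * the last one seen, allow the resend as-is (probably an OPEN, for which no
 * lock is sent).  Otherwise clear MSG_RESENT and handle the request as a
 * normal one.
 */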
3044 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3045                                     struct ldlm_lock *new_lock,
3046                                     struct ldlm_lock **old_lock,
3047                                     struct mdt_lock_handle *lh)
3048 {
3049         struct ptlrpc_request  *req = mdt_info_req(info);
3050         struct obd_export      *exp = req->rq_export;
3051         struct lustre_handle    remote_hdl;
3052         struct ldlm_request    *dlmreq;
3053         struct ldlm_lock       *lock;
3054
3055         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3056                 return;
3057
3058         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3059         remote_hdl = dlmreq->lock_handle[0];
3060
3061         lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3062         if (lock) {
3063                 if (lock != new_lock) {
3064                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3065                         lh->mlh_reg_mode = lock->l_granted_mode;
3066
3067                         LDLM_DEBUG(lock, "Restoring lock cookie");
3068                         DEBUG_REQ(D_DLMTRACE, req,
3069                                   "restoring lock cookie "LPX64,
3070                                   lh->mlh_reg_lh.cookie);
3071                         if (old_lock)
3072                                 *old_lock = LDLM_LOCK_GET(lock);
3073                         lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
3074                         return;
3075                 }
3076
3077                 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
3078         }
3079
3080         /*
3081          * If the xid matches, then we know this is a resent request and allow
3082          * it (it is probably an OPEN, for which we do not send a lock).
3083          */
3084         if (req_xid_is_last(req))
3085                 return;
3086
3087         /*
3088          * This remote handle isn't enqueued, so we never received or processed
3089          * this request.  Clear MSG_RESENT, because it can be handled like any
3090          * normal request now.
3091          */
3092         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3093
3094         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3095                   remote_hdl.cookie);
3096 }
3097
3098 static int mdt_intent_getattr(enum mdt_it_code opcode,
3099                               struct mdt_thread_info *info,
3100                               struct ldlm_lock **lockp,
3101                               int flags)
3102 {
3103         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3104         struct ldlm_lock       *new_lock = NULL;
3105         __u64                   child_bits;
3106         struct ldlm_reply      *ldlm_rep;
3107         struct ptlrpc_request  *req;
3108         struct mdt_body        *reqbody;
3109         struct mdt_body        *repbody;
3110         int                     rc;
3111         ENTRY;
3112
3113         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
3114         LASSERT(reqbody);
3115
3116         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
3117         LASSERT(repbody);
3118
3119         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
3120         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
3121         repbody->eadatasize = 0;
3122         repbody->aclsize = 0;
3123
3124         switch (opcode) {
3125         case MDT_IT_LOOKUP:
3126                 child_bits = MDS_INODELOCK_LOOKUP;
3127                 break;
3128         case MDT_IT_GETATTR:
3129                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
3130                 break;
3131         default:
3132                 CERROR("Unsupported intent opcode: %d\n", opcode);
3133                 GOTO(out_shrink, rc = -EINVAL);
3134         }
3135
3136         rc = mdt_init_ucred(info, reqbody);
3137         if (rc)
3138                 GOTO(out_shrink, rc);
3139
3140         req = info->mti_pill->rc_req;
3141         ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3142         mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD);
3143
3144         /* Get lock from request for possible resent case. */
3145         mdt_intent_fixup_resent(info, *lockp, &new_lock, lhc);
3146
3147         ldlm_rep->lock_policy_res2 =
3148                 mdt_getattr_name_lock(info, lhc, child_bits, ldlm_rep);
3149
3150         if (mdt_get_disposition(ldlm_rep, DISP_LOOKUP_NEG))
3151                 ldlm_rep->lock_policy_res2 = 0;
3152         if (!mdt_get_disposition(ldlm_rep, DISP_LOOKUP_POS) ||
3153             ldlm_rep->lock_policy_res2) {
3154                 lhc->mlh_reg_lh.cookie = 0ull;
3155                 GOTO(out_ucred, rc = ELDLM_LOCK_ABORTED);
3156         }
3157
3158         rc = mdt_intent_lock_replace(info, lockp, new_lock, lhc, flags);
3159         EXIT;
3160 out_ucred:
3161         mdt_exit_ucred(info);
3162 out_shrink:
3163         mdt_shrink_reply(info);
3164         return rc;
3165 }
3166
3167 static int mdt_intent_reint(enum mdt_it_code opcode,
3168                             struct mdt_thread_info *info,
3169                             struct ldlm_lock **lockp,
3170                             int flags)
3171 {
3172         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3173         struct ldlm_reply      *rep = NULL;
3174         long                    opc;
3175         int                     rc;
3176
3177         static const struct req_format *intent_fmts[REINT_MAX] = {
3178                 [REINT_CREATE]  = &RQF_LDLM_INTENT_CREATE,
3179                 [REINT_OPEN]    = &RQF_LDLM_INTENT_OPEN
3180         };
3181
3182         ENTRY;
3183
3184         opc = mdt_reint_opcode(info, intent_fmts);
3185         if (opc < 0)
3186                 RETURN(opc);
3187
3188         if (mdt_it_flavor[opcode].it_reint != opc) {
3189                 CERROR("Reint code %ld doesn't match intent: %d\n",
3190                        opc, opcode);
3191                 RETURN(err_serious(-EPROTO));
3192         }
3193
3194         /* Get lock from request for possible resent case. */
3195         mdt_intent_fixup_resent(info, *lockp, NULL, lhc);
3196
3197         rc = mdt_reint_internal(info, lhc, opc);
3198
3199         /* Check whether the reply has been packed successfully. */
3200         if (mdt_info_req(info)->rq_repmsg != NULL)
3201                 rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3202         if (rep == NULL)
3203                 RETURN(err_serious(-EFAULT));
3204
3205         /* MDC expects this in any case */
3206         if (rc != 0)
3207                 mdt_set_disposition(info, rep, DISP_LOOKUP_EXECD);
3208
3209         /* Cross-ref case, the lock should be returned to the client */
3210         if (rc == -EREMOTE) {
3211                 LASSERT(lustre_handle_is_used(&lhc->mlh_reg_lh));
3212                 rep->lock_policy_res2 = 0;
3213                 rc = mdt_intent_lock_replace(info, lockp, NULL, lhc, flags);
3214                 RETURN(rc);
3215         }
3216         rep->lock_policy_res2 = clear_serious(rc);
3217
3218         lhc->mlh_reg_lh.cookie = 0ull;
3219         if (rc == -ENOTCONN || rc == -ENODEV) {
3220                 /*
3221                  * If it is a disconnect error (ENODEV or ENOTCONN), the error
3222                  * is returned via rq_status, and the client's ptlrpc layer will
3223                  * detect this, then disconnect and reconnect the import
3224                  * immediately, instead of impacting the following RPCs.
3225                  */
3226                 RETURN(rc);
3227         } else {
3228                 /*
3229                  * For other cases, the error is returned in the intent reply
3230                  * and the client retrieves the result from the intent.
3231                  */
3232                  /*
3233                   * FIXME: when open lock is finished, that should be
3234                   * checked here.
3235                   */
3236                 RETURN(ELDLM_LOCK_ABORTED);
3237         }
3238 }
3239
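/*
 * Map an LDLM intent opcode (IT_*) to the MDT_IT_* index used for
 * mdt_it_flavor[].  For example, IT_OPEN | IT_CREAT selects MDT_IT_OCREAT
 * while plain IT_OPEN selects MDT_IT_OPEN; unknown combinations return
 * -EINVAL.
 */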
3240 static int mdt_intent_code(long itcode)
3241 {
3242         int rc;
3243
3244         switch(itcode) {
3245         case IT_OPEN:
3246                 rc = MDT_IT_OPEN;
3247                 break;
3248         case IT_OPEN|IT_CREAT:
3249                 rc = MDT_IT_OCREAT;
3250                 break;
3251         case IT_CREAT:
3252                 rc = MDT_IT_CREATE;
3253                 break;
3254         case IT_READDIR:
3255                 rc = MDT_IT_READDIR;
3256                 break;
3257         case IT_GETATTR:
3258                 rc = MDT_IT_GETATTR;
3259                 break;
3260         case IT_LOOKUP:
3261                 rc = MDT_IT_LOOKUP;
3262                 break;
3263         case IT_UNLINK:
3264                 rc = MDT_IT_UNLINK;
3265                 break;
3266         case IT_TRUNC:
3267                 rc = MDT_IT_TRUNC;
3268                 break;
3269         case IT_GETXATTR:
3270                 rc = MDT_IT_GETXATTR;
3271                 break;
3272         default:
3273                 CERROR("Unknown intent opcode: %ld\n", itcode);
3274                 rc = -EINVAL;
3275                 break;
3276         }
3277         return rc;
3278 }
3279
3280 static int mdt_intent_opc(long itopc, struct mdt_thread_info *info,
3281                           struct ldlm_lock **lockp, int flags)
3282 {
3283         struct req_capsule   *pill;
3284         struct mdt_it_flavor *flv;
3285         int opc;
3286         int rc;
3287         ENTRY;
3288
3289         opc = mdt_intent_code(itopc);
3290         if (opc < 0)
3291                 RETURN(-EINVAL);
3292
3293         pill = info->mti_pill;
3294         flv  = &mdt_it_flavor[opc];
3295
3296         if (flv->it_fmt != NULL)
3297                 req_capsule_extend(pill, flv->it_fmt);
3298
3299         rc = mdt_unpack_req_pack_rep(info, flv->it_flags);
3300         if (rc == 0) {
3301                 struct ptlrpc_request *req = mdt_info_req(info);
3302                 if (flv->it_flags & MUTABOR &&
3303                     req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
3304                         RETURN(-EROFS);
3305         }
3306         if (rc == 0 && flv->it_act != NULL) {
3307                 /* execute policy */
3308                 rc = flv->it_act(opc, info, lockp, flags);
3309         } else {
3310                 rc = -EOPNOTSUPP;
3311         }
3312         RETURN(rc);
3313 }
3314
3315 static int mdt_intent_policy(struct ldlm_namespace *ns,
3316                              struct ldlm_lock **lockp, void *req_cookie,
3317                              ldlm_mode_t mode, int flags, void *data)
3318 {
3319         struct mdt_thread_info *info;
3320         struct ptlrpc_request  *req  =  req_cookie;
3321         struct ldlm_intent     *it;
3322         struct req_capsule     *pill;
3323         int rc;
3324
3325         ENTRY;
3326
3327         LASSERT(req != NULL);
3328
3329         info = lu_context_key_get(&req->rq_svc_thread->t_env->le_ctx,
3330                                   &mdt_thread_key);
3331         LASSERT(info != NULL);
3332         pill = info->mti_pill;
3333         LASSERT(pill->rc_req == req);
3334
3335         if (req->rq_reqmsg->lm_bufcount > DLM_INTENT_IT_OFF) {
3336                 req_capsule_extend(pill, &RQF_LDLM_INTENT);
3337                 it = req_capsule_client_get(pill, &RMF_LDLM_INTENT);
3338                 if (it != NULL) {
3339                         const struct ldlm_request *dlmreq;
3340                         __u64 req_bits;
3341
3342                         rc = mdt_intent_opc(it->opc, info, lockp, flags);
3343                         if (rc == 0)
3344                                 rc = ELDLM_OK;
3345
3346                         /*
3347                          * A lock without inodebits makes no sense and will oops
3348                          * later in ldlm. Check it now to see whether we got a
3349                          * wrong lock from the client or the bits were corrupted
3350                          * somewhere in mdt_intent_opc().
3351                          */
3352                         dlmreq = info->mti_dlm_req;
3353                         req_bits = dlmreq->lock_desc.l_policy_data.l_inodebits.bits;
3354                         LASSERT(req_bits != 0);
3355
3356                 } else
3357                         rc = err_serious(-EFAULT);
3358         } else {
3359                 /* No intent was provided */
3360                 LASSERT(pill->rc_fmt == &RQF_LDLM_ENQUEUE);
3361                 rc = req_capsule_server_pack(pill);
3362                 if (rc)
3363                         rc = err_serious(rc);
3364         }
3365         RETURN(rc);
3366 }
3367
3368 /*
3369  * Seq wrappers
3370  */
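/*
 * Descriptive note: skip 'lost' sequence ranges by allocating (and
 * discarding) that many extra meta-sequences from the local seq server,
 * which moves the server's range up.  For example, lost == 2 results in two
 * seq_server_alloc_meta() calls.
 */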
3371 static void mdt_seq_adjust(const struct lu_env *env,
3372                           struct mdt_device *m, int lost)
3373 {
3374         struct md_site *ms = mdt_md_site(m);
3375         struct lu_range out;
3376         ENTRY;
3377
3378         LASSERT(ms && ms->ms_server_seq);
3379         LASSERT(lost >= 0);
3380         /* get extra seq from seq_server, moving its range up */
3381         while (lost-- > 0) {
3382                 seq_server_alloc_meta(ms->ms_server_seq, NULL, &out, env);
3383         }
3384         EXIT;
3385 }
3386
3387 static int mdt_seq_fini(const struct lu_env *env,
3388                         struct mdt_device *m)
3389 {
3390         struct md_site *ms = mdt_md_site(m);
3391         ENTRY;
3392
3393         if (ms != NULL) {
3394                 if (ms->ms_server_seq) {
3395                         seq_server_fini(ms->ms_server_seq, env);
3396                         OBD_FREE_PTR(ms->ms_server_seq);
3397                         ms->ms_server_seq = NULL;
3398                 }
3399
3400                 if (ms->ms_control_seq) {
3401                         seq_server_fini(ms->ms_control_seq, env);
3402                         OBD_FREE_PTR(ms->ms_control_seq);
3403                         ms->ms_control_seq = NULL;
3404                 }
3405
3406                 if (ms->ms_client_seq) {
3407                         seq_client_fini(ms->ms_client_seq);
3408                         OBD_FREE_PTR(ms->ms_client_seq);
3409                         ms->ms_client_seq = NULL;
3410                 }
3411         }
3412
3413         RETURN(0);
3414 }
3415
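/*
 * Overview (descriptive comment): on the sequence-controller node
 * (ms_node_id == 0) this sets up the seq-controller server plus a local
 * "ctl-<uuid>" client talking to it directly, then starts the per-MDT seq
 * server and wires the controller client into it via seq_server_set_cli().
 * Other nodes only start the seq server here; their controller client is
 * created later by mdt_seq_init_cli().
 */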
3416 static int mdt_seq_init(const struct lu_env *env,
3417                         const char *uuid,
3418                         struct mdt_device *m)
3419 {
3420         struct md_site *ms;
3421         char *prefix;
3422         int rc;
3423         ENTRY;
3424
3425         ms = mdt_md_site(m);
3426
3427         /*
3428          * This is the sequence-controller node. Init the seq-controller server
3429          * on the local MDT.
3430          */
3431         if (ms->ms_node_id == 0) {
3432                 LASSERT(ms->ms_control_seq == NULL);
3433
3434                 OBD_ALLOC_PTR(ms->ms_control_seq);
3435                 if (ms->ms_control_seq == NULL)
3436                         RETURN(-ENOMEM);
3437
3438                 rc = seq_server_init(ms->ms_control_seq,
3439                                      m->mdt_bottom, uuid,
3440                                      LUSTRE_SEQ_CONTROLLER,
3441                                      env);
3442
3443                 if (rc)
3444                         GOTO(out_seq_fini, rc);
3445
3446                 OBD_ALLOC_PTR(ms->ms_client_seq);
3447                 if (ms->ms_client_seq == NULL)
3448                         GOTO(out_seq_fini, rc = -ENOMEM);
3449
3450                 OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
3451                 if (prefix == NULL) {
3452                         OBD_FREE_PTR(ms->ms_client_seq);
3453                         GOTO(out_seq_fini, rc = -ENOMEM);
3454                 }
3455
3456                 snprintf(prefix, MAX_OBD_NAME + 5, "ctl-%s",
3457                          uuid);
3458
3459                 /*
3460                  * Init seq-controller client after seq-controller server is
3461                  * ready. Pass ms->ms_control_seq to it for direct talking.
3462                  */
3463                 rc = seq_client_init(ms->ms_client_seq, NULL,
3464                                      LUSTRE_SEQ_METADATA, prefix,
3465                                      ms->ms_control_seq);
3466                 OBD_FREE(prefix, MAX_OBD_NAME + 5);
3467
3468                 if (rc)
3469                         GOTO(out_seq_fini, rc);
3470         }
3471
3472         /* Init seq-server on local MDT */
3473         LASSERT(ms->ms_server_seq == NULL);
3474
3475         OBD_ALLOC_PTR(ms->ms_server_seq);
3476         if (ms->ms_server_seq == NULL)
3477                 GOTO(out_seq_fini, rc = -ENOMEM);
3478
3479         rc = seq_server_init(ms->ms_server_seq,
3480                              m->mdt_bottom, uuid,
3481                              LUSTRE_SEQ_SERVER,
3482                              env);
3483         if (rc)
3484                 GOTO(out_seq_fini, rc);
3485
3486         /* Assign seq-controller client to local seq-server. */
3487         if (ms->ms_node_id == 0) {
3488                 LASSERT(ms->ms_client_seq != NULL);
3489
3490                 rc = seq_server_set_cli(ms->ms_server_seq,
3491                                         ms->ms_client_seq,
3492                                         env);
3493         }
3494
3495         EXIT;
3496 out_seq_fini:
3497         if (rc)
3498                 mdt_seq_fini(env, m);
3499
3500         return rc;
3501 }
3502 /*
3503  * Init client sequence manager which is used by local MDS to talk to sequence
3504  * controller on remote node.
3505  */
3506 static int mdt_seq_init_cli(const struct lu_env *env,
3507                             struct mdt_device *m,
3508                             struct lustre_cfg *cfg)
3509 {
3510         struct md_site    *ms = mdt_md_site(m);
3511         struct obd_device *mdc;
3512         struct obd_uuid   *uuidp, *mdcuuidp;
3513         char              *uuid_str, *mdc_uuid_str;
3514         int                rc;
3515         int                index;
3516         struct mdt_thread_info *info;
3517         char *p, *index_string = lustre_cfg_string(cfg, 2);
3518         ENTRY;
3519
3520         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3521         uuidp = &info->mti_u.uuid[0];
3522         mdcuuidp = &info->mti_u.uuid[1];
3523
3524         LASSERT(index_string);
3525
3526         index = simple_strtol(index_string, &p, 10);
3527         if (*p) {
3528                 CERROR("Invalid index in lustre_cfg, offset 2\n");
3529                 RETURN(-EINVAL);
3530         }
3531
3532         /* Check whether this is adding the first MDC and the controller is
3533          * not yet initialized. */
3534         if (index != 0 || ms->ms_client_seq)
3535                 RETURN(0);
3536
3537         uuid_str = lustre_cfg_string(cfg, 1);
3538         mdc_uuid_str = lustre_cfg_string(cfg, 4);
3539         obd_str2uuid(uuidp, uuid_str);
3540         obd_str2uuid(mdcuuidp, mdc_uuid_str);
3541
3542         mdc = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, mdcuuidp);
3543         if (!mdc) {
3544                 CERROR("can't find controller MDC by uuid %s\n",
3545                        uuid_str);
3546                 rc = -ENOENT;
3547         } else if (!mdc->obd_set_up) {
3548                 CERROR("target %s not set up\n", mdc->obd_name);
3549                 rc = -EINVAL;
3550         } else {
3551                 LASSERT(ms->ms_control_exp);
3552                 OBD_ALLOC_PTR(ms->ms_client_seq);
3553                 if (ms->ms_client_seq != NULL) {
3554                         char *prefix;
3555
3556                         OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
3557                         if (!prefix)
3558                                 RETURN(-ENOMEM);
3559
3560                         snprintf(prefix, MAX_OBD_NAME + 5, "ctl-%s",
3561                                  mdc->obd_name);
3562
3563                         rc = seq_client_init(ms->ms_client_seq,
3564                                              ms->ms_control_exp,
3565                                              LUSTRE_SEQ_METADATA,
3566                                              prefix, NULL);
3567                         OBD_FREE(prefix, MAX_OBD_NAME + 5);
3568                 } else
3569                         rc = -ENOMEM;
3570
3571                 if (rc)
3572                         RETURN(rc);
3573
3574                 LASSERT(ms->ms_server_seq != NULL);
3575                 rc = seq_server_set_cli(ms->ms_server_seq, ms->ms_client_seq,
3576                                         env);
3577         }
3578
3579         RETURN(rc);
3580 }
3581
3582 static void mdt_seq_fini_cli(struct mdt_device *m)
3583 {
3584         struct md_site *ms;
3585
3586         ENTRY;
3587
3588         ms = mdt_md_site(m);
3589
3590         if (ms != NULL) {
3591                 if (ms->ms_server_seq)
3592                         seq_server_set_cli(ms->ms_server_seq,
3593                                    NULL, NULL);
3594
3595                 if (ms->ms_control_exp) {
3596                         class_export_put(ms->ms_control_exp);
3597                         ms->ms_control_exp = NULL;
3598                 }
3599         }
3600         EXIT;
3601 }
3602
3603 /*
3604  * FLD wrappers
3605  */
3606 static int mdt_fld_fini(const struct lu_env *env,
3607                         struct mdt_device *m)
3608 {
3609         struct md_site *ms = mdt_md_site(m);
3610         ENTRY;
3611
3612         if (ms && ms->ms_server_fld) {
3613                 fld_server_fini(ms->ms_server_fld, env);
3614                 OBD_FREE_PTR(ms->ms_server_fld);
3615                 ms->ms_server_fld = NULL;
3616         }
3617
3618         RETURN(0);
3619 }
3620
3621 static int mdt_fld_init(const struct lu_env *env,
3622                         const char *uuid,
3623                         struct mdt_device *m)
3624 {
3625         struct md_site *ms;
3626         int rc;
3627         ENTRY;
3628
3629         ms = mdt_md_site(m);
3630
3631         OBD_ALLOC_PTR(ms->ms_server_fld);
3632         if (ms->ms_server_fld == NULL)
3633                 RETURN(rc = -ENOMEM);
3634
3635         rc = fld_server_init(ms->ms_server_fld,
3636                              m->mdt_bottom, uuid, env);
3637         if (rc) {
3638                 OBD_FREE_PTR(ms->ms_server_fld);
3639                 ms->ms_server_fld = NULL;
3640                 RETURN(rc);
3641         }
3642
3643         RETURN(0);
3644 }
3645
3646 /* device init/fini methods */
3647 static void mdt_stop_ptlrpc_service(struct mdt_device *m)
3648 {
3649         ENTRY;
3650         if (m->mdt_regular_service != NULL) {
3651                 ptlrpc_unregister_service(m->mdt_regular_service);
3652                 m->mdt_regular_service = NULL;
3653         }
3654         if (m->mdt_readpage_service != NULL) {
3655                 ptlrpc_unregister_service(m->mdt_readpage_service);
3656                 m->mdt_readpage_service = NULL;
3657         }
3658         if (m->mdt_xmds_service != NULL) {
3659                 ptlrpc_unregister_service(m->mdt_xmds_service);
3660                 m->mdt_xmds_service = NULL;
3661         }
3662         if (m->mdt_setattr_service != NULL) {
3663                 ptlrpc_unregister_service(m->mdt_setattr_service);
3664                 m->mdt_setattr_service = NULL;
3665         }
3666         if (m->mdt_mdsc_service != NULL) {
3667                 ptlrpc_unregister_service(m->mdt_mdsc_service);
3668                 m->mdt_mdsc_service = NULL;
3669         }
3670         if (m->mdt_mdss_service != NULL) {
3671                 ptlrpc_unregister_service(m->mdt_mdss_service);
3672                 m->mdt_mdss_service = NULL;
3673         }
3674         if (m->mdt_dtss_service != NULL) {
3675                 ptlrpc_unregister_service(m->mdt_dtss_service);
3676                 m->mdt_dtss_service = NULL;
3677         }
3678         if (m->mdt_fld_service != NULL) {
3679                 ptlrpc_unregister_service(m->mdt_fld_service);
3680                 m->mdt_fld_service = NULL;
3681         }
3682         EXIT;
3683 }
3684
3685 static int mdt_start_ptlrpc_service(struct mdt_device *m)
3686 {
3687         int rc;
3688         static struct ptlrpc_service_conf conf;
3689         cfs_proc_dir_entry_t *procfs_entry;
3690         ENTRY;
3691
3692         procfs_entry = m->mdt_md_dev.md_lu_dev.ld_obd->obd_proc_entry;
3693
3694         conf = (typeof(conf)) {
3695                 .psc_nbufs           = MDS_NBUFS,
3696                 .psc_bufsize         = MDS_BUFSIZE,
3697                 .psc_max_req_size    = MDS_MAXREQSIZE,
3698                 .psc_max_reply_size  = MDS_MAXREPSIZE,
3699                 .psc_req_portal      = MDS_REQUEST_PORTAL,
3700                 .psc_rep_portal      = MDC_REPLY_PORTAL,
3701                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3702                 /*
3703                  * We'd like to have a mechanism to set this on a per-device
3704                  * basis, but alas... (see the note after this block)
3705                  */
3706                 .psc_min_threads    = min(max(mdt_num_threads, MDT_MIN_THREADS),
3707                                           MDT_MAX_THREADS),
3708                 .psc_max_threads     = MDT_MAX_THREADS,
3709                 .psc_ctx_tags        = LCT_MD_THREAD
3710         };
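        /*
         * Note (illustrative): psc_min_threads above clamps the module
         * parameter into [MDT_MIN_THREADS, MDT_MAX_THREADS] via
         * min(max(mdt_num_threads, MDT_MIN_THREADS), MDT_MAX_THREADS), e.g.
         * mdt_num_threads == 0 yields MDT_MIN_THREADS and any value above
         * MDT_MAX_THREADS is capped at MDT_MAX_THREADS.
         */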
3711
3712         m->mdt_ldlm_client = &m->mdt_md_dev.md_lu_dev.ld_obd->obd_ldlm_client;
3713         ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
3714                            "mdt_ldlm_client", m->mdt_ldlm_client);
3715
3716         m->mdt_regular_service =
3717                 ptlrpc_init_svc_conf(&conf, mdt_regular_handle, LUSTRE_MDT_NAME,
3718                                      procfs_entry, target_print_req,
3719                                      LUSTRE_MDT_NAME);
3720         if (m->mdt_regular_service == NULL)
3721                 RETURN(-ENOMEM);
3722
3723         rc = ptlrpc_start_threads(NULL, m->mdt_regular_service);
3724         if (rc)
3725                 GOTO(err_mdt_svc, rc);
3726
3727         /*
3728          * Readpage service configuration. Ideally these parameters should be
3729          * tunable.
3730          */
3731         conf = (typeof(conf)) {
3732                 .psc_nbufs           = MDS_NBUFS,
3733                 .psc_bufsize         = MDS_BUFSIZE,
3734                 .psc_max_req_size    = MDS_MAXREQSIZE,
3735                 .psc_max_reply_size  = MDS_MAXREPSIZE,
3736                 .psc_req_portal      = MDS_READPAGE_PORTAL,
3737                 .psc_rep_portal      = MDC_REPLY_PORTAL,
3738                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3739                 .psc_min_threads    = min(max(mdt_num_threads, MDT_MIN_THREADS),
3740                                           MDT_MAX_THREADS),
3741                 .psc_max_threads     = MDT_MAX_THREADS,
3742                 .psc_ctx_tags        = LCT_MD_THREAD
3743         };
3744         m->mdt_readpage_service =
3745                 ptlrpc_init_svc_conf(&conf, mdt_readpage_handle,
3746                                      LUSTRE_MDT_NAME "_readpage",
3747                                      procfs_entry, target_print_req,"mdt_rdpg");
3748
3749         if (m->mdt_readpage_service == NULL) {
3750                 CERROR("failed to start readpage service\n");
3751                 GOTO(err_mdt_svc, rc = -ENOMEM);
3752         }
3753
3754         rc = ptlrpc_start_threads(NULL, m->mdt_readpage_service);
3755         if (rc)
                     GOTO(err_mdt_svc, rc);

3756         /*
3757          * setattr service configuration.
3758          */
3759         conf = (typeof(conf)) {
3760                 .psc_nbufs           = MDS_NBUFS,
3761                 .psc_bufsize         = MDS_BUFSIZE,
3762                 .psc_max_req_size    = MDS_MAXREQSIZE,
3763                 .psc_max_reply_size  = MDS_MAXREPSIZE,
3764                 .psc_req_portal      = MDS_SETATTR_PORTAL,
3765                 .psc_rep_portal      = MDC_REPLY_PORTAL,
3766                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3767                 .psc_min_threads   = min(max(mdt_num_threads, MDT_MIN_THREADS),
3768                                          MDT_MAX_THREADS),
3769                 .psc_max_threads     = MDT_MAX_THREADS,
3770                 .psc_ctx_tags        = LCT_MD_THREAD
3771         };
3772
3773         m->mdt_setattr_service =
3774                 ptlrpc_init_svc_conf(&conf, mdt_regular_handle,
3775                                      LUSTRE_MDT_NAME "_setattr",
3776                                      procfs_entry, target_print_req,"mdt_attr");
3777
3778         if (!m->mdt_setattr_service) {
3779                 CERROR("failed to start setattr service\n");
3780                 GOTO(err_mdt_svc, rc = -ENOMEM);
3781         }
3782
3783         rc = ptlrpc_start_threads(NULL, m->mdt_setattr_service);
3784         if (rc)
3785                 GOTO(err_mdt_svc, rc);
3786
3787         /*
3788          * sequence controller service configuration
3789          */
3790         conf = (typeof(conf)) {
3791                 .psc_nbufs           = MDS_NBUFS,
3792                 .psc_bufsize         = MDS_BUFSIZE,
3793                 .psc_max_req_size    = SEQ_MAXREQSIZE,
3794                 .psc_max_reply_size  = SEQ_MAXREPSIZE,
3795                 .psc_req_portal      = SEQ_CONTROLLER_PORTAL,
3796                 .psc_rep_portal      = MDC_REPLY_PORTAL,
3797                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3798                 .psc_min_threads     = SEQ_NUM_THREADS,
3799                 .psc_max_threads     = SEQ_NUM_THREADS,
3800                 .psc_ctx_tags        = LCT_MD_THREAD|LCT_DT_THREAD
3801         };
3802
3803         m->mdt_mdsc_service =
3804                 ptlrpc_init_svc_conf(&conf, mdt_mdsc_handle,
3805                                      LUSTRE_MDT_NAME"_mdsc",
3806                                      procfs_entry, target_print_req,"mdt_mdsc");
3807         if (!m->mdt_mdsc_service) {
3808                 CERROR("failed to start seq controller service\n");
3809                 GOTO(err_mdt_svc, rc = -ENOMEM);
3810         }
3811
3812         rc = ptlrpc_start_threads(NULL, m->mdt_mdsc_service);
3813         if (rc)
3814                 GOTO(err_mdt_svc, rc);
3815
3816         /*
3817          * metadata sequence server service configuration
3818          */
3819         conf = (typeof(conf)) {
3820                 .psc_nbufs           = MDS_NBUFS,
3821                 .psc_bufsize         = MDS_BUFSIZE,
3822                 .psc_max_req_size    = SEQ_MAXREQSIZE,
3823                 .psc_max_reply_size  = SEQ_MAXREPSIZE,
3824                 .psc_req_portal      = SEQ_METADATA_PORTAL,
3825                 .psc_rep_portal      = MDC_REPLY_PORTAL,
3826                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3827                 .psc_min_threads     = SEQ_NUM_THREADS,
3828                 .psc_max_threads     = SEQ_NUM_THREADS,
3829                 .psc_ctx_tags        = LCT_MD_THREAD|LCT_DT_THREAD
3830         };
3831
3832         m->mdt_mdss_service =
3833                 ptlrpc_init_svc_conf(&conf, mdt_mdss_handle,
3834                                      LUSTRE_MDT_NAME"_mdss",
3835                                      procfs_entry, target_print_req,"mdt_mdss");
3836         if (!m->mdt_mdss_service) {
3837                 CERROR("failed to start metadata seq server service\n");
3838                 GOTO(err_mdt_svc, rc = -ENOMEM);
3839         }
3840
3841         rc = ptlrpc_start_threads(NULL, m->mdt_mdss_service);
3842         if (rc)
3843                 GOTO(err_mdt_svc, rc);
3844
3845
3846         /*
3847          * Data sequence server service configuration. We want a truly
3848          * cluster-wide sequence space, which is why only one sequence
3849          * controller manages the space.
3850          */
3851         conf = (typeof(conf)) {
3852                 .psc_nbufs           = MDS_NBUFS,
3853                 .psc_bufsize         = MDS_BUFSIZE,
3854                 .psc_max_req_size    = SEQ_MAXREQSIZE,
3855                 .psc_max_reply_size  = SEQ_MAXREPSIZE,
3856                 .psc_req_portal      = SEQ_DATA_PORTAL,
3857                 .psc_rep_portal      = OSC_REPLY_PORTAL,
3858                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3859                 .psc_min_threads     = SEQ_NUM_THREADS,
3860                 .psc_max_threads     = SEQ_NUM_THREADS,
3861                 .psc_ctx_tags        = LCT_MD_THREAD|LCT_DT_THREAD
3862         };
3863
3864         m->mdt_dtss_service =
3865                 ptlrpc_init_svc_conf(&conf, mdt_dtss_handle,
3866                                      LUSTRE_MDT_NAME"_dtss",
3867                                      procfs_entry, target_print_req,"mdt_dtss");
3868         if (!m->mdt_dtss_service) {
3869                 CERROR("failed to start data seq server service\n");
3870                 GOTO(err_mdt_svc, rc = -ENOMEM);
3871         }
3872
3873         rc = ptlrpc_start_threads(NULL, m->mdt_dtss_service);
3874         if (rc)
3875                 GOTO(err_mdt_svc, rc);
3876
3877         /* FLD service start */
3878         conf = (typeof(conf)) {
3879                 .psc_nbufs           = MDS_NBUFS,
3880                 .psc_bufsize         = MDS_BUFSIZE,
3881                 .psc_max_req_size    = FLD_MAXREQSIZE,
3882                 .psc_max_reply_size  = FLD_MAXREPSIZE,
3883                 .psc_req_portal      = FLD_REQUEST_PORTAL,
3884                 .psc_rep_portal      = MDC_REPLY_PORTAL,
3885                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3886                 .psc_min_threads     = FLD_NUM_THREADS,
3887                 .psc_max_threads     = FLD_NUM_THREADS,
3888                 .psc_ctx_tags        = LCT_DT_THREAD|LCT_MD_THREAD
3889         };
3890
3891         m->mdt_fld_service =
3892                 ptlrpc_init_svc_conf(&conf, mdt_fld_handle,
3893                                      LUSTRE_MDT_NAME"_fld",
3894                                      procfs_entry, target_print_req, "mdt_fld");
3895         if (!m->mdt_fld_service) {
3896                 CERROR("failed to start fld service\n");
3897                 GOTO(err_mdt_svc, rc = -ENOMEM);
3898         }
3899
3900         rc = ptlrpc_start_threads(NULL, m->mdt_fld_service);
3901         if (rc)
3902                 GOTO(err_mdt_svc, rc);
3903
3904         /*
3905          * mds-mds service configuration. A separate portal is used so that
3906          * mds-mds requests are not blocked during recovery.
3907          */
3908         conf = (typeof(conf)) {
3909                 .psc_nbufs           = MDS_NBUFS,
3910                 .psc_bufsize         = MDS_BUFSIZE,
3911                 .psc_max_req_size    = MDS_MAXREQSIZE,
3912                 .psc_max_reply_size  = MDS_MAXREPSIZE,
3913                 .psc_req_portal      = MDS_MDS_PORTAL,
3914                 .psc_rep_portal      = MDC_REPLY_PORTAL,
3915                 .psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
3916                 .psc_min_threads    = min(max(mdt_num_threads, MDT_MIN_THREADS),
3917                                           MDT_MAX_THREADS),
3918                 .psc_max_threads     = MDT_MAX_THREADS,
3919                 .psc_ctx_tags        = LCT_MD_THREAD
3920         };
3921         m->mdt_xmds_service =
3922                 ptlrpc_init_svc_conf(&conf, mdt_xmds_handle,
3923                                      LUSTRE_MDT_NAME "_mds",
3924                                      procfs_entry, target_print_req,"mdt_xmds");
3925
3926         if (m->mdt_xmds_service == NULL) {
3927                 CERROR("failed to start mds-mds service\n");
3928                 GOTO(err_mdt_svc, rc = -ENOMEM);
3929         }
3930
3931         rc = ptlrpc_start_threads(NULL, m->mdt_xmds_service);
3932         if (rc)
3933                 GOTO(err_mdt_svc, rc);
3934
3935         EXIT;
3936 err_mdt_svc:
3937         if (rc)
3938                 mdt_stop_ptlrpc_service(m);
3939
3940         return rc;
3941 }
3942
3943 static void mdt_stack_fini(const struct lu_env *env,
3944                            struct mdt_device *m, struct lu_device *top)
3945 {
3946         struct obd_device       *obd = mdt2obd_dev(m);
3947         struct lustre_cfg_bufs  *bufs;
3948         struct lustre_cfg       *lcfg;
3949         struct mdt_thread_info  *info;
3950         char flags[3]="";
3951         ENTRY;
3952
3953         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3954         LASSERT(info != NULL);
3955
3956         bufs = &info->mti_u.bufs;
3957         /* process cleanup, pass mdt obd name to get obd umount flags */
3958         lustre_cfg_bufs_reset(bufs, obd->obd_name);
3959         if (obd->obd_force)
3960                 strcat(flags, "F");
3961         if (obd->obd_fail)
3962                 strcat(flags, "A");
3963         lustre_cfg_bufs_set_string(bufs, 1, flags);
3964         lcfg = lustre_cfg_new(LCFG_CLEANUP, bufs);
3965         if (!lcfg) {
3966                 CERROR("Cannot alloc lcfg!\n");
3967                 return;
3968         }
3969
3970         LASSERT(top);
3971         top->ld_ops->ldo_process_config(env, top, lcfg);
3972         lustre_cfg_free(lcfg);
3973
3974         lu_stack_fini(env, top);
3975         m->mdt_child = NULL;
3976         m->mdt_bottom = NULL;
3977 }
3978
3979 static struct lu_device *mdt_layer_setup(struct lu_env *env,
3980                                          const char *typename,
3981                                          struct lu_device *child,
3982                                          struct lustre_cfg *cfg)
3983 {
3984         const char            *dev = lustre_cfg_string(cfg, 0);
3985         struct obd_type       *type;
3986         struct lu_device_type *ldt;
3987         struct lu_device      *d;
3988         int rc;
3989         ENTRY;
3990
3991         /* find the type */
3992         type = class_get_type(typename);
3993         if (!type) {
3994                 CERROR("Unknown type: '%s'\n", typename);
3995                 GOTO(out, rc = -ENODEV);
3996         }
3997
3998         rc = lu_env_refill((struct lu_env *)env);
3999         if (rc != 0) {
4000                 CERROR("Failed to refill env context: rc = %d\n", rc);
4001                 GOTO(out_type, rc);
4002         }
4003
4004         ldt = type->typ_lu;
4005         if (ldt == NULL) {
4006                 CERROR("type '%s' has no lu_device_type\n", typename);
4007                 GOTO(out_type, rc = -EINVAL);
4008         }
4009
4010         ldt->ldt_obd_type = type;
4011         d = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
4012         if (IS_ERR(d)) {
4013                 CERROR("Cannot allocate device: '%s'\n", typename);
4014                 GOTO(out_type, rc = -ENODEV);
4015         }
4016
4017         LASSERT(child->ld_site);
4018         d->ld_site = child->ld_site;
4019
4020         type->typ_refcnt++;
4021         rc = ldt->ldt_ops->ldto_device_init(env, d, dev, child);
4022         if (rc) {
4023                 CERROR("can't init device '%s', rc %d\n", typename, rc);
4024                 GOTO(out_alloc, rc);
4025         }
4026         lu_device_get(d);
4027         lu_ref_add(&d->ld_reference, "lu-stack", &lu_site_init);
4028
4029         RETURN(d);
4030
4031 out_alloc:
4032         ldt->ldt_ops->ldto_device_free(env, d);
4033         type->typ_refcnt--;
4034 out_type:
4035         class_put_type(type);
4036 out:
4037         return ERR_PTR(rc);
4038 }
4039
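/*
 * Descriptive sketch of the device stack built below, bottom to top:
 *
 *      osd (LUSTRE_OSD_NAME) -> mdd (LUSTRE_MDD_NAME) -> cmm (LUSTRE_CMM_NAME)
 *
 * with the mdt device itself on top.  Upcalls are wired in the opposite
 * direction: mdd's upcall device is cmm and cmm's upcall device is the mdt,
 * so lower layers can notify upwards.  m->mdt_bottom points at the osd
 * device and m->mdt_child at the cmm device.
 */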
4040 static int mdt_stack_init(struct lu_env *env,
4041                           struct mdt_device *m,
4042                           struct lustre_cfg *cfg,
4043                           struct lustre_mount_info  *lmi)
4044 {
4045         struct lu_device  *d = &m->mdt_md_dev.md_lu_dev;
4046         struct lu_device  *tmp;
4047         struct md_device  *md;
4048         struct lu_device  *child_lu_dev;
4049         int rc;
4050         ENTRY;
4051
4052         /* init the stack */
4053         tmp = mdt_layer_setup(env, LUSTRE_OSD_NAME, d, cfg);
4054         if (IS_ERR(tmp)) {
4055                 RETURN(PTR_ERR(tmp));
4056         }
4057         m->mdt_bottom = lu2dt_dev(tmp);
4058         d = tmp;
4059         tmp = mdt_layer_setup(env, LUSTRE_MDD_NAME, d, cfg);
4060         if (IS_ERR(tmp)) {
4061                 GOTO(out, rc = PTR_ERR(tmp));
4062         }
4063         d = tmp;
4064         md = lu2md_dev(d);
4065
4066         tmp = mdt_layer_setup(env, LUSTRE_CMM_NAME, d, cfg);
4067         if (IS_ERR(tmp)) {
4068                 GOTO(out, rc = PTR_ERR(tmp));
4069         }
4070         d = tmp;
4071         /* set mdd upcall device */
4072         md_upcall_dev_set(md, lu2md_dev(d));
4073
4074         md = lu2md_dev(d);
4075         /* set cmm upcall device */
4076         md_upcall_dev_set(md, &m->mdt_md_dev);
4077
4078         m->mdt_child = lu2md_dev(d);
4079
4080         /* process setup config */
4081         tmp = &m->mdt_md_dev.md_lu_dev;
4082         rc = tmp->ld_ops->ldo_process_config(env, tmp, cfg);
4083         if (rc)
4084                 GOTO(out, rc);
4085
4086         /* initialize local objects */
4087         child_lu_dev = &m->mdt_child->md_lu_dev;
4088
4089         rc = child_lu_dev->ld_ops->ldo_prepare(env,
4090                                                &m->mdt_md_dev.md_lu_dev,
4091                                                child_lu_dev);
4092 out:
4093         /* fini from last known good lu_device */
4094         if (rc)
4095                 mdt_stack_fini(env, m, d);
4096
4097         return rc;
4098 }
4099
4100 static void mdt_fini(const struct lu_env *env, struct mdt_device *m)
4101 {
4102         struct md_device *next = m->mdt_child;
4103         struct lu_device *d    = &m->mdt_md_dev.md_lu_dev;
4104         struct lu_site   *ls   = d->ld_site;
4105         struct obd_device *obd = mdt2obd_dev(m);
4106         int             waited = 0;
4107         ENTRY;
4108
4109         /* At this point, obd exports might still be on the "obd_zombie_exports"
4110          * list, and obd_zombie_impexp_thread() is trying to destroy them.
4111          * We wait a little bit until all exports (except the self-export)
4112          * have been destroyed, because the whole mdt stack might be accessed
4113          * in mdt_destroy_export(). This should not take long, usually a second
4114          * or two, so it is not a problem during umount.
4115          *
4116          * The three references that should be remaining are the
4117          * obd_self_export and the attach and setup references.
4118          */
4119         while (atomic_read(&obd->obd_refcount) > 3) {
4120                 cfs_schedule_timeout(CFS_TASK_UNINT, cfs_time_seconds(1));
4121                 ++waited;
4122                 if (waited > 5 && IS_PO2(waited))
4123                         LCONSOLE_WARN("Waiting for obd_zombie_impexp_thread "
4124                                       "more than %d seconds to destroy all "
4125                                       "the exports. The current obd refcount ="
4126                                       " %d. Is it stuck there?\n",
4127                                       waited, atomic_read(&obd->obd_refcount));
4128         }
4129
4130         ping_evictor_stop();
4131
4132         target_recovery_fini(obd);
4133         mdt_stop_ptlrpc_service(m);
4134         obd_zombie_barrier();
4135 #ifdef HAVE_QUOTA_SUPPORT
4136         next->md_ops->mdo_quota.mqo_cleanup(env, next);
4137 #endif
4138         mdt_fs_cleanup(env, m);
4139         upcall_cache_cleanup(m->mdt_identity_cache);
4140         m->mdt_identity_cache = NULL;
4141
4142         if (m->mdt_namespace != NULL) {
4143                 ldlm_namespace_free(m->mdt_namespace, NULL, d->ld_obd->obd_force);
4144                 d->ld_obd->obd_namespace = m->mdt_namespace = NULL;
4145         }
4146
4147         mdt_seq_fini(env, m);
4148         mdt_seq_fini_cli(m);
4149         mdt_fld_fini(env, m);
4150         mdt_procfs_fini(m);
4151         lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
4152         lprocfs_free_per_client_stats(obd);
4153         lprocfs_free_obd_stats(obd);
4154         ptlrpc_lprocfs_unregister_obd(d->ld_obd);
4155         lprocfs_obd_cleanup(d->ld_obd);
4156
4157         sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
4158
4159         next->md_ops->mdo_init_capa_ctxt(env, next, 0, 0, 0, NULL);
4160         cfs_timer_disarm(&m->mdt_ck_timer);
4161         mdt_ck_thread_stop(m);
4162
4163         /* finish the stack */
4164         mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
4165
4166         if (ls) {
4167                 struct md_site *mite;
4168
4169                 lu_site_fini(ls);
4170                 mite = lu_site2md(ls);
4171                 OBD_FREE_PTR(mite);
4172                 d->ld_site = NULL;
4173         }
4174         LASSERT(atomic_read(&d->ld_ref) == 0);
4175
4176         EXIT;
4177 }
4178
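/*
 * Parse the comma-separated mount options string into mdt_opts flags.
 * For example (illustrative), options == "user_xattr,noacl" enables
 * mo_user_xattr and clears mo_acl; unrecognized tokens are ignored.
 * MDS/OSS capabilities are enabled unconditionally here, and ACLs default
 * to enabled when CONFIG_FS_POSIX_ACL is set.
 */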
4179 static void fsoptions_to_mdt_flags(struct mdt_device *m, char *options)
4180 {
4181         char *p = options;
4182
4183         m->mdt_opts.mo_mds_capa = 1;
4184         m->mdt_opts.mo_oss_capa = 1;
4185 #ifdef CONFIG_FS_POSIX_ACL
4186         /* ACLs should be enabled by default (b=13829) */
4187         m->mdt_opts.mo_acl = 1;
4188         LCONSOLE_INFO("Enabling ACL\n");
4189 #else
4190         m->mdt_opts.mo_acl = 0;
4191         LCONSOLE_INFO("Disabling ACL\n");
4192 #endif
4193
4194         if (!options)
4195                 return;
4196
4197         while (*options) {
4198                 int len;
4199
4200                 while (*p && *p != ',')
4201                         p++;
4202
4203                 len = p - options;
4204                 if ((len == sizeof("user_xattr") - 1) &&
4205                     (memcmp(options, "user_xattr", len) == 0)) {
4206                         m->mdt_opts.mo_user_xattr = 1;
4207                         LCONSOLE_INFO("Enabling user_xattr\n");
4208                 } else if ((len == sizeof("nouser_xattr") - 1) &&
4209                            (memcmp(options, "nouser_xattr", len) == 0)) {
4210                         m->mdt_opts.mo_user_xattr = 0;
4211                         LCONSOLE_INFO("Disabling user_xattr\n");
4212                 } else if ((len == sizeof("noacl") - 1) &&
4213                            (memcmp(options, "noacl", len) == 0)) {
4214                         m->mdt_opts.mo_acl = 0;
4215                         LCONSOLE_INFO("Disabling ACL\n");
4216                 }
4217
4218                 options = ++p;
4219         }
4220 }
4221
4222 int mdt_postrecov(const struct lu_env *, struct mdt_device *);
4223
4224 static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
4225                      struct lu_device_type *ldt, struct lustre_cfg *cfg)
4226 {
4227         struct lprocfs_static_vars lvars;
4228         struct mdt_thread_info    *info;
4229         struct obd_device         *obd;
4230         const char                *dev = lustre_cfg_string(cfg, 0);
4231         const char                *num = lustre_cfg_string(cfg, 2);
4232         struct lustre_mount_info  *lmi = NULL;
4233         struct lustre_sb_info     *lsi;
4234         struct lustre_disk_data   *ldd;
4235         struct lu_site            *s;
4236         struct md_site            *mite;
4237         const char                *identity_upcall = "NONE";
4238 #ifdef HAVE_QUOTA_SUPPORT
4239         struct md_device          *next;
4240 #endif
4241         int                        rc;
4242         int                        node_id;
4243         ENTRY;
4244
4245         md_device_init(&m->mdt_md_dev, ldt);
4246         /*
4247          * The environment (env) might be missing mdt_thread_key values at
4248          * this point, if the device is allocated while mdt_thread_key is in
4249          * QUIESCENT mode.
4250          *
4251          * Usually the device allocation path does not use module key values,
4252          * but mdt has to do a lot of work here, so allocate the key value.
4253          */
4254         rc = lu_env_refill((struct lu_env *)env);
4255         if (rc != 0)
4256                 RETURN(rc);
4257
4258         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
4259         LASSERT(info != NULL);
4260
4261         obd = class_name2obd(dev);
4262         LASSERT(obd != NULL);
4263
4264         spin_lock_init(&m->mdt_transno_lock);
4265
4266         m->mdt_max_mdsize = MAX_MD_SIZE;
4267         m->mdt_max_cookiesize = sizeof(struct llog_cookie);
4268
4269         m->mdt_opts.mo_user_xattr = 0;
4270         m->mdt_opts.mo_acl = 0;
4271         m->mdt_opts.mo_cos = MDT_COS_DEFAULT;
4272         lmi = server_get_mount_2(dev);
4273         if (lmi == NULL) {
4274                 CERROR("Cannot get mount info for %s!\n", dev);
4275                 RETURN(-EFAULT);
4276         } else {
4277                 lsi = s2lsi(lmi->lmi_sb);
4278                 fsoptions_to_mdt_flags(m, lsi->lsi_lmd->lmd_opts);
4279                 server_put_mount_2(dev, lmi->lmi_mnt);
4280                 /* CMD is supported only in IAM mode */
4281                 ldd = lsi->lsi_ldd;
4282                 LASSERT(num);
4283                 node_id = simple_strtol(num, NULL, 10);
4284                 if (!(ldd->ldd_flags & LDD_F_IAM_DIR) && node_id) {
4285                         CERROR("CMD operation is not allowed in IOP mode\n");
4286                         RETURN(-EINVAL);
4287                 }
4288         }
4289
4290         rwlock_init(&m->mdt_sptlrpc_lock);
4291         sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset);
4292
4293         spin_lock_init(&m->mdt_ioepoch_lock);
4294         m->mdt_opts.mo_compat_resname = 0;
4295         m->mdt_capa_timeout = CAPA_TIMEOUT;
4296         m->mdt_capa_alg = CAPA_HMAC_ALG_SHA1;
4297         m->mdt_ck_timeout = CAPA_KEY_TIMEOUT;
4298
4299         spin_lock_init(&m->mdt_client_bitmap_lock);
4300
4301         OBD_ALLOC_PTR(mite);
4302         if (mite == NULL)
4303                 GOTO(err_lmi, rc = -ENOMEM);
4304
4305         s = &mite->ms_lu;
4306
4307         m->mdt_md_dev.md_lu_dev.ld_ops = &mdt_lu_ops;
4308         m->mdt_md_dev.md_lu_dev.ld_obd = obd;
4309         /* set this lu_device in the obd, because error handling needs it */
4310         obd->obd_lu_dev = &m->mdt_md_dev.md_lu_dev;
4311
4312         rc = lu_site_init(s, &m->mdt_md_dev.md_lu_dev);
4313         if (rc) {
4314                 CERROR("Can't init lu_site, rc %d\n", rc);
4315                 GOTO(err_free_site, rc);
4316         }
4317
4318         lprocfs_mdt_init_vars(&lvars);
4319         rc = lprocfs_obd_setup(obd, lvars.obd_vars);
4320         if (rc) {
4321                 CERROR("Can't init lprocfs, rc %d\n", rc);
4322                 GOTO(err_fini_site, rc);
4323         }
4324         ptlrpc_lprocfs_register_obd(obd);
4325
4326         rc = mdt_procfs_init(m, dev);
4327         if (rc) {
4328                 CERROR("Can't init MDT lprocfs, rc %d\n", rc);
4329                 GOTO(err_fini_proc, rc);
4330         }
4331
4332         obd->obd_proc_exports_entry = proc_mkdir("exports",
4333                                                  obd->obd_proc_entry);
4334         if (obd->obd_proc_exports_entry)
4335                 lprocfs_add_simple(obd->obd_proc_exports_entry,
4336                                    "clear", lprocfs_nid_stats_clear_read,
4337                                    lprocfs_nid_stats_clear_write, obd, NULL);
4338
4339         /* set server index */
4340         lu_site2md(s)->ms_node_id = node_id;
4341
4342         /* Failover is the default.
4343          * FIXME: we do not fail out mds0/mgs (the node assumed to have
4344          * ms_node_id == 0), which may cause some problems. XXX
4345          */
4346         obd->obd_replayable = 1;
4347         /* No connections are accepted until configuration finishes */
4348         obd->obd_no_conn = 1;
4349
4350         if (cfg->lcfg_bufcount > 4 && LUSTRE_CFG_BUFLEN(cfg, 4) > 0) {
4351                 char *str = lustre_cfg_string(cfg, 4);
4352                 if (strchr(str, 'n')) {
4353                         CWARN("%s: recovery disabled\n", obd->obd_name);
4354                         obd->obd_replayable = 0;
4355                 }
4356         }
4357
4358         /* init the stack */
4359         rc = mdt_stack_init((struct lu_env *)env, m, cfg, lmi);
4360         if (rc) {
4361                 CERROR("Can't init device stack, rc %d\n", rc);
4362                 GOTO(err_fini_proc, rc);
4363         }
4364
4365         rc = mdt_fld_init(env, obd->obd_name, m);
4366         if (rc)
4367                 GOTO(err_fini_stack, rc);
4368
4369         rc = mdt_seq_init(env, obd->obd_name, m);
4370         if (rc)
4371                 GOTO(err_fini_fld, rc);
4372
4373         snprintf(info->mti_u.ns_name, sizeof info->mti_u.ns_name,
4374                  LUSTRE_MDT_NAME"-%p", m);
4375         m->mdt_namespace = ldlm_namespace_new(obd, info->mti_u.ns_name,
4376                                               LDLM_NAMESPACE_SERVER,
4377                                               LDLM_NAMESPACE_GREEDY);
4378         if (m->mdt_namespace == NULL)
4379                 GOTO(err_fini_seq, rc = -ENOMEM);
4380
4381         ldlm_register_intent(m->mdt_namespace, mdt_intent_policy);
4382         /* set obd_namespace for compatibility with old code */
4383         obd->obd_namespace = m->mdt_namespace;
4384
4385         /* XXX: to support suppgid for ACL, enable identity_upcall by
4386          * default; otherwise clients may get an unexpected -EACCES. */
4387         if (m->mdt_opts.mo_acl)
4388                 identity_upcall = MDT_IDENTITY_UPCALL_PATH;
4389
4390         m->mdt_identity_cache = upcall_cache_init(obd->obd_name, identity_upcall,
4391                                                   &mdt_identity_upcall_cache_ops);
4392         if (IS_ERR(m->mdt_identity_cache)) {
4393                 rc = PTR_ERR(m->mdt_identity_cache);
4394                 m->mdt_identity_cache = NULL;
4395                 GOTO(err_free_ns, rc);
4396         }
4397
4398         cfs_timer_init(&m->mdt_ck_timer, mdt_ck_timer_callback, m);
4399
4400         rc = mdt_ck_thread_start(m);
4401         if (rc)
4402                 GOTO(err_free_ns, rc);
4403
4404         rc = mdt_fs_setup(env, m, obd, lsi);
4405         if (rc)
4406                 GOTO(err_capa, rc);
4407
4408 #ifdef HAVE_QUOTA_SUPPORT
4409         next = m->mdt_child;
4410         rc = next->md_ops->mdo_quota.mqo_setup(env, next, lmi->lmi_mnt);
4411         if (rc)
4412                 GOTO(err_fs_cleanup, rc);
4413 #endif
4414
4415         server_put_mount_2(dev, lmi->lmi_mnt);
4416         lmi = NULL;
4417
4418         target_recovery_init(obd, mdt_recovery_handle);
4419
4420         rc = mdt_start_ptlrpc_service(m);
4421         if (rc)
4422                 GOTO(err_recovery, rc);
4423
4424         ping_evictor_start();
4425
4426         rc = lu_site_init_finish(s);
4427         if (rc)
4428                 GOTO(err_stop_service, rc);
4429
4430         if (obd->obd_recovering == 0)
4431                 mdt_postrecov(env, m);
4432
4433         mdt_init_capa_ctxt(env, m);
4434
4435         /* Reduce the initial timeout on an MDS because it doesn't need such
4436          * a long timeout as an OST does. Adaptive timeouts will adjust this
4437          * value appropriately. */
4438         if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
4439                 ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT;
4440
4441         RETURN(0);
4442
4443 err_stop_service:
4444         ping_evictor_stop();
4445         mdt_stop_ptlrpc_service(m);
4446 err_recovery:
4447         target_recovery_fini(obd);
4448 #ifdef HAVE_QUOTA_SUPPORT
4449         next->md_ops->mdo_quota.mqo_cleanup(env, next);
4450 err_fs_cleanup:
4451 #endif
4452         mdt_fs_cleanup(env, m);
4453 err_capa:
4454         cfs_timer_disarm(&m->mdt_ck_timer);
4455         mdt_ck_thread_stop(m);
4456 err_free_ns:
4457         upcall_cache_cleanup(m->mdt_identity_cache);
4458         m->mdt_identity_cache = NULL;
4459         ldlm_namespace_free(m->mdt_namespace, NULL, 0);
4460         obd->obd_namespace = m->mdt_namespace = NULL;
4461 err_fini_seq:
4462         mdt_seq_fini(env, m);
4463 err_fini_fld:
4464         mdt_fld_fini(env, m);
4465 err_fini_stack:
4466         mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
4467 err_fini_proc:
4468         mdt_procfs_fini(m);
4469         if (obd->obd_proc_exports_entry)
4470                 lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
4471         ptlrpc_lprocfs_unregister_obd(obd);
4472         lprocfs_obd_cleanup(obd);
4473 err_fini_site:
4474         lu_site_fini(s);
4475 err_free_site:
4476         OBD_FREE_PTR(mite);
4477 err_lmi:
4478         if (lmi)
4479                 server_put_mount_2(dev, lmi->lmi_mnt);
4480         return (rc);
4481 }
4482
4483 /* used by MGS to process specific configurations */
4484 static int mdt_process_config(const struct lu_env *env,
4485                               struct lu_device *d, struct lustre_cfg *cfg)
4486 {
4487         struct mdt_device *m = mdt_dev(d);
4488         struct md_device *md_next = m->mdt_child;
4489         struct lu_device *next = md2lu_dev(md_next);
4490         int rc = 0;
4491         ENTRY;
4492
4493         switch (cfg->lcfg_command) {
4494         case LCFG_SPTLRPC_CONF: {
4495                 struct sptlrpc_conf_log *log;
4496                 struct sptlrpc_rule_set  tmp_rset;
4497
4498                 log = sptlrpc_conf_log_extract(cfg);
4499                 if (IS_ERR(log)) {
4500                         rc = PTR_ERR(log);
4501                         break;
4502                 }
4503
4504                 sptlrpc_rule_set_init(&tmp_rset);
4505
4506                 rc = sptlrpc_rule_set_from_log(&tmp_rset, log);
4507                 if (rc) {
4508                         CERROR("mdt %p: failed to get sptlrpc rules: %d\n", m, rc);
4509                         break;
4510                 }
4511
4512                 write_lock(&m->mdt_sptlrpc_lock);
4513                 sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
4514                 m->mdt_sptlrpc_rset = tmp_rset;
4515                 write_unlock(&m->mdt_sptlrpc_lock);
4516
4517                 sptlrpc_target_update_exp_flavor(
4518                                 md2lu_dev(&m->mdt_md_dev)->ld_obd, &tmp_rset);
4519
4520                 break;
4521         }
4522         case LCFG_PARAM: {
4523                 struct lprocfs_static_vars lvars;
4524                 struct obd_device *obd = d->ld_obd;
4525
4526                 /*
4527                  * For interoperability between 1.8 and 2.0,
4528                  * skip old "mdt.group_upcall" param.
4529                  */
4530                 {
4531                         char *param = lustre_cfg_string(cfg, 1);
4532                         if (param && !strncmp("mdt.group_upcall", param, 16)) {
4533                                 CWARN("For 1.8 interoperability, skipping"
4534                                       " obsolete mdt.group_upcall param\n");
4535                                 break;
4536                         }
4537                 }
4538
4539                 lprocfs_mdt_init_vars(&lvars);
4540                 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars,
4541                                               cfg, obd);
4542                 if (rc > 0 || rc == -ENOSYS)
4543                         /* we don't understand; pass it on */
4544                         rc = next->ld_ops->ldo_process_config(env, next, cfg);
4545                 break;
4546         }
4547         case LCFG_ADD_MDC:
4548                 /*
4549                  * Add an mdc hook to get the first MDT uuid and connect it
4550                  * to ls->controller for use by the seq manager.
4551                  */
4552                 rc = next->ld_ops->ldo_process_config(env, next, cfg);
4553                 if (rc)
4554                         CERROR("Can't add mdc, rc %d\n", rc);
4555                 else
4556                         rc = mdt_seq_init_cli(env, mdt_dev(d), cfg);
4557                 break;
4558         default:
4559                 /* others are passed further */
4560                 rc = next->ld_ops->ldo_process_config(env, next, cfg);
4561                 break;
4562         }
4563         RETURN(rc);
4564 }
4565
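     /*
      * Allocate an mdt_object and install it as the top layer of a compound
      * lu_object; the layer from the child device is stacked below it in
      * mdt_object_init().
      */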
4566 static struct lu_object *mdt_object_alloc(const struct lu_env *env,
4567                                           const struct lu_object_header *hdr,
4568                                           struct lu_device *d)
4569 {
4570         struct mdt_object *mo;
4571
4572         ENTRY;
4573
4574         OBD_ALLOC_PTR(mo);
4575         if (mo != NULL) {
4576                 struct lu_object *o;
4577                 struct lu_object_header *h;
4578
4579                 o = &mo->mot_obj.mo_lu;
4580                 h = &mo->mot_header;
4581                 lu_object_header_init(h);
4582                 lu_object_init(o, h, d);
4583                 lu_object_add_top(h, o);
4584                 o->lo_ops = &mdt_obj_ops;
4585                 RETURN(o);
4586         } else
4587                 RETURN(NULL);
4588 }
4589
4590 static int mdt_object_init(const struct lu_env *env, struct lu_object *o,
4591                            const struct lu_object_conf *_)
4592 {
4593         struct mdt_device *d = mdt_dev(o->lo_dev);
4594         struct lu_device  *under;
4595         struct lu_object  *below;
4596         int                rc = 0;
4597         ENTRY;
4598
4599         CDEBUG(D_INFO, "object init, fid = "DFID"\n",
4600                PFID(lu_object_fid(o)));
4601
4602         under = &d->mdt_child->md_lu_dev;
4603         below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
4604         if (below != NULL) {
4605                 lu_object_add(o, below);
4606         } else
4607                 rc = -ENOMEM;
4608
4609         RETURN(rc);
4610 }
4611
4612 static void mdt_object_free(const struct lu_env *env, struct lu_object *o)
4613 {
4614         struct mdt_object *mo = mdt_obj(o);
4615         struct lu_object_header *h;
4616         ENTRY;
4617
4618         h = o->lo_header;
4619         CDEBUG(D_INFO, "object free, fid = "DFID"\n",
4620                PFID(lu_object_fid(o)));
4621
4622         lu_object_fini(o);
4623         lu_object_header_fini(h);
4624         OBD_FREE_PTR(mo);
4625         EXIT;
4626 }
4627
4628 static const struct lu_device_operations mdt_lu_ops = {
4629         .ldo_object_alloc   = mdt_object_alloc,
4630         .ldo_process_config = mdt_process_config,
4631 };
4632
4633 static const struct lu_object_operations mdt_obj_ops = {
4634         .loo_object_init    = mdt_object_init,
4635         .loo_object_free    = mdt_object_free
4636 };
4637
4638 /* mds_connect_internal */
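     /*
      * Negotiate connect flags with the client: unsupported bits are masked
      * out, ACL and user xattr support are dropped when disabled on this MDT,
      * and a client that does not offer OBD_CONNECT_FID is rejected with
      * -EBADE.
      */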
4639 static int mdt_connect_internal(struct obd_export *exp,
4640                                 struct mdt_device *mdt,
4641                                 struct obd_connect_data *data)
4642 {
4643         if (data != NULL) {
4644                 data->ocd_connect_flags &= MDT_CONNECT_SUPPORTED;
4645                 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
4646
4647                 /* If no known bits remain (which should not happen, as every
4648                    client should support at least the LOOKUP and UPDATE bits),
4649                    revert to compatibility mode with plain locks. */
4650                 if (!data->ocd_ibits_known &&
4651                     data->ocd_connect_flags & OBD_CONNECT_IBITS)
4652                         data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
4653
4654                 if (!mdt->mdt_opts.mo_acl)
4655                         data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
4656
4657                 if (!mdt->mdt_opts.mo_user_xattr)
4658                         data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
4659
4660                 spin_lock(&exp->exp_lock);
4661                 exp->exp_connect_flags = data->ocd_connect_flags;
4662                 spin_unlock(&exp->exp_lock);
4663                 data->ocd_version = LUSTRE_VERSION_CODE;
4664                 exp->exp_mdt_data.med_ibits_known = data->ocd_ibits_known;
4665         }
4666
4667 #if 0
4668         if (mdt->mdt_opts.mo_acl &&
4669             ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
4670                 CWARN("%s: MDS requires ACL support but client does not\n",
4671                       mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
4672                 return -EBADE;
4673         }
4674 #endif
4675
4676         if ((exp->exp_connect_flags & OBD_CONNECT_FID) == 0) {
4677                 CWARN("%s: MDS requires FID support, but client lacks it\n",
4678                       mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
4679                 return -EBADE;
4680         }
4681
4682         return 0;
4683 }
4684
4685 /* mds_connect copy */
4686 static int mdt_obd_connect(const struct lu_env *env,
4687                            struct lustre_handle *conn, struct obd_device *obd,
4688                            struct obd_uuid *cluuid,
4689                            struct obd_connect_data *data,
4690                            void *localdata)
4691 {
4692         struct mdt_thread_info *info;
4693         struct lsd_client_data *lcd;
4694         struct obd_export      *exp;
4695         struct mdt_device      *mdt;
4696         struct ptlrpc_request  *req;
4697         int                     rc;
4698         ENTRY;
4699
4700         LASSERT(env != NULL);
4701         if (!conn || !obd || !cluuid)
4702                 RETURN(-EINVAL);
4703
4704         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
4705         req = info->mti_pill->rc_req;
4706         mdt = mdt_dev(obd->obd_lu_dev);
4707
4708         rc = class_connect(conn, obd, cluuid);
4709         if (rc)
4710                 RETURN(rc);
4711
4712         exp = class_conn2export(conn);
4713         LASSERT(exp != NULL);
4714
4715         CDEBUG(D_SEC, "from %s\n", sptlrpc_part2name(req->rq_sp_from));
4716
4717         spin_lock(&exp->exp_lock);
4718         exp->exp_sp_peer = req->rq_sp_from;
4719
4720         read_lock(&mdt->mdt_sptlrpc_lock);
4721         sptlrpc_rule_set_choose(&mdt->mdt_sptlrpc_rset, exp->exp_sp_peer,
4722                                 req->rq_peer.nid, &exp->exp_flvr);
4723         read_unlock(&mdt->mdt_sptlrpc_lock);
4724
4725         if (exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
4726                 CERROR("invalid rpc flavor %x, expect %x, from %s\n",
4727                        req->rq_flvr.sf_rpc, exp->exp_flvr.sf_rpc,
4728                        libcfs_nid2str(req->rq_peer.nid));
4729                 exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
4730                 spin_unlock(&exp->exp_lock);
4731                 RETURN(-EACCES);
4732         }
4733         spin_unlock(&exp->exp_lock);
4734
4735         rc = mdt_connect_internal(exp, mdt, data);
4736         if (rc == 0) {
4737                 OBD_ALLOC_PTR(lcd);
4738                 if (lcd != NULL) {
4739                         struct mdt_thread_info *mti;
4740                         mti = lu_context_key_get(&env->le_ctx,
4741                                                  &mdt_thread_key);
4742                         LASSERT(mti != NULL);
4743                         mti->mti_exp = exp;
4744                         memcpy(lcd->lcd_uuid, cluuid, sizeof lcd->lcd_uuid);
4745                         exp->exp_mdt_data.med_lcd = lcd;
4746                         rc = mdt_client_new(env, mdt);
4747                         if (rc != 0) {
4748                                 OBD_FREE_PTR(lcd);
4749                                 exp->exp_mdt_data.med_lcd = NULL;
4750                         } else {
4751                                 mdt_export_stats_init(obd, exp, localdata);
4752                         }
4753                 } else
4754                         rc = -ENOMEM;
4755         }
4756
4757         if (rc != 0)
4758                 class_disconnect(exp);
4759         else
4760                 class_export_put(exp);
4761
4762         RETURN(rc);
4763 }
4764
4765 static int mdt_obd_reconnect(const struct lu_env *env,
4766                              struct obd_export *exp, struct obd_device *obd,
4767                              struct obd_uuid *cluuid,
4768                              struct obd_connect_data *data,
4769                              void *localdata)
4770 {
4771         struct mdt_thread_info *info;
4772         struct mdt_device      *mdt;
4773         struct ptlrpc_request  *req;
4774         int                     rc;
4775         ENTRY;
4776
4777         if (exp == NULL || obd == NULL || cluuid == NULL)
4778                 RETURN(-EINVAL);
4779
4780         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
4781         req = info->mti_pill->rc_req;
4782         mdt = mdt_dev(obd->obd_lu_dev);
4783
4784         CDEBUG(D_SEC, "from %s\n", sptlrpc_part2name(req->rq_sp_from));
4785
4786         spin_lock(&exp->exp_lock);
4787         if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
4788                 exp->exp_sp_peer = req->rq_sp_from;
4789
4790                 read_lock(&mdt->mdt_sptlrpc_lock);
4791                 sptlrpc_rule_set_choose(&mdt->mdt_sptlrpc_rset,
4792                                         exp->exp_sp_peer,
4793                                         req->rq_peer.nid, &exp->exp_flvr);
4794                 read_unlock(&mdt->mdt_sptlrpc_lock);
4795
4796                 if (exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
4797                         CERROR("invalid rpc flavor %x, expect %x, from %s\n",
4798                                req->rq_flvr.sf_rpc, exp->exp_flvr.sf_rpc,
4799                                libcfs_nid2str(req->rq_peer.nid));
4800                         exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
4801                         spin_unlock(&exp->exp_lock);
4802                         RETURN(-EACCES);
4803                 }
4804         }
4805         spin_unlock(&exp->exp_lock);
4806
4807         rc = mdt_connect_internal(exp, mdt_dev(obd->obd_lu_dev), data);
4808         if (rc == 0)
4809                 mdt_export_stats_init(obd, exp, localdata);
4810
4811         RETURN(rc);
4812 }
4813
4814 static int mdt_obd_disconnect(struct obd_export *exp)
4815 {
4816         struct mdt_device *mdt = mdt_dev(exp->exp_obd->obd_lu_dev);
4817         int rc;
4818         ENTRY;
4819
4820         LASSERT(exp);
4821         class_export_get(exp);
4822
4823         /* Disconnect early so that clients can't keep using export */
4824         rc = class_disconnect(exp);
4825         if (mdt->mdt_namespace != NULL || exp->exp_obd->obd_namespace != NULL)
4826                 ldlm_cancel_locks_for_export(exp);
4827
4828         /* release nid stat reference */
4829         lprocfs_exp_cleanup(exp);
4830
4831         /* complete all outstanding replies */
4832         spin_lock(&exp->exp_lock);
4833         while (!list_empty(&exp->exp_outstanding_replies)) {
4834                 struct ptlrpc_reply_state *rs =
4835                         list_entry(exp->exp_outstanding_replies.next,
4836                                    struct ptlrpc_reply_state, rs_exp_list);
4837                 struct ptlrpc_service *svc = rs->rs_service;
4838
4839                 spin_lock(&svc->srv_lock);
4840                 list_del_init(&rs->rs_exp_list);
4841                 ptlrpc_schedule_difficult_reply(rs);
4842                 spin_unlock(&svc->srv_lock);
4843         }
4844         spin_unlock(&exp->exp_lock);
4845
4846         class_export_put(exp);
4847         RETURN(rc);
4848 }
4849
4850 /* FIXME: Can we avoid using these two interfaces? */
4851 static int mdt_init_export(struct obd_export *exp)
4852 {
4853         struct mdt_export_data *med = &exp->exp_mdt_data;
4854         int                     rc;
4855         ENTRY;
4856
4857         CFS_INIT_LIST_HEAD(&med->med_open_head);
4858         spin_lock_init(&med->med_open_lock);
4859         sema_init(&med->med_idmap_sem, 1);
4860         med->med_idmap = NULL;
4861         spin_lock(&exp->exp_lock);
4862         exp->exp_connecting = 1;
4863         spin_unlock(&exp->exp_lock);
4864         rc = ldlm_init_export(exp);
4865         if (rc)
4866                 CERROR("Error %d while initializing export\n", rc);
4867         RETURN(rc);
4868 }
4869
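     /*
      * Tear down a client export: clean up the identity map for remote
      * clients, close any files the client still holds open (which may
      * trigger orphan handling), and remove the client record via
      * mdt_client_del().
      */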
4870 static int mdt_destroy_export(struct obd_export *export)
4871 {
4872         struct mdt_export_data *med;
4873         struct obd_device      *obd = export->exp_obd;
4874         struct mdt_device      *mdt;
4875         struct mdt_thread_info *info;
4876         struct lu_env           env;
4877         struct md_attr         *ma;
4878         int lmm_size;
4879         int cookie_size;
4880         CFS_LIST_HEAD(closing_list);
4881         struct mdt_file_data *mfd, *n;
4882         int rc = 0;
4883         ENTRY;
4884
4885         med = &export->exp_mdt_data;
4886         if (exp_connect_rmtclient(export))
4887                 mdt_cleanup_idmap(med);
4888
4889         target_destroy_export(export);
4890         ldlm_destroy_export(export);
4891
4892         if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
4893                 RETURN(0);
4894
4895         mdt = mdt_dev(obd->obd_lu_dev);
4896         LASSERT(mdt != NULL);
4897
4898         rc = lu_env_init(&env, LCT_MD_THREAD);
4899         if (rc)
4900                 RETURN(rc);
4901
4902         info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
4903         LASSERT(info != NULL);
4904         memset(info, 0, sizeof *info);
4905         info->mti_env = &env;
4906         info->mti_mdt = mdt;
4907         info->mti_exp = export;
4908
4909         ma = &info->mti_attr;
4910         lmm_size = ma->ma_lmm_size = mdt->mdt_max_mdsize;
4911         cookie_size = ma->ma_cookie_size = mdt->mdt_max_cookiesize;
4912         OBD_ALLOC(ma->ma_lmm, lmm_size);
4913         OBD_ALLOC(ma->ma_cookie, cookie_size);
4914
4915         if (ma->ma_lmm == NULL || ma->ma_cookie == NULL)
4916                 GOTO(out, rc = -ENOMEM);
4917         ma->ma_need = MA_LOV | MA_COOKIE;
4918         ma->ma_valid = 0;
4919         /* Close any open files (which may also cause orphan unlinking). */
4920         spin_lock(&med->med_open_lock);
4921         while (!list_empty(&med->med_open_head)) {
4922                 struct list_head *tmp = med->med_open_head.next;
4923                 mfd = list_entry(tmp, struct mdt_file_data, mfd_list);
4924
4925                 /* Remove mfd handle so it can't be found again.
4926                  * We are consuming the mfd_list reference here. */
4927                 class_handle_unhash(&mfd->mfd_handle);
4928                 list_move_tail(&mfd->mfd_list, &closing_list);
4929         }
4930         spin_unlock(&med->med_open_lock);
4931
4932         list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
4933                 list_del_init(&mfd->mfd_list);
4934                 mdt_mfd_close(info, mfd);
4935                 /* TODO: if we close an unlinked file, we need to
4936                  * remove its objects from the OST */
4937                 memset(&ma->ma_attr, 0, sizeof(ma->ma_attr));
4938                 spin_lock(&med->med_open_lock);
4939                 ma->ma_lmm_size = lmm_size;
4940                 ma->ma_cookie_size = cookie_size;
4941                 ma->ma_need = MA_LOV | MA_COOKIE;
4942                 ma->ma_valid = 0;
4943                 spin_unlock(&med->med_open_lock);
4944         }
4945
4946         info->mti_mdt = NULL;
4947         mdt_client_del(&env, mdt);
4948
4949         EXIT;
4950 out:
4951         if (lmm_size) {
4952                 OBD_FREE(ma->ma_lmm, lmm_size);
4953                 ma->ma_lmm = NULL;
4954         }
4955         if (cookie_size) {
4956                 OBD_FREE(ma->ma_cookie, cookie_size);
4957                 ma->ma_cookie = NULL;
4958         }
4959         lu_env_fini(&env);
4960
4961         return rc;
4962 }
4963
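     /*
      * Client connections are allowed only after both the configuration log
      * has been processed (CONFIG_LOG) and the LOV sync notification has
      * arrived (CONFIG_SYNC); until then obd_no_conn remains set.
      */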
4964 static void mdt_allow_cli(struct mdt_device *m, unsigned int flag)
4965 {
4966         if (flag & CONFIG_LOG)
4967                 m->mdt_fl_cfglog = 1;
4968
4969         /* also notify active event */
4970         if (flag & CONFIG_SYNC)
4971                 m->mdt_fl_synced = 1;
4972
4973         if (m->mdt_fl_cfglog && m->mdt_fl_synced)
4974                 /* Open for clients */
4975                 m->mdt_md_dev.md_lu_dev.ld_obd->obd_no_conn = 0;
4976 }
4977
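     /*
      * Upcalls from the child md device: MD_LOV_SYNC refreshes the maximum MD
      * and cookie sizes and opens the MDT for clients, MD_NO_TRANS disables
      * mdt transactions for the current thread, and MD_LOV_CONFIG asserts
      * that the configuration log has not been processed yet.
      */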
4978 static int mdt_upcall(const struct lu_env *env, struct md_device *md,
4979                       enum md_upcall_event ev)
4980 {
4981         struct mdt_device *m = mdt_dev(&md->md_lu_dev);
4982         struct md_device  *next  = m->mdt_child;
4983         struct mdt_thread_info *mti;
4984         int rc = 0;
4985         ENTRY;
4986
4987         switch (ev) {
4988                 case MD_LOV_SYNC:
4989                         rc = next->md_ops->mdo_maxsize_get(env, next,
4990                                         &m->mdt_max_mdsize,
4991                                         &m->mdt_max_cookiesize);
4992                         CDEBUG(D_INFO, "get max mdsize %d max cookiesize %d\n",
4993                                      m->mdt_max_mdsize, m->mdt_max_cookiesize);
4994                         mdt_allow_cli(m, CONFIG_SYNC);
4995 #ifdef HAVE_QUOTA_SUPPORT
4996                         if (md->md_lu_dev.ld_obd->obd_recovering == 0)
4997                                 next->md_ops->mdo_quota.mqo_recovery(env, next);
4998 #endif
4999                         break;
5000                 case MD_NO_TRANS:
5001                         mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
5002                         mti->mti_no_need_trans = 1;
5003                         CDEBUG(D_INFO, "disable mdt trans for this thread\n");
5004                         break;
5005                 case MD_LOV_CONFIG:
5006                         /* Check that MDT is not yet configured */
5007                         LASSERT(!m->mdt_fl_cfglog);
5008                         break;
5009                 default:
5010                         CERROR("invalid event\n");
5011                         rc = -EINVAL;
5012                         break;
5013         }
5014         RETURN(rc);
5015 }
5016
5017 static int mdt_obd_notify(struct obd_device *host,
5018                           struct obd_device *watched,
5019                           enum obd_notify_event ev, void *data)
5020 {
5021         struct mdt_device *mdt = mdt_dev(host->obd_lu_dev);
5022 #ifdef HAVE_QUOTA_SUPPORT
5023         struct md_device *next = mdt->mdt_child;
5024 #endif
5025         ENTRY;
5026
5027         switch (ev) {
5028         case OBD_NOTIFY_CONFIG:
5029                 mdt_allow_cli(mdt, (unsigned long)data);
5030
5031 #ifdef HAVE_QUOTA_SUPPORT
5032                 /* quota_type has been processed, we can now handle
5033                  * incoming quota requests */
5034                 next->md_ops->mdo_quota.mqo_notify(NULL, next);
5035 #endif
5036                 break;
5037         default:
5038                 CDEBUG(D_INFO, "Unhandled notification %#x\n", ev);
5039         }
5040         RETURN(0);
5041 }
5042
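     /*
      * Device-level ioctls: OBD_IOC_SYNC flushes the device,
      * OBD_IOC_SET_READONLY switches the backing dt device read-only, and
      * OBD_IOC_ABORT_RECOVERY stops the recovery thread; anything else
      * returns -EOPNOTSUPP.
      */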
5043 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
5044                          void *karg, void *uarg)
5045 {
5046         struct lu_env      env;
5047         struct obd_device *obd = exp->exp_obd;
5048         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
5049         struct dt_device  *dt = mdt->mdt_bottom;
5050         int rc;
5051
5052         ENTRY;
5053         CDEBUG(D_IOCTL, "handling ioctl cmd %#x\n", cmd);
5054         rc = lu_env_init(&env, LCT_MD_THREAD);
5055         if (rc)
5056                 RETURN(rc);
5057
5058         switch (cmd) {
5059         case OBD_IOC_SYNC:
5060                 rc = mdt_device_sync(&env, mdt);
5061                 break;
5062         case OBD_IOC_SET_READONLY:
5063                 dt->dd_ops->dt_ro(&env, dt);
5064                 break;
5065         case OBD_IOC_ABORT_RECOVERY:
5066                 CERROR("Aborting recovery for device %s\n", obd->obd_name);
5067                 target_stop_recovery_thread(obd);
5068                 rc = 0;
5069                 break;
5070         default:
5071                 CERROR("Unsupported cmd = %d for device %s\n",
5072                        cmd, obd->obd_name);
5073                 rc = -EOPNOTSUPP;
5074         }
5075
5076         lu_env_fini(&env);
5077         RETURN(rc);
5078 }
5079
5080 int mdt_postrecov(const struct lu_env *env, struct mdt_device *mdt)
5081 {
5082         struct lu_device *ld = md2lu_dev(mdt->mdt_child);
5083         struct obd_device *obd = mdt2obd_dev(mdt);
5084 #ifdef HAVE_QUOTA_SUPPORT
5085         struct md_device *next = mdt->mdt_child;
5086 #endif
5087         int rc, lost;
5088         ENTRY;
5089         /* If some clients did not participate in recovery, the sequence may
5090          * have been lost; advance it to a safe value. */
5091         lost = obd->obd_max_recoverable_clients - obd->obd_connected_clients;
5092         mdt_seq_adjust(env, mdt, lost);
5093
5094         rc = ld->ld_ops->ldo_recovery_complete(env, ld);
5095 #ifdef HAVE_QUOTA_SUPPORT
5096         next->md_ops->mdo_quota.mqo_recovery(env, next);
5097 #endif
5098         RETURN(rc);
5099 }
5100
5101 int mdt_obd_postrecov(struct obd_device *obd)
5102 {
5103         struct lu_env env;
5104         int rc;
5105
5106         rc = lu_env_init(&env, LCT_MD_THREAD);
5107         if (rc)
5108                 RETURN(rc);
5109         rc = mdt_postrecov(&env, mdt_dev(obd->obd_lu_dev));
5110         lu_env_fini(&env);
5111         return rc;
5112 }
5113
5114 static struct obd_ops mdt_obd_device_ops = {
5115         .o_owner          = THIS_MODULE,
5116         .o_connect        = mdt_obd_connect,
5117         .o_reconnect      = mdt_obd_reconnect,
5118         .o_disconnect     = mdt_obd_disconnect,
5119         .o_init_export    = mdt_init_export,
5120         .o_destroy_export = mdt_destroy_export,
5121         .o_iocontrol      = mdt_iocontrol,
5122         .o_postrecov      = mdt_obd_postrecov,
5123         .o_notify         = mdt_obd_notify
5124 };
5125
5126 static struct lu_device *mdt_device_fini(const struct lu_env *env,
5127                                          struct lu_device *d)
5128 {
5129         struct mdt_device *m = mdt_dev(d);
5130         ENTRY;
5131
5132         mdt_fini(env, m);
5133         RETURN(NULL);
5134 }
5135
5136 static struct lu_device *mdt_device_free(const struct lu_env *env,
5137                                          struct lu_device *d)
5138 {
5139         struct mdt_device *m = mdt_dev(d);
5140         ENTRY;
5141
5142         md_device_fini(&m->mdt_md_dev);
5143         OBD_FREE_PTR(m);
5144         RETURN(NULL);
5145 }
5146
5147 static struct lu_device *mdt_device_alloc(const struct lu_env *env,
5148                                           struct lu_device_type *t,
5149                                           struct lustre_cfg *cfg)
5150 {
5151         struct lu_device  *l;
5152         struct mdt_device *m;
5153
5154         OBD_ALLOC_PTR(m);
5155         if (m != NULL) {
5156                 int rc;
5157
5158                 l = &m->mdt_md_dev.md_lu_dev;
5159                 rc = mdt_init0(env, m, t, cfg);
5160                 if (rc != 0) {
5161                         mdt_device_free(env, l);
5162                         l = ERR_PTR(rc);
5163                         return l;
5164                 }
5165                 md_upcall_init(&m->mdt_md_dev, mdt_upcall);
5166         } else
5167                 l = ERR_PTR(-ENOMEM);
5168         return l;
5169 }
5170
5171 /* context key constructor/destructor: mdt_key_init, mdt_key_fini */
5172 LU_KEY_INIT_FINI(mdt, struct mdt_thread_info);
5173
5174 /* context key: mdt_thread_key */
5175 LU_CONTEXT_KEY_DEFINE(mdt, LCT_MD_THREAD);
5176
5177 /* context key constructor/destructor: mdt_txn_key_init, mdt_txn_key_fini */
5178 LU_KEY_INIT_FINI(mdt_txn, struct mdt_txn_info);
5179
5180 struct lu_context_key mdt_txn_key = {
5181         .lct_tags = LCT_TX_HANDLE,
5182         .lct_init = mdt_txn_key_init,
5183         .lct_fini = mdt_txn_key_fini
5184 };
5185
5186 struct md_ucred *mdt_ucred(const struct mdt_thread_info *info)
5187 {
5188         return md_ucred(info->mti_env);
5189 }
5190
5191 /**
5192  * Enable/disable COS.
5193  *
5194  * Set/Clear the COS flag in mdt options.
5195  *
5196  * \param mdt mdt device
5197  * \param val 0 disables COS, other values enable COS
5198  */
5199 void mdt_enable_cos(struct mdt_device *mdt, int val)
5200 {
5201         struct lu_env env;
5202         int rc;
5203
5204         mdt->mdt_opts.mo_cos = !!val;
5205         rc = lu_env_init(&env, LCT_MD_THREAD);
5206         if (unlikely(rc != 0)) {
5207                 CWARN("lu_env initialization failed with rc = %d, "
5208                       "cannot sync\n", rc);
5209                 return;
5210         }
5211         mdt_device_sync(&env, mdt);
5212         lu_env_fini(&env);
5213 }
5214
5215 /**
5216  * Check COS status.
5217  *
5218  * Return COS flag status.
5219  *
5220  * \param mdt mdt device
5221  */
5222 int mdt_cos_is_enabled(struct mdt_device *mdt)
5223 {
5224         return mdt->mdt_opts.mo_cos != 0;
5225 }
5226
5227 /* type constructor/destructor: mdt_type_init, mdt_type_fini */
5228 LU_TYPE_INIT_FINI(mdt, &mdt_thread_key, &mdt_txn_key);
5229
5230 static struct lu_device_type_operations mdt_device_type_ops = {
5231         .ldto_init = mdt_type_init,
5232         .ldto_fini = mdt_type_fini,
5233
5234         .ldto_start = mdt_type_start,
5235         .ldto_stop  = mdt_type_stop,
5236
5237         .ldto_device_alloc = mdt_device_alloc,
5238         .ldto_device_free  = mdt_device_free,
5239         .ldto_device_fini  = mdt_device_fini
5240 };
5241
5242 static struct lu_device_type mdt_device_type = {
5243         .ldt_tags     = LU_DEVICE_MD,
5244         .ldt_name     = LUSTRE_MDT_NAME,
5245         .ldt_ops      = &mdt_device_type_ops,
5246         .ldt_ctx_tags = LCT_MD_THREAD
5247 };
5248
5249 static struct lu_local_obj_desc mdt_last_recv = {
5250         .llod_name      = LAST_RCVD,
5251         .llod_oid       = MDT_LAST_RECV_OID,
5252         .llod_is_index  = 0,
5253 };
5254
5255 static int __init mdt_mod_init(void)
5256 {
5257         struct lprocfs_static_vars lvars;
5258         int rc;
5259
5260         llo_local_obj_register(&mdt_last_recv);
5261
5262         mdt_num_threads = MDT_NUM_THREADS;
5263         lprocfs_mdt_init_vars(&lvars);
5264         rc = class_register_type(&mdt_obd_device_ops, NULL,
5265                                  lvars.module_vars, LUSTRE_MDT_NAME,
5266                                  &mdt_device_type);
5267
5268         return rc;
5269 }
5270
5271 static void __exit mdt_mod_exit(void)
5272 {
5273         class_unregister_type(LUSTRE_MDT_NAME);
5274 }
5275
5276
5277 #define DEF_HNDL(prefix, base, suffix, flags, opc, fn, fmt)             \
5278 [prefix ## _ ## opc - prefix ## _ ## base] = {                          \
5279         .mh_name    = #opc,                                             \
5280         .mh_fail_id = OBD_FAIL_ ## prefix ## _  ## opc ## suffix,       \
5281         .mh_opc     = prefix ## _  ## opc,                              \
5282         .mh_flags   = flags,                                            \
5283         .mh_act     = fn,                                               \
5284         .mh_fmt     = fmt                                               \
5285 }
5286
5287 #define DEF_MDT_HNDL(flags, name, fn, fmt)                                  \
5288         DEF_HNDL(MDS, GETATTR, _NET, flags, name, fn, fmt)
5289
5290 #define DEF_SEQ_HNDL(flags, name, fn, fmt)                      \
5291         DEF_HNDL(SEQ, QUERY, _NET, flags, name, fn, fmt)
5292
5293 #define DEF_FLD_HNDL(flags, name, fn, fmt)                      \
5294         DEF_HNDL(FLD, QUERY, _NET, flags, name, fn, fmt)
5295 /*
5296  * Request with a format known in advance
5297  */
5298 #define DEF_MDT_HNDL_F(flags, name, fn)                                 \
5299         DEF_HNDL(MDS, GETATTR, _NET, flags, name, fn, &RQF_MDS_ ## name)
5300
5301 #define DEF_SEQ_HNDL_F(flags, name, fn)                                 \
5302         DEF_HNDL(SEQ, QUERY, _NET, flags, name, fn, &RQF_SEQ_ ## name)
5303
5304 #define DEF_FLD_HNDL_F(flags, name, fn)                                 \
5305         DEF_HNDL(FLD, QUERY, _NET, flags, name, fn, &RQF_FLD_ ## name)
5306 /*
5307  * Request with a format we do not yet know
5308  */
5309 #define DEF_MDT_HNDL_0(flags, name, fn)                                 \
5310         DEF_HNDL(MDS, GETATTR, _NET, flags, name, fn, NULL)
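     /*
      * For example, DEF_MDT_HNDL_F(0, CONNECT, mdt_connect) expands to an
      * entry at index [MDS_CONNECT - MDS_GETATTR] with .mh_name = "CONNECT",
      * .mh_fail_id = OBD_FAIL_MDS_CONNECT_NET, .mh_opc = MDS_CONNECT,
      * .mh_act = mdt_connect and .mh_fmt = &RQF_MDS_CONNECT.
      */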
5311
5312 static struct mdt_handler mdt_mds_ops[] = {
5313 DEF_MDT_HNDL_F(0,                         CONNECT,      mdt_connect),
5314 DEF_MDT_HNDL_F(0,                         DISCONNECT,   mdt_disconnect),
5315 DEF_MDT_HNDL_F(0,                         SET_INFO,     mdt_set_info),
5316 DEF_MDT_HNDL_F(0           |HABEO_REFERO, GETSTATUS,    mdt_getstatus),
5317 DEF_MDT_HNDL_F(HABEO_CORPUS,              GETATTR,      mdt_getattr),
5318 DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, GETATTR_NAME, mdt_getattr_name),
5319 DEF_MDT_HNDL_F(HABEO_CORPUS,              GETXATTR,     mdt_getxattr),
5320 DEF_MDT_HNDL_F(0           |HABEO_REFERO, STATFS,       mdt_statfs),
5321 DEF_MDT_HNDL_F(0           |MUTABOR,      REINT,        mdt_reint),
5322 DEF_MDT_HNDL_F(HABEO_CORPUS,              CLOSE,        mdt_close),
5323 DEF_MDT_HNDL_F(HABEO_CORPUS,              DONE_WRITING, mdt_done_writing),
5324 DEF_MDT_HNDL_F(0           |HABEO_REFERO, PIN,          mdt_pin),
5325 DEF_MDT_HNDL_0(0,                         SYNC,         mdt_sync),
5326 DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, IS_SUBDIR,    mdt_is_subdir),
5327 #ifdef HAVE_QUOTA_SUPPORT
5328 DEF_MDT_HNDL_F(0,                         QUOTACHECK,   mdt_quotacheck_handle),
5329 DEF_MDT_HNDL_F(0,                         QUOTACTL,     mdt_quotactl_handle)
5330 #endif
5331 };
5332
5333 #define DEF_OBD_HNDL(flags, name, fn)                   \
5334         DEF_HNDL(OBD, PING, _NET, flags, name, fn, NULL)
5335
5336
5337 static struct mdt_handler mdt_obd_ops[] = {
5338         DEF_OBD_HNDL(0, PING,           mdt_obd_ping),
5339         DEF_OBD_HNDL(0, LOG_CANCEL,     mdt_obd_log_cancel),
5340         DEF_OBD_HNDL(0, QC_CALLBACK,    mdt_obd_qc_callback)
5341 };
5342
5343 #define DEF_DLM_HNDL_0(flags, name, fn)                   \
5344         DEF_HNDL(LDLM, ENQUEUE, , flags, name, fn, NULL)
5345 #define DEF_DLM_HNDL_F(flags, name, fn)                   \
5346         DEF_HNDL(LDLM, ENQUEUE, , flags, name, fn, &RQF_LDLM_ ## name)
5347
5348 static struct mdt_handler mdt_dlm_ops[] = {
5349         DEF_DLM_HNDL_F(HABEO_CLAVIS, ENQUEUE,        mdt_enqueue),
5350         DEF_DLM_HNDL_0(HABEO_CLAVIS, CONVERT,        mdt_convert),
5351         DEF_DLM_HNDL_0(0,            BL_CALLBACK,    mdt_bl_callback),
5352         DEF_DLM_HNDL_0(0,            CP_CALLBACK,    mdt_cp_callback)
5353 };
5354
5355 static struct mdt_handler mdt_llog_ops[] = {
5356 };
5357
5358 #define DEF_SEC_CTX_HNDL(name, fn)                      \
5359         DEF_HNDL(SEC_CTX, INIT, _NET, 0, name, fn, NULL)
5360
5361 static struct mdt_handler mdt_sec_ctx_ops[] = {
5362         DEF_SEC_CTX_HNDL(INIT,          mdt_sec_ctx_handle),
5363         DEF_SEC_CTX_HNDL(INIT_CONT,     mdt_sec_ctx_handle),
5364         DEF_SEC_CTX_HNDL(FINI,          mdt_sec_ctx_handle)
5365 };
5366
5367 static struct mdt_opc_slice mdt_regular_handlers[] = {
5368         {
5369                 .mos_opc_start = MDS_GETATTR,
5370                 .mos_opc_end   = MDS_LAST_OPC,
5371                 .mos_hs        = mdt_mds_ops
5372         },
5373         {
5374                 .mos_opc_start = OBD_PING,
5375                 .mos_opc_end   = OBD_LAST_OPC,
5376                 .mos_hs        = mdt_obd_ops
5377         },
5378         {
5379                 .mos_opc_start = LDLM_ENQUEUE,
5380                 .mos_opc_end   = LDLM_LAST_OPC,
5381                 .mos_hs        = mdt_dlm_ops
5382         },
5383         {
5384                 .mos_opc_start = LLOG_ORIGIN_HANDLE_CREATE,
5385                 .mos_opc_end   = LLOG_LAST_OPC,
5386                 .mos_hs        = mdt_llog_ops
5387         },
5388         {
5389                 .mos_opc_start = SEC_CTX_INIT,
5390                 .mos_opc_end   = SEC_LAST_OPC,
5391                 .mos_hs        = mdt_sec_ctx_ops
5392         },
5393         {
5394                 .mos_hs        = NULL
5395         }
5396 };
5397
5398 static struct mdt_handler mdt_readpage_ops[] = {
5399         DEF_MDT_HNDL_F(0,                         CONNECT,  mdt_connect),
5400         DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, READPAGE, mdt_readpage),
5401 #ifdef HAVE_SPLIT_SUPPORT
5402         DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, WRITEPAGE, mdt_writepage),
5403 #endif
5404
5405         /*
5406          * XXX: this is ugly and should be fixed one day, see mdc_close() for
5407          * detailed comments. --umka
5408          */
5409         DEF_MDT_HNDL_F(HABEO_CORPUS,              CLOSE,    mdt_close),
5410         DEF_MDT_HNDL_F(HABEO_CORPUS,              DONE_WRITING,    mdt_done_writing),
5411 };
5412
5413 static struct mdt_opc_slice mdt_readpage_handlers[] = {
5414         {
5415                 .mos_opc_start = MDS_GETATTR,
5416                 .mos_opc_end   = MDS_LAST_OPC,
5417                 .mos_hs        = mdt_readpage_ops
5418         },
5419         {
5420                 .mos_hs        = NULL
5421         }
5422 };
5423
5424 static struct mdt_handler mdt_xmds_ops[] = {
5425         DEF_MDT_HNDL_F(0,                         CONNECT,      mdt_connect),
5426         DEF_MDT_HNDL_F(HABEO_CORPUS             , GETATTR,      mdt_getattr),
5427         DEF_MDT_HNDL_F(0 | MUTABOR              , REINT,        mdt_reint),
5428         DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, IS_SUBDIR,    mdt_is_subdir),
5429 };
5430
5431 static struct mdt_opc_slice mdt_xmds_handlers[] = {
5432         {
5433                 .mos_opc_start = MDS_GETATTR,
5434                 .mos_opc_end   = MDS_LAST_OPC,
5435                 .mos_hs        = mdt_xmds_ops
5436         },
5437         {
5438                 .mos_opc_start = OBD_PING,
5439                 .mos_opc_end   = OBD_LAST_OPC,
5440                 .mos_hs        = mdt_obd_ops
5441         },
5442         {
5443                 .mos_opc_start = SEC_CTX_INIT,
5444                 .mos_opc_end   = SEC_LAST_OPC,
5445                 .mos_hs        = mdt_sec_ctx_ops
5446         },
5447         {
5448                 .mos_hs        = NULL
5449         }
5450 };
5451
5452 static struct mdt_handler mdt_seq_ops[] = {
5453         DEF_SEQ_HNDL_F(0, QUERY, (int (*)(struct mdt_thread_info *))seq_query)
5454 };
5455
5456 static struct mdt_opc_slice mdt_seq_handlers[] = {
5457         {
5458                 .mos_opc_start = SEQ_QUERY,
5459                 .mos_opc_end   = SEQ_LAST_OPC,
5460                 .mos_hs        = mdt_seq_ops
5461         },
5462         {
5463                 .mos_hs        = NULL
5464         }
5465 };
5466
5467 static struct mdt_handler mdt_fld_ops[] = {
5468         DEF_FLD_HNDL_F(0, QUERY, (int (*)(struct mdt_thread_info *))fld_query)
5469 };
5470
5471 static struct mdt_opc_slice mdt_fld_handlers[] = {
5472         {
5473                 .mos_opc_start = FLD_QUERY,
5474                 .mos_opc_end   = FLD_LAST_OPC,
5475                 .mos_hs        = mdt_fld_ops
5476         },
5477         {
5478                 .mos_hs        = NULL
5479         }
5480 };
5481
5482 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
5483 MODULE_DESCRIPTION("Lustre Meta-data Target ("LUSTRE_MDT_NAME")");
5484 MODULE_LICENSE("GPL");
5485
5486 CFS_MODULE_PARM(mdt_num_threads, "ul", ulong, 0444,
5487                 "number of mdt service threads to start");
5488
5489 cfs_module(mdt, "0.2.0", mdt_mod_init, mdt_mod_exit);
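     /*
      * The mdt_num_threads parameter above can be set at module load time,
      * e.g. (illustrative only, assuming the module is loaded by hand):
      *   modprobe mdt mdt_num_threads=64
      */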