/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/mdt/mdt_handler.c
 *  Lustre Metadata Target (mdt) request handler
 *
 *  Copyright (c) 2006 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *   Author: Mike Shaver <shaver@clusterfs.com>
 *   Author: Nikita Danilov <nikita@clusterfs.com>
 *   Author: Huang Hua <huanghua@clusterfs.com>
 *   Author: Yury Umanets <umka@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_MDS

#include <linux/module.h>
/*
 * struct OBD_{ALLOC,FREE}*()
 */
#include <obd_support.h>
/* struct ptlrpc_request */
#include <lustre_net.h>
/* struct obd_export */
#include <lustre_export.h>
/* struct obd_device */
#include <obd.h>
/* lu2dt_dev() */
#include <dt_object.h>
#include <lustre_mds.h>
#include <lustre_mdt.h>
#include "mdt_internal.h"
#include <linux/lustre_acl.h>
#include <lustre_param.h>

mdl_mode_t mdt_mdl_lock_modes[] = {
        [LCK_MINMODE] = MDL_MINMODE,
        [LCK_EX]      = MDL_EX,
        [LCK_PW]      = MDL_PW,
        [LCK_PR]      = MDL_PR,
        [LCK_CW]      = MDL_CW,
        [LCK_CR]      = MDL_CR,
        [LCK_NL]      = MDL_NL,
        [LCK_GROUP]   = MDL_GROUP
};

ldlm_mode_t mdt_dlm_lock_modes[] = {
        [MDL_MINMODE] = LCK_MINMODE,
        [MDL_EX]      = LCK_EX,
        [MDL_PW]      = LCK_PW,
        [MDL_PR]      = LCK_PR,
        [MDL_CW]      = LCK_CW,
        [MDL_CR]      = LCK_CR,
        [MDL_NL]      = LCK_NL,
        [MDL_GROUP]   = LCK_GROUP
};

/*
 * Initialized in mdt_mod_init().
 */
unsigned long mdt_num_threads;

/* ptlrpc request handler for MDT. All handlers are
 * grouped into several slices - struct mdt_opc_slice,
 * and stored in an array - mdt_handlers[].
 */
struct mdt_handler {
        /* The name of this handler. */
        const char *mh_name;
        /* Fail id for this handler, checked at the beginning of this handler*/
        int         mh_fail_id;
        /* Operation code for this handler */
        __u32       mh_opc;
        /* flags are listed in enum mdt_handler_flags below. */
        __u32       mh_flags;
        /* The actual handler function to execute. */
        int (*mh_act)(struct mdt_thread_info *info);
        /* Request format for this request. */
        const struct req_format *mh_fmt;
};

enum mdt_handler_flags {
        /*
         * struct mdt_body is passed in the incoming message, and object
         * identified by this fid exists on disk.
         *
         * "habeo corpus" == "I have a body"
         */
        HABEO_CORPUS = (1 << 0),
        /*
         * struct ldlm_request is passed in the incoming message.
         *
         * "habeo clavis" == "I have a key"
         */
        HABEO_CLAVIS = (1 << 1),
        /*
         * this request has fixed reply format, so that reply message can be
         * packed by generic code.
         *
         * "habeo refero" == "I have a reply"
         */
        HABEO_REFERO = (1 << 2),
        /*
         * this request will modify something, so check whether the filesystem
         * is readonly or not, then return -EROFS to client asap if necessary.
         *
         * "mutabor" == "I shall modify"
         */
        MUTABOR      = (1 << 3)
};

struct mdt_opc_slice {
        __u32               mos_opc_start;
        int                 mos_opc_end;
        struct mdt_handler *mos_hs;
};

static struct mdt_opc_slice mdt_regular_handlers[];
static struct mdt_opc_slice mdt_readpage_handlers[];
static struct mdt_opc_slice mdt_xmds_handlers[];
static struct mdt_opc_slice mdt_seq_handlers[];
static struct mdt_opc_slice mdt_fld_handlers[];

static struct mdt_device *mdt_dev(struct lu_device *d);
static int mdt_regular_handle(struct ptlrpc_request *req);
static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);

static struct lu_object_operations mdt_obj_ops;

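/*
 * Disposition helpers: query, set and clear DISP_* bits both in the intent
 * reply (lock_policy_res1) and in the per-request opdata, so the client and
 * the reconstruction code see a consistent view of what was executed.
 */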
int mdt_get_disposition(struct ldlm_reply *rep, int flag)
{
        if (!rep)
                return 0;
        return (rep->lock_policy_res1 & flag);
}

void mdt_clear_disposition(struct mdt_thread_info *info,
                           struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata &= ~flag;
        if (rep)
                rep->lock_policy_res1 &= ~flag;
}

void mdt_set_disposition(struct mdt_thread_info *info,
                         struct ldlm_reply *rep, int flag)
{
        if (info)
                info->mti_opdata |= flag;
        if (rep)
                rep->lock_policy_res1 |= flag;
}

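/*
 * Lock handle initializers: mdt_lock_reg_init() prepares a plain (regular)
 * lock of the given mode, while mdt_lock_pdo_init() prepares a parallel
 * directory (PDO) lock whose resource is subdivided by the hash of the name
 * being operated on.
 */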
void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
{
        lh->mlh_pdo_hash = 0;
        lh->mlh_reg_mode = lm;
        lh->mlh_type = MDT_REG_LOCK;
}

void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
                       const char *name, int namelen)
{
        lh->mlh_reg_mode = lm;
        lh->mlh_type = MDT_PDO_LOCK;

        if (name != NULL) {
                LASSERT(namelen > 0);
                lh->mlh_pdo_hash = full_name_hash(name, namelen);
        } else {
                LASSERT(namelen == 0);
                lh->mlh_pdo_hash = 0ull;
        }
}

static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
                              struct mdt_lock_handle *lh)
{
        mdl_mode_t mode;
        ENTRY;

        /*
         * Any dir access needs a couple of locks:
         *
         * 1) on the part of the dir we are going to lookup in or modify;
         *
         * 2) on the whole dir, to protect it from concurrent splitting and/or
         * to flush the client's cache for readdir().
         *
         * So, for a given mode and object, this routine decides what lock
         * mode to use for lock #2:
         *
         * 1) if the caller is going to lookup in the dir, we only need to
         * protect the dir from being split - LCK_CR;
         *
         * 2) if the caller is going to modify the dir, we need to protect it
         * from being split and to flush the cache - LCK_CW;
         *
         * 3) if the caller is going to modify the dir and that dir seems
         * ready for splitting, we need to protect it from any type of access
         * (lookup/modify/split) - LCK_EX. --bzzz
         */

        LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
        LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);

        /*
         * Ask the underlying layer for its opinion about the preferable PDO
         * lock mode, given the access type passed in as the regular lock
         * mode:
         *
         * - MDL_MINMODE means that the lower layer does not want to specify
         * a lock mode;
         *
         * - MDL_NL means that no PDO lock should be taken. This is used in
         * some cases, e.g. for non-splittable directories there is no need
         * to use PDO locks at all.
         */
        mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
                             mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));

        if (mode != MDL_MINMODE) {
                lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
        } else {
                /*
                 * The lower layer does not want to specify a locking mode, so
                 * we choose it ourselves. No special protection is needed,
                 * just flush the client's cache on modification and allow
                 * concurrent modification.
                 */
                switch (lh->mlh_reg_mode) {
                case LCK_EX:
                        lh->mlh_pdo_mode = LCK_EX;
                        break;
                case LCK_PR:
                        lh->mlh_pdo_mode = LCK_CR;
                        break;
                case LCK_PW:
                        lh->mlh_pdo_mode = LCK_CW;
                        break;
                default:
                        CERROR("Not expected lock type (0x%x)\n",
                               (int)lh->mlh_reg_mode);
                        LBUG();
                }
        }

        LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
        EXIT;
}

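/*
 * MDS_GETSTATUS handler: return the fid of the filesystem root in the reply
 * body and, if MDS capabilities are enabled, a default MDS capability for it.
 */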
static int mdt_getstatus(struct mdt_thread_info *info)
{
        struct mdt_device *mdt  = info->mti_mdt;
        struct md_device  *next = mdt->mdt_child;
        struct mdt_body   *repbody;
        int                rc;

        ENTRY;

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
        if (rc != 0)
                RETURN(rc);

        repbody->valid |= OBD_MD_FLID;

        if (mdt->mdt_opts.mo_mds_capa) {
                struct mdt_object  *root;
                struct lustre_capa *capa;

                root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
                if (IS_ERR(root))
                        RETURN(PTR_ERR(root));

                capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;

                rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
                                 0);
                mdt_object_put(info->mti_env, root);
                if (rc == 0)
                        repbody->valid |= OBD_MD_FLMDSCAPA;
        }

        RETURN(rc);
}

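/*
 * MDS_STATFS handler: ask the underlying md device for filesystem statistics
 * and pack them into the obd_statfs reply buffer.
 */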
static int mdt_statfs(struct mdt_thread_info *info)
{
        struct md_device  *next  = info->mti_mdt->mdt_child;
        struct obd_statfs *osfs;
        int                rc;

        ENTRY;

        /* This will trigger a watchdog timeout */
        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
                         (MDT_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1);

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
                rc = err_serious(-ENOMEM);
        } else {
                osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
                rc = next->md_ops->mdo_statfs(info->mti_env, next,
                                              &info->mti_u.ksfs);
                statfs_pack(osfs, &info->mti_u.ksfs);
        }
        RETURN(rc);
}

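/*
 * If the client supports Size-on-MDS and it is enabled for this regular file,
 * copy the cached size/blocks attributes into the reply body.
 */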
void mdt_pack_size2body(struct mdt_thread_info *info, struct mdt_object *o)
{
        struct mdt_body *b;
        struct lu_attr *attr = &info->mti_attr.ma_attr;

        b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        /* Check if Size-on-MDS is enabled. */
        if ((mdt_conn_flags(info) & OBD_CONNECT_SOM) &&
            S_ISREG(attr->la_mode) && mdt_sizeonmds_enabled(o)) {
                b->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
                b->size = attr->la_size;
                b->blocks = attr->la_blocks;
        }
}

void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
                        const struct lu_attr *attr, const struct lu_fid *fid)
{
        /* XXX: should pack the reply body according to lu_valid */
        b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
                    OBD_MD_FLGID   | OBD_MD_FLTYPE  |
                    OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
                    OBD_MD_FLATIME | OBD_MD_FLMTIME ;

        if (!S_ISREG(attr->la_mode))
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;

        b->atime      = attr->la_atime;
        b->mtime      = attr->la_mtime;
        b->ctime      = attr->la_ctime;
        b->mode       = attr->la_mode;
        b->size       = attr->la_size;
        b->blocks     = attr->la_blocks;
        b->uid        = attr->la_uid;
        b->gid        = attr->la_gid;
        b->flags      = attr->la_flags;
        b->nlink      = attr->la_nlink;
        b->rdev       = attr->la_rdev;

        if (fid) {
                b->fid1 = *fid;
                b->valid |= OBD_MD_FLID;

                /* FIXME: these should be fixed when the new igif is ready. */
                b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
                b->generation = fid_ver(fid);  /* 1.6 compatibility */
                b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */

                CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
                                PFID(fid), b->nlink, b->mode, b->size);
        }

        if (info)
                mdt_body_reverse_idmap(info, b);
}

static inline int mdt_body_has_lov(const struct lu_attr *la,
                                   const struct mdt_body *body)
{
        return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
                (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
}

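/*
 * Common getattr guts: fetch the attributes of @o from the lower layer and
 * pack them into the reply body, together with striping/LMV EAs, the symlink
 * target, remote permissions or POSIX ACLs, and an MDS capability, depending
 * on what the request asked for.
 */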
static int mdt_getattr_internal(struct mdt_thread_info *info,
                                struct mdt_object *o)
{
        struct md_object        *next = mdt_object_child(o);
        const struct mdt_body   *reqbody = info->mti_body;
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct mdt_export_data  *med = &req->rq_export->exp_mdt_data;
        struct md_attr          *ma = &info->mti_attr;
        struct lu_attr          *la = &ma->ma_attr;
        struct req_capsule      *pill = info->mti_pill;
        const struct lu_env     *env = info->mti_env;
        struct mdt_body         *repbody;
        struct lu_buf           *buffer = &info->mti_buf;
        int                     rc;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);

        ma->ma_valid = 0;

        rc = mdt_object_exists(o);
        if (rc < 0) {
                /* This object is located on a remote node. */
                repbody->fid1 = *mdt_object_fid(o);
                repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
                RETURN(0);
        }

        buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
        buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);

        /* If it is a dir object and the client requests MEA, then return MEA */
        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_MEA) {
                /* Assumption: MDT_MD size is enough for lmv size. */
                ma->ma_lmv = buffer->lb_buf;
                ma->ma_lmv_size = buffer->lb_len;
                ma->ma_need = MA_LMV | MA_INODE;
        } else {
                ma->ma_lmm = buffer->lb_buf;
                ma->ma_lmm_size = buffer->lb_len;
                ma->ma_need = MA_LOV | MA_INODE;
        }

        if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
            reqbody->valid & OBD_MD_FLDIREA  &&
            lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
                /* get default stripe info for this dir. */
                ma->ma_need |= MA_LOV_DEF;
        }
        rc = mo_attr_get(env, next, ma);
        if (unlikely(rc)) {
                CERROR("getattr error for "DFID": %d\n",
                        PFID(mdt_object_fid(o)), rc);
                RETURN(rc);
        }

        if (likely(ma->ma_valid & MA_INODE))
                mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
        else
                RETURN(-EFAULT);

        if (mdt_body_has_lov(la, reqbody)) {
                if (ma->ma_valid & MA_LOV) {
                        LASSERT(ma->ma_lmm_size);
                        mdt_dump_lmm(D_INFO, ma->ma_lmm);
                        repbody->eadatasize = ma->ma_lmm_size;
                        if (S_ISDIR(la->la_mode))
                                repbody->valid |= OBD_MD_FLDIREA;
                        else
                                repbody->valid |= OBD_MD_FLEASIZE;
                }
                if (ma->ma_valid & MA_LMV) {
                        LASSERT(S_ISDIR(la->la_mode));
                        repbody->eadatasize = ma->ma_lmv_size;
                        repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
                }
                if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
                        repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
                }
        } else if (S_ISLNK(la->la_mode) &&
                   reqbody->valid & OBD_MD_LINKNAME) {
                buffer->lb_buf = ma->ma_lmm;
                buffer->lb_len = reqbody->eadatasize;
                rc = mo_readlink(env, next, buffer);
                if (unlikely(rc <= 0)) {
                        CERROR("readlink failed: %d\n", rc);
                        rc = -EFAULT;
                } else {
                        repbody->valid |= OBD_MD_LINKNAME;
                        repbody->eadatasize = rc;
                        /* NULL terminate */
                        ((char*)ma->ma_lmm)[rc - 1] = 0;
                        CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
                               (char*)ma->ma_lmm, rc);
                        rc = 0;
                }
        }

        if (reqbody->valid & OBD_MD_FLMODEASIZE) {
                repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
                repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
                repbody->valid |= OBD_MD_FLMODEASIZE;
                CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
                       "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
                       repbody->max_cookiesize);
        }

        if (med->med_rmtclient && (reqbody->valid & OBD_MD_FLRMTPERM)) {
                void *buf = req_capsule_server_get(pill, &RMF_ACL);

                /* mdt_getattr_lock only */
                rc = mdt_pack_remote_perm(info, o, buf);
                if (rc) {
                        repbody->valid &= ~OBD_MD_FLRMTPERM;
                        repbody->aclsize = 0;
                        RETURN(rc);
                } else {
                        repbody->valid |= OBD_MD_FLRMTPERM;
                        repbody->aclsize = sizeof(struct mdt_remote_perm);
                }
        }
#ifdef CONFIG_FS_POSIX_ACL
        else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
                 (reqbody->valid & OBD_MD_FLACL)) {
                buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
                buffer->lb_len = req_capsule_get_size(pill,
                                                      &RMF_ACL, RCL_SERVER);
                if (buffer->lb_len > 0) {
                        rc = mo_xattr_get(env, next, buffer,
                                          XATTR_NAME_ACL_ACCESS);
                        if (rc < 0) {
                                if (rc == -ENODATA) {
                                        repbody->aclsize = 0;
                                        repbody->valid |= OBD_MD_FLACL;
                                        rc = 0;
                                } else if (rc == -EOPNOTSUPP) {
                                        rc = 0;
                                } else {
                                        CERROR("got acl size: %d\n", rc);
                                }
                        } else {
                                repbody->aclsize = rc;
                                repbody->valid |= OBD_MD_FLACL;
                                rc = 0;
                        }
                }
        }
#endif

        if ((reqbody->valid & OBD_MD_FLMDSCAPA) &&
            info->mti_mdt->mdt_opts.mo_mds_capa) {
                struct lustre_capa *capa;

                capa = req_capsule_server_get(pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(env, next, capa, 0);
                if (rc)
                        RETURN(rc);
                repbody->valid |= OBD_MD_FLMDSCAPA;
        }
        RETURN(rc);
}

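/*
 * Renew the OSS capability sent by the client: copy it into the reply buffer
 * and ask the lower layer to renew it.
 */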
static int mdt_renew_capa(struct mdt_thread_info *info)
{
        struct mdt_device  *mdt = info->mti_mdt;
        struct mdt_object  *obj = info->mti_object;
        struct mdt_body    *body;
        struct lustre_capa *capa, *c;
        int rc;
        ENTRY;

        /* If the object doesn't exist, or the server has disabled
         * capabilities, return directly; the client will see that the
         * OBD_MD_FLOSSCAPA flag is not set in body->valid.
         */
        if (!obj || !mdt->mdt_opts.mo_mds_capa)
                RETURN(0);

        body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(body != NULL);

        c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
        LASSERT(c);

        capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
        LASSERT(capa);

        *capa = *c;
        rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
        if (rc == 0)
                body->valid |= OBD_MD_FLOSSCAPA;
        RETURN(rc);
}

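/*
 * MDS_GETATTR handler: getattr on the object identified by the fid in the
 * request body (no name lookup, no lock); also services capability renewal
 * requests.
 */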
static int mdt_getattr(struct mdt_thread_info *info)
{
        struct mdt_object       *obj = info->mti_object;
        struct req_capsule      *pill = info->mti_pill;
        struct mdt_body         *reqbody;
        struct mdt_body         *repbody;
        mode_t                   mode;
        int                      md_size;
        int rc;
        ENTRY;

        reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
        LASSERT(reqbody);

        if (reqbody->valid & OBD_MD_FLOSSCAPA) {
                rc = req_capsule_server_pack(pill);
                if (unlikely(rc))
                        RETURN(err_serious(rc));
                rc = mdt_renew_capa(info);
                GOTO(out_shrink, rc);
        }

        LASSERT(obj != NULL);
        LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));

        mode = lu_object_attr(&obj->mot_obj.mo_lu);
        if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
            (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
                md_size = reqbody->eadatasize;
        else
                md_size = info->mti_mdt->mdt_max_mdsize;

        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);

        rc = req_capsule_server_pack(pill);
        if (unlikely(rc != 0))
                RETURN(err_serious(rc));

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        if (reqbody->valid & OBD_MD_FLRMTPERM)
                rc = mdt_init_ucred(info, reqbody);
        else
                rc = mdt_check_ucred(info);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);

        /*
         * Do not check the capability at all, because rename may do a getattr
         * on a remote object, and at that time no capability is available.
         */
        mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
        rc = mdt_getattr_internal(info, obj);
        if (reqbody->valid & OBD_MD_FLRMTPERM)
                mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_shrink_reply(info);
        return rc;
}

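/*
 * MDS_IS_SUBDIR handler: checks the parent/child relationship between this
 * object and fid2 via mdo_is_subdir(); the last checked parent fid is saved
 * in repbody->fid1 for the remote directory case.
 */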
static int mdt_is_subdir(struct mdt_thread_info *info)
{
        struct mdt_object     *o = info->mti_object;
        struct req_capsule    *pill = info->mti_pill;
        const struct mdt_body *body = info->mti_body;
        struct mdt_body       *repbody;
        int                    rc;
        ENTRY;

        LASSERT(o != NULL);

        repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);

        /*
         * We save last checked parent fid to @repbody->fid1 for remote
         * directory case.
         */
        LASSERT(fid_is_sane(&body->fid2));
        LASSERT(mdt_object_exists(o) > 0);
        rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
                           &body->fid2, &repbody->fid1);
        if (rc == 0 || rc == -EREMOTE)
                repbody->valid |= OBD_MD_FLID;

        RETURN(rc);
}

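/*
 * Fid-only lookup of @lname in the parent, used when the client asked for
 * nothing but OBD_MD_FLID; on success the child fid is packed directly into
 * the reply body and a positive return tells the caller the request is done.
 */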
static int mdt_raw_lookup(struct mdt_thread_info *info,
                          struct mdt_object *parent,
                          const struct lu_name *lname,
                          struct ldlm_reply *ldlm_rep)
{
        struct md_object *next = mdt_object_child(info->mti_object);
        const struct mdt_body *reqbody = info->mti_body;
        struct lu_fid *child_fid = &info->mti_tmp_fid1;
        struct mdt_body *repbody;
        int rc;
        ENTRY;

        if (reqbody->valid != OBD_MD_FLID)
                RETURN(0);

        LASSERT(!info->mti_cross_ref);

        /* Only got the fid of this obj by name */
        rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                        &info->mti_spec);
#if 0
        /* XXX is raw_lookup possible as intent operation? */
        if (rc != 0) {
                if (rc == -ENOENT)
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
                RETURN(rc);
        } else
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
#endif
        if (rc == 0) {
                repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
                repbody->fid1 = *child_fid;
                repbody->valid = OBD_MD_FLID;
        }
        RETURN(1);
}

/*
 * The UPDATE lock should be taken against the parent, and released before
 * exit; the child_bits lock should be taken against the child and returned
 * to the caller:
 *            (1) a normal request should release the child lock;
 *            (2) an intent request will grant the lock to the client.
 */
static int mdt_getattr_name_lock(struct mdt_thread_info *info,
                                 struct mdt_lock_handle *lhc,
                                 __u64 child_bits,
                                 struct ldlm_reply *ldlm_rep)
{
        struct ptlrpc_request  *req       = mdt_info_req(info);
        struct mdt_body        *reqbody   = NULL;
        struct mdt_object      *parent    = info->mti_object;
        struct mdt_object      *child;
        struct md_object       *next      = mdt_object_child(parent);
        struct lu_fid          *child_fid = &info->mti_tmp_fid1;
        struct lu_name         *lname     = NULL;
        const char             *name;
        int                     namelen   = 0;
        struct mdt_lock_handle *lhp;
        struct ldlm_lock       *lock;
        struct ldlm_res_id     *res_id;
        int                     is_resent;
        int                     rc;

        ENTRY;

        is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
        LASSERT(ergo(is_resent,
                     lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));

        LASSERT(parent != NULL);
        name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
        if (name == NULL)
                RETURN(err_serious(-EFAULT));

        namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
                                       RCL_CLIENT) - 1;
        LASSERT(namelen >= 0);

        /* XXX: "namelen == 0" means getattr by fid (OBD_CONNECT_ATTRFID);
         * otherwise an empty name is not allowed, i.e. the name must contain
         * at least one character plus the terminating '\0'. */
        if (namelen == 0) {
                reqbody =req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
                LASSERT(fid_is_sane(&reqbody->fid2));
                name = NULL;

                CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
                       "ldlm_rep = %p\n",
                       PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
                       ldlm_rep);
        } else {
                lname = mdt_name(info->mti_env, (char *)name, namelen);
                CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
                       "ldlm_rep = %p\n",
                       PFID(mdt_object_fid(parent)), name, ldlm_rep);
        }

        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);

        rc = mdt_object_exists(parent);
        if (unlikely(rc == 0)) {
                LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
                                &parent->mot_obj.mo_lu,
                                "Parent doesn't exist!\n");
                RETURN(-ESTALE);
        } else
                LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
                         PFID(mdt_object_fid(parent)));

        if (lname) {
                rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
                if (rc != 0) {
                        if (rc > 0)
                                rc = 0;
                        RETURN(rc);
                }
        }

        if (info->mti_cross_ref) {
                /* Only getattr on the child. Parent is on another node. */
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
                child = parent;
                CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
                       "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);

                if (is_resent) {
                        /* Do not take lock for resent case. */
                        lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                        LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                                 lhc->mlh_reg_lh.cookie);
                        LASSERT(fid_res_name_eq(mdt_object_fid(child),
                                                &lock->l_resource->lr_name));
                        LDLM_LOCK_PUT(lock);
                        rc = 0;
                } else {
                        mdt_lock_handle_init(lhc);
                        mdt_lock_reg_init(lhc, LCK_PR);

                        /*
                         * The object's name is on another MDS, so no lookup
                         * lock is needed here, but an update lock is.
                         */
                        child_bits &= ~MDS_INODELOCK_LOOKUP;
                        child_bits |= MDS_INODELOCK_UPDATE;

                        rc = mdt_object_lock(info, child, lhc, child_bits,
                                             MDT_LOCAL_LOCK);
                }
                if (rc == 0) {
                        /* Finally, we can get attr for child. */
                        mdt_set_capainfo(info, 0, mdt_object_fid(child),
                                         BYPASS_CAPA);
                        rc = mdt_getattr_internal(info, child);
                        if (unlikely(rc != 0))
                                mdt_object_unlock(info, child, lhc, 1);
                }
                RETURN(rc);
        }

        /* step 1: lock parent */
        lhp = &info->mti_lh[MDT_LH_PARENT];
        mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
        rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
                             MDT_LOCAL_LOCK);

        if (unlikely(rc != 0))
                RETURN(rc);

        if (lname) {
                /* step 2: lookup child's fid by name */
                rc = mdo_lookup(info->mti_env, next, lname, child_fid,
                                &info->mti_spec);

                if (rc != 0) {
                        if (rc == -ENOENT)
                                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
                        GOTO(out_parent, rc);
                } else
                        mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        } else {
                *child_fid = reqbody->fid2;
                mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
        }

        /*
         * step 3: find the child object by fid and lock it, regardless of
         *         whether it is local or remote.
         */
        child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);

        if (unlikely(IS_ERR(child)))
                GOTO(out_parent, rc = PTR_ERR(child));
        if (is_resent) {
                /* Do not take lock for resent case. */
                lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
                         lhc->mlh_reg_lh.cookie);

                res_id = &lock->l_resource->lr_name;
                if (!fid_res_name_eq(mdt_object_fid(child),
                                    &lock->l_resource->lr_name)) {
                         LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
                                                 &lock->l_resource->lr_name),
                                 "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
                                 (unsigned long)res_id->name[0],
                                 (unsigned long)res_id->name[1],
                                 (unsigned long)res_id->name[2],
                                 PFID(mdt_object_fid(parent)));
                          CWARN("Although resent, still did not get child lock, "
                                "parent:"DFID" child:"DFID"\n",
                                PFID(mdt_object_fid(parent)),
                                PFID(mdt_object_fid(child)));
                          lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
                          LDLM_LOCK_PUT(lock);
                          GOTO(relock, 0);
                }
                LDLM_LOCK_PUT(lock);
                rc = 0;
        } else {
relock:
                mdt_lock_handle_init(lhc);
                mdt_lock_reg_init(lhc, LCK_PR);

                if (mdt_object_exists(child) == 0) {
                        LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
                                        &child->mot_obj.mo_lu,
                                        "Object doesn't exist!\n");
                }
                rc = mdt_object_lock(info, child, lhc, child_bits,
                                     MDT_CROSS_LOCK);

                if (unlikely(rc != 0))
                        GOTO(out_child, rc);
        }

        /* finally, we can get attr for child. */
        mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
        rc = mdt_getattr_internal(info, child);
        if (unlikely(rc != 0)) {
                mdt_object_unlock(info, child, lhc, 1);
        } else {
                lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
                if (lock) {
                        struct mdt_body *repbody;
                        struct lu_attr *ma;

                        /* Debugging code. */
                        res_id = &lock->l_resource->lr_name;
                        LDLM_DEBUG(lock, "Returning lock to client\n");
                        LASSERTF(fid_res_name_eq(mdt_object_fid(child),
                                                 &lock->l_resource->lr_name),
                                 "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
                                 (unsigned long)res_id->name[0],
                                 (unsigned long)res_id->name[1],
                                 (unsigned long)res_id->name[2],
                                 PFID(mdt_object_fid(child)));
                        /*
                         * Pack Size-on-MDS inode attributes to the body if
                         * the update lock is given.
                         */
                        repbody = req_capsule_server_get(info->mti_pill,
                                                         &RMF_MDT_BODY);
                        ma = &info->mti_attr.ma_attr;
                        if (lock->l_policy_data.l_inodebits.bits &
                            MDS_INODELOCK_UPDATE)
                                mdt_pack_size2body(info, child);
                        LDLM_LOCK_PUT(lock);
                }
        }
        EXIT;
out_child:
        mdt_object_put(info->mti_env, child);
out_parent:
        mdt_object_unlock(info, parent, lhp, 1);
        return rc;
}

/* normal handler: should release the child lock */
static int mdt_getattr_name(struct mdt_thread_info *info)
{
        struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
        struct mdt_body        *reqbody;
        struct mdt_body        *repbody;
        int rc;
        ENTRY;

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(reqbody != NULL);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);

        info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
        info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
        repbody->eadatasize = 0;
        repbody->aclsize = 0;

        rc = mdt_init_ucred(info, reqbody);
        if (unlikely(rc))
                GOTO(out_shrink, rc);

        rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
        if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
                ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
                lhc->mlh_reg_lh.cookie = 0;
        }
        mdt_exit_ucred(info);
        EXIT;
out_shrink:
        mdt_shrink_reply(info);
        return rc;
}

static struct lu_device_operations mdt_lu_ops;

static int lu_device_is_mdt(struct lu_device *d)
{
        return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
}

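/*
 * MDS_SET_INFO handler: the only key supported here is KEY_READ_ONLY, which
 * toggles OBD_CONNECT_RDONLY on the client's export.
 */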
static int mdt_set_info(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        char *key;
        __u32 *val;
        int keylen, rc = 0;
        ENTRY;

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(rc);

        key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
        if (key == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info key");
                RETURN(-EFAULT);
        }

        keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);

        val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
        if (val == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info val");
                RETURN(-EFAULT);
        }

        if (keylen != (sizeof(KEY_READ_ONLY) - 1) ||
            memcmp(key, KEY_READ_ONLY, keylen) != 0)
                RETURN(-EINVAL);

        req->rq_status = 0;
        lustre_msg_set_status(req->rq_repmsg, 0);

        spin_lock(&req->rq_export->exp_lock);
        if (*val)
                req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
        else
                req->rq_export->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
        spin_unlock(&req->rq_export->exp_lock);

        RETURN(0);
}

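/*
 * MDS_CONNECT and MDS_DISCONNECT handlers; connect additionally sets up the
 * identity mapping for the new export and tears the connection down again if
 * that fails.
 */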
static int mdt_connect(struct mdt_thread_info *info)
{
        int rc;
        struct ptlrpc_request *req;

        req = mdt_info_req(info);
        rc = target_handle_connect(req);
        if (rc == 0) {
                LASSERT(req->rq_export != NULL);
                info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
                rc = mdt_init_idmap(info);
                if (rc != 0)
                        /* if mdt_init_idmap failed, revoke the connection */
                        obd_disconnect(class_export_get(req->rq_export));
        } else
                rc = err_serious(rc);
        return rc;
}

static int mdt_disconnect(struct mdt_thread_info *info)
{
        int rc;
        ENTRY;

        rc = target_handle_disconnect(mdt_info_req(info));
        if (rc)
                rc = err_serious(rc);
        RETURN(rc);
}

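/*
 * Push the pages collected in @rdpg back to the client with a bulk PUT; on a
 * timeout or network error the client export is failed (evicted).
 */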
static int mdt_sendpage(struct mdt_thread_info *info,
                        struct lu_rdpg *rdpg)
{
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
        int                      tmpcount;
        int                      tmpsize;
        int                      i;
        int                      rc;
        ENTRY;

        desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
                                    MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(-ENOMEM);

        for (i = 0, tmpcount = rdpg->rp_count;
                i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
                tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
                ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
        }

        LASSERT(desc->bd_nob == rdpg->rp_count);
        rc = ptlrpc_start_bulk_transfer(desc);
        if (rc)
                GOTO(free_desc, rc);

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
                GOTO(abort_bulk, rc);

        *lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
        rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), lwi);
        LASSERT (rc == 0 || rc == -ETIMEDOUT);

        if (rc == 0) {
                if (desc->bd_success &&
                    desc->bd_nob_transferred == rdpg->rp_count)
                        GOTO(free_desc, rc);

                rc = -ETIMEDOUT; /* XXX should this be a different errno? */
        }

        DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
                  (rc == -ETIMEDOUT) ? "timeout" : "network error",
                  desc->bd_nob_transferred, rdpg->rp_count,
                  req->rq_export->exp_client_uuid.uuid,
                  req->rq_export->exp_connection->c_remote_uuid.uuid);

        class_fail_export(req->rq_export);

        EXIT;
abort_bulk:
        ptlrpc_abort_bulk(desc);
free_desc:
        ptlrpc_free_bulk(desc);
        return rc;
}

#ifdef HAVE_SPLIT_SUPPORT
/*
 * Retrieve directory entries from the page and insert them into the slave
 * object. Strictly speaking this belongs in the osd layer, but since it will
 * not be in the final product, just do it here rather than defining more moo
 * APIs for it.
 */
static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
                              int size)
{
        struct mdt_object *object = info->mti_object;
        struct lu_fid *lf = &info->mti_tmp_fid2;
        struct md_attr *ma = &info->mti_attr;
        struct lu_dirpage *dp;
        struct lu_dirent *ent;
        int rc = 0, offset = 0;
        ENTRY;

        /* Make sure we have at least one entry. */
        if (size == 0)
                RETURN(-EINVAL);

        /*
         * Disable transactions for these name inserts, since a single page
         * would otherwise involve many of them.
         */
        info->mti_no_need_trans = 1;
        /*
         * For write_dir_page there is no need to update the parent's ctime,
         * and no permission check is done for name_insert.
         */
        ma->ma_attr.la_ctime = 0;
        ma->ma_attr.la_valid = LA_MODE;
        ma->ma_valid = MA_INODE;

        kmap(page);
        dp = page_address(page);
        offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);

        for (ent = lu_dirent_start(dp); ent != NULL;
             ent = lu_dirent_next(ent)) {
                struct lu_name *lname;
                char *name;

                if (le16_to_cpu(ent->lde_namelen) == 0)
                        continue;

                fid_le_to_cpu(lf, &ent->lde_fid);
                if (le32_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
                        ma->ma_attr.la_mode = S_IFDIR;
                else
                        ma->ma_attr.la_mode = 0;
                OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
                if (name == NULL)
                        GOTO(out, rc = -ENOMEM);

                memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
                lname = mdt_name(info->mti_env, name,
                                 le16_to_cpu(ent->lde_namelen) + 1);
                ma->ma_attr_flags |= MDS_PERM_BYPASS;
                rc = mdo_name_insert(info->mti_env,
                                     md_object_next(&object->mot_obj),
                                     lname, lf, ma);
                OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
                if (rc) {
                        CERROR("Can't insert %*.*s, rc %d\n",
                               le16_to_cpu(ent->lde_namelen),
                               le16_to_cpu(ent->lde_namelen),
                               ent->lde_name, rc);
                        GOTO(out, rc);
                }

                offset += lu_dirent_size(ent);
                if (offset >= size)
                        break;
        }
        EXIT;
out:
        kunmap(page);
        return rc;
}

static int mdt_bulk_timeout(void *data)
{
        ENTRY;

        CERROR("mdt bulk transfer timeout \n");

        RETURN(1);
}

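/*
 * MDS_WRITEPAGE handler (split support): pull one page of directory entries
 * from the client over bulk GET and feed it to mdt_write_dir_page().
 */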
static int mdt_writepage(struct mdt_thread_info *info)
{
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct mdt_body         *reqbody;
        struct l_wait_info      *lwi;
        struct ptlrpc_bulk_desc *desc;
        struct page             *page;
        int                rc;
        ENTRY;

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL)
                RETURN(err_serious(-EFAULT));

        desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(err_serious(-ENOMEM));

        /* allocate the page for the desc */
        page = cfs_alloc_page(CFS_ALLOC_STD);
        if (page == NULL)
                GOTO(desc_cleanup, rc = -ENOMEM);

        CDEBUG(D_INFO, "Received page offset %d size %d \n",
               (int)reqbody->size, (int)reqbody->nlink);

        ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
                              (int)reqbody->nlink);

        /*
         * Check if client was evicted while we were doing i/o before touching
         * network.
         */
        OBD_ALLOC_PTR(lwi);
        if (!lwi)
                GOTO(cleanup_page, rc = -ENOMEM);

        if (desc->bd_export->exp_failed)
                rc = -ENOTCONN;
        else
                rc = ptlrpc_start_bulk_transfer (desc);
        if (rc == 0) {
                *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ,
                                            mdt_bulk_timeout, desc);
                rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
                                  desc->bd_export->exp_failed, lwi);
                LASSERT(rc == 0 || rc == -ETIMEDOUT);
                if (rc == -ETIMEDOUT) {
                        DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
                        ptlrpc_abort_bulk(desc);
                } else if (desc->bd_export->exp_failed) {
                        DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
                        rc = -ENOTCONN;
                        ptlrpc_abort_bulk(desc);
                } else if (!desc->bd_success ||
                           desc->bd_nob_transferred != desc->bd_nob) {
                        DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
                                  desc->bd_success ?
                                  "truncated" : "network error on",
                                  desc->bd_nob_transferred, desc->bd_nob);
                        /* XXX should this be a different errno? */
                        rc = -ETIMEDOUT;
                }
        } else {
                DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
        }
        if (rc)
                GOTO(cleanup_lwi, rc);
        rc = mdt_write_dir_page(info, page, reqbody->nlink);

cleanup_lwi:
        OBD_FREE_PTR(lwi);
cleanup_page:
        cfs_free_page(page);
desc_cleanup:
        ptlrpc_free_bulk(desc);
        RETURN(rc);
}
#endif

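/*
 * MDS_READPAGE handler: allocate pages, let the lower layer fill them with
 * directory entries starting at the requested hash, and send them back to
 * the client via mdt_sendpage().
 */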
static int mdt_readpage(struct mdt_thread_info *info)
{
        struct mdt_object *object = info->mti_object;
        struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct mdt_body   *reqbody;
        struct mdt_body   *repbody;
        int                rc;
        int                i;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
                RETURN(err_serious(-ENOMEM));

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL || repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /*
         * Prepare @rdpg before calling the lower layers and the transfer
         * itself. Here reqbody->size contains the offset at which to start
         * reading and reqbody->nlink contains the number of bytes to read.
         */
        rdpg->rp_hash = reqbody->size;
        if ((__u64)rdpg->rp_hash != reqbody->size) {
                CERROR("Invalid hash: %#llx != %#llx\n",
                       (__u64)rdpg->rp_hash, reqbody->size);
                RETURN(-EFAULT);
        }
        rdpg->rp_count  = reqbody->nlink;
        rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT;
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
        if (rdpg->rp_pages == NULL)
                RETURN(-ENOMEM);

        for (i = 0; i < rdpg->rp_npages; ++i) {
                rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(free_rdpg, rc = -ENOMEM);
        }

        /* call lower layers to fill allocated pages with directory data */
        rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
        if (rc)
                GOTO(free_rdpg, rc);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg);

        EXIT;
free_rdpg:

        for (i = 0; i < rdpg->rp_npages; i++)
                if (rdpg->rp_pages[i] != NULL)
                        cfs_free_page(rdpg->rp_pages[i]);
        OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
                RETURN(0);

        return rc;
}

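/*
 * Common path for all metadata modifications (reint): pack the reply, unpack
 * the reint record, set up user credentials, handle resent requests and then
 * dispatch to mdt_reint_rec().
 */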
static int mdt_reint_internal(struct mdt_thread_info *info,
                              struct mdt_lock_handle *lhc,
                              __u32 op)
{
        struct req_capsule      *pill = info->mti_pill;
        struct mdt_device       *mdt = info->mti_mdt;
        struct mdt_body         *repbody;
        int                      rc = 0;
        ENTRY;

        /* pack reply */
        if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                                     mdt->mdt_max_mdsize);
        if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
                                     mdt->mdt_max_cookiesize);

        rc = req_capsule_server_pack(pill);
        if (rc != 0) {
                CERROR("Can't pack response, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
                repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
                LASSERT(repbody);
                repbody->eadatasize = 0;
                repbody->aclsize = 0;
        }

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
                GOTO(out_shrink, rc = err_serious(-EFAULT));

        rc = mdt_reint_unpack(info, op);
        if (rc != 0) {
                CERROR("Can't unpack reint, rc %d\n", rc);
                GOTO(out_shrink, rc = err_serious(rc));
        }

        rc = mdt_init_ucred_reint(info);
        if (rc)
                GOTO(out_shrink, rc);

        rc = mdt_fix_attr_ucred(info, op);
        if (rc != 0)
                GOTO(out_ucred, rc = err_serious(rc));

        if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
                rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
                GOTO(out_ucred, rc);
        }
        rc = mdt_reint_rec(info, lhc);
        EXIT;
out_ucred:
        mdt_exit_ucred(info);
out_shrink:
        mdt_shrink_reply(info);
        return rc;
}

1454 static long mdt_reint_opcode(struct mdt_thread_info *info,
1455                              const struct req_format **fmt)
1456 {
1457         struct mdt_rec_reint *rec;
1458         long opc;
1459
1460         opc = err_serious(-EFAULT);
1461         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1462         if (rec != NULL) {
1463                 opc = rec->rr_opcode;
1464                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1465                 if (opc < REINT_MAX && fmt[opc] != NULL)
1466                         req_capsule_extend(info->mti_pill, fmt[opc]);
1467                 else {
1468                         CERROR("Unsupported opc: %ld\n", opc);
1469                         opc = err_serious(opc);
1470                 }
1471         }
1472         return opc;
1473 }
1474
1475 static int mdt_reint(struct mdt_thread_info *info)
1476 {
1477         long opc;
1478         int  rc;
1479
1480         static const struct req_format *reint_fmts[REINT_MAX] = {
1481                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1482                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1483                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1484                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1485                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1486                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1487                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1488         };
1489
1490         ENTRY;
1491
1492         if (OBD_FAIL_CHECK_RESET(OBD_FAIL_MDS_REINT_NET,
1493                                  OBD_FAIL_MDS_REINT_NET)) {
1494                 info->mti_fail_id = OBD_FAIL_MDS_REINT_NET;
1495                 RETURN(0);
1496         }
1497
1498         opc = mdt_reint_opcode(info, reint_fmts);
1499         if (opc >= 0) {
1500                 /*
1501                  * No lock can be passed in from the client here, so hand
1502                  * NULL to the reint code path.
1503                  */
1504                 rc = mdt_reint_internal(info, NULL, opc);
1505         } else {
1506                 rc = opc;
1507         }
1508
1509         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1510         RETURN(rc);
1511 }
1512
1513 /* TODO: these two methods are not implemented yet. */
1514
1515 /* this should sync the whole device */
1516 static int mdt_device_sync(struct mdt_thread_info *info)
1517 {
1518         return 0;
1519 }
1520
1521 /* this should sync this object */
1522 static int mdt_object_sync(struct mdt_thread_info *info)
1523 {
1524         return 0;
1525 }
1526
1527 static int mdt_sync(struct mdt_thread_info *info)
1528 {
1529         struct req_capsule *pill = info->mti_pill;
1530         struct mdt_body *body;
1531         int rc;
1532         ENTRY;
1533
1534         /* The fid may be zero, so we call req_capsule_set() manually */
1535         req_capsule_set(pill, &RQF_MDS_SYNC);
1536
1537         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1538         if (body == NULL)
1539                 RETURN(err_serious(-EINVAL));
1540
1541         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1542                 RETURN(err_serious(-ENOMEM));
1543
1544         if (fid_seq(&body->fid1) == 0) {
1545                 /* sync the whole device */
1546                 rc = req_capsule_server_pack(pill);
1547                 if (rc == 0)
1548                         rc = mdt_device_sync(info);
1549                 else
1550                         rc = err_serious(rc);
1551         } else {
1552                 /* sync an object */
1553                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1554                 if (rc == 0) {
1555                         rc = mdt_object_sync(info);
1556                         if (rc == 0) {
1557                                 struct md_object *next;
1558                                 const struct lu_fid *fid;
1559                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1560
1561                                 next = mdt_object_child(info->mti_object);
1562                                 info->mti_attr.ma_need = MA_INODE;
1563                                 info->mti_attr.ma_valid = 0;
1564                                 rc = mo_attr_get(info->mti_env, next,
1565                                                  &info->mti_attr);
1566                                 if (rc == 0) {
1567                                         body = req_capsule_server_get(pill,
1568                                                                 &RMF_MDT_BODY);
1569                                         fid = mdt_object_fid(info->mti_object);
1570                                         mdt_pack_attr2body(info, body, la, fid);
1571                                 }
1572                         }
1573                 } else
1574                         rc = err_serious(rc);
1575         }
1576         RETURN(rc);
1577 }
1578
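/*
 * Illustrative sketch (hypothetical helper, kept under "#if 0"): the
 * convention mdt_sync() relies on is that a request body carrying a fid with
 * a zero sequence asks for a whole-device sync, while any other fid requests
 * a per-object sync.
 */
#if 0
static int mdt_example_sync_is_whole_device(const struct mdt_body *body)
{
        return fid_seq(&body->fid1) == 0;
}
#endif
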
1579 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
1580 {
1581         return err_serious(-EOPNOTSUPP);
1582 }
1583
1584 static int mdt_quotactl_handle(struct mdt_thread_info *info)
1585 {
1586         return err_serious(-EOPNOTSUPP);
1587 }
1588
1589 /*
1590  * OBD PING and other handlers.
1591  */
1592 static int mdt_obd_ping(struct mdt_thread_info *info)
1593 {
1594         int rc;
1595         ENTRY;
1596
1597         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
1598
1599         rc = target_handle_ping(mdt_info_req(info));
1600         if (rc < 0)
1601                 rc = err_serious(rc);
1602         RETURN(rc);
1603 }
1604
1605 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
1606 {
1607         return err_serious(-EOPNOTSUPP);
1608 }
1609
1610 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
1611 {
1612         return err_serious(-EOPNOTSUPP);
1613 }
1614
1615
1616 /*
1617  * DLM handlers.
1618  */
1619 static struct ldlm_callback_suite cbs = {
1620         .lcs_completion = ldlm_server_completion_ast,
1621         .lcs_blocking   = ldlm_server_blocking_ast,
1622         .lcs_glimpse    = NULL
1623 };
1624
1625 static int mdt_enqueue(struct mdt_thread_info *info)
1626 {
1627         struct ptlrpc_request *req;
1628         __u64 req_bits;
1629         int rc;
1630
1631         /*
1632          * info->mti_dlm_req already contains the swapped and (if necessary)
1633          * converted dlm request.
1634          */
1635         LASSERT(info->mti_dlm_req != NULL);
1636
1637         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE)) {
1638                 info->mti_fail_id = OBD_FAIL_LDLM_ENQUEUE;
1639                 return 0;
1640         }
1641
1642         req = mdt_info_req(info);
1643
1644         /*
1645          * A lock without inodebits makes no sense and will oops later in
1646          * ldlm. Check it now to see whether we got a wrong lock from the
1647          * client or the bits were corrupted somewhere in mdt_intent_policy().
1648          */
1649         req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1650         /* This is disabled because we need to support liblustre flock.
1651          * LASSERT(req_bits != 0);
1652          */
1653
1654         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
1655                                   req, info->mti_dlm_req, &cbs);
1656         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
1657         return rc ? err_serious(rc) : req->rq_status;
1658 }
1659
1660 static int mdt_convert(struct mdt_thread_info *info)
1661 {
1662         int rc;
1663         struct ptlrpc_request *req;
1664
1665         LASSERT(info->mti_dlm_req);
1666         req = mdt_info_req(info);
1667         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
1668         return rc ? err_serious(rc) : req->rq_status;
1669 }
1670
1671 static int mdt_bl_callback(struct mdt_thread_info *info)
1672 {
1673         CERROR("bl callbacks should not happen on MDS\n");
1674         LBUG();
1675         return err_serious(-EOPNOTSUPP);
1676 }
1677
1678 static int mdt_cp_callback(struct mdt_thread_info *info)
1679 {
1680         CERROR("cp callbacks should not happen on MDS\n");
1681         LBUG();
1682         return err_serious(-EOPNOTSUPP);
1683 }
1684
1685 /*
1686  * sec context handlers
1687  */
1688 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
1689 {
1690         int rc;
1691
1692         rc = mdt_handle_idmap(info);
1693
1694         if (unlikely(rc)) {
1695                 struct ptlrpc_request *req = mdt_info_req(info);
1696                 __u32                  opc;
1697
1698                 opc = lustre_msg_get_opc(req->rq_reqmsg);
1699                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
1700                         sptlrpc_svc_ctx_invalidate(req);
1701         }
1702
1703         return rc;
1704 }
1705
1706 static struct mdt_object *mdt_obj(struct lu_object *o)
1707 {
1708         LASSERT(lu_device_is_mdt(o->lo_dev));
1709         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
1710 }
1711
1712 struct mdt_object *mdt_object_find(const struct lu_env *env,
1713                                    struct mdt_device *d,
1714                                    const struct lu_fid *f)
1715 {
1716         struct lu_object *o;
1717         struct mdt_object *m;
1718         ENTRY;
1719
1720         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
1721         o = lu_object_find(env, d->mdt_md_dev.md_lu_dev.ld_site, f);
1722         if (unlikely(IS_ERR(o)))
1723                 m = (struct mdt_object *)o;
1724         else
1725                 m = mdt_obj(o);
1726         RETURN(m);
1727 }
1728
1729 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
1730                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
1731 {
1732         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
1733         ldlm_policy_data_t *policy = &info->mti_policy;
1734         struct ldlm_res_id *res_id = &info->mti_res_id;
1735         int rc;
1736         ENTRY;
1737
1738         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
1739         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
1740         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
1741         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
1742
1743         if (mdt_object_exists(o) < 0) {
1744                 if (locality == MDT_CROSS_LOCK) {
1745                         /* cross-ref object fix */
1746                         ibits &= ~MDS_INODELOCK_UPDATE;
1747                         ibits |= MDS_INODELOCK_LOOKUP;
1748                 } else {
1749                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
1750                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
1751                 }
1752                 /* No PDO lock on remote object */
1753                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
1754         }
1755
1756         memset(policy, 0, sizeof(*policy));
1757         fid_build_reg_res_name(mdt_object_fid(o), res_id);
1758
1759         /*
1760          * Take a PDO lock on the whole directory and build the correct
1761          * @res_id for a lock on part of the directory.
1762          */
1763         if (lh->mlh_pdo_hash != 0) {
1764                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
1765                 mdt_lock_pdo_mode(info, o, lh);
1766                 if (lh->mlh_pdo_mode != LCK_NL) {
1767                         /*
1768                          * Do not use LDLM_FL_LOCAL_ONLY for the parallel lock;
1769                          * it is never going to be sent to the client and we
1770                          * do not want it slowed down by possible cancels.
1771                          */
1772                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
1773                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
1774                                           policy, res_id, LDLM_FL_ATOMIC_CB);
1775                         if (unlikely(rc))
1776                                 RETURN(rc);
1777                 }
1778
1779                 /*
1780                  * Finish initializing @res_id with the name hash, marking the
1781                  * part of the directory that is being modified.
1782                  */
1783                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
1784         }
1785
1786         policy->l_inodebits.bits = ibits;
1787
1788         /*
1789          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet whether it
1790          * is going to be sent to the client. If it is, the mdt_intent_policy()
1791          * path will fix it up and turn the FL_LOCAL flag off.
1792          */
1793         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
1794                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB);
1795
1796         if (rc)
1797                 GOTO(out, rc);
1798
1799         if (lh->mlh_type == MDT_PDO_LOCK) {
1800                 /* check for existence after the object is locked */
1801                 if (mdt_object_exists(o) == 0) {
1802                         /* Non-existent object shouldn't have PDO lock */
1803                         rc = -ESTALE;
1804                 } else {
1805                         /* Non-dir object shouldn't have PDO lock */
1806                         LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
1807                 }
1808         }
1809 out:
1810         if (rc)
1811                 mdt_object_unlock(info, o, lh, 1);
1812
1813
1814         RETURN(rc);
1815 }
1816
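/*
 * Illustrative sketch (hypothetical helper, kept under "#if 0"): the PDO
 * naming scheme used by mdt_object_lock() above. Two operations in the same
 * directory share the fid-based part of the resource name; once the name hash
 * is stored at LUSTRE_RES_ID_HSH_OFF they conflict only when they hash to the
 * same bucket, which is what makes the directory lock "parallel".
 */
#if 0
static void mdt_example_pdo_res_id(const struct lu_fid *dir_fid,
                                   __u64 name_hash,
                                   struct ldlm_res_id *res_id)
{
        /* same fid-based resource name as the whole-directory PDO lock ... */
        fid_build_reg_res_name(dir_fid, res_id);
        /* ... plus the per-name hash that narrows the conflict to one bucket */
        res_id->name[LUSTRE_RES_ID_HSH_OFF] = name_hash;
}
#endif
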
1817 /*
1818  * If @decref is set, just call ldlm_lock_decref(); otherwise call
1819  * ptlrpc_save_lock() to save this lock in the request. When the transaction
1820  * commits, the request will be released, and the lock along with it.
1821  */
1822 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
1823                        struct mdt_lock_handle *lh, int decref)
1824 {
1825         struct ptlrpc_request *req = mdt_info_req(info);
1826         ENTRY;
1827
1828         if (lustre_handle_is_used(&lh->mlh_pdo_lh)) {
1829                 /* Do not save PDO locks to request, just decref. */
1830                 mdt_fid_unlock(&lh->mlh_pdo_lh,
1831                                lh->mlh_pdo_mode);
1832                 lh->mlh_pdo_lh.cookie = 0ull;
1833         }
1834
1835         if (lustre_handle_is_used(&lh->mlh_reg_lh)) {
1836                 if (decref) {
1837                         mdt_fid_unlock(&lh->mlh_reg_lh,
1838                                        lh->mlh_reg_mode);
1839                 } else {
1840                         ptlrpc_save_lock(req, &lh->mlh_reg_lh,
1841                                          lh->mlh_reg_mode);
1842                 }
1843                 lh->mlh_reg_lh.cookie = 0ull;
1844         }
1845
1846         EXIT;
1847 }
1848
1849 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
1850                                         const struct lu_fid *f,
1851                                         struct mdt_lock_handle *lh,
1852                                         __u64 ibits)
1853 {
1854         struct mdt_object *o;
1855
1856         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
1857         if (!IS_ERR(o)) {
1858                 int rc;
1859
1860                 rc = mdt_object_lock(info, o, lh, ibits,
1861                                      MDT_LOCAL_LOCK);
1862                 if (rc != 0) {
1863                         mdt_object_put(info->mti_env, o);
1864                         o = ERR_PTR(rc);
1865                 }
1866         }
1867         return o;
1868 }
1869
1870 void mdt_object_unlock_put(struct mdt_thread_info * info,
1871                            struct mdt_object * o,
1872                            struct mdt_lock_handle *lh,
1873                            int decref)
1874 {
1875         mdt_object_unlock(info, o, lh, decref);
1876         mdt_object_put(info->mti_env, o);
1877 }
1878
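/*
 * Illustrative sketch (kept under "#if 0", not compiled): the typical
 * find/lock/use/unlock-put pattern built from the helpers above. The lock
 * handle slot MDT_LH_PARENT and the helper mdt_lock_reg_init() are assumed to
 * be declared in mdt_internal.h; if they are not, initialize the handle with
 * mdt_lock_handle_init() and set mlh_reg_mode/mlh_type by hand.
 */
#if 0
static int mdt_example_with_lock(struct mdt_thread_info *info,
                                 const struct lu_fid *fid)
{
        struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
        struct mdt_object      *o;
        int                     rc = 0;

        mdt_lock_reg_init(lh, LCK_PR);          /* assumed helper */
        o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_LOOKUP);
        if (IS_ERR(o))
                return PTR_ERR(o);

        /* ... work on the object while holding the LOOKUP bit ... */

        /* decref == 1: drop the lock now instead of saving it in the request */
        mdt_object_unlock_put(info, o, lh, 1);
        return rc;
}
#endif
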
1879 static struct mdt_handler *mdt_handler_find(__u32 opc,
1880                                             struct mdt_opc_slice *supported)
1881 {
1882         struct mdt_opc_slice *s;
1883         struct mdt_handler   *h;
1884
1885         h = NULL;
1886         for (s = supported; s->mos_hs != NULL; s++) {
1887                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
1888                         h = s->mos_hs + (opc - s->mos_opc_start);
1889                         if (likely(h->mh_opc != 0))
1890                                 LASSERT(h->mh_opc == opc);
1891                         else
1892                                 h = NULL; /* unsupported opc */
1893                         break;
1894                 }
1895         }
1896         return h;
1897 }
1898
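/*
 * Illustrative sketch (kept under "#if 0"): the shape of the slice tables
 * mdt_handler_find() walks. Handlers are indexed by (opc - mos_opc_start)
 * inside each [mos_opc_start, mos_opc_end) range, and the array of slices is
 * terminated by a NULL mos_hs. The names example_mds_ops and example_slices
 * are hypothetical; the real tables live elsewhere in mdt.
 */
#if 0
static struct mdt_handler example_mds_ops[MDS_LAST_OPC - MDS_FIRST_OPC] = {
        [MDS_GETATTR - MDS_FIRST_OPC] = {
                .mh_name = "mds_getattr",
                .mh_opc  = MDS_GETATTR,
                .mh_act  = mdt_getattr,
                .mh_fmt  = &RQF_MDS_GETATTR
        },
};

static struct mdt_opc_slice example_slices[] = {
        {
                .mos_opc_start = MDS_FIRST_OPC,
                .mos_opc_end   = MDS_LAST_OPC,
                .mos_hs        = example_mds_ops
        },
        {
                .mos_hs        = NULL   /* terminator required by the lookup */
        }
};
#endif
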
1899 static int mdt_lock_resname_compat(struct mdt_device *m,
1900                                    struct ldlm_request *req)
1901 {
1902         /* XXX something... later. */
1903         return 0;
1904 }
1905
1906 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
1907 {
1908         /* XXX something... later. */
1909         return 0;
1910 }
1911
1912 /*
1913  * Generic code handling requests that have struct mdt_body passed in:
1914  *
1915  *  - extract mdt_body from request and save it in @info, if present;
1916  *
1917  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
1918  *  @info;
1919  *
1920  *  - if the HABEO_CORPUS flag is set for this request type, check whether
1921  *  the object actually exists on storage (lu_object_exists()).
1922  *
1923  */
1924 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
1925 {
1926         const struct mdt_body    *body;
1927         struct mdt_object        *obj;
1928         const struct lu_env      *env;
1929         struct req_capsule       *pill;
1930         int                       rc;
1931         ENTRY;
1932
1933         env = info->mti_env;
1934         pill = info->mti_pill;
1935
1936         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1937         if (body == NULL)
1938                 RETURN(-EFAULT);
1939
1940         if (!(body->valid & OBD_MD_FLID))
1941                 RETURN(0);
1942
1943         if (!fid_is_sane(&body->fid1)) {
1944                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
1945                 RETURN(-EINVAL);
1946         }
1947
1948         /*
1949          * Do not get the size or any capa fields before checking that the
1950          * request actually contains a capa. Some requests do not, for
1951          * instance MDS_IS_SUBDIR.
1952          */
1953         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
1954             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
1955                 mdt_set_capainfo(info, 0, &body->fid1,
1956                                  req_capsule_client_get(pill, &RMF_CAPA1));
1957
1958         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
1959         if (!IS_ERR(obj)) {
1960                 if ((flags & HABEO_CORPUS) &&
1961                     !mdt_object_exists(obj)) {
1962                         mdt_object_put(env, obj);
1963                         /* for capability renewal, ENOENT will be handled
1964                          * in mdt_renew_capa() */
1965                         if (body->valid & OBD_MD_FLOSSCAPA)
1966                                 rc = 0;
1967                         else
1968                                 rc = -ENOENT;
1969                 } else {
1970                         info->mti_object = obj;
1971                         rc = 0;
1972                 }
1973         } else
1974                 rc = PTR_ERR(obj);
1975
1976         RETURN(rc);
1977 }
1978
1979 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
1980 {
1981         struct req_capsule *pill = info->mti_pill;
1982         int rc;
1983         ENTRY;
1984
1985         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
1986                 rc = mdt_body_unpack(info, flags);
1987         else
1988                 rc = 0;
1989
1990         if (rc == 0 && (flags & HABEO_REFERO)) {
1991                 struct mdt_device *mdt = info->mti_mdt;
1992
1993                 /* Pack reply. */
1994                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1995                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1996                                              mdt->mdt_max_mdsize);
1997                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1998                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1999                                              mdt->mdt_max_cookiesize);
2000
2001                 rc = req_capsule_server_pack(pill);
2002         }
2003         RETURN(rc);
2004 }
2005
2006 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2007 {
2008         struct md_device *next = m->mdt_child;
2009
2010         return next->md_ops->mdo_init_capa_ctxt(env, next,
2011                                                 m->mdt_opts.mo_mds_capa,
2012                                                 m->mdt_capa_timeout,
2013                                                 m->mdt_capa_alg,
2014                                                 m->mdt_capa_keys);
2015 }
2016
2017 /*
2018  * Invoke handler for this request opc. Also do necessary preprocessing
2019  * (according to handler ->mh_flags), and post-processing (setting of
2020  * ->last_{xid,committed}).
2021  */
2022 static int mdt_req_handle(struct mdt_thread_info *info,
2023                           struct mdt_handler *h, struct ptlrpc_request *req)
2024 {
2025         int   rc, serious = 0;
2026         __u32 flags;
2027
2028         ENTRY;
2029
2030         LASSERT(h->mh_act != NULL);
2031         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2032         LASSERT(current->journal_info == NULL);
2033
2034         /*
2035          * Mask out OBD_FAIL_ONCE, because setting
2036          * obd_fail_loc |= OBD_FAIL_ONCE here would stop the failed request
2037          * from being handled correctly later in ldlm, since it lacks the
2038          * proper actions taken in target_send_reply_msg().
2039          */
2040         if (h->mh_fail_id != 0) {
2041                 /*
2042                  * Set info->mti_fail_id to the handler's fail_id; it will be
2043                  * used later and is better than using the default fail_id.
2044                  */
2045                 if (OBD_FAIL_CHECK_RESET(h->mh_fail_id & OBD_FAIL_MASK_LOC,
2046                                          h->mh_fail_id & ~OBD_FAILED)) {
2047                         info->mti_fail_id = h->mh_fail_id;
2048                         RETURN(0);
2049                 }
2050         }
2051
2052         rc = 0;
2053         flags = h->mh_flags;
2054         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2055
2056         if (h->mh_fmt != NULL) {
2057                 req_capsule_set(info->mti_pill, h->mh_fmt);
2058                 rc = mdt_unpack_req_pack_rep(info, flags);
2059         }
2060
2061         if (rc == 0 && flags & MUTABOR &&
2062             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2063                 /* should it be rq_status? */
2064                 rc = -EROFS;
2065
2066         if (rc == 0 && flags & HABEO_CLAVIS) {
2067                 struct ldlm_request *dlm_req;
2068
2069                 LASSERT(h->mh_fmt != NULL);
2070
2071                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2072                 if (dlm_req != NULL) {
2073                         if (info->mti_mdt->mdt_opts.mo_compat_resname)
2074                                 rc = mdt_lock_resname_compat(info->mti_mdt,
2075                                                              dlm_req);
2076                         info->mti_dlm_req = dlm_req;
2077                 } else {
2078                         rc = -EFAULT;
2079                 }
2080         }
2081
2082         /* capability settings changed via /proc; reinitialize the ctxt */
2083         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2084                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2085                 info->mti_mdt->mdt_capa_conf = 0;
2086         }
2087
2088         if (likely(rc == 0)) {
2089                 /*
2090                  * Process the request. There are two types of rc:
2091                  * 1) errors in msg unpack/pack and other failures outside the
2092                  * operation itself; these are counted as serious errors;
2093                  * 2) errors during the fs operation itself, which should be
2094                  * placed in rq_status only.
2095                  */
2096                 rc = h->mh_act(info);
2097                 serious = is_serious(rc);
2098                 rc = clear_serious(rc);
2099         } else
2100                 serious = 1;
2101
2102         req->rq_status = rc;
2103
2104         /*
2105          * ELDLM_* codes greater than 0, as well as all non-serious errors,
2106          * should go in rq_status only.
2107          */
2108         if (rc > 0 || !serious)
2109                 rc = 0;
2110
2111         LASSERT(current->journal_info == NULL);
2112
2113         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2114             info->mti_mdt->mdt_opts.mo_compat_resname) {
2115                 struct ldlm_reply *dlmrep;
2116
2117                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2118                 if (dlmrep != NULL)
2119                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2120         }
2121
2122         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2123         if (likely(rc == 0 && h->mh_opc != MDS_DISCONNECT))
2124                 target_committed_to_req(req);
2125
2126         if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
2127                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2128                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2129                 LBUG();
2130         }
2131
2132         RETURN(rc);
2133 }
2134
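/*
 * Illustrative sketch (hypothetical handler, kept under "#if 0"): the error
 * convention mdt_req_handle() expects from a handler. Failures in message
 * unpacking and other protocol-level problems are wrapped with err_serious()
 * and become the handler return code, while ordinary filesystem errors are
 * returned plainly and end up in req->rq_status only.
 */
#if 0
static int mdt_example_act(struct mdt_thread_info *info)
{
        struct mdt_body *body;

        body = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        if (body == NULL)
                /* protocol-level failure: counted as a serious error */
                return err_serious(-EFAULT);

        /* filesystem-level failure: goes into rq_status, reply is still sent */
        return -ENOENT;
}
#endif
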
2135 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2136 {
2137         lh->mlh_type = MDT_NUL_LOCK;
2138         lh->mlh_reg_lh.cookie = 0ull;
2139         lh->mlh_reg_mode = LCK_MINMODE;
2140         lh->mlh_pdo_lh.cookie = 0ull;
2141         lh->mlh_pdo_mode = LCK_MINMODE;
2142 }
2143
2144 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2145 {
2146         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2147         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2148 }
2149
2150 /*
2151  * Initialize selected fields of struct mdt_thread_info. The remaining fields
2152  * are left uninitialized, because it is too expensive to zero out the whole
2153  * mdt_thread_info (> 1K) on each request arrival.
2154  */
2155 static void mdt_thread_info_init(struct ptlrpc_request *req,
2156                                  struct mdt_thread_info *info)
2157 {
2158         int i;
2159         struct md_capainfo *ci;
2160
2161         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2162         info->mti_pill = &req->rq_pill;
2163
2164         /* lock handle */
2165         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2166                 mdt_lock_handle_init(&info->mti_lh[i]);
2167
2168         /* mdt device: it can be NULL during CONNECT */
2169         if (req->rq_export) {
2170                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2171                 info->mti_exp = req->rq_export;
2172         } else
2173                 info->mti_mdt = NULL;
2174         info->mti_env = req->rq_svc_thread->t_env;
2175         ci = md_capainfo(info->mti_env);
2176         memset(ci, 0, sizeof *ci);
2177
2178         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
2179         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
2180
2181         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
2182         info->mti_body = NULL;
2183         info->mti_object = NULL;
2184         info->mti_dlm_req = NULL;
2185         info->mti_has_trans = 0;
2186         info->mti_no_need_trans = 0;
2187         info->mti_cross_ref = 0;
2188         info->mti_opdata = 0;
2189
2190         /* Do not check for split by default. */
2191         info->mti_spec.sp_ck_split = 0;
2192 }
2193
2194 static void mdt_thread_info_fini(struct mdt_thread_info *info)
2195 {
2196         int i;
2197
2198         req_capsule_fini(info->mti_pill);
2199         if (info->mti_object != NULL) {
2200                 mdt_object_put(info->mti_env, info->mti_object);
2201                 info->mti_object = NULL;
2202         }
2203         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2204                 mdt_lock_handle_fini(&info->mti_lh[i]);
2205         info->mti_env = NULL;
2206 }
2207
2208 /* mds/handler.c */
2209 extern int mds_filter_recovery_request(struct ptlrpc_request *req,
2210                                        struct obd_device *obd, int *process);
2211 /*
2212  * Handle recovery. Return:
2213  *        +1: continue request processing;
2214  *       -ve: abort immediately with the given error code;
2215  *         0: send reply with error code in req->rq_status;
2216  */
2217 static int mdt_recovery(struct mdt_thread_info *info)
2218 {
2219         struct ptlrpc_request *req = mdt_info_req(info);
2220         int recovering;
2221         struct obd_device *obd;
2222
2223         ENTRY;
2224
2225         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2226         case MDS_CONNECT:
2227         case SEC_CTX_INIT:
2228         case SEC_CTX_INIT_CONT:
2229         case SEC_CTX_FINI:
2230                 {
2231 #if 0
2232                         int rc;
2233
2234                         rc = mdt_handle_idmap(info);
2235                         if (rc)
2236                                 RETURN(rc);
2237                         else
2238 #endif
2239                                 RETURN(+1);
2240                 }
2241         }
2242
2243         if (unlikely(req->rq_export == NULL)) {
2244                 CERROR("operation %d on unconnected MDS from %s\n",
2245                        lustre_msg_get_opc(req->rq_reqmsg),
2246                        libcfs_id2str(req->rq_peer));
2247                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
2248                  * mds_A will get -ENOTCONN (especially ping requests), which
2249                  * causes mds_A to hit its deactivate timeout; then, when
2250                  * mds_A cleans up, the cleanup process will be suspended
2251                  * since the deactivate timeout is not zero.
2252                  */
2253                 req->rq_status = -ENOTCONN;
2254                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
2255                 RETURN(0);
2256         }
2257
2258         /* sanity check: if the xid matches, the request must be marked as
2259          * resent or replayed */
2260         if (req_xid_is_last(req)) {
2261                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
2262                       (MSG_RESENT | MSG_REPLAY))) {
2263                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
2264                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
2265                                   lustre_msg_get_flags(req->rq_reqmsg));
2266                         LBUG();
2267                         req->rq_status = -ENOTCONN;
2268                         RETURN(-ENOTCONN);
2269                 }
2270         }
2271
2272         /* else: note the opposite is not always true; a RESENT req after a
2273          * failover will usually not match the last_xid, since it was likely
2274          * never committed. A REPLAYed request will almost never match the
2275          * last xid, however it could for a committed, but still retained,
2276          * open. */
2277
2278         obd = req->rq_export->exp_obd;
2279
2280         /* Check for aborted recovery... */
2281         spin_lock_bh(&obd->obd_processing_task_lock);
2282         recovering = obd->obd_recovering;
2283         spin_unlock_bh(&obd->obd_processing_task_lock);
2284         if (unlikely(recovering)) {
2285                 int rc;
2286                 int should_process;
2287                 DEBUG_REQ(D_INFO, req, "Got new replay");
2288                 rc = mds_filter_recovery_request(req, obd, &should_process);
2289                 if (rc != 0 || !should_process)
2290                         RETURN(rc);
2291                 else if (should_process < 0) {
2292                         req->rq_status = should_process;
2293                         rc = ptlrpc_error(req);
2294                         RETURN(rc);
2295                 }
2296         }
2297         RETURN(+1);
2298 }
2299
2300 static int mdt_reply(struct ptlrpc_request *req, int rc,
2301                      struct mdt_thread_info *info)
2302 {
2303         ENTRY;
2304
2305 #if 0
2306         if (req->rq_reply_state == NULL && rc == 0) {
2307                 req->rq_status = rc;
2308                 lustre_pack_reply(req, 1, NULL, NULL);
2309         }
2310 #endif
2311         target_send_reply(req, rc, info->mti_fail_id);
2312         RETURN(0);
2313 }
2314
2315 /* mds/handler.c */
2316 extern int mds_msg_check_version(struct lustre_msg *msg);
2317
2318 static int mdt_handle0(struct ptlrpc_request *req,
2319                        struct mdt_thread_info *info,
2320                        struct mdt_opc_slice *supported)
2321 {
2322         struct mdt_handler *h;
2323         struct lustre_msg  *msg;
2324         int                 rc;
2325
2326         ENTRY;
2327
2328         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
2329                 RETURN(0);
2330
2331         LASSERT(current->journal_info == NULL);
2332
2333         msg = req->rq_reqmsg;
2334         rc = mds_msg_check_version(msg);
2335         if (likely(rc == 0)) {
2336                 rc = mdt_recovery(info);
2337                 if (likely(rc == +1)) {
2338                         h = mdt_handler_find(lustre_msg_get_opc(msg),
2339                                              supported);
2340                         if (likely(h != NULL)) {
2341                                 rc = mdt_req_handle(info, h, req);
2342                                 rc = mdt_reply(req, rc, info);
2343                         } else {
2344                                 CERROR("Unsupported opc: 0x%x\n", lustre_msg_get_opc(msg));
2345                                 req->rq_status = -ENOTSUPP;
2346                                 rc = ptlrpc_error(req);
2347                                 RETURN(rc);
2348                         }
2349                 }
2350         } else
2351                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
2352         RETURN(rc);
2353 }
2354
2355 /*
2356  * MDT handler function called by a ptlrpc service thread on request arrival.
2357  *
2358  * XXX common "target" functionality should be factored into a separate module
2359  * shared by mdt, ost and stand-alone services like fld.
2360  */
2361 static int mdt_handle_common(struct ptlrpc_request *req,
2362                              struct mdt_opc_slice *supported)
2363 {
2364         struct lu_env          *env;
2365         struct mdt_thread_info *info;
2366         int                     rc;
2367         ENTRY;
2368
2369         env = req->rq_svc_thread->t_env;
2370         LASSERT(env != NULL);
2371         LASSERT(env->le_ses != NULL);
2372         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
2373         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
2374         LASSERT(info != NULL);
2375
2376         mdt_thread_info_init(req, info);
2377
2378         rc = mdt_handle0(req, info, supported);
2379
2380         mdt_thread_info_fini(info);
2381         RETURN(rc);
2382 }
2383
2384 /*
2385  * This is called from the recovery code as the handler of _all_ RPC types,
2386  * FLD and SEQ included.
2387  */
2388 int mdt_recovery_handle(struct ptlrpc_request *req)
2389 {
2390         int rc;
2391         ENTRY;
2392
2393         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2394         case FLD_QUERY:
2395                 rc = mdt_handle_common(req, mdt_fld_handlers);
2396                 break;
2397         case SEQ_QUERY:
2398                 rc = mdt_handle_common(req, mdt_seq_handlers);
2399                 break;
2400         default:
2401                 rc = mdt_handle_common(req, mdt_regular_handlers);
2402                 break;
2403         }
2404
2405         RETURN(rc);
2406 }
2407
2408 static int mdt_regular_handle(struct ptlrpc_request *req)
2409 {
2410         return mdt_handle_common(req, mdt_regular_handlers);
2411 }
2412
2413 static int mdt_readpage_handle(struct ptlrpc_request *req)
2414 {
2415         return mdt_handle_common(req, mdt_readpage_handlers);
2416 }
2417
2418 static int mdt_xmds_handle(struct ptlrpc_request *req)
2419 {
2420         return mdt_handle_common(req, mdt_xmds_handlers);
2421 }
2422
2423 static int mdt_mdsc_handle(struct ptlrpc_request *req)
2424 {
2425         return mdt_handle_common(req, mdt_seq_handlers);
2426 }
2427
2428 static int mdt_mdss_handle(struct ptlrpc_request *req)
2429 {
2430         return mdt_handle_common(req, mdt_seq_handlers);
2431 }
2432
2433 static int mdt_dtss_handle(struct ptlrpc_request *req)
2434 {
2435         return mdt_handle_common(req, mdt_seq_handlers);
2436 }
2437
2438 static int mdt_fld_handle(struct ptlrpc_request *req)
2439 {
2440         return mdt_handle_common(req, mdt_fld_handlers);
2441 }
2442
2443 enum mdt_it_code {
2444         MDT_IT_OPEN,
2445         MDT_IT_OCREAT,
2446         MDT_IT_CREATE,
2447         MDT_IT_GETATTR,
2448         MDT_IT_READDIR,
2449         MDT_IT_LOOKUP,
2450         MDT_IT_UNLINK,
2451         MDT_IT_TRUNC,
2452         MDT_IT_GETXATTR,
2453         MDT_IT_NR
2454 };
2455
2456 static int mdt_intent_getattr(enum mdt_it_code opcode,
2457                               struct mdt_thread_info *info,
2458                               struct ldlm_lock **lockp,
2459                               int flags);
2460 static int mdt_intent_reint(enum mdt_it_code opcode,
2461                             struct mdt_thread_info *info,
2462                             struct ldlm_lock **lockp,
2463                             int flags);
2464
2465 static struct mdt_it_flavor {
2466         const struct req_format *it_fmt;
2467         __u32                    it_flags;
2468         int                    (*it_act)(enum mdt_it_code,
2469                                          struct mdt_thread_info *,
2470                                          struct ldlm_lock **,
2471                                          int);
2472         long                     it_reint;
2473 } mdt_it_flavor[] = {
2474         [MDT_IT_OPEN]     = {
2475                 .it_fmt   = &RQF_LDLM_INTENT,
2476                 /*.it_flags = HABEO_REFERO,*/
2477                 .it_flags = 0,
2478                 .it_act   = mdt_intent_reint,
2479                 .it_reint = REINT_OPEN
2480         },
2481         [MDT_IT_OCREAT]   = {
2482                 .it_fmt   = &RQF_LDLM_INTENT,
2483                 .it_flags = MUTABOR,
2484                 .it_act   = mdt_intent_reint,
2485                 .it_reint = REINT_OPEN
2486         },
2487         [MDT_IT_CREATE]   = {
2488                 .it_fmt   = &RQF_LDLM_INTENT,
2489                 .it_flags = MUTABOR,
2490                 .it_act   = mdt_intent_reint,
2491                 .it_reint = REINT_CREATE
2492         },
2493         [MDT_IT_GETATTR]  = {
2494                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
2495                 .it_flags = HABEO_REFERO,
2496                 .it_act   = mdt_intent_getattr
2497         },
2498         [MDT_IT_READDIR]  = {
2499                 .it_fmt   = NULL,
2500                 .it_flags = 0,
2501                 .it_act   = NULL
2502         },
2503         [MDT_IT_LOOKUP]   = {
2504                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
2505                 .it_flags = HABEO_REFERO,
2506                 .it_act   = mdt_intent_getattr
2507         },
2508         [MDT_IT_UNLINK]   = {
2509                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
2510                 .it_flags = MUTABOR,
2511                 .it_act   = NULL,
2512                 .it_reint = REINT_UNLINK
2513         },
2514         [MDT_IT_TRUNC]    = {
2515                 .it_fmt   = NULL,
2516                 .it_flags = MUTABOR,
2517                 .it_act   = NULL
2518         },
2519         [MDT_IT_GETXATTR] = {
2520                 .it_fmt   = NULL,
2521                 .it_flags = 0,
2522                 .it_act   = NULL
2523         }
2524 };
2525
2526 int mdt_intent_lock_replace(struct mdt_thread_info *info,
2527                             struct ldlm_lock **lockp,
2528                             struct ldlm_lock *new_lock,
2529                             struct mdt_lock_handle *lh,
2530                             int flags)
2531 {
2532         struct ptlrpc_request  *req = mdt_info_req(info);
2533         struct ldlm_lock       *lock = *lockp;
2534
2535         /*
2536          * Get a new lock only when a possible resend did not find an
2537          * existing lock.
2538          */
2539         if (new_lock == NULL)
2540                 new_lock = ldlm_handle2lock(&lh->mlh_reg_lh);
2541
2542         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
2543                 lh->mlh_reg_lh.cookie = 0;
2544                 RETURN(0);
2545         }
2546
2547         LASSERTF(new_lock != NULL,
2548                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
2549
2550         /*
2551          * If we've already given this lock to a client once, then we should
2552          * have no readers or writers.  Otherwise, we should have one reader
2553          * _or_ writer ref (which will be zeroed below) before returning the
2554          * lock to a client.
2555          */
2556         if (new_lock->l_export == req->rq_export) {
2557                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2558         } else {
2559                 LASSERT(new_lock->l_export == NULL);
2560                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2561         }
2562
2563         *lockp = new_lock;
2564
2565         if (new_lock->l_export == req->rq_export) {
2566                 /*
2567                  * Already gave this to the client, which means that we
2568                  * reconstructed a reply.
2569                  */
2570                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2571                         MSG_RESENT);
2572                 lh->mlh_reg_lh.cookie = 0;
2573                 RETURN(ELDLM_LOCK_REPLACED);
2574         }
2575
2576         /* This lock might already have been given to the client by a resent
2577          * request; in that case we should return ELDLM_LOCK_ABORTED, so we
2578          * should check led_held_locks here, but that would affect
2579          * performance. FIXME
2580          */
2581         /* Fix up the lock to be given to the client */
2582         lock_res_and_lock(new_lock);
2583         new_lock->l_readers = 0;
2584         new_lock->l_writers = 0;
2585
2586         new_lock->l_export = class_export_get(req->rq_export);
2587         spin_lock(&req->rq_export->exp_ldlm_data.led_lock);
2588         list_add(&new_lock->l_export_chain,
2589                  &new_lock->l_export->exp_ldlm_data.led_held_locks);
2590         spin_unlock(&req->rq_export->exp_ldlm_data.led_lock);
2591
2592         new_lock->l_blocking_ast = lock->l_blocking_ast;
2593         new_lock->l_completion_ast = lock->l_completion_ast;
2594         new_lock->l_remote_handle = lock->l_remote_handle;
2595         new_lock->l_flags &= ~LDLM_FL_LOCAL;
2596
2597         unlock_res_and_lock(new_lock);
2598         LDLM_LOCK_PUT(new_lock);
2599         lh->mlh_reg_lh.cookie = 0;
2600
2601         RETURN(ELDLM_LOCK_REPLACED);
2602 }
2603
2604 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
2605                                     struct ldlm_lock *new_lock,
2606                                     struct ldlm_lock **old_lock,
2607                                     struct mdt_lock_handle *lh)
2608 {
2609         struct ptlrpc_request  *req = mdt_info_req(info);
2610         struct obd_export      *exp = req->rq_export;
2611         struct lustre_handle    remote_hdl;
2612         struct ldlm_request    *dlmreq;
2613         struct list_head       *iter;
2614
2615         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
2616                 return;
2617
2618         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2619         remote_hdl = dlmreq->lock_handle[0];
2620
2621         spin_lock(&exp->exp_ldlm_data.led_lock);
2622         list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
2623                 struct ldlm_lock *lock;
2624                 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
2625                 if (lock == new_lock)
2626                         continue;
2627                 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
2628                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
2629                         lh->mlh_reg_mode = lock->l_granted_mode;
2630
2631                         LDLM_DEBUG(lock, "restoring lock cookie");
2632                         DEBUG_REQ(D_DLMTRACE, req,
2633                                   "restoring lock cookie "LPX64,
2634                                   lh->mlh_reg_lh.cookie);
2635                         if (old_lock)
2636                                 *old_lock = LDLM_LOCK_GET(lock);
2637                         spin_unlock(&exp->exp_ldlm_data.led_lock);
2638                         return;
2639                 }
2640         }
2641         spin_unlock(&exp->exp_ldlm_data.led_lock);
2642
2643         /*
2644          * If the xid matches, then we know this is a resent request and allow
2645          * it. (It's probably an OPEN, for which we don't send a lock.)
2646          */
2647         if (req_xid_is_last(req))
2648                 return;
2649
2650         /*
2651          * This remote handle isn't enqueued, so we never received or processed
2652          * this request.  Clear MSG_RESENT, because it can be handled like any
2653          * normal request now.
2654          */
2655         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2656
2657         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
2658                   remote_hdl.cookie);
2659 }
2660
2661 static int mdt_intent_getattr(enum mdt_it_code opcode,
2662                               struct mdt_thread_info *info,
2663                               struct ldlm_lock **lockp,
2664                               int flags)
2665 {
2666         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
2667         struct ldlm_lock       *new_lock = NULL;
2668         __u64                   child_bits;
2669         struct ldlm_reply      *ldlm_rep;
2670         struct ptlrpc_request  *req;
2671         struct mdt_body        *reqbody;
2672         struct mdt_body        *repbody;
2673         int                     rc;
2674         ENTRY;
2675
2676         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
2677         LASSERT(reqbody);
2678
2679         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
2680         LASSERT(repbody);
2681
2682         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
2683         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
2684         repbody->eadatasize = 0;
2685         repbody->aclsize = 0;
2686
2687         switch (opcode) {
2688         case MDT_IT_LOOKUP:
2689                 child_bits = MDS_INODELOCK_LOOKUP;
2690                 break;
2691         case MDT_IT_GETATTR:
2692                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
2693                 break;
2694         default:
2695                 CERROR("Unhandled intent opcode: %d\n", opcode);
2696                 GOTO(out_shrink, rc = -EINVAL);
2697         }
2698
2699         rc = mdt_init_ucred(info, reqbody);
2700         if (rc)
2701                 GOTO(out_shrink, rc);
2702
2703         req = info->mti_pill->rc_req;
2704         ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2705         mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD);
2706
2707         /* Get lock from request for possible resent case. */
2708         mdt_intent_fixup_resent(info, *lockp, &new_lock, lhc);
2709
2710         ldlm_rep->lock_policy_res2 =
2711                 mdt_getattr_name_lock(info, lhc, child_bits, ldlm_rep);
2712
2713         if (mdt_get_disposition(ldlm_rep, DISP_LOOKUP_NEG))
2714                 ldlm_rep->lock_policy_res2 = 0;
2715         if (!mdt_get_disposition(ldlm_rep, DISP_LOOKUP_POS) ||
2716             ldlm_rep->lock_policy_res2) {
2717                 lhc->mlh_reg_lh.cookie = 0ull;
2718                 GOTO(out_ucred, rc = ELDLM_LOCK_ABORTED);
2719         }
2720
2721         rc = mdt_intent_lock_replace(info, lockp, new_lock, lhc, flags);
2722         EXIT;
2723 out_ucred:
2724         mdt_exit_ucred(info);
2725 out_shrink:
2726         mdt_shrink_reply(info);
2727         return rc;
2728 }
2729
2730 static int mdt_intent_reint(enum mdt_it_code opcode,
2731                             struct mdt_thread_info *info,
2732                             struct ldlm_lock **lockp,
2733                             int flags)
2734 {
2735         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
2736         struct ldlm_reply      *rep = NULL;
2737         long                    opc;
2738         int                     rc;
2739
2740         static const struct req_format *intent_fmts[REINT_MAX] = {
2741                 [REINT_CREATE]  = &RQF_LDLM_INTENT_CREATE,
2742                 [REINT_OPEN]    = &RQF_LDLM_INTENT_OPEN
2743         };
2744
2745         ENTRY;
2746
2747         opc = mdt_reint_opcode(info, intent_fmts);
2748         if (opc < 0)
2749                 RETURN(opc);
2750
2751         if (mdt_it_flavor[opcode].it_reint != opc) {
2752                 CERROR("Reint code %ld doesn't match intent: %d\n",
2753                        opc, opcode);
2754                 RETURN(err_serious(-EPROTO));
2755         }
2756
2757         /* Get lock from request for possible resent case. */
2758         mdt_intent_fixup_resent(info, *lockp, NULL, lhc);
2759
2760         rc = mdt_reint_internal(info, lhc, opc);
2761
2762         /* Check whether the reply has been packed successfully. */
2763         if (mdt_info_req(info)->rq_repmsg != NULL)
2764                 rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2765         if (rep == NULL)
2766                 RETURN(err_serious(-EFAULT));
2767
2768         /* MDC expects this in any case */
2769         if (rc != 0)
2770                 mdt_set_disposition(info, rep, DISP_LOOKUP_EXECD);
2771
2772         /* Cross-ref case, the lock should be returned to the client */
2773         if (rc == -EREMOTE) {
2774                 LASSERT(lustre_handle_is_used(&lhc->mlh_reg_lh));
2775                 rep->lock_policy_res2 = 0;
2776                 rc = mdt_intent_lock_replace(info, lockp, NULL, lhc, flags);
2777                 RETURN(rc);
2778         }
2779         rep->lock_policy_res2 = clear_serious(rc);
2780
2781         lhc->mlh_reg_lh.cookie = 0ull;
2782         rc = ELDLM_LOCK_ABORTED;
2783         RETURN(rc);
2784 }
2785
2786 static int mdt_intent_code(long itcode)
2787 {
2788         int rc;
2789
2790         switch(itcode) {
2791         case IT_OPEN:
2792                 rc = MDT_IT_OPEN;
2793                 break;
2794         case IT_OPEN|IT_CREAT:
2795                 rc = MDT_IT_OCREAT;
2796                 break;
2797         case IT_CREAT:
2798                 rc = MDT_IT_CREATE;
2799                 break;
2800         case IT_READDIR:
2801                 rc = MDT_IT_READDIR;
2802                 break;
2803         case IT_GETATTR:
2804                 rc = MDT_IT_GETATTR;
2805                 break;
2806         case IT_LOOKUP:
2807                 rc = MDT_IT_LOOKUP;
2808                 break;
2809         case IT_UNLINK:
2810                 rc = MDT_IT_UNLINK;
2811                 break;
2812         case IT_TRUNC:
2813                 rc = MDT_IT_TRUNC;
2814                 break;
2815         case IT_GETXATTR:
2816                 rc = MDT_IT_GETXATTR;
2817                 break;
2818         default:
2819                 CERROR("Unknown intent opcode: %ld\n", itcode);
2820                 rc = -EINVAL;
2821                 break;
2822         }
2823         return rc;
2824 }
2825
2826 static int mdt_intent_opc(long itopc, struct mdt_thread_info *info,
2827                           struct ldlm_lock **lockp, int flags)
2828 {
2829         struct req_capsule   *pill;
2830         struct mdt_it_flavor *flv;
2831         int opc;
2832         int rc;
2833         ENTRY;
2834
2835         opc = mdt_intent_code(itopc);
2836         if (opc < 0)
2837                 RETURN(-EINVAL);
2838
2839         pill = info->mti_pill;
2840         flv  = &mdt_it_flavor[opc];
2841
2842         if (flv->it_fmt != NULL)
2843                 req_capsule_extend(pill, flv->it_fmt);
2844
2845         rc = mdt_unpack_req_pack_rep(info, flv->it_flags);
2846         if (rc == 0) {
2847                 struct ptlrpc_request *req = mdt_info_req(info);
2848                 if (flv->it_flags & MUTABOR &&
2849                     req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2850                         RETURN(-EROFS);
2851         }
2852         if (rc == 0 && flv->it_act != NULL) {
2853                 /* execute policy */
2854                 rc = flv->it_act(opc, info, lockp, flags);
2855         } else {
2856                 rc = -EOPNOTSUPP;
2857         }
2858         RETURN(rc);
2859 }
2860
2861 static int mdt_intent_policy(struct ldlm_namespace *ns,
2862                              struct ldlm_lock **lockp, void *req_cookie,
2863                              ldlm_mode_t mode, int flags, void *data)
2864 {
2865         struct mdt_thread_info *info;
2866         struct ptlrpc_request  *req  =  req_cookie;
2867         struct ldlm_intent     *it;
2868         struct req_capsule     *pill;
2869         int rc;
2870
2871         ENTRY;
2872
2873         LASSERT(req != NULL);
2874
2875         info = lu_context_key_get(&req->rq_svc_thread->t_env->le_ctx,
2876                                   &mdt_thread_key);
2877         LASSERT(info != NULL);
2878         pill = info->mti_pill;
2879         LASSERT(pill->rc_req == req);
2880
2881         if (req->rq_reqmsg->lm_bufcount > DLM_INTENT_IT_OFF) {
2882                 req_capsule_extend(pill, &RQF_LDLM_INTENT);
2883                 it = req_capsule_client_get(pill, &RMF_LDLM_INTENT);
2884                 if (it != NULL) {
2885                         const struct ldlm_request *dlmreq;
2886                         __u64 req_bits;
2887 #if 0
2888                         struct ldlm_lock       *lock = *lockp;
2889
2890                         LDLM_DEBUG(lock, "intent policy opc: %s\n",
2891                                    ldlm_it2str(it->opc));
2892 #endif
2893
2894                         rc = mdt_intent_opc(it->opc, info, lockp, flags);
2895                         if (rc == 0)
2896                                 rc = ELDLM_OK;
2897
2898                         /*
2899                          * A lock without inodebits makes no sense and will
2900                          * oops later in ldlm. Check it now to see whether we
2901                          * got a bad lock from the client or the bits were
2902                          * corrupted somewhere in mdt_intent_opc().
2903                          */
2904                         dlmreq = info->mti_dlm_req;
2905                         req_bits = dlmreq->lock_desc.l_policy_data.l_inodebits.bits;
2906                         LASSERT(req_bits != 0);
2907
2908                 } else
2909                         rc = err_serious(-EFAULT);
2910         } else {
2911                 /* No intent was provided */
2912                 LASSERT(pill->rc_fmt == &RQF_LDLM_ENQUEUE);
2913                 rc = req_capsule_server_pack(pill);
2914                 if (rc)
2915                         rc = err_serious(rc);
2916         }
2917         RETURN(rc);
2918 }
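
/*
 * Descriptive note on the flow above: when the enqueue carries an intent
 * buffer, the capsule is extended to RQF_LDLM_INTENT, the intent opcode is
 * pulled out and dispatched through mdt_intent_opc(), and the client's lock
 * request is then sanity-checked for non-empty inodebits.  A plain enqueue
 * without an intent only gets its reply buffer packed here.
 */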
2919
2920 /*
2921  * Seq wrappers
2922  */
2923 static void mdt_seq_adjust(const struct lu_env *env,
2924                           struct mdt_device *m, int lost)
2925 {
2926         struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site;
2927         struct lu_range out;
2928         ENTRY;
2929
2930         LASSERT(ls && ls->ls_server_seq);
2931         LASSERT(lost >= 0);
2932         /* get an extra seq from seq_server, moving its range up */
2933         while (lost-- > 0) {
2934                 seq_server_alloc_meta(ls->ls_server_seq, NULL, &out, env);
2935         }
2936         EXIT;
2937 }
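
/*
 * A minimal usage sketch (the caller and the way "lost" is computed are
 * hypothetical): a recovery path that found N sequence allocations handed out
 * but never committed could compensate by skipping that many ranges.
 */
#if 0
static void mdt_seq_adjust_example(const struct lu_env *env,
                                   struct mdt_device *m)
{
        int lost = 2;   /* e.g. number of uncommitted sequence allocations */

        mdt_seq_adjust(env, m, lost);
}
#endif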
2938
2939 static int mdt_seq_fini(const struct lu_env *env,
2940                         struct mdt_device *m)
2941 {
2942         struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site;
2943         ENTRY;
2944
2945         if (ls && ls->ls_server_seq) {
2946                 seq_server_fini(ls->ls_server_seq, env);
2947                 OBD_FREE_PTR(ls->ls_server_seq);
2948                 ls->ls_server_seq = NULL;
2949         }
2950
2951         if (ls && ls->ls_control_seq) {
2952                 seq_server_fini(ls->ls_control_seq, env);
2953                 OBD_FREE_PTR(ls->ls_control_seq);
2954                 ls->ls_control_seq = NULL;
2955         }
2956
2957         if (ls && ls->ls_client_seq) {
2958                 seq_client_fini(ls->ls_client_seq);
2959                 OBD_FREE_PTR(ls->ls_client_seq);
2960                 ls->ls_client_seq = NULL;
2961         }
2962
2963         RETURN(0);
2964 }
2965
2966 static int mdt_seq_init(const struct lu_env *env,
2967                         const char *uuid,
2968                         struct mdt_device *m)
2969 {
2970         struct lu_site *ls;
2971         char *prefix;
2972         int rc;
2973         ENTRY;
2974
2975         ls = m->mdt_md_dev.md_lu_dev.ld_site;
2976
2977         /*
2978          * This is the sequence-controller node. Init the seq-controller
2979          * server on the local MDT.
2980          */
2981         if (ls->ls_node_id == 0) {
2982                 LASSERT(ls->ls_control_seq == NULL);
2983
2984                 OBD_ALLOC_PTR(ls->ls_control_seq);
2985                 if (ls->ls_control_seq == NULL)
2986                         RETURN(-ENOMEM);
2987
2988                 rc = seq_server_init(ls->ls_control_seq,
2989                                      m->mdt_bottom, uuid,
2990                                      LUSTRE_SEQ_CONTROLLER,
2991                                      env);
2992
2993                 if (rc)
2994                         GOTO(out_seq_fini, rc);
2995
2996                 OBD_ALLOC_PTR(ls->ls_client_seq);
2997                 if (ls->ls_client_seq == NULL)
2998                         GOTO(out_seq_fini, rc = -ENOMEM);
2999
3000                 OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
3001                 if (prefix == NULL) {
3002                         OBD_FREE_PTR(ls->ls_client_seq);
3003                         GOTO(out_seq_fini, rc = -ENOMEM);
3004                 }
3005
3006                 snprintf(prefix, MAX_OBD_NAME + 5, "ctl-%s",
3007                          uuid);
3008
3009                 /*
3010                  * Init the seq-controller client once the seq-controller server
3011                  * is ready; pass it ls->ls_control_seq so they talk directly.
3012                  */
3013                 rc = seq_client_init(ls->ls_client_seq, NULL,
3014                                      LUSTRE_SEQ_METADATA, prefix,
3015                                      ls->ls_control_seq);
3016                 OBD_FREE(prefix, MAX_OBD_NAME + 5);
3017
3018                 if (rc)
3019                         GOTO(out_seq_fini, rc);
3020         }
3021
3022         /* Init seq-server on local MDT */
3023         LASSERT(ls->ls_server_seq == NULL);
3024
3025         OBD_ALLOC_PTR(ls->ls_server_seq);
3026         if (ls->ls_server_seq == NULL)
3027                 GOTO(out_seq_fini, rc = -ENOMEM);
3028
3029         rc = seq_server_init(ls->ls_server_seq,
3030                              m->mdt_bottom, uuid,
3031                              LUSTRE_SEQ_SERVER,
3032                              env);
3033         if (rc)
3034                 GOTO(out_seq_fini, rc);
3035
3036         /* Assign seq-controller client to local seq-server. */
3037         if (ls->ls_node_id == 0) {
3038                 LASSERT(ls->ls_client_seq != NULL);
3039
3040                 rc = seq_server_set_cli(ls->ls_server_seq,
3041                                         ls->ls_client_seq,
3042                                         env);
3043         }
3044
3045         EXIT;
3046 out_seq_fini:
3047         if (rc)
3048                 mdt_seq_fini(env, m);
3049
3050         return rc;
3051 }
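
/*
 * Descriptive note: on the controller node (ls_node_id == 0) the function
 * above sets up three pieces -- the sequence-controller server, a local
 * "ctl-" client talking to it directly, and the regular metadata sequence
 * server -- and wires the client into the server via seq_server_set_cli().
 * Other nodes only start the metadata sequence server here; their controller
 * client is created later from an LCFG_ADD_MDC record (see mdt_seq_init_cli
 * below).
 */
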
3052 /*
3053  * Init the client sequence manager, which is used by the local MDS to talk
3054  * to the sequence controller on a remote node.
3055  */
3056 static int mdt_seq_init_cli(const struct lu_env *env,
3057                             struct mdt_device *m,
3058                             struct lustre_cfg *cfg)
3059 {
3060         struct lu_site    *ls = m->mdt_md_dev.md_lu_dev.ld_site;
3061         struct obd_device *mdc;
3062         struct obd_uuid   *uuidp, *mdcuuidp;
3063         char              *uuid_str, *mdc_uuid_str;
3064         int                rc;
3065         int                index;
3066         struct mdt_thread_info *info;
3067         char *p, *index_string = lustre_cfg_string(cfg, 2);
3068         ENTRY;
3069
3070         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3071         uuidp = &info->mti_u.uuid[0];
3072         mdcuuidp = &info->mti_u.uuid[1];
3073
3074         LASSERT(index_string);
3075
3076         index = simple_strtol(index_string, &p, 10);
3077         if (*p) {
3078                 CERROR("Invalid index in lustre_cfg, offset 2\n");
3079                 RETURN(-EINVAL);
3080         }
3081
3082         /* Check whether this adds the first MDC and the controller is not
3083          * yet initialized. */
3084         if (index != 0 || ls->ls_client_seq)
3085                 RETURN(0);
3086
3087         uuid_str = lustre_cfg_string(cfg, 1);
3088         mdc_uuid_str = lustre_cfg_string(cfg, 4);
3089         obd_str2uuid(uuidp, uuid_str);
3090         obd_str2uuid(mdcuuidp, mdc_uuid_str);
3091
3092         mdc = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, mdcuuidp);
3093         if (!mdc) {
3094                 CERROR("can't find controller MDC by uuid %s\n",
3095                        uuid_str);
3096                 rc = -ENOENT;
3097         } else if (!mdc->obd_set_up) {
3098                 CERROR("target %s not set up\n", mdc->obd_name);
3099                 rc = -EINVAL;
3100         } else {
3101                 LASSERT(ls->ls_control_exp);
3102                 OBD_ALLOC_PTR(ls->ls_client_seq);
3103                 if (ls->ls_client_seq != NULL) {
3104                         char *prefix;
3105
3106                         OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
3107                         if (!prefix) {
3108                                 OBD_FREE_PTR(ls->ls_client_seq);
                                     ls->ls_client_seq = NULL;
                                     RETURN(-ENOMEM);
                             }
3109
3110                         snprintf(prefix, MAX_OBD_NAME + 5, "ctl-%s",
3111                                  mdc->obd_name);
3112
3113                         rc = seq_client_init(ls->ls_client_seq,
3114                                              ls->ls_control_exp,
3115                                              LUSTRE_SEQ_METADATA,
3116                                              prefix, NULL);
3117                         OBD_FREE(prefix, MAX_OBD_NAME + 5);
3118                 } else
3119                         rc = -ENOMEM;
3120
3121                 if (rc)
3122                         RETURN(rc);
3123
3124                 LASSERT(ls->ls_server_seq != NULL);
3125                 rc = seq_server_set_cli(ls->ls_server_seq, ls->ls_client_seq,
3126                                         env);
3127         }
3128
3129         RETURN(rc);
3130 }
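
/*
 * Illustrative note (the concrete layout is inferred from the buffers read
 * above): the LCFG_ADD_MDC record handled by mdt_seq_init_cli() is expected
 * to carry
 *
 *   lustre_cfg_string(cfg, 2)       -> MDT index as a decimal string
 *   lustre_cfg_string(cfg, 1) / (4) -> uuids used to look up the controller
 *                                      MDC via class_find_client_obd()
 *
 * Only index 0, with no client sequence set up yet, triggers the controller
 * client initialization; any other record returns 0 without touching the
 * site.
 */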
3131
3132 static void mdt_seq_fini_cli(struct mdt_device *m)
3133 {
3134         struct lu_site *ls;
3135
3136         ENTRY;
3137
3138         ls = m->mdt_md_dev.md_lu_dev.ld_site;
3139
3140         if (ls && ls->ls_server_seq)
3141                 seq_server_set_cli(ls->ls_server_seq,
3142                                    NULL, NULL);
3143
3144         if (ls && ls->ls_control_exp) {
3145                 class_export_put(ls->ls_control_exp);
3146                 ls->ls_control_exp = NULL;
3147         }
3148         EXIT;
3149 }
3150
3151 /*
3152  * FLD wrappers
3153  */
3154 static int mdt_fld_fini(const struct lu_env *env,
3155                         struct mdt_device *m)
3156 {
3157         struct lu_site *ls = m->mdt_md_dev.md_lu_dev.ld_site;
3158         ENTRY;
3159
3160         if (ls && ls->ls_server_fld) {
3161                 fld_server_fini(ls->ls_server_fld, env);
3162                 OBD_FREE_PTR(ls->ls_server_fld);
3163                 ls->ls_server_fld = NULL;
3164         }
3165
3166         RETURN(0);
3167 }
3168
3169 static int mdt_fld_init(const struct lu_env *env,
3170                         const char *uuid,
3171                         struct mdt_device *m)
3172 {
3173         struct lu_site *ls;
3174         int rc;
3175         ENTRY;
3176
3177         ls = m->mdt_md_dev.md_lu_dev.ld_site;
3178
3179         OBD_ALLOC_PTR(ls->ls_server_fld);
3180         if (ls->ls_server_fld == NULL)
3181                 RETURN(-ENOMEM);
3182
3183         rc = fld_server_init(ls->ls_server_fld,
3184                              m->mdt_bottom, uuid, env);
3185         if (rc) {
3186                 OBD_FREE_PTR(ls->ls_server_fld);
3187                 ls->ls_server_fld = NULL;
3188                 RETURN(rc);
3189         }
3190
3191         RETURN(0);
3192 }
3193
3194 /* device init/fini methods */
3195 static void mdt_stop_ptlrpc_service(struct mdt_device *m)
3196 {
3197         ENTRY;
3198         if (m->mdt_regular_service != NULL) {
3199                 ptlrpc_unregister_service(m->mdt_regular_service);
3200                 m->mdt_regular_service = NULL;
3201         }
3202         if (m->mdt_readpage_service != NULL) {
3203                 ptlrpc_unregister_service(m->mdt_readpage_service);
3204                 m->mdt_readpage_service = NULL;
3205         }
3206         if (m->mdt_xmds_service != NULL) {
3207                 ptlrpc_unregister_service(m->mdt_xmds_service);
3208                 m->mdt_xmds_service = NULL;
3209         }
3210         if (m->mdt_setattr_service != NULL) {
3211                 ptlrpc_unregister_service(m->mdt_setattr_service);
3212                 m->mdt_setattr_service = NULL;
3213         }
3214         if (m->mdt_mdsc_service != NULL) {
3215                 ptlrpc_unregister_service(m->mdt_mdsc_service);
3216                 m->mdt_mdsc_service = NULL;
3217         }
3218         if (m->mdt_mdss_service != NULL) {
3219                 ptlrpc_unregister_service(m->mdt_mdss_service);
3220                 m->mdt_mdss_service = NULL;
3221         }
3222         if (m->mdt_dtss_service != NULL) {
3223                 ptlrpc_unregister_service(m->mdt_dtss_service);
3224                 m->mdt_dtss_service = NULL;
3225         }
3226         if (m->mdt_fld_service != NULL) {
3227                 ptlrpc_unregister_service(m->mdt_fld_service);
3228                 m->mdt_fld_service = NULL;
3229         }
3230         EXIT;
3231 }
3232
3233 static int mdt_start_ptlrpc_service(struct mdt_device *m)
3234 {
3235         int rc;
3236         static struct ptlrpc_service_conf conf;
3237         cfs_proc_dir_entry_t *procfs_entry;
3238         ENTRY;
3239
3240         procfs_entry = m->mdt_md_dev.md_lu_dev.ld_obd->obd_proc_entry;
3241
3242         conf = (typeof(conf)) {
3243                 .psc_nbufs            = MDS_NBUFS,
3244                 .psc_bufsize          = MDS_BUFSIZE,
3245                 .psc_max_req_size     = MDS_MAXREQSIZE,
3246                 .psc_max_reply_size   = MDS_MAXREPSIZE,
3247                 .psc_req_portal       = MDS_REQUEST_PORTAL,
3248                 .psc_rep_portal       = MDC_REPLY_PORTAL,
3249                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3250                 /*
3251                  * We'd like to have a mechanism to set this on a per-device
3252                  * basis, but alas...
3253                  */
3254                 .psc_min_threads   = min(max(mdt_num_threads, MDT_MIN_THREADS),
3255                                        MDT_MAX_THREADS),
3256                 .psc_max_threads   = MDT_MAX_THREADS,
3257                 .psc_ctx_tags      = LCT_MD_THREAD
3258         };
3259
3260         m->mdt_ldlm_client = &m->mdt_md_dev.md_lu_dev.ld_obd->obd_ldlm_client;
3261         ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
3262                            "mdt_ldlm_client", m->mdt_ldlm_client);
3263
3264         m->mdt_regular_service =
3265                 ptlrpc_init_svc_conf(&conf, mdt_regular_handle, LUSTRE_MDT_NAME,
3266                                      procfs_entry, NULL, LUSTRE_MDT_NAME);
3267         if (m->mdt_regular_service == NULL)
3268                 RETURN(-ENOMEM);
3269
3270         rc = ptlrpc_start_threads(NULL, m->mdt_regular_service);
3271         if (rc)
3272                 GOTO(err_mdt_svc, rc);
3273
3274         /*
3275          * Readpage service configuration. Ideally these parameters should
3276          * be tuned for this workload.
3277          */
3278         conf = (typeof(conf)) {
3279                 .psc_nbufs            = MDS_NBUFS,
3280                 .psc_bufsize          = MDS_BUFSIZE,
3281                 .psc_max_req_size     = MDS_MAXREQSIZE,
3282                 .psc_max_reply_size   = MDS_MAXREPSIZE,
3283                 .psc_req_portal       = MDS_READPAGE_PORTAL,
3284                 .psc_rep_portal       = MDC_REPLY_PORTAL,
3285                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3286                 .psc_min_threads   = min(max(mdt_num_threads, MDT_MIN_THREADS),
3287                                        MDT_MAX_THREADS),
3288                 .psc_max_threads   = MDT_MAX_THREADS,
3289                 .psc_ctx_tags      = LCT_MD_THREAD
3290         };
3291         m->mdt_readpage_service =
3292                 ptlrpc_init_svc_conf(&conf, mdt_readpage_handle,
3293                                      LUSTRE_MDT_NAME "_readpage",
3294                                      procfs_entry, NULL, "mdt_rdpg");
3295
3296         if (m->mdt_readpage_service == NULL) {
3297                 CERROR("failed to start readpage service\n");
3298                 GOTO(err_mdt_svc, rc = -ENOMEM);
3299         }
3300
3301         rc = ptlrpc_start_threads(NULL, m->mdt_readpage_service);
             if (rc)
                     GOTO(err_mdt_svc, rc);
3302
3303         /*
3304          * setattr service configuration.
3305          */
3306         conf = (typeof(conf)) {
3307                 .psc_nbufs            = MDS_NBUFS,
3308                 .psc_bufsize          = MDS_BUFSIZE,
3309                 .psc_max_req_size     = MDS_MAXREQSIZE,
3310                 .psc_max_reply_size   = MDS_MAXREPSIZE,
3311                 .psc_req_portal       = MDS_SETATTR_PORTAL,
3312                 .psc_rep_portal       = MDC_REPLY_PORTAL,
3313                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3314                 .psc_min_threads   = min(max(mdt_num_threads, MDT_MIN_THREADS),
3315                                        MDT_MAX_THREADS),
3316                 .psc_max_threads   = MDT_MAX_THREADS,
3317                 .psc_ctx_tags      = LCT_MD_THREAD
3318         };
3319
3320         m->mdt_setattr_service =
3321                 ptlrpc_init_svc_conf(&conf, mdt_regular_handle,
3322                                      LUSTRE_MDT_NAME "_setattr",
3323                                      procfs_entry, NULL, "mdt_attr");
3324
3325         if (!m->mdt_setattr_service) {
3326                 CERROR("failed to start setattr service\n");
3327                 GOTO(err_mdt_svc, rc = -ENOMEM);
3328         }
3329
3330         rc = ptlrpc_start_threads(NULL, m->mdt_setattr_service);
3331         if (rc)
3332                 GOTO(err_mdt_svc, rc);
3333
3334         /*
3335          * sequence controller service configuration
3336          */
3337         conf = (typeof(conf)) {
3338                 .psc_nbufs = MDS_NBUFS,
3339                 .psc_bufsize = MDS_BUFSIZE,
3340                 .psc_max_req_size = SEQ_MAXREQSIZE,
3341                 .psc_max_reply_size = SEQ_MAXREPSIZE,
3342                 .psc_req_portal = SEQ_CONTROLLER_PORTAL,
3343                 .psc_rep_portal = MDC_REPLY_PORTAL,
3344                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3345                 .psc_min_threads = SEQ_NUM_THREADS,
3346                 .psc_max_threads = SEQ_NUM_THREADS,
3347                 .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
3348         };
3349
3350         m->mdt_mdsc_service =
3351                 ptlrpc_init_svc_conf(&conf, mdt_mdsc_handle,
3352                                      LUSTRE_MDT_NAME"_mdsc",
3353                                      procfs_entry, NULL, "mdt_mdsc");
3354         if (!m->mdt_mdsc_service) {
3355                 CERROR("failed to start seq controller service\n");
3356                 GOTO(err_mdt_svc, rc = -ENOMEM);
3357         }
3358
3359         rc = ptlrpc_start_threads(NULL, m->mdt_mdsc_service);
3360         if (rc)
3361                 GOTO(err_mdt_svc, rc);
3362
3363         /*
3364          * metadata sequence server service configuration
3365          */
3366         conf = (typeof(conf)) {
3367                 .psc_nbufs = MDS_NBUFS,
3368                 .psc_bufsize = MDS_BUFSIZE,
3369                 .psc_max_req_size = SEQ_MAXREQSIZE,
3370                 .psc_max_reply_size = SEQ_MAXREPSIZE,
3371                 .psc_req_portal = SEQ_METADATA_PORTAL,
3372                 .psc_rep_portal = MDC_REPLY_PORTAL,
3373                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3374                 .psc_min_threads = SEQ_NUM_THREADS,
3375                 .psc_max_threads = SEQ_NUM_THREADS,
3376                 .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
3377         };
3378
3379         m->mdt_mdss_service =
3380                 ptlrpc_init_svc_conf(&conf, mdt_mdss_handle,
3381                                      LUSTRE_MDT_NAME"_mdss",
3382                                      procfs_entry, NULL, "mdt_mdss");
3383         if (!m->mdt_mdss_service) {
3384                 CERROR("failed to start metadata seq server service\n");
3385                 GOTO(err_mdt_svc, rc = -ENOMEM);
3386         }
3387
3388         rc = ptlrpc_start_threads(NULL, m->mdt_mdss_service);
3389         if (rc)
3390                 GOTO(err_mdt_svc, rc);
3391
3392
3393         /*
3394          * Data sequence server service configuration. We want a truly
3395          * cluster-wide sequence space, which is why only one sequence
3396          * controller manages it.
3397          */
3398         conf = (typeof(conf)) {
3399                 .psc_nbufs = MDS_NBUFS,
3400                 .psc_bufsize = MDS_BUFSIZE,
3401                 .psc_max_req_size = SEQ_MAXREQSIZE,
3402                 .psc_max_reply_size = SEQ_MAXREPSIZE,
3403                 .psc_req_portal = SEQ_DATA_PORTAL,
3404                 .psc_rep_portal = OSC_REPLY_PORTAL,
3405                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3406                 .psc_min_threads = SEQ_NUM_THREADS,
3407                 .psc_max_threads = SEQ_NUM_THREADS,
3408                 .psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
3409         };
3410
3411         m->mdt_dtss_service =
3412                 ptlrpc_init_svc_conf(&conf, mdt_dtss_handle,
3413                                      LUSTRE_MDT_NAME"_dtss",
3414                                      procfs_entry, NULL, "mdt_dtss");
3415         if (!m->mdt_dtss_service) {
3416                 CERROR("failed to start data seq server service\n");
3417                 GOTO(err_mdt_svc, rc = -ENOMEM);
3418         }
3419
3420         rc = ptlrpc_start_threads(NULL, m->mdt_dtss_service);
3421         if (rc)
3422                 GOTO(err_mdt_svc, rc);
3423
3424         /* FLD service start */
3425         conf = (typeof(conf)) {
3426                 .psc_nbufs            = MDS_NBUFS,
3427                 .psc_bufsize          = MDS_BUFSIZE,
3428                 .psc_max_req_size     = FLD_MAXREQSIZE,
3429                 .psc_max_reply_size   = FLD_MAXREPSIZE,
3430                 .psc_req_portal       = FLD_REQUEST_PORTAL,
3431                 .psc_rep_portal       = MDC_REPLY_PORTAL,
3432                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3433                 .psc_min_threads      = FLD_NUM_THREADS,
3434                 .psc_max_threads      = FLD_NUM_THREADS,
3435                 .psc_ctx_tags         = LCT_DT_THREAD|LCT_MD_THREAD
3436         };
3437
3438         m->mdt_fld_service =
3439                 ptlrpc_init_svc_conf(&conf, mdt_fld_handle,
3440                                      LUSTRE_MDT_NAME"_fld",
3441                                      procfs_entry, NULL, "mdt_fld");
3442         if (!m->mdt_fld_service) {
3443                 CERROR("failed to start fld service\n");
3444                 GOTO(err_mdt_svc, rc = -ENOMEM);
3445         }
3446
3447         rc = ptlrpc_start_threads(NULL, m->mdt_fld_service);
3448         if (rc)
3449                 GOTO(err_mdt_svc, rc);
3450
3451         /*
3452          * MDS-MDS service configuration. A separate portal is used so that
3453          * MDS-MDS requests are not blocked during recovery.
3454          */
3455         conf = (typeof(conf)) {
3456                 .psc_nbufs            = MDS_NBUFS,
3457                 .psc_bufsize          = MDS_BUFSIZE,
3458                 .psc_max_req_size     = MDS_MAXREQSIZE,
3459                 .psc_max_reply_size   = MDS_MAXREPSIZE,
3460                 .psc_req_portal       = MDS_MDS_PORTAL,
3461                 .psc_rep_portal       = MDC_REPLY_PORTAL,
3462                 .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
3463                 .psc_min_threads      = min(max(mdt_num_threads, MDT_MIN_THREADS),
3464                                             MDT_MAX_THREADS),
3465                 .psc_max_threads      = MDT_MAX_THREADS,
3466                 .psc_ctx_tags         = LCT_MD_THREAD
3467         };
3468         m->mdt_xmds_service = ptlrpc_init_svc_conf(&conf, mdt_xmds_handle,
3469                                                   LUSTRE_MDT_NAME "_mds",
3470                                                   procfs_entry, NULL, "mdt_xmds");
3471
3472         if (m->mdt_xmds_service == NULL) {
3473                 CERROR("failed to start xmds service\n");
3474                 GOTO(err_mdt_svc, rc = -ENOMEM);
3475         }
3476
3477         rc = ptlrpc_start_threads(NULL, m->mdt_xmds_service);
3478         if (rc)
3479                 GOTO(err_mdt_svc, rc);
3480
3481         EXIT;
3482 err_mdt_svc:
3483         if (rc)
3484                 mdt_stop_ptlrpc_service(m);
3485
3486         return rc;
3487 }
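
/*
 * A hedged sketch (not compiled) of the recurring pattern used for every
 * service above: fill in a ptlrpc_service_conf, register the service on its
 * portal, then start its thread pool.  The service name, proc name and the
 * fixed thread counts below are hypothetical; all other identifiers are the
 * ones used in mdt_start_ptlrpc_service().
 */
#if 0
static int mdt_start_example_service(struct mdt_device *m,
                                     cfs_proc_dir_entry_t *procfs_entry)
{
        struct ptlrpc_service_conf conf = {
                .psc_nbufs            = MDS_NBUFS,
                .psc_bufsize          = MDS_BUFSIZE,
                .psc_max_req_size     = MDS_MAXREQSIZE,
                .psc_max_reply_size   = MDS_MAXREPSIZE,
                .psc_req_portal       = MDS_REQUEST_PORTAL, /* pick a portal */
                .psc_rep_portal       = MDC_REPLY_PORTAL,
                .psc_watchdog_timeout = MDT_SERVICE_WATCHDOG_TIMEOUT,
                .psc_min_threads      = MDT_MIN_THREADS,
                .psc_max_threads      = MDT_MAX_THREADS,
                .psc_ctx_tags         = LCT_MD_THREAD
        };
        struct ptlrpc_service *svc;
        int rc;

        /* 1. Register the service under a name and proc entry... */
        svc = ptlrpc_init_svc_conf(&conf, mdt_regular_handle,
                                   LUSTRE_MDT_NAME "_example",
                                   procfs_entry, NULL, "mdt_expl");
        if (svc == NULL)
                return -ENOMEM;

        /* 2. ...then start its thread pool, cleaning up on failure. */
        rc = ptlrpc_start_threads(NULL, svc);
        if (rc)
                ptlrpc_unregister_service(svc);
        return rc;
}
#endif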
3488
3489 static void mdt_stack_fini(const struct lu_env *env,
3490                            struct mdt_device *m, struct lu_device *top)
3491 {
3492         struct lu_device        *d = top, *n;
3493         struct obd_device       *obd = m->mdt_md_dev.md_lu_dev.ld_obd;
3494         struct lustre_cfg_bufs  *bufs;
3495         struct lustre_cfg       *lcfg;
3496         struct mdt_thread_info  *info;
3497         char flags[3]="";
3498         ENTRY;
3499
3500         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3501         LASSERT(info != NULL);
3502
3503         bufs = &info->mti_u.bufs;
3504         /* process cleanup, pass mdt obd name to get obd umount flags */
3505         lustre_cfg_bufs_reset(bufs, obd->obd_name);
3506         if (obd->obd_force)
3507                 strcat(flags, "F");
3508         if (obd->obd_fail)
3509                 strcat(flags, "A");
3510         lustre_cfg_bufs_set_string(bufs, 1, flags);
3511         lcfg = lustre_cfg_new(LCFG_CLEANUP, bufs);
3512         if (!lcfg) {
3513                 CERROR("Cannot alloc lcfg!\n");
3514                 return;
3515         }
3516
3517         LASSERT(top);
3518         top->ld_ops->ldo_process_config(env, top, lcfg);
3519         lustre_cfg_free(lcfg);
3520
3521         lu_site_purge(env, top->ld_site, ~0);
3522         while (d != NULL) {
3523                 struct lu_device_type *ldt = d->ld_type;
3524
3525                 /* each fini() returns the next device in the stack of
3526                  * layers, so we can avoid recursion */
3527                 n = ldt->ldt_ops->ldto_device_fini(env, d);
3528                 lu_device_put(d);
3529
3530                 /* switch to the next device in the layer */
3531                 d = n;
3532         }
3533  
3534         /* purge again. */
3535         lu_site_purge(env, top->ld_site, ~0);
3536
3537         if (!list_empty(&top->ld_site->ls_lru) || top->ld_site->ls_total != 0) {
3538                 /*
3539                  * Uh-oh, objects still exist.
3540                  */
3541                 static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
3542
3543                 lu_site_print(env, top->ld_site, &cookie, lu_cdebug_printer);
3544         }
3545
3546         d = top;
3547         while (d != NULL) {
3548                 struct obd_type *type;
3549                 struct lu_device_type *ldt = d->ld_type;
3550
3551                 /* each free() returns the next device in the stack of
3552                  * layers, so we can avoid recursion */
3553                 n = ldt->ldt_ops->ldto_device_free(env, d);
3554                 type = ldt->ldt_obd_type;
3555                 type->typ_refcnt--;
3556                 class_put_type(type);
3557
3558                 /* switch to the next device in the layer */
3559                 d = n;
3560         }
3561         m->mdt_child = NULL;
3562         m->mdt_bottom = NULL;
3563 }
3564
3565 static struct lu_device *mdt_layer_setup(const struct lu_env *env,
3566                                          const char *typename,
3567                                          struct lu_device *child,
3568                                          struct lustre_cfg *cfg)
3569 {
3570         const char            *dev = lustre_cfg_string(cfg, 0);
3571         struct obd_type       *type;
3572         struct lu_device_type *ldt;
3573         struct lu_device      *d;
3574         int rc;
3575         ENTRY;
3576
3577         /* find the type */
3578         type = class_get_type(typename);
3579         if (!type) {
3580                 CERROR("Unknown type: '%s'\n", typename);
3581                 GOTO(out, rc = -ENODEV);
3582         }
3583
3584         rc = lu_context_refill(&env->le_ctx);
3585         if (rc != 0) {
3586                 CERROR("Failure to refill context: '%d'\n", rc);
3587                 GOTO(out_type, rc);
3588         }
3589
3590         if (env->le_ses != NULL) {
3591                 rc = lu_context_refill(env->le_ses);
3592                 if (rc != 0) {
3593                         CERROR("Failure to refill session: '%d'\n", rc);
3594                         GOTO(out_type, rc);
3595                 }
3596         }
3597
3598         ldt = type->typ_lu;
3599         if (ldt == NULL) {
3600                 CERROR("type '%s' has no lu device type\n", typename);
3601                 GOTO(out_type, rc = -EINVAL);
3602         }
3603
3604         ldt->ldt_obd_type = type;
3605         d = ldt->ldt_ops->ldto_device_alloc(env, ldt, cfg);
3606         if (IS_ERR(d)) {
3607                 CERROR("Cannot allocate device: '%s'\n", typename);
3608                 GOTO(out_type, rc = -ENODEV);
3609         }
3610
3611         LASSERT(child->ld_site);
3612         d->ld_site = child->ld_site;
3613
3614         type->typ_refcnt++;
3615         rc = ldt->ldt_ops->ldto_device_init(env, d, dev, child);
3616         if (rc) {
3617                 CERROR("can't init device '%s', rc %d\n", typename, rc);
3618                 GOTO(out_alloc, rc);
3619         }
3620         lu_device_get(d);
3621
3622         RETURN(d);
3623
3624 out_alloc:
3625         ldt->ldt_ops->ldto_device_free(env, d);
3626         type->typ_refcnt--;
3627 out_type:
3628         class_put_type(type);
3629 out:
3630         return ERR_PTR(rc);
3631 }
3632
3633 static int mdt_stack_init(const struct lu_env *env,
3634                           struct mdt_device *m, struct lustre_cfg *cfg)
3635 {
3636         struct lu_device  *d = &m->mdt_md_dev.md_lu_dev;
3637         struct lu_device  *tmp;
3638         struct md_device  *md;
3639         int rc;
3640         ENTRY;
3641
3642         /* init the stack */
3643         tmp = mdt_layer_setup(env, LUSTRE_OSD_NAME, d, cfg);
3644         if (IS_ERR(tmp)) {
3645                 RETURN(PTR_ERR(tmp));
3646         }
3647         m->mdt_bottom = lu2dt_dev(tmp);
3648         d = tmp;
3649         tmp = mdt_layer_setup(env, LUSTRE_MDD_NAME, d, cfg);
3650         if (IS_ERR(tmp)) {
3651                 GOTO(out, rc = PTR_ERR(tmp));
3652         }
3653         d = tmp;
3654         md = lu2md_dev(d);
3655
3656         tmp = mdt_layer_setup(env, LUSTRE_CMM_NAME, d, cfg);
3657         if (IS_ERR(tmp)) {
3658                 GOTO(out, rc = PTR_ERR(tmp));
3659         }
3660         d = tmp;
3661         /* set mdd upcall device */
3662         md_upcall_dev_set(md, lu2md_dev(d));
3663
3664         md = lu2md_dev(d);
3665         /* set cmm upcall device */
3666         md_upcall_dev_set(md, &m->mdt_md_dev);
3667
3668         m->mdt_child = lu2md_dev(d);
3669
3670         /* process setup config */
3671         tmp = &m->mdt_md_dev.md_lu_dev;
3672         rc = tmp->ld_ops->ldo_process_config(env, tmp, cfg);
3673         GOTO(out, rc);
3674 out:
3675         /* fini from last known good lu_device */
3676         if (rc)
3677                 mdt_stack_fini(env, m, d);
3678
3679         return rc;
3680 }
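
/*
 * Descriptive note: the device stack built by mdt_stack_init() above, top to
 * bottom, with the upcall wiring it performs:
 *
 *      mdt  (this device)    <-- cmm upcalls land here
 *       |
 *      cmm  (m->mdt_child)   <-- mdd upcalls land here
 *       |
 *      mdd
 *       |
 *      osd  (m->mdt_bottom)
 *
 * mdt_stack_fini() walks the same stack downwards through ldto_device_fini()
 * and ldto_device_free().
 */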
3681
3682 static void mdt_fini(const struct lu_env *env, struct mdt_device *m)
3683 {
3684         struct md_device *next = m->mdt_child;
3685         struct lu_device *d    = &m->mdt_md_dev.md_lu_dev;
3686         struct lu_site   *ls   = d->ld_site;
3687         struct obd_device *obd = m->mdt_md_dev.md_lu_dev.ld_obd;
3688         ENTRY;
3689
3690         ping_evictor_stop();
3691
3692         target_recovery_fini(obd);
3693         mdt_stop_ptlrpc_service(m);
3694
3695         mdt_fs_cleanup(env, m);
3696
3697         upcall_cache_cleanup(m->mdt_identity_cache);
3698         m->mdt_identity_cache = NULL;
3699
3700         if (m->mdt_namespace != NULL) {
3701                 ldlm_namespace_free(m->mdt_namespace, d->ld_obd->obd_force);
3702                 d->ld_obd->obd_namespace = m->mdt_namespace = NULL;
3703         }
3704
3705         mdt_seq_fini(env, m);
3706         mdt_seq_fini_cli(m);
3707         mdt_fld_fini(env, m);
3708         mdt_procfs_fini(m);
3709         ptlrpc_lprocfs_unregister_obd(d->ld_obd);
3710         lprocfs_obd_cleanup(d->ld_obd);
3711
3712         sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
3713
3714         next->md_ops->mdo_init_capa_ctxt(env, next, 0, 0, 0, NULL);
3715         del_timer(&m->mdt_ck_timer);
3716         mdt_ck_thread_stop(m);
3717
3718         /* finish the stack */
3719         mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
3720
3721         if (ls) {
3722                 lu_site_fini(ls);
3723                 OBD_FREE_PTR(ls);
3724                 d->ld_site = NULL;
3725         }
3726         LASSERT(atomic_read(&d->ld_ref) == 0);
3727         md_device_fini(&m->mdt_md_dev);
3728
3729         EXIT;
3730 }
3731
3732 static void fsoptions_to_mdt_flags(struct mdt_device *m, char *options)
3733 {
3734         char *p = options;
3735
3736 #ifdef CONFIG_FS_POSIX_ACL
3737         /* ACLs should be enabled by default (b=13829) */
3738         m->mdt_opts.mo_acl = 1;
3739         LCONSOLE_INFO("Enabling ACL\n");
3740 #else
3741         m->mdt_opts.mo_acl = 0;
3742         LCONSOLE_INFO("Disabling ACL\n");
3743 #endif
3744
3745         if (!options)
3746                 return;
3747
3748         while (*options) {
3749                 int len;
3750
3751                 while (*p && *p != ',')
3752                         p++;
3753
3754                 len = p - options;
3755                 if ((len == sizeof("user_xattr") - 1) &&
3756                     (memcmp(options, "user_xattr", len) == 0)) {
3757                         m->mdt_opts.mo_user_xattr = 1;
3758                         LCONSOLE_INFO("Enabling user_xattr\n");
3759                 } else if ((len == sizeof("nouser_xattr") - 1) &&
3760                            (memcmp(options, "nouser_xattr", len) == 0)) {
3761                         m->mdt_opts.mo_user_xattr = 0;
3762                         LCONSOLE_INFO("Disabling user_xattr\n");
3763                 } else if ((len == sizeof("noacl") - 1) &&
3764                            (memcmp(options, "noacl", len) == 0)) {
3765                         m->mdt_opts.mo_acl = 0;
3766                         LCONSOLE_INFO("Disabling ACL\n");
3767                 }
3768
                     if (*p == '\0')
                             break;
3769                 options = ++p;
3770         }
3771 }
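
/*
 * A minimal usage sketch (not compiled; the option string is made up) of how
 * a typical lmd_opts string is folded into mdt_opts by the parser above.
 */
#if 0
static void fsoptions_example(struct mdt_device *m)
{
        char opts[] = "user_xattr,noacl";

        fsoptions_to_mdt_flags(m, opts);
        /* With CONFIG_FS_POSIX_ACL, ACLs start out enabled and "noacl"
         * clears them again, while "user_xattr" turns user xattrs on. */
        LASSERT(m->mdt_opts.mo_user_xattr == 1);
        LASSERT(m->mdt_opts.mo_acl == 0);
}
#endif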
3772
3773 int mdt_postrecov(const struct lu_env *, struct mdt_device *);
3774
3775 static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
3776                      struct lu_device_type *ldt, struct lustre_cfg *cfg)
3777 {
3778         struct lprocfs_static_vars lvars;
3779         struct mdt_thread_info    *info;
3780         struct obd_device         *obd;
3781         const char                *dev = lustre_cfg_string(cfg, 0);
3782         const char                *num = lustre_cfg_string(cfg, 2);
3783         struct lustre_mount_info  *lmi;
3784         struct lustre_sb_info     *lsi;
3785         struct lu_site            *s;
3786         const char                *identity_upcall = "NONE";
3787         int                        rc;
3788         ENTRY;
3789
3790         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3791         LASSERT(info != NULL);
3792
3793         obd = class_name2obd(dev);
3794         LASSERT(obd != NULL);
3795
3796         spin_lock_init(&m->mdt_transno_lock);
3797
3798         m->mdt_max_mdsize = MAX_MD_SIZE;
3799         m->mdt_max_cookiesize = sizeof(struct llog_cookie);
3800
3801         m->mdt_opts.mo_user_xattr = 0;
3802         m->mdt_opts.mo_acl = 0;
3803         lmi = server_get_mount_2(dev);
3804         if (lmi == NULL) {
3805                 CERROR("Cannot get mount info for %s!\n", dev);
3806                 RETURN(-EFAULT);
3807         } else {
3808                 lsi = s2lsi(lmi->lmi_sb);
3809                 fsoptions_to_mdt_flags(m, lsi->lsi_lmd->lmd_opts);
3810                 server_put_mount_2(dev, lmi->lmi_mnt);
3811         }
3812
3813         m->mdt_sptlrpc_lock = RW_LOCK_UNLOCKED;
3814         sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset);
3815
3816         spin_lock_init(&m->mdt_ioepoch_lock);
3817         m->mdt_opts.mo_compat_resname = 0;
3818         m->mdt_capa_timeout = CAPA_TIMEOUT;
3819         m->mdt_capa_alg = CAPA_HMAC_ALG_SHA1;
3820         m->mdt_ck_timeout = CAPA_KEY_TIMEOUT;
3821
3822         spin_lock_init(&m->mdt_client_bitmap_lock);
3823
3824         OBD_ALLOC_PTR(s);
3825         if (s == NULL)
3826                 RETURN(-ENOMEM);
3827
3828         md_device_init(&m->mdt_md_dev, ldt);
3829         m->mdt_md_dev.md_lu_dev.ld_ops = &mdt_lu_ops;
3830         m->mdt_md_dev.md_lu_dev.ld_obd = obd;
3831         /* set this lu_device in obd, because error handling needs it */
3832         obd->obd_lu_dev = &m->mdt_md_dev.md_lu_dev;
3833
3834         rc = lu_site_init(s, &m->mdt_md_dev.md_lu_dev);
3835         if (rc) {
3836                 CERROR("Can't init lu_site, rc %d\n", rc);
3837                 GOTO(err_free_site, rc);
3838         }
3839
3840         lprocfs_mdt_init_vars(&lvars);
3841         rc = lprocfs_obd_setup(obd, lvars.obd_vars);
3842         if (rc) {
3843                 CERROR("Can't init lprocfs, rc %d\n", rc);
3844                 GOTO(err_fini_site, rc);
3845         }
3846         ptlrpc_lprocfs_register_obd(obd);
3847
3848         rc = mdt_procfs_init(m, dev);
3849         if (rc) {
3850                 CERROR("Can't init MDT lprocfs, rc %d\n", rc);
3851                 GOTO(err_fini_proc, rc);
3852         }
3853
3854         /* set server index */
3855         LASSERT(num);
3856         s->ls_node_id = simple_strtol(num, NULL, 10);
3857
3858         /* Failover is the default.
3859          * FIXME: we do not fail out mds0/mgs, which may cause some problems;
3860          * it is assumed to be the node whose ls_node_id == 0. XXX
3861          */
3862         obd->obd_replayable = 1;
3863         /* No connections are accepted until configuration finishes */
3864         obd->obd_no_conn = 1;
3865
3866         if (cfg->lcfg_bufcount > 4 && LUSTRE_CFG_BUFLEN(cfg, 4) > 0) {
3867                 char *str = lustre_cfg_string(cfg, 4);
3868                 if (strchr(str, 'n')) {
3869                         CWARN("%s: recovery disabled\n", obd->obd_name);
3870                         obd->obd_replayable = 0;
3871                 }
3872         }
3873
3874         /* init the stack */
3875         rc = mdt_stack_init(env, m, cfg);
3876         if (rc) {
3877                 CERROR("Can't init device stack, rc %d\n", rc);
3878                 GOTO(err_fini_proc, rc);
3879         }
3880
3881         rc = mdt_fld_init(env, obd->obd_name, m);
3882         if (rc)
3883                 GOTO(err_fini_stack, rc);
3884
3885         rc = mdt_seq_init(env, obd->obd_name, m);
3886         if (rc)
3887                 GOTO(err_fini_fld, rc);
3888
3889         snprintf(info->mti_u.ns_name, sizeof info->mti_u.ns_name,
3890                  LUSTRE_MDT_NAME"-%p", m);
3891         m->mdt_namespace = ldlm_namespace_new(info->mti_u.ns_name,
3892                                               LDLM_NAMESPACE_SERVER,
3893                                               LDLM_NAMESPACE_GREEDY);
3894         if (m->mdt_namespace == NULL)
3895                 GOTO(err_fini_seq, rc = -ENOMEM);
3896
3897         ldlm_register_intent(m->mdt_namespace, mdt_intent_policy);
3898         /* set obd_namespace for compatibility with old code */
3899         obd->obd_namespace = m->mdt_namespace;
3900
3901         /* XXX: to support suppgid for ACL, we enable identity_upcall
3902          * by default; otherwise clients may get an unexpected -EACCES. */
3903         if (m->mdt_opts.mo_acl)
3904                 identity_upcall = MDT_IDENTITY_UPCALL_PATH;
3905
3906         m->mdt_identity_cache = upcall_cache_init(obd->obd_name, identity_upcall,
3907                                                   &mdt_identity_upcall_cache_ops);
3908         if (IS_ERR(m->mdt_identity_cache)) {
3909                 rc = PTR_ERR(m->mdt_identity_cache);
3910                 m->mdt_identity_cache = NULL;
3911                 GOTO(err_free_ns, rc);
3912         }
3913
3914         m->mdt_ck_timer.function = mdt_ck_timer_callback;
3915         m->mdt_ck_timer.data = (unsigned long)m;
3916         init_timer(&m->mdt_ck_timer);
3917         rc = mdt_ck_thread_start(m);
3918         if (rc)
3919                 GOTO(err_free_ns, rc);
3920
3921         rc = mdt_fs_setup(env, m, obd);
3922         if (rc)
3923                 GOTO(err_capa, rc);
3924
3925         target_recovery_init(obd, mdt_recovery_handle);
3926
3927         rc = mdt_start_ptlrpc_service(m);
3928         if (rc)
3929                 GOTO(err_fs_cleanup, rc);
3930
3931         ping_evictor_start();
3932
3933         rc = lu_site_init_finish(s);
3934         if (rc)
3935                 GOTO(err_stop_service, rc);
3936
3937         if (obd->obd_recovering == 0)
3938                 mdt_postrecov(env, m);
3939
3940         mdt_init_capa_ctxt(env, m);
3941
3942         if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
3943                 ldlm_timeout = 6;
3944
3945         RETURN(0);
3946
3947 err_stop_service:
3948         ping_evictor_stop();
3949         mdt_stop_ptlrpc_service(m);
3950 err_fs_cleanup:
3951         target_recovery_fini(obd);
3952         mdt_fs_cleanup(env, m);
3953 err_capa:
3954         del_timer(&m->mdt_ck_timer);
3955         mdt_ck_thread_stop(m);
3956 err_free_ns:
3957         upcall_cache_cleanup(m->mdt_identity_cache);
3958         m->mdt_identity_cache = NULL;
3959         ldlm_namespace_free(m->mdt_namespace, 0);
3960         obd->obd_namespace = m->mdt_namespace = NULL;
3961 err_fini_seq:
3962         mdt_seq_fini(env, m);
3963 err_fini_fld:
3964         mdt_fld_fini(env, m);
3965 err_fini_stack:
3966         mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
3967 err_fini_proc:
3968         mdt_procfs_fini(m);
3969         lprocfs_obd_cleanup(obd);
3970 err_fini_site:
3971         lu_site_fini(s);
3972 err_free_site:
3973         OBD_FREE_PTR(s);
3974
3975         md_device_fini(&m->mdt_md_dev);
3976         return (rc);
3977 }
3978
3979 /* used by MGS to process specific configurations */
3980 static int mdt_process_config(const struct lu_env *env,
3981                               struct lu_device *d, struct lustre_cfg *cfg)
3982 {
3983         struct mdt_device *m = mdt_dev(d);
3984         struct md_device *md_next = m->mdt_child;
3985         struct lu_device *next = md2lu_dev(md_next);
3986         int rc = 0;
3987         ENTRY;
3988
3989         switch (cfg->lcfg_command) {
3990         case LCFG_SPTLRPC_CONF: {
3991                 struct sptlrpc_conf_log *log;
3992                 struct sptlrpc_rule_set  tmp_rset;
3993
3994                 log = sptlrpc_conf_log_extract(cfg);
3995                 if (IS_ERR(log)) {
3996                         rc = PTR_ERR(log);
3997                         break;
3998                 }
3999
4000                 sptlrpc_rule_set_init(&tmp_rset);
4001
4002                 rc = sptlrpc_rule_set_from_log(&tmp_rset, log);
4003                 if (rc) {
4004                         CERROR("mdt %p: failed to get sptlrpc rules: %d\n", m, rc);
4005                         break;
4006                 }
4007
4008                 write_lock(&m->mdt_sptlrpc_lock);
4009                 sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
4010                 m->mdt_sptlrpc_rset = tmp_rset;
4011                 write_unlock(&m->mdt_sptlrpc_lock);
4012
4013                 sptlrpc_target_update_exp_flavor(
4014                                 md2lu_dev(&m->mdt_md_dev)->ld_obd, &tmp_rset);
4015
4016                 break;
4017         }
4018         case LCFG_PARAM: {
4019                 struct lprocfs_static_vars lvars;
4020                 struct obd_device *obd = d->ld_obd;
4021
4022                 lprocfs_mdt_init_vars(&lvars);
4023                 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, cfg, obd);
4024                 if (rc)
4025                         /* others are passed further */
4026                         rc = next->ld_ops->ldo_process_config(env, next, cfg);
4027                 break;
4028         }
4029         case LCFG_ADD_MDC:
4030                 /*
4031                  * Add an mdc hook to get the first MDT uuid and connect it
4032                  * to ls->controller for use by the seq manager.
4033                  */
4034                 rc = next->ld_ops->ldo_process_config(env, next, cfg);
4035                 if (rc)
4036                         CERROR("Can't add mdc, rc %d\n", rc);
4037                 else
4038                         rc = mdt_seq_init_cli(env, mdt_dev(d), cfg);
4039                 break;
4040         default:
4041                 /* others are passed further */
4042                 rc = next->ld_ops->ldo_process_config(env, next, cfg);
4043                 break;
4044         }
4045         RETURN(rc);
4046 }
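
/*
 * Summary of the dispatch above (descriptive only):
 *
 *   LCFG_SPTLRPC_CONF  handled entirely here: swap in the new rule set and
 *                      update export flavors; not passed to the child.
 *   LCFG_PARAM         tried against the MDT proc params first; anything not
 *                      handled there is passed down the stack.
 *   LCFG_ADD_MDC       passed down first, then used to set up the sequence
 *                      controller client (mdt_seq_init_cli).
 *   everything else    passed straight to the next device.
 */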
4047
4048 static struct lu_object *mdt_object_alloc(const struct lu_env *env,
4049                                           const struct lu_object_header *hdr,
4050                                           struct lu_device *d)
4051 {
4052         struct mdt_object *mo;
4053
4054         ENTRY;
4055
4056         OBD_ALLOC_PTR(mo);
4057         if (mo != NULL) {
4058                 struct lu_object *o;
4059                 struct lu_object_header *h;
4060
4061                 o = &mo->mot_obj.mo_lu;
4062                 h = &mo->mot_header;
4063                 lu_object_header_init(h);
4064                 lu_object_init(o, h, d);
4065                 lu_object_add_top(h, o);
4066                 o->lo_ops = &mdt_obj_ops;
4067                 RETURN(o);
4068         } else
4069                 RETURN(NULL);
4070 }
4071
4072 static int mdt_object_init(const struct lu_env *env, struct lu_object *o)
4073 {
4074         struct mdt_device *d = mdt_dev(o->lo_dev);
4075         struct lu_device  *under;
4076         struct lu_object  *below;
4077         int                rc = 0;
4078         ENTRY;
4079
4080         CDEBUG(D_INFO, "object init, fid = "DFID"\n",
4081                PFID(lu_object_fid(o)));
4082
4083         under = &d->mdt_child->md_lu_dev;
4084         below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
4085         if (below != NULL) {
4086                 lu_object_add(o, below);
4087         } else
4088                 rc = -ENOMEM;
4089
4090         RETURN(rc);
4091 }
4092
4093 static void mdt_object_free(const struct lu_env *env, struct lu_object *o)
4094 {
4095         struct mdt_object *mo = mdt_obj(o);
4096         struct lu_object_header *h;
4097         ENTRY;
4098
4099         h = o->lo_header;
4100         CDEBUG(D_INFO, "object free, fid = "DFID"\n",
4101                PFID(lu_object_fid(o)));
4102
4103         lu_object_fini(o);
4104         lu_object_header_fini(h);
4105         OBD_FREE_PTR(mo);
4106         EXIT;
4107 }
4108
4109 static int mdt_object_print(const struct lu_env *env, void *cookie,
4110                             lu_printer_t p, const struct lu_object *o)
4111 {
4112         return (*p)(env, cookie, LUSTRE_MDT_NAME"-object@%p", o);
4113 }
4114
4115 static struct lu_device_operations mdt_lu_ops = {
4116         .ldo_object_alloc   = mdt_object_alloc,
4117         .ldo_process_config = mdt_process_config
4118 };
4119
4120 static struct lu_object_operations mdt_obj_ops = {
4121         .loo_object_init    = mdt_object_init,
4122         .loo_object_free    = mdt_object_free,
4123         .loo_object_print   = mdt_object_print
4124 };
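
/*
 * Descriptive note on the object lifecycle above: mdt_object_alloc() creates
 * only the top (mdt) slice together with the shared header, both embedded in
 * struct mdt_object; mdt_object_init() then asks the child device to allocate
 * the slice below it, so a fully set up object mirrors the device stack.
 * mdt_object_free() tears down the top slice and the header; lower slices are
 * released by their own loo_object_free() methods via the generic lu code.
 */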
4125
4126 /* mds_connect_internal */
4127 static int mdt_connect_internal(struct obd_export *exp,
4128                                 struct mdt_device *mdt,
4129                                 struct obd_connect_data *data)
4130 {
4131         __u64 flags;
4132
4133         if (data != NULL) {
4134                 data->ocd_connect_flags &= MDT_CONNECT_SUPPORTED;
4135                 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
4136
4137                 /* If no bits are known (which should not happen, as every
4138                    client should support at least the LOOKUP and UPDATE bits),
4139                    revert to compatibility mode with plain locks. */
4140                 if (!data->ocd_ibits_known &&
4141                     data->ocd_connect_flags & OBD_CONNECT_IBITS)
4142                         data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
4143
4144                 if (!mdt->mdt_opts.mo_acl)
4145                         data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
4146
4147                 if (!mdt->mdt_opts.mo_user_xattr)
4148                         data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
4149
4150                 if (!mdt->mdt_opts.mo_mds_capa)
4151                         data->ocd_connect_flags &= ~OBD_CONNECT_MDS_CAPA;
4152
4153                 if (!mdt->mdt_opts.mo_oss_capa)
4154                         data->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
4155
4156                 spin_lock(&exp->exp_lock);
4157                 exp->exp_connect_flags = data->ocd_connect_flags;
4158                 spin_unlock(&exp->exp_lock);
4159                 data->ocd_version = LUSTRE_VERSION_CODE;
4160                 exp->exp_mdt_data.med_ibits_known = data->ocd_ibits_known;
4161         }
4162
4163 #if 0
4164         if (mdt->mdt_opts.mo_acl &&
4165             ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
4166                 CWARN("%s: MDS requires ACL support but client does not\n",
4167                       mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
4168                 return -EBADE;
4169         }
4170 #endif
4171
4172         flags = OBD_CONNECT_LCL_CLIENT | OBD_CONNECT_RMT_CLIENT;
4173         if ((exp->exp_connect_flags & flags) == flags) {
4174                 CWARN("%s: both local and remote client flags are set\n",
4175                       mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
4176                 return -EBADE;
4177         }
4178
4179         if (mdt->mdt_opts.mo_mds_capa &&
4180             ((exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) == 0)) {
4181                 CWARN("%s: MDS requires capability support, but client does not\n",
4182                       mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
4183                 return -EBADE;
4184         }
4185
4186         if (mdt->mdt_opts.mo_oss_capa &&
4187             ((exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA) == 0)) {
4188                 CWARN("%s: MDS requires OSS capability support, "
4189                       "but client does not\n",
4190                       mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
4191                 return -EBADE;
4192         }
4193
4194         if ((exp->exp_connect_flags & OBD_CONNECT_FID) == 0) {
4195                 CWARN("%s: MDS requires FID support, but client does not\n",
4196                       mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
4197                 return -EBADE;
4198         }
4199
4200         return 0;
4201 }
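
/*
 * A hedged sketch (not compiled) of the negotiation above.  The client flags
 * below are hypothetical; the point is that the server strips what it does
 * not support rather than failing, except for the hard FID requirement.
 */
#if 0
static int mdt_connect_negotiation_example(struct obd_export *exp,
                                           struct mdt_device *mdt,
                                           struct obd_connect_data *data)
{
        /* Suppose the client asks for ACLs, user xattrs and inodebit locks. */
        data->ocd_connect_flags = OBD_CONNECT_ACL | OBD_CONNECT_XATTR |
                                  OBD_CONNECT_IBITS | OBD_CONNECT_FID;
        data->ocd_ibits_known   = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;

        /* If the MDT was mounted with "noacl", OBD_CONNECT_ACL is silently
         * dropped from the reply while the other flags survive.  A client
         * that does not set OBD_CONNECT_FID is rejected with -EBADE. */
        mdt->mdt_opts.mo_acl = 0;
        return mdt_connect_internal(exp, mdt, data);
}
#endif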
4202
4203 /* mds_connect copy */
4204 static int mdt_obd_connect(const struct lu_env *env,
4205                            struct lustre_handle *conn, struct obd_device *obd,
4206                            struct obd_uuid *cluuid,
4207                            struct obd_connect_data *data)
4208 {
4209         struct mdt_thread_info *info;
4210         struct mdt_client_data *mcd;
4211         struct obd_export      *exp;
4212         struct mdt_device      *mdt;
4213         struct ptlrpc_request  *req;
4214         int                     rc;
4215         ENTRY;
4216
4217         LASSERT(env != NULL);
4218         if (!conn || !obd || !cluuid)
4219                 RETURN(-EINVAL);
4220
4221         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
4222         req = info->mti_pill->rc_req;
4223         mdt = mdt_dev(obd->obd_lu_dev);
4224
4225         rc = class_connect(conn, obd, cluuid);
4226         if (rc)
4227                 RETURN(rc);
4228
4229         exp = class_conn2export(conn);
4230         LASSERT(exp != NULL);
4231
4232         CDEBUG(D_SEC, "from %s\n", sptlrpc_part2name(req->rq_sp_from));
4233
4234         spin_lock(&exp->exp_lock);
4235         exp->exp_sp_peer = req->rq_sp_from;
4236
4237         read_lock(&mdt->mdt_sptlrpc_lock);
4238         sptlrpc_rule_set_choose(&mdt->mdt_sptlrpc_rset, exp->exp_sp_peer,
4239                                 req->rq_peer.nid, &exp->exp_flvr);
4240         read_unlock(&mdt->mdt_sptlrpc_lock);
4241
4242         if (exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
4243                 CERROR("invalid rpc flavor %x, expect %x, from %s\n",
4244                        req->rq_flvr.sf_rpc, exp->exp_flvr.sf_rpc,
4245                        libcfs_nid2str(req->rq_peer.nid));
4246                 exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
4247                 spin_unlock(&exp->exp_lock);
4248                 RETURN(-EACCES);
4249         }
4250         spin_unlock(&exp->exp_lock);
4251
4252         rc = mdt_connect_internal(exp, mdt, data);
4253         if (rc == 0) {
4254                 OBD_ALLOC_PTR(mcd);
4255                 if (mcd != NULL) {
4256                         struct mdt_thread_info *mti;
4257                         mti = lu_context_key_get(&env->le_ctx,
4258                                                  &mdt_thread_key);
4259                         LASSERT(mti != NULL);
4260                         mti->mti_exp = exp;
4261                         memcpy(mcd->mcd_uuid, cluuid, sizeof mcd->mcd_uuid);
4262                         exp->exp_mdt_data.med_mcd = mcd;
4263                         rc = mdt_client_new(env, mdt);
4264                         if (rc != 0) {
4265                                 OBD_FREE_PTR(mcd);
4266                                 exp->exp_mdt_data.med_mcd = NULL;
4267                         }
4268                 } else
4269                         rc = -ENOMEM;
4270         }
4271
4272         if (rc != 0)
4273                 class_disconnect(exp);
4274         else
4275                 class_export_put(exp);
4276
4277         RETURN(rc);
4278 }
4279
4280 static int mdt_obd_reconnect(const struct lu_env *env,
4281                              struct obd_export *exp, struct obd_device *obd,
4282                              struct obd_uuid *cluuid,
4283                              struct obd_connect_data *data)
4284 {
4285         struct mdt_thread_info *info;
4286         struct mdt_device      *mdt;
4287         struct ptlrpc_request  *req;
4288         int                     rc;
4289         ENTRY;
4290
4291         if (exp == NULL || obd == NULL || cluuid == NULL)
4292                 RETURN(-EINVAL);
4293
4294         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
4295         req = info->mti_pill->rc_req;
4296         mdt = mdt_dev(obd->obd_lu_dev);
4297
4298         CDEBUG(D_SEC, "from %s\n", sptlrpc_part2name(req->rq_sp_from));
4299
4300         spin_lock(&exp->exp_lock);
4301         if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
4302                 exp->exp_sp_peer = req->rq_sp_from;
4303
4304                 read_lock(&mdt->mdt_sptlrpc_lock);
4305                 sptlrpc_rule_set_choose(&mdt->mdt_sptlrpc_rset,
4306                                         exp->exp_sp_peer,
4307                                         req->rq_peer.nid, &exp->exp_flvr);
4308                 read_unlock(&mdt->mdt_sptlrpc_lock);
4309
4310                 if (exp->exp_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
4311                         CERROR("invalid rpc flavor %x, expect %x, from %s\n",
4312                                req->rq_flvr.sf_rpc, exp->exp_flvr.sf_rpc,
4313                                libcfs_nid2str(req->rq_peer.nid));
4314                         exp->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
4315                         spin_unlock(&exp->exp_lock);
4316                         RETURN(-EACCES);
4317                 }
4318         }
4319         spin_unlock(&exp->exp_lock);
4320
4321         rc = mdt_connect_internal(exp, mdt_dev(obd->obd_lu_dev), data);
4322
4323         RETURN(rc);
4324 }
4325
4326 static int mdt_obd_disconnect(struct obd_export *exp)
4327 {
4328         struct mdt_device *mdt = mdt_dev(exp->exp_obd->obd_lu_dev);
4329         int rc;
4330         ENTRY;
4331
4332         LASSERT(exp);
4333         class_export_get(exp);
4334
4335         /* Disconnect early so that clients can't keep using the export. */
4336         rc = class_disconnect(exp);
4337         if (mdt->mdt_namespace != NULL || exp->exp_obd->obd_namespace != NULL)
4338                 ldlm_cancel_locks_for_export(exp);
4339
4340         /* complete all outstanding replies */
4341         spin_lock(&exp->exp_lock);
4342         while (!list_empty(&exp->exp_outstanding_replies)) {
4343                 struct ptlrpc_reply_state *rs =
4344                         list_entry(exp->exp_outstanding_replies.next,
4345                                    struct ptlrpc_reply_state, rs_exp_list);
4346                 struct ptlrpc_service *svc = rs->rs_service;
4347
4348                 spin_lock(&svc->srv_lock);
4349                 list_del_init(&rs->rs_exp_list);
4350                 ptlrpc_schedule_difficult_reply(rs);
4351                 spin_unlock(&svc->srv_lock);
4352         }
4353         spin_unlock(&exp->exp_lock);
4354
4355         class_export_put(exp);
4356         RETURN(rc);
4357 }
4358
4359 /* FIXME: Can we avoid using these two interfaces? */
4360 static int mdt_init_export(struct obd_export *exp)
4361 {
4362         struct mdt_export_data *med = &exp->exp_mdt_data;
4363         ENTRY;
4364
4365         INIT_LIST_HEAD(&med->med_open_head);
4366         spin_lock_init(&med->med_open_lock);
4367         sema_init(&med->med_idmap_sem, 1);
4368         med->med_idmap = NULL;
4369         spin_lock(&exp->exp_lock);
4370         exp->exp_connecting = 1;
4371         spin_unlock(&exp->exp_lock);
4372         RETURN(0);
4373 }
4374
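/*
 * Final teardown of an export: drop the remote-client identity map, close
 * any files the client still held open (which may trigger orphan cleanup),
 * and remove the client's record via mdt_client_del().
 */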
4375 static int mdt_destroy_export(struct obd_export *export)
4376 {
4377         struct mdt_export_data *med;
4378         struct obd_device      *obd = export->exp_obd;
4379         struct mdt_device      *mdt;
4380         struct mdt_thread_info *info;
4381         struct lu_env           env;
4382         struct md_attr         *ma;
4383         int lmm_size;
4384         int cookie_size;
4385         int rc = 0;
4386         ENTRY;
4387
4388         med = &export->exp_mdt_data;
4389         if (med->med_rmtclient)
4390                 mdt_cleanup_idmap(med);
4391
4392         target_destroy_export(export);
4393
4394         if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
4395                 RETURN(0);
4396
4397         mdt = mdt_dev(obd->obd_lu_dev);
4398         LASSERT(mdt != NULL);
4399
4400         rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
4401         if (rc)
4402                 RETURN(rc);
4403
4404         info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
4405         LASSERT(info != NULL);
4406         memset(info, 0, sizeof *info);
4407         info->mti_env = &env;
4408         info->mti_mdt = mdt;
4409         info->mti_exp = export;
4410
4411         ma = &info->mti_attr;
4412         lmm_size = ma->ma_lmm_size = mdt->mdt_max_mdsize;
4413         cookie_size = ma->ma_cookie_size = mdt->mdt_max_cookiesize;
4414         OBD_ALLOC(ma->ma_lmm, lmm_size);
4415         OBD_ALLOC(ma->ma_cookie, cookie_size);
4416
4417         if (ma->ma_lmm == NULL || ma->ma_cookie == NULL)
4418                 GOTO(out, rc = -ENOMEM);
4419         ma->ma_need = MA_LOV | MA_COOKIE;
4420         ma->ma_valid = 0;
4421         /* Close any open files (which may also cause orphan unlinking). */
4422         spin_lock(&med->med_open_lock);
4423         while (!list_empty(&med->med_open_head)) {
4424                 struct list_head *tmp = med->med_open_head.next;
4425                 struct mdt_file_data *mfd =
4426                         list_entry(tmp, struct mdt_file_data, mfd_list);
4427
4428                 /* Remove mfd handle so it can't be found again.
4429                  * We are consuming the mfd_list reference here. */
4430                 class_handle_unhash(&mfd->mfd_handle);
4431                 list_del_init(&mfd->mfd_list);
4432                 spin_unlock(&med->med_open_lock);
4433                 mdt_mfd_close(info, mfd);
4434                 /* TODO: if we close an unlinked file,
4435                  * we need to remove its objects from the OST */
4436                 memset(&ma->ma_attr, 0, sizeof(ma->ma_attr));
4437                 spin_lock(&med->med_open_lock);
4438                 ma->ma_lmm_size = lmm_size;
4439                 ma->ma_cookie_size = cookie_size;
4440                 ma->ma_need = MA_LOV | MA_COOKIE;
4441                 ma->ma_valid = 0;
4442         }
4443         spin_unlock(&med->med_open_lock);
4444         info->mti_mdt = NULL;
4445         mdt_client_del(&env, mdt);
4446
4447         EXIT;
4448 out:
4449         if (ma->ma_lmm != NULL) {
4450                 OBD_FREE(ma->ma_lmm, lmm_size);
4451                 ma->ma_lmm = NULL;
4452         }
4453         if (ma->ma_cookie != NULL) {
4454                 OBD_FREE(ma->ma_cookie, cookie_size);
4455                 ma->ma_cookie = NULL;
4456         }
4457         lu_env_fini(&env);
4458
4459         return rc;
4460 }
4461
4462 static void mdt_allow_cli(struct mdt_device *m, unsigned int flag)
4463 {
4464         if (flag & CONFIG_LOG)
4465                 m->mdt_fl_cfglog = 1;
4466         if (flag & CONFIG_SYNC)
4467                 m->mdt_fl_synced = 1;
4468
4469         if (m->mdt_fl_cfglog /* bz11778: && m->mdt_fl_synced */)
4470                 /* Open for clients */
4471                 m->mdt_md_dev.md_lu_dev.ld_obd->obd_no_conn = 0;
4472 }
4473
4474 static int mdt_upcall(const struct lu_env *env, struct md_device *md,
4475                       enum md_upcall_event ev)
4476 {
4477         struct mdt_device *m = mdt_dev(&md->md_lu_dev);
4478         struct md_device  *next  = m->mdt_child;
4479         struct mdt_thread_info *mti;
4480         int rc = 0;
4481         ENTRY;
4482
4483         switch (ev) {
4484                 case MD_LOV_SYNC:
4485                         rc = next->md_ops->mdo_maxsize_get(env, next,
4486                                         &m->mdt_max_mdsize,
4487                                         &m->mdt_max_cookiesize);
4488                         CDEBUG(D_INFO, "get max mdsize %d max cookiesize %d\n",
4489                                      m->mdt_max_mdsize, m->mdt_max_cookiesize);
4490                         mdt_allow_cli(m, CONFIG_SYNC);
4491                         break;
4492                 case MD_NO_TRANS:
4493                         mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
4494                         mti->mti_no_need_trans = 1;
4495                         CDEBUG(D_INFO, "disable mdt trans for this thread\n");
4496                         break;
4497                 case MD_LOV_CONFIG:
4498                         /* Check that MDT is not yet configured */
4499                         LASSERT(!m->mdt_fl_cfglog);
4500                         break;
4501                 default:
4502                         CERROR("invalid event\n");
4503                         rc = -EINVAL;
4504                         break;
4505         }
4506         RETURN(rc);
4507 }
4508
4509 static int mdt_obd_notify(struct obd_device *host,
4510                           struct obd_device *watched,
4511                           enum obd_notify_event ev, void *data)
4512 {
4513         ENTRY;
4514
4515         switch (ev) {
4516         case OBD_NOTIFY_CONFIG:
4517                 mdt_allow_cli(mdt_dev(host->obd_lu_dev), (unsigned long)data);
4518                 break;
4519         default:
4520                 CDEBUG(D_INFO, "Unhandled notification %#x\n", ev);
4521         }
4522         RETURN(0);
4523 }
4524
4525 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
4526                          void *karg, void *uarg)
4527 {
4528         struct lu_env      env;
4529         struct obd_device *obd = exp->exp_obd;
4530         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
4531         struct dt_device  *dt = mdt->mdt_bottom;
4532         int rc;
4533
4534         ENTRY;
4535         CDEBUG(D_IOCTL, "handling ioctl cmd %#x\n", cmd);
4536         rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
4537         if (rc)
4538                 RETURN(rc);
4539
4540         switch (cmd) {
4541         case OBD_IOC_SYNC:
4542                 rc = dt->dd_ops->dt_sync(&env, dt);
4543                 break;
4544         case OBD_IOC_SET_READONLY:
4545                 rc = dt->dd_ops->dt_sync(&env, dt);
4546                 dt->dd_ops->dt_ro(&env, dt);
4547                 break;
4548         case OBD_IOC_ABORT_RECOVERY:
4549                 CERROR("Aborting recovery for device %s\n", obd->obd_name);
4550                 target_stop_recovery_thread(obd);
4551                 rc = 0;
4552                 break;
4553         default:
4554                 CERROR("Unsupported cmd = %d for device %s\n",
4555                        cmd, obd->obd_name);
4556                 rc = -EOPNOTSUPP;
4557         }
4558
4559         lu_env_fini(&env);
4560         RETURN(rc);
4561 }
4562
4563 int mdt_postrecov(const struct lu_env *env, struct mdt_device *mdt)
4564 {
4565         struct lu_device *ld = md2lu_dev(mdt->mdt_child);
4566         struct obd_device *obd = mdt->mdt_md_dev.md_lu_dev.ld_obd;
4567         int rc, lost;
4568         ENTRY;
4569         /* If some clients did not participate in recovery, part of the sequence
4570          * space may have been lost, so bump the sequence to a safe value. */
4571         lost = obd->obd_max_recoverable_clients - obd->obd_connected_clients;
4572         mdt_seq_adjust(env, mdt, lost);
4573
4574         rc = ld->ld_ops->ldo_recovery_complete(env, ld);
4575         RETURN(rc);
4576 }
4577
4578 int mdt_obd_postrecov(struct obd_device *obd)
4579 {
4580         struct lu_env env;
4581         int rc;
4582
4583         rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
4584         if (rc)
4585                 RETURN(rc);
4586         rc = mdt_postrecov(&env, mdt_dev(obd->obd_lu_dev));
4587         lu_env_fini(&env);
4588         return rc;
4589 }
4590
4591 static struct obd_ops mdt_obd_device_ops = {
4592         .o_owner          = THIS_MODULE,
4593         .o_connect        = mdt_obd_connect,
4594         .o_reconnect      = mdt_obd_reconnect,
4595         .o_disconnect     = mdt_obd_disconnect,
4596         .o_init_export    = mdt_init_export,
4597         .o_destroy_export = mdt_destroy_export,
4598         .o_iocontrol      = mdt_iocontrol,
4599         .o_postrecov      = mdt_obd_postrecov,
4600         .o_notify         = mdt_obd_notify
4601 };
4602
4603 static struct lu_device *mdt_device_fini(const struct lu_env *env,
4604                                          struct lu_device *d)
4605 {
4606         struct mdt_device *m = mdt_dev(d);
4607         ENTRY;
4608
4609         mdt_fini(env, m);
4610         RETURN(NULL);
4611 }
4612
4613 static struct lu_device *mdt_device_free(const struct lu_env *env,
4614                                          struct lu_device *d)
4615 {
4616         struct mdt_device *m = mdt_dev(d);
4617         ENTRY;
4618
4619         OBD_FREE_PTR(m);
4620         RETURN(NULL);
4621 }
4622
4623 static struct lu_device *mdt_device_alloc(const struct lu_env *env,
4624                                           struct lu_device_type *t,
4625                                           struct lustre_cfg *cfg)
4626 {
4627         struct lu_device  *l;
4628         struct mdt_device *m;
4629
4630         OBD_ALLOC_PTR(m);
4631         if (m != NULL) {
4632                 int rc;
4633
4634                 l = &m->mdt_md_dev.md_lu_dev;
4635                 rc = mdt_init0(env, m, t, cfg);
4636                 if (rc != 0) {
4637                         OBD_FREE_PTR(m);
4638                         l = ERR_PTR(rc);
4639                         return l;
4640                 }
4641                 md_upcall_init(&m->mdt_md_dev, mdt_upcall);
4642         } else
4643                 l = ERR_PTR(-ENOMEM);
4644         return l;
4645 }
4646
4647 /* context key constructor/destructor: mdt_key_init, mdt_key_fini */
4648 LU_KEY_INIT_FINI(mdt, struct mdt_thread_info);
4649
4650 /* context key: mdt_thread_key */
4651 LU_CONTEXT_KEY_DEFINE(mdt, LCT_MD_THREAD);
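
/*
 * Handlers retrieve their per-thread state through this key, e.g.:
 *
 *      struct mdt_thread_info *info =
 *              lu_context_key_get(&env->le_ctx, &mdt_thread_key);
 *
 * as done in mdt_obd_reconnect() and mdt_destroy_export() above.
 */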
4652
4653 /* context key constructor/destructor: mdt_txn_key_init, mdt_txn_key_fini */
4654 LU_KEY_INIT_FINI(mdt_txn, struct mdt_txn_info);
4655
4656 struct lu_context_key mdt_txn_key = {
4657         .lct_tags = LCT_TX_HANDLE,
4658         .lct_init = mdt_txn_key_init,
4659         .lct_fini = mdt_txn_key_fini
4660 };
4661
4662 struct md_ucred *mdt_ucred(const struct mdt_thread_info *info)
4663 {
4664         return md_ucred(info->mti_env);
4665 }
4666
4667 /* type constructor/destructor: mdt_type_init, mdt_type_fini */
4668 LU_TYPE_INIT_FINI(mdt, &mdt_thread_key, &mdt_txn_key);
4669
4670 static struct lu_device_type_operations mdt_device_type_ops = {
4671         .ldto_init = mdt_type_init,
4672         .ldto_fini = mdt_type_fini,
4673
4674         .ldto_device_alloc = mdt_device_alloc,
4675         .ldto_device_free  = mdt_device_free,
4676         .ldto_device_fini  = mdt_device_fini
4677 };
4678
4679 static struct lu_device_type mdt_device_type = {
4680         .ldt_tags     = LU_DEVICE_MD,
4681         .ldt_name     = LUSTRE_MDT_NAME,
4682         .ldt_ops      = &mdt_device_type_ops,
4683         .ldt_ctx_tags = LCT_MD_THREAD
4684 };
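
/*
 * Rough device life cycle as wired up above: the stack allocates the device
 * through ldto_device_alloc (mdt_device_alloc -> mdt_init0) and tears it
 * down with ldto_device_fini (mdt_fini) followed by ldto_device_free.
 */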
4685
4686 static int __init mdt_mod_init(void)
4687 {
4688         struct lprocfs_static_vars lvars;
4689         int rc;
4690
4691         mdt_num_threads = MDT_NUM_THREADS;
4692         lprocfs_mdt_init_vars(&lvars);
4693         rc = class_register_type(&mdt_obd_device_ops, NULL,
4694                                  lvars.module_vars, LUSTRE_MDT_NAME,
4695                                  &mdt_device_type);
4696
4697         return rc;
4698 }
4699
4700 static void __exit mdt_mod_exit(void)
4701 {
4702         class_unregister_type(LUSTRE_MDT_NAME);
4703 }
4704
4705
4706 #define DEF_HNDL(prefix, base, suffix, flags, opc, fn, fmt)             \
4707 [prefix ## _ ## opc - prefix ## _ ## base] = {                          \
4708         .mh_name    = #opc,                                             \
4709         .mh_fail_id = OBD_FAIL_ ## prefix ## _  ## opc ## suffix,       \
4710         .mh_opc     = prefix ## _  ## opc,                              \
4711         .mh_flags   = flags,                                            \
4712         .mh_act     = fn,                                               \
4713         .mh_fmt     = fmt                                               \
4714 }
4715
4716 #define DEF_MDT_HNDL(flags, name, fn, fmt)                                  \
4717         DEF_HNDL(MDS, GETATTR, _NET, flags, name, fn, fmt)
4718
4719 #define DEF_SEQ_HNDL(flags, name, fn, fmt)                      \
4720         DEF_HNDL(SEQ, QUERY, _NET, flags, name, fn, fmt)
4721
4722 #define DEF_FLD_HNDL(flags, name, fn, fmt)                      \
4723         DEF_HNDL(FLD, QUERY, _NET, flags, name, fn, fmt)
4724 /*
4725  * Request with a format known in advance
4726  */
4727 #define DEF_MDT_HNDL_F(flags, name, fn)                                 \
4728         DEF_HNDL(MDS, GETATTR, _NET, flags, name, fn, &RQF_MDS_ ## name)
4729
4730 #define DEF_SEQ_HNDL_F(flags, name, fn)                                 \
4731         DEF_HNDL(SEQ, QUERY, _NET, flags, name, fn, &RQF_SEQ_ ## name)
4732
4733 #define DEF_FLD_HNDL_F(flags, name, fn)                                 \
4734         DEF_HNDL(FLD, QUERY, _NET, flags, name, fn, &RQF_FLD_ ## name)
4735 /*
4736  * Request with a format we do not yet know
4737  */
4738 #define DEF_MDT_HNDL_0(flags, name, fn)                                 \
4739         DEF_HNDL(MDS, GETATTR, _NET, flags, name, fn, NULL)
4740
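/*
 * For illustration, an entry such as
 *
 *      DEF_MDT_HNDL_F(HABEO_CORPUS, GETATTR, mdt_getattr)
 *
 * expands (via DEF_HNDL above) to the designated initializer
 *
 *      [MDS_GETATTR - MDS_GETATTR] = {
 *              .mh_name    = "GETATTR",
 *              .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
 *              .mh_opc     = MDS_GETATTR,
 *              .mh_flags   = HABEO_CORPUS,
 *              .mh_act     = mdt_getattr,
 *              .mh_fmt     = &RQF_MDS_GETATTR
 *      }
 *
 * so each table below is indexed by "opcode - base opcode" of its slice.
 */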
4741 static struct mdt_handler mdt_mds_ops[] = {
4742 DEF_MDT_HNDL_F(0,                         CONNECT,      mdt_connect),
4743 DEF_MDT_HNDL_F(0,                         DISCONNECT,   mdt_disconnect),
4744 DEF_MDT_HNDL_F(0,                         SET_INFO,     mdt_set_info),
4745 DEF_MDT_HNDL_F(0           |HABEO_REFERO, GETSTATUS,    mdt_getstatus),
4746 DEF_MDT_HNDL_F(HABEO_CORPUS,              GETATTR,      mdt_getattr),
4747 DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, GETATTR_NAME, mdt_getattr_name),
4748 DEF_MDT_HNDL_F(HABEO_CORPUS,              GETXATTR,     mdt_getxattr),
4749 DEF_MDT_HNDL_F(0           |HABEO_REFERO, STATFS,       mdt_statfs),
4750 DEF_MDT_HNDL_F(0           |MUTABOR,      REINT,        mdt_reint),
4751 DEF_MDT_HNDL_F(HABEO_CORPUS,              CLOSE,        mdt_close),
4752 DEF_MDT_HNDL_F(HABEO_CORPUS,              DONE_WRITING, mdt_done_writing),
4753 DEF_MDT_HNDL_F(0           |HABEO_REFERO, PIN,          mdt_pin),
4754 DEF_MDT_HNDL_0(0,                         SYNC,         mdt_sync),
4755 DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, IS_SUBDIR,    mdt_is_subdir),
4756 DEF_MDT_HNDL_F(0,                         QUOTACHECK,   mdt_quotacheck_handle),
4757 DEF_MDT_HNDL_F(0,                         QUOTACTL,     mdt_quotactl_handle)
4758 };
4759
4760 #define DEF_OBD_HNDL(flags, name, fn)                   \
4761         DEF_HNDL(OBD, PING, _NET, flags, name, fn, NULL)
4762
4763
4764 static struct mdt_handler mdt_obd_ops[] = {
4765         DEF_OBD_HNDL(0, PING,           mdt_obd_ping),
4766         DEF_OBD_HNDL(0, LOG_CANCEL,     mdt_obd_log_cancel),
4767         DEF_OBD_HNDL(0, QC_CALLBACK,    mdt_obd_qc_callback)
4768 };
4769
4770 #define DEF_DLM_HNDL_0(flags, name, fn)                   \
4771         DEF_HNDL(LDLM, ENQUEUE, , flags, name, fn, NULL)
4772 #define DEF_DLM_HNDL_F(flags, name, fn)                   \
4773         DEF_HNDL(LDLM, ENQUEUE, , flags, name, fn, &RQF_LDLM_ ## name)
4774
4775 static struct mdt_handler mdt_dlm_ops[] = {
4776         DEF_DLM_HNDL_F(HABEO_CLAVIS, ENQUEUE,        mdt_enqueue),
4777         DEF_DLM_HNDL_0(HABEO_CLAVIS, CONVERT,        mdt_convert),
4778         DEF_DLM_HNDL_0(0,            BL_CALLBACK,    mdt_bl_callback),
4779         DEF_DLM_HNDL_0(0,            CP_CALLBACK,    mdt_cp_callback)
4780 };
4781
4782 static struct mdt_handler mdt_llog_ops[] = {
4783 };
4784
4785 #define DEF_SEC_CTX_HNDL(name, fn)                      \
4786         DEF_HNDL(SEC_CTX, INIT, _NET, 0, name, fn, NULL)
4787
4788 static struct mdt_handler mdt_sec_ctx_ops[] = {
4789         DEF_SEC_CTX_HNDL(INIT,          mdt_sec_ctx_handle),
4790         DEF_SEC_CTX_HNDL(INIT_CONT,     mdt_sec_ctx_handle),
4791         DEF_SEC_CTX_HNDL(FINI,          mdt_sec_ctx_handle)
4792 };
4793
4794 static struct mdt_opc_slice mdt_regular_handlers[] = {
4795         {
4796                 .mos_opc_start = MDS_GETATTR,
4797                 .mos_opc_end   = MDS_LAST_OPC,
4798                 .mos_hs        = mdt_mds_ops
4799         },
4800         {
4801                 .mos_opc_start = OBD_PING,
4802                 .mos_opc_end   = OBD_LAST_OPC,
4803                 .mos_hs        = mdt_obd_ops
4804         },
4805         {
4806                 .mos_opc_start = LDLM_ENQUEUE,
4807                 .mos_opc_end   = LDLM_LAST_OPC,
4808                 .mos_hs        = mdt_dlm_ops
4809         },
4810         {
4811                 .mos_opc_start = LLOG_ORIGIN_HANDLE_CREATE,
4812                 .mos_opc_end   = LLOG_LAST_OPC,
4813                 .mos_hs        = mdt_llog_ops
4814         },
4815         {
4816                 .mos_opc_start = SEC_CTX_INIT,
4817                 .mos_opc_end   = SEC_LAST_OPC,
4818                 .mos_hs        = mdt_sec_ctx_ops
4819         },
4820         {
4821                 .mos_hs        = NULL
4822         }
4823 };
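
/*
 * How a request is matched to one of the tables above: a dispatcher walks
 * the slice array until it finds the slice whose [mos_opc_start, mos_opc_end)
 * range contains the opcode, then indexes mos_hs by the offset from the slice
 * base (the same offset DEF_HNDL() uses for the array initializers).  Minimal
 * sketch with an illustrative helper name; the actual lookup is defined
 * earlier in this file:
 *
 *      static struct mdt_handler *slice_lookup(__u32 opc,
 *                                              struct mdt_opc_slice *s)
 *      {
 *              for (; s->mos_hs != NULL; s++) {
 *                      if (s->mos_opc_start <= opc && opc < s->mos_opc_end)
 *                              return s->mos_hs + (opc - s->mos_opc_start);
 *              }
 *              return NULL;
 *      }
 */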
4824
4825 static struct mdt_handler mdt_readpage_ops[] = {
4826         DEF_MDT_HNDL_F(0,                         CONNECT,  mdt_connect),
4827         DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, READPAGE, mdt_readpage),
4828 #ifdef HAVE_SPLIT_SUPPORT
4829         DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, WRITEPAGE, mdt_writepage),
4830 #endif
4831
4832         /*
4833          * XXX: this is ugly and should be fixed one day, see mdc_close() for
4834          * detailed comments. --umka
4835          */
4836         DEF_MDT_HNDL_F(HABEO_CORPUS,              CLOSE,    mdt_close),
4837         DEF_MDT_HNDL_F(HABEO_CORPUS,              DONE_WRITING, mdt_done_writing),
4838 };
4839
4840 static struct mdt_opc_slice mdt_readpage_handlers[] = {
4841         {
4842                 .mos_opc_start = MDS_GETATTR,
4843                 .mos_opc_end   = MDS_LAST_OPC,
4844                 .mos_hs        = mdt_readpage_ops
4845         },
4846         {
4847                 .mos_hs        = NULL
4848         }
4849 };
4850
4851 static struct mdt_handler mdt_xmds_ops[] = {
4852         DEF_MDT_HNDL_F(0,                         CONNECT,      mdt_connect),
4853         DEF_MDT_HNDL_F(HABEO_CORPUS,              GETATTR,      mdt_getattr),
4854         DEF_MDT_HNDL_F(0           |MUTABOR,      REINT,        mdt_reint),
4855         DEF_MDT_HNDL_F(HABEO_CORPUS|HABEO_REFERO, IS_SUBDIR,    mdt_is_subdir),
4856 };
4857
4858 static struct mdt_opc_slice mdt_xmds_handlers[] = {
4859         {
4860                 .mos_opc_start = MDS_GETATTR,
4861                 .mos_opc_end   = MDS_LAST_OPC,
4862                 .mos_hs        = mdt_xmds_ops
4863         },
4864         {
4865                 .mos_opc_start = OBD_PING,
4866                 .mos_opc_end   = OBD_LAST_OPC,
4867                 .mos_hs        = mdt_obd_ops
4868         },
4869         {
4870                 .mos_opc_start = SEC_CTX_INIT,
4871                 .mos_opc_end   = SEC_LAST_OPC,
4872                 .mos_hs        = mdt_sec_ctx_ops
4873         },
4874         {
4875                 .mos_hs        = NULL
4876         }
4877 };
4878
4879 static struct mdt_handler mdt_seq_ops[] = {
4880         DEF_SEQ_HNDL_F(0, QUERY, (int (*)(struct mdt_thread_info *))seq_query)
4881 };
4882
4883 static struct mdt_opc_slice mdt_seq_handlers[] = {
4884         {
4885                 .mos_opc_start = SEQ_QUERY,
4886                 .mos_opc_end   = SEQ_LAST_OPC,
4887                 .mos_hs        = mdt_seq_ops
4888         },
4889         {
4890                 .mos_hs        = NULL
4891         }
4892 };
4893
4894 static struct mdt_handler mdt_fld_ops[] = {
4895         DEF_FLD_HNDL_F(0, QUERY, (int (*)(struct mdt_thread_info *))fld_query)
4896 };
4897
4898 static struct mdt_opc_slice mdt_fld_handlers[] = {
4899         {
4900                 .mos_opc_start = FLD_QUERY,
4901                 .mos_opc_end   = FLD_LAST_OPC,
4902                 .mos_hs        = mdt_fld_ops
4903         },
4904         {
4905                 .mos_hs        = NULL
4906         }
4907 };
4908
4909 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
4910 MODULE_DESCRIPTION("Lustre Meta-data Target ("LUSTRE_MDT_NAME")");
4911 MODULE_LICENSE("GPL");
4912
4913 CFS_MODULE_PARM(mdt_num_threads, "ul", ulong, 0444,
4914                 "number of mdt service threads to start");
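
/*
 * mdt_num_threads can be overridden when the module is loaded, e.g.
 * (illustrative value):
 *
 *      modprobe mdt mdt_num_threads=512
 *
 * The 0444 mode above leaves the parameter read-only once the module is up.
 */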
4915
4916 cfs_module(mdt, "0.2.0", mdt_mod_init, mdt_mod_exit);