1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * OBD_{ALLOC,FREE}*() macros
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #include <lustre_acl.h>
69 #include <lustre_param.h>
70 #include <lustre_quota.h>
71
72 mdl_mode_t mdt_mdl_lock_modes[] = {
73         [LCK_MINMODE] = MDL_MINMODE,
74         [LCK_EX]      = MDL_EX,
75         [LCK_PW]      = MDL_PW,
76         [LCK_PR]      = MDL_PR,
77         [LCK_CW]      = MDL_CW,
78         [LCK_CR]      = MDL_CR,
79         [LCK_NL]      = MDL_NL,
80         [LCK_GROUP]   = MDL_GROUP
81 };
82
83 ldlm_mode_t mdt_dlm_lock_modes[] = {
84         [MDL_MINMODE] = LCK_MINMODE,
85         [MDL_EX]      = LCK_EX,
86         [MDL_PW]      = LCK_PW,
87         [MDL_PR]      = LCK_PR,
88         [MDL_CW]      = LCK_CW,
89         [MDL_CR]      = LCK_CR,
90         [MDL_NL]      = LCK_NL,
91         [MDL_GROUP]   = LCK_GROUP
92 };
93
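/*
 * Illustrative sketch, not code taken from this file: these two tables
 * presumably back the mdt_mdl_mode2dlm_mode() and mdt_dlm_mode2mdl_mode()
 * converters used in mdt_lock_pdo_mode() below, most likely as plain array
 * lookups along the lines of:
 *
 *   static inline ldlm_mode_t mdt_mdl_mode2dlm_mode(mdl_mode_t mode)
 *   {
 *           return mdt_dlm_lock_modes[mode];    (e.g. MDL_PR -> LCK_PR)
 *   }
 */
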
94 /*
95  * Initialized in mdt_mod_init().
96  */
97 static unsigned long mdt_num_threads;
98 CFS_MODULE_PARM(mdt_num_threads, "ul", ulong, 0444,
99                 "number of MDS service threads to start "
100                 "(deprecated in favor of mds_num_threads)");
101
102 static unsigned long mds_num_threads;
103 CFS_MODULE_PARM(mds_num_threads, "ul", ulong, 0444,
104                 "number of MDS service threads to start");
105
106 static char *mds_num_cpts;
107 CFS_MODULE_PARM(mds_num_cpts, "c", charp, 0444,
108                 "CPU partitions MDS threads should run on");
109
110 static unsigned long mds_rdpg_num_threads;
111 CFS_MODULE_PARM(mds_rdpg_num_threads, "ul", ulong, 0444,
112                 "number of MDS readpage service threads to start");
113
114 static char *mds_rdpg_num_cpts;
115 CFS_MODULE_PARM(mds_rdpg_num_cpts, "c", charp, 0444,
116                 "CPU partitions MDS readpage threads should run on");
117
118 /* NB: these two should be removed along with setattr service in the future */
119 static unsigned long mds_attr_num_threads;
120 CFS_MODULE_PARM(mds_attr_num_threads, "ul", ulong, 0444,
121                 "number of MDS setattr service threads to start");
122
123 static char *mds_attr_num_cpts;
124 CFS_MODULE_PARM(mds_attr_num_cpts, "c", charp, 0444,
125                 "CPU partitions MDS setattr threads should run on");
126
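/*
 * Usage sketch, an assumption for illustration rather than documented
 * behaviour: these parameters are normally given at module load time on the
 * MDS node, e.g. via a modprobe configuration file:
 *
 *   options mdt mds_num_threads=512
 *
 * The *_num_cpts parameters take a CPU-partition expression.  All of the
 * parameters above are declared with mode 0444, i.e. read-only at runtime,
 * so changing them requires reloading the module.
 */
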
127 /* ptlrpc request handlers for the MDT. All handlers are
128  * grouped into several slices (struct mdt_opc_slice)
129  * and stored in an array, mdt_handlers[].
130  */
131 struct mdt_handler {
132         /* The name of this handler. */
133         const char *mh_name;
134         /* Fail id for this handler, checked at the start of the handler */
135         int         mh_fail_id;
136         /* Operation code for this handler */
137         __u32       mh_opc;
138         /* flags are listed in enum mdt_handler_flags below. */
139         __u32       mh_flags;
140         /* The actual handler function to execute. */
141         int (*mh_act)(struct mdt_thread_info *info);
142         /* Request format for this request. */
143         const struct req_format *mh_fmt;
144 };
145
146 enum mdt_handler_flags {
147         /*
148          * struct mdt_body is passed in the incoming message, and the object
149          * identified by this fid exists on disk.
150          *
151          * "habeo corpus" == "I have a body"
152          */
153         HABEO_CORPUS = (1 << 0),
154         /*
155          * struct ldlm_request is passed in the incoming message.
156          *
157          * "habeo clavis" == "I have a key"
158          */
159         HABEO_CLAVIS = (1 << 1),
160         /*
161          * this request has a fixed reply format, so the reply message can be
162          * packed by generic code.
163          *
164          * "habeo refero" == "I have a reply"
165          */
166         HABEO_REFERO = (1 << 2),
167         /*
168          * this request will modify something, so check whether the filesystem
169          * is read-only and, if so, return -EROFS to the client right away.
170          *
171          * "mutabor" == "I shall modify"
172          */
173         MUTABOR      = (1 << 3)
174 };
175
176 struct mdt_opc_slice {
177         __u32               mos_opc_start;
178         int                 mos_opc_end;
179         struct mdt_handler *mos_hs;
180 };
181
182 static struct mdt_opc_slice mdt_regular_handlers[];
183 static struct mdt_opc_slice mdt_readpage_handlers[];
184 static struct mdt_opc_slice mdt_xmds_handlers[];
185 static struct mdt_opc_slice mdt_seq_handlers[];
186 static struct mdt_opc_slice mdt_fld_handlers[];
187
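/*
 * Illustrative sketch, not copied from this file: the slice tables declared
 * above are filled in near the end of mdt_handler.c.  Using the struct
 * mdt_handler and struct mdt_opc_slice fields defined earlier, an entry for
 * a regular MDS opcode might look roughly like the following (the fail id,
 * opcode range and request format constants here are assumptions):
 *
 *   static struct mdt_handler mdt_mds_ops[] = {
 *           {
 *                   .mh_name    = "mdt_getattr",
 *                   .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
 *                   .mh_opc     = MDS_GETATTR,
 *                   .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
 *                   .mh_act     = mdt_getattr,
 *                   .mh_fmt     = &RQF_MDS_GETATTR
 *           },
 *   };
 *
 *   static struct mdt_opc_slice mdt_regular_handlers[] = {
 *           {
 *                   .mos_opc_start = MDS_GETATTR,
 *                   .mos_opc_end   = MDS_LAST_OPC,
 *                   .mos_hs        = mdt_mds_ops
 *           },
 *           { .mos_hs = NULL }
 *   };
 */
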
188 static struct mdt_device *mdt_dev(struct lu_device *d);
189 static int mdt_regular_handle(struct ptlrpc_request *req);
190 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
191 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
192                         struct getinfo_fid2path *fp);
193
194 static const struct lu_object_operations mdt_obj_ops;
195
196 /* Slab for MDT object allocation */
197 static cfs_mem_cache_t *mdt_object_kmem;
198
199 static struct lu_kmem_descr mdt_caches[] = {
200         {
201                 .ckd_cache = &mdt_object_kmem,
202                 .ckd_name  = "mdt_obj",
203                 .ckd_size  = sizeof(struct mdt_object)
204         },
205         {
206                 .ckd_cache = NULL
207         }
208 };
209
210 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
211 {
212         if (!rep)
213                 return 0;
214         return (rep->lock_policy_res1 & flag);
215 }
216
217 void mdt_clear_disposition(struct mdt_thread_info *info,
218                            struct ldlm_reply *rep, int flag)
219 {
220         if (info)
221                 info->mti_opdata &= ~flag;
222         if (rep)
223                 rep->lock_policy_res1 &= ~flag;
224 }
225
226 void mdt_set_disposition(struct mdt_thread_info *info,
227                          struct ldlm_reply *rep, int flag)
228 {
229         if (info)
230                 info->mti_opdata |= flag;
231         if (rep)
232                 rep->lock_policy_res1 |= flag;
233 }
234
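/*
 * Usage sketch, mirroring the intent handling later in this file: a handler
 * records lookup progress in the ldlm reply with these helpers, and the
 * client inspects the same disposition bits, e.g.:
 *
 *   mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
 *   rc = mdo_lookup(info->mti_env, next, lname, child_fid, &info->mti_spec);
 *   if (rc == -ENOENT)
 *           mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
 *   else if (rc == 0)
 *           mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
 */
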
235 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
236 {
237         lh->mlh_pdo_hash = 0;
238         lh->mlh_reg_mode = lm;
239         lh->mlh_type = MDT_REG_LOCK;
240 }
241
242 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
243                        const char *name, int namelen)
244 {
245         lh->mlh_reg_mode = lm;
246         lh->mlh_type = MDT_PDO_LOCK;
247
248         if (name != NULL && (name[0] != '\0')) {
249                 LASSERT(namelen > 0);
250                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
251         } else {
252                 LASSERT(namelen == 0);
253                 lh->mlh_pdo_hash = 0ull;
254         }
255 }
256
257 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
258                               struct mdt_lock_handle *lh)
259 {
260         mdl_mode_t mode;
261         ENTRY;
262
263         /*
264          * Any directory access needs a couple of locks:
265          *
266          * 1) on the part of the directory we are going to look up or modify;
267          *
268          * 2) on the whole directory, to protect it from concurrent splitting
269          * and/or to flush the client's cache for readdir().
270          *
271          * So, for a given access mode and object, this routine decides what
272          * lock mode to use for lock #2:
273          *
274          * 1) if the caller is going to look up in the directory, we only need
275          * to protect it from being split - LCK_CR;
276          *
277          * 2) if the caller is going to modify the directory, we need to
278          * protect it from being split and to flush the cache - LCK_CW;
279          *
280          * 3) if the caller is going to modify the directory and it looks ready
281          * for splitting, we need to protect it from any type of access
282          * (lookup/modify/split) - LCK_EX. --bzzz
283          */
284
285         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
286         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
287
288         /*
289          * Ask the underlying layer for its opinion on the preferable PDO lock
290          * mode, passing the access type as a regular lock mode:
291          *
292          * - MDL_MINMODE means that the lower layer does not want to specify a
293          * lock mode;
294          *
295          * - MDL_NL means that no PDO lock should be taken. This is used in
296          * some cases, e.g. for non-splittable directories there is no need
297          * to use PDO locks at all.
298          */
299         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
300                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
301
302         if (mode != MDL_MINMODE) {
303                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
304         } else {
305                 /*
306                  * The lower layer does not want to specify a locking mode. We
307                  * do it ourselves. No special protection is needed, just flush
308                  * the client's cache on modification and allow concurrent
309                  * modification.
310                  */
311                 switch (lh->mlh_reg_mode) {
312                 case LCK_EX:
313                         lh->mlh_pdo_mode = LCK_EX;
314                         break;
315                 case LCK_PR:
316                         lh->mlh_pdo_mode = LCK_CR;
317                         break;
318                 case LCK_PW:
319                         lh->mlh_pdo_mode = LCK_CW;
320                         break;
321                 default:
322                         CERROR("Unexpected lock type (0x%x)\n",
323                                (int)lh->mlh_reg_mode);
324                         LBUG();
325                 }
326         }
327
328         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
329         EXIT;
330 }
331
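/*
 * Illustrative sketch of the call flow, an assumption rather than code from
 * this file: callers prepare the PDO handle with the name being operated on,
 * e.g.
 *
 *   struct mdt_lock_handle *lhp = &info->mti_lh[MDT_LH_PARENT];
 *
 *   mdt_lock_pdo_init(lhp, LCK_PW, name, namelen);
 *
 * and mdt_lock_pdo_mode() then picks the whole-directory mode before the
 * lock is taken; with the default mapping above that gives EX->EX, PR->CR
 * and PW->CW, unless the layer below overrides it via mdo_lock_mode().
 */
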
332 static int mdt_getstatus(struct mdt_thread_info *info)
333 {
334         struct mdt_device *mdt  = info->mti_mdt;
335         struct md_device  *next = mdt->mdt_child;
336         struct mdt_body   *repbody;
337         int                rc;
338
339         ENTRY;
340
341         rc = mdt_check_ucred(info);
342         if (rc)
343                 RETURN(err_serious(rc));
344
345         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
346                 RETURN(err_serious(-ENOMEM));
347
348         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
349         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
350         if (rc != 0)
351                 RETURN(rc);
352
353         repbody->valid |= OBD_MD_FLID;
354
355         if (mdt->mdt_opts.mo_mds_capa &&
356             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
357                 struct mdt_object  *root;
358                 struct lustre_capa *capa;
359
360                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
361                 if (IS_ERR(root))
362                         RETURN(PTR_ERR(root));
363
364                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
365                 LASSERT(capa);
366                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
367                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
368                                  0);
369                 mdt_object_put(info->mti_env, root);
370                 if (rc == 0)
371                         repbody->valid |= OBD_MD_FLMDSCAPA;
372         }
373
374         RETURN(rc);
375 }
376
377 static int mdt_statfs(struct mdt_thread_info *info)
378 {
379         struct ptlrpc_request           *req = mdt_info_req(info);
380         struct md_device                *next = info->mti_mdt->mdt_child;
381         struct ptlrpc_service_part      *svcpt;
382         struct obd_statfs               *osfs;
383         int                             rc;
384
385         ENTRY;
386
387         svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;
388
389         /* This will trigger a watchdog timeout */
390         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
391                          (MDT_SERVICE_WATCHDOG_FACTOR *
392                           at_get(&svcpt->scp_at_estimate)) + 1);
393
394         rc = mdt_check_ucred(info);
395         if (rc)
396                 RETURN(err_serious(rc));
397
398         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
399                 RETURN(err_serious(-ENOMEM));
400
401         osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
402         if (!osfs)
403                 RETURN(-EPROTO);
404
405         /** statfs information is cached in the mdt_device */
406         if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
407                                cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
408                 /** statfs data is too old, get an up-to-date copy */
409                 rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
410                 if (rc)
411                         RETURN(rc);
412                 cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
413                 info->mti_mdt->mdt_osfs = *osfs;
414                 info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
415                 cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
416         } else {
417                 /** use cached statfs data */
418                 cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
419                 *osfs = info->mti_mdt->mdt_osfs;
420                 cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
421         }
422
423         if (rc == 0)
424                 mdt_counter_incr(req, LPROC_MDT_STATFS);
425
426         RETURN(rc);
427 }
428
429 /**
430  * Pack SOM attributes into the reply.
431  * Call under a DLM UPDATE lock.
432  */
433 static void mdt_pack_size2body(struct mdt_thread_info *info,
434                                struct mdt_object *mo)
435 {
436         struct mdt_body *b;
437         struct md_attr *ma = &info->mti_attr;
438
439         LASSERT(ma->ma_attr.la_valid & LA_MODE);
440         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
441
442         /* Check if Size-on-MDS is supported, if this is a regular file,
443          * if SOM is enabled on the object, and if the SOM cache exists and is
444          * valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
445         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
446             !S_ISREG(ma->ma_attr.la_mode) ||
447             !mdt_object_is_som_enabled(mo) ||
448             !(ma->ma_valid & MA_SOM))
449                 return;
450
451         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
452         b->size = ma->ma_som->msd_size;
453         b->blocks = ma->ma_som->msd_blocks;
454 }
455
456 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
457                         const struct lu_attr *attr, const struct lu_fid *fid)
458 {
459         struct md_attr *ma = &info->mti_attr;
460
461         LASSERT(ma->ma_valid & MA_INODE);
462
463         b->atime      = attr->la_atime;
464         b->mtime      = attr->la_mtime;
465         b->ctime      = attr->la_ctime;
466         b->mode       = attr->la_mode;
467         b->size       = attr->la_size;
468         b->blocks     = attr->la_blocks;
469         b->uid        = attr->la_uid;
470         b->gid        = attr->la_gid;
471         b->flags      = attr->la_flags;
472         b->nlink      = attr->la_nlink;
473         b->rdev       = attr->la_rdev;
474
475         /* XXX: should pack the reply body according to lu_valid */
476         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
477                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
478                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
479                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
480
481         if (!S_ISREG(attr->la_mode)) {
482                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
483         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
484                 /* this means no objects are allocated on OSTs */
485                 LASSERT(!(ma->ma_valid & MA_LOV));
486                 /* just ignore blocks occupied by extended attributes on MDS */
487                 b->blocks = 0;
488                 /* if no object is allocated on the OSTs, the size on the MDS is valid. b=22272 */
489                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
490         }
491
492         if (fid) {
493                 b->fid1 = *fid;
494                 b->valid |= OBD_MD_FLID;
495
496                 /* FIXME: these should be fixed when the new igif is ready. */
497                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
498                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
499                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
500
501                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
502                                 PFID(fid), b->nlink, b->mode, b->size);
503         }
504
505         if (info)
506                 mdt_body_reverse_idmap(info, b);
507
508         if (b->valid & OBD_MD_FLSIZE)
509                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
510                        PFID(fid), (unsigned long long)b->size);
511 }
512
513 static inline int mdt_body_has_lov(const struct lu_attr *la,
514                                    const struct mdt_body *body)
515 {
516         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
517                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
518 }
519
520 void mdt_client_compatibility(struct mdt_thread_info *info)
521 {
522         struct mdt_body       *body;
523         struct ptlrpc_request *req = mdt_info_req(info);
524         struct obd_export     *exp = req->rq_export;
525         struct md_attr        *ma = &info->mti_attr;
526         struct lu_attr        *la = &ma->ma_attr;
527         ENTRY;
528
529         if (exp->exp_connect_flags & OBD_CONNECT_LAYOUTLOCK)
530                 /* the client can deal with 16-bit lmm_stripe_count */
531                 RETURN_EXIT;
532
533         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
534
535         if (!mdt_body_has_lov(la, body))
536                 RETURN_EXIT;
537
538         /* now we have a reply with a LOV for a client that does not support
539          * the layout lock, so we have to clear the layout generation number */
540         if (S_ISREG(la->la_mode))
541                 ma->ma_lmm->lmm_layout_gen = 0;
542         EXIT;
543 }
544
545 static int mdt_big_lmm_get(const struct lu_env *env, struct mdt_object *o,
546                            struct md_attr *ma)
547 {
548         struct mdt_thread_info *info;
549         int rc;
550         ENTRY;
551
552         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
553         LASSERT(info != NULL);
554         LASSERT(ma->ma_lmm_size > 0);
555         LASSERT(info->mti_big_lmm_used == 0);
556         rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL,
557                           XATTR_NAME_LOV);
558         if (rc < 0)
559                 RETURN(rc);
560
561         /* big_lmm may need to be grown */
562         if (info->mti_big_lmmsize < rc) {
563                 int size = size_roundup_power2(rc);
564
565                 if (info->mti_big_lmmsize > 0) {
566                         /* free old buffer */
567                         LASSERT(info->mti_big_lmm);
568                         OBD_FREE_LARGE(info->mti_big_lmm,
569                                        info->mti_big_lmmsize);
570                         info->mti_big_lmm = NULL;
571                         info->mti_big_lmmsize = 0;
572                 }
573
574                 OBD_ALLOC_LARGE(info->mti_big_lmm, size);
575                 if (info->mti_big_lmm == NULL)
576                         RETURN(-ENOMEM);
577                 info->mti_big_lmmsize = size;
578         }
579         LASSERT(info->mti_big_lmmsize >= rc);
580
581         info->mti_buf.lb_buf = info->mti_big_lmm;
582         info->mti_buf.lb_len = info->mti_big_lmmsize;
583         rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf,
584                           XATTR_NAME_LOV);
585         if (rc < 0)
586                 RETURN(rc);
587
588         info->mti_big_lmm_used = 1;
589         ma->ma_valid |= MA_LOV;
590         ma->ma_lmm = info->mti_big_lmm;
591         ma->ma_lmm_size = rc;
592
593         /* update mdt_max_mdsize so all clients will be aware of it */
594         if (info->mti_mdt->mdt_max_mdsize < rc)
595                 info->mti_mdt->mdt_max_mdsize = rc;
596
597         RETURN(0);
598 }
599
600 int mdt_attr_get_lov(struct mdt_thread_info *info,
601                      struct mdt_object *o, struct md_attr *ma)
602 {
603         struct md_object *next = mdt_object_child(o);
604         struct lu_buf    *buf = &info->mti_buf;
605         int rc;
606
607         buf->lb_buf = ma->ma_lmm;
608         buf->lb_len = ma->ma_lmm_size;
609         rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
610         if (rc > 0) {
611                 ma->ma_lmm_size = rc;
612                 ma->ma_valid |= MA_LOV;
613                 rc = 0;
614         } else if (rc == -ENODATA) {
615                 /* no LOV EA */
616                 rc = 0;
617         } else if (rc == -ERANGE) {
618                 rc = mdt_big_lmm_get(info->mti_env, o, ma);
619         }
620
621         return rc;
622 }
623
624 int mdt_attr_get_complex(struct mdt_thread_info *info,
625                          struct mdt_object *o, struct md_attr *ma)
626 {
627         const struct lu_env *env = info->mti_env;
628         struct md_object    *next = mdt_object_child(o);
629         struct lu_buf       *buf = &info->mti_buf;
630         u32                  mode = lu_object_attr(&next->mo_lu);
631         int                  need = ma->ma_need;
632         int                  rc = 0, rc2;
633         ENTRY;
634
635         /* do we really need PFID? */
636         LASSERT((ma->ma_need & MA_PFID) == 0);
637
638         ma->ma_valid = 0;
639
640         if (need & MA_INODE) {
641                 ma->ma_need = MA_INODE;
642                 rc = mo_attr_get(env, next, ma);
643                 if (rc)
644                         GOTO(out, rc);
645                 ma->ma_valid |= MA_INODE;
646         }
647
648         if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
649                 rc = mdt_attr_get_lov(info, o, ma);
650                 if (rc)
651                         GOTO(out, rc);
652         }
653
654         if (need & MA_LMV && S_ISDIR(mode)) {
655                 buf->lb_buf = ma->ma_lmv;
656                 buf->lb_len = ma->ma_lmv_size;
657                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
658                 if (rc2 > 0) {
659                         ma->ma_lmv_size = rc2;
660                         ma->ma_valid |= MA_LMV;
661                 } else if (rc2 == -ENODATA) {
662                         /* no LMV EA */
663                         ma->ma_lmv_size = 0;
664                 } else
665                         GOTO(out, rc = rc2);
666         }
667
668
669         if (rc == 0 && S_ISREG(mode) && (need & (MA_HSM | MA_SOM))) {
670                 struct lustre_mdt_attrs *lma;
671
672                 lma = (struct lustre_mdt_attrs *)info->mti_xattr_buf;
673                 CLASSERT(sizeof(*lma) <= sizeof(info->mti_xattr_buf));
674
675                 buf->lb_buf = lma;
676                 buf->lb_len = sizeof(info->mti_xattr_buf);
677                 rc = mo_xattr_get(env, next, buf, XATTR_NAME_LMA);
678                 if (rc > 0) {
679                         lustre_lma_swab(lma);
680                         /* Swab and copy LMA */
681                         if (need & MA_HSM) {
682                                 if (lma->lma_compat & LMAC_HSM)
683                                         ma->ma_hsm.mh_flags =
684                                                 lma->lma_flags & HSM_FLAGS_MASK;
685                                 else
686                                         ma->ma_hsm.mh_flags = 0;
687                                 ma->ma_valid |= MA_HSM;
688                         }
689                         /* Copy SOM */
690                         if (need & MA_SOM && lma->lma_compat & LMAC_SOM) {
691                                 LASSERT(ma->ma_som != NULL);
692                                 ma->ma_som->msd_ioepoch = lma->lma_ioepoch;
693                                 ma->ma_som->msd_size    = lma->lma_som_size;
694                                 ma->ma_som->msd_blocks  = lma->lma_som_blocks;
695                                 ma->ma_som->msd_mountid = lma->lma_som_mountid;
696                                 ma->ma_valid |= MA_SOM;
697                         }
698                         rc = 0;
699                 } else if (rc == -ENODATA) {
700                         rc = 0;
701                 }
702         }
703
704 #ifdef CONFIG_FS_POSIX_ACL
705         if (need & MA_ACL_DEF && S_ISDIR(mode)) {
706                 buf->lb_buf = ma->ma_acl;
707                 buf->lb_len = ma->ma_acl_size;
708                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
709                 if (rc2 > 0) {
710                         ma->ma_acl_size = rc2;
711                         ma->ma_valid |= MA_ACL_DEF;
712                 } else if (rc2 == -ENODATA) {
713                         /* no ACLs */
714                         ma->ma_acl_size = 0;
715                 } else
716                         GOTO(out, rc = rc2);
717         }
718 #endif
719 out:
720         ma->ma_need = need;
721         CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
722                rc, ma->ma_valid, ma->ma_lmm);
723         RETURN(rc);
724 }
725
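/*
 * Usage sketch, following the pattern of the callers later in this file:
 * the caller selects the attribute groups it wants via ma_need and checks
 * ma_valid afterwards, e.g.
 *
 *   struct md_attr *ma = &info->mti_attr;
 *
 *   ma->ma_valid = 0;
 *   ma->ma_need  = MA_INODE | MA_LOV;
 *   rc = mdt_attr_get_complex(info, o, ma);
 *   if (rc == 0 && ma->ma_valid & MA_LOV)
 *           mdt_dump_lmm(D_INFO, ma->ma_lmm);
 */
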
726 static int mdt_getattr_internal(struct mdt_thread_info *info,
727                                 struct mdt_object *o, int ma_need)
728 {
729         struct md_object        *next = mdt_object_child(o);
730         const struct mdt_body   *reqbody = info->mti_body;
731         struct ptlrpc_request   *req = mdt_info_req(info);
732         struct md_attr          *ma = &info->mti_attr;
733         struct lu_attr          *la = &ma->ma_attr;
734         struct req_capsule      *pill = info->mti_pill;
735         const struct lu_env     *env = info->mti_env;
736         struct mdt_body         *repbody;
737         struct lu_buf           *buffer = &info->mti_buf;
738         int                     rc;
739         int                     is_root;
740         ENTRY;
741
742         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
743                 RETURN(err_serious(-ENOMEM));
744
745         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
746
747         ma->ma_valid = 0;
748
749         rc = mdt_object_exists(o);
750         if (rc < 0) {
751                 /* This object is located on a remote node. */
752                 repbody->fid1 = *mdt_object_fid(o);
753                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
754                 GOTO(out, rc = 0);
755         }
756
757         buffer->lb_len = reqbody->eadatasize;
758         if (buffer->lb_len > 0)
759                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
760         else
761                 buffer->lb_buf = NULL;
762
763         /* If it is a dir object and the client requested MEA, then get the MEA */
764         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
765             reqbody->valid & OBD_MD_MEA) {
766                 /* Assumption: MDT_MD size is enough for lmv size. */
767                 ma->ma_lmv = buffer->lb_buf;
768                 ma->ma_lmv_size = buffer->lb_len;
769                 ma->ma_need = MA_LMV | MA_INODE;
770         } else {
771                 ma->ma_lmm = buffer->lb_buf;
772                 ma->ma_lmm_size = buffer->lb_len;
773                 ma->ma_need = MA_LOV | MA_INODE;
774         }
775
776         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
777             reqbody->valid & OBD_MD_FLDIREA  &&
778             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
779                 /* get default stripe info for this dir. */
780                 ma->ma_need |= MA_LOV_DEF;
781         }
782         ma->ma_need |= ma_need;
783         if (ma->ma_need & MA_SOM)
784                 ma->ma_som = &info->mti_u.som.data;
785
786         rc = mdt_attr_get_complex(info, o, ma);
787         if (unlikely(rc)) {
788                 CERROR("getattr error for "DFID": %d\n",
789                         PFID(mdt_object_fid(o)), rc);
790                 RETURN(rc);
791         }
792
793         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
794
795         /* the Lustre protocol is supposed to return the default striping
796          * on the user-visible root if explicitly requested */
797         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
798             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
799                 struct lu_fid      rootfid;
800                 struct mdt_object *root;
801                 struct mdt_device *mdt = info->mti_mdt;
802
803                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
804                 if (rc)
805                         RETURN(rc);
806                 root = mdt_object_find(env, mdt, &rootfid);
807                 if (IS_ERR(root))
808                         RETURN(PTR_ERR(root));
809                 rc = mdt_attr_get_lov(info, root, ma);
810                 mdt_object_put(info->mti_env, root);
811                 if (unlikely(rc)) {
812                         CERROR("getattr error for "DFID": %d\n",
813                                         PFID(mdt_object_fid(o)), rc);
814                         RETURN(rc);
815                 }
816         }
817
818         if (likely(ma->ma_valid & MA_INODE))
819                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
820         else
821                 RETURN(-EFAULT);
822
823         if (mdt_body_has_lov(la, reqbody)) {
824                 if (ma->ma_valid & MA_LOV) {
825                         LASSERT(ma->ma_lmm_size);
826                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
827                         repbody->eadatasize = ma->ma_lmm_size;
828                         if (S_ISDIR(la->la_mode))
829                                 repbody->valid |= OBD_MD_FLDIREA;
830                         else
831                                 repbody->valid |= OBD_MD_FLEASIZE;
832                 }
833                 if (ma->ma_valid & MA_LMV) {
834                         LASSERT(S_ISDIR(la->la_mode));
835                         repbody->eadatasize = ma->ma_lmv_size;
836                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
837                 }
838         } else if (S_ISLNK(la->la_mode) &&
839                    reqbody->valid & OBD_MD_LINKNAME) {
840                 buffer->lb_buf = ma->ma_lmm;
841                 /* eadatasize from client includes NULL-terminator, so
842                  * there is no need to read it */
843                 buffer->lb_len = reqbody->eadatasize - 1;
844                 rc = mo_readlink(env, next, buffer);
845                 if (unlikely(rc <= 0)) {
846                         CERROR("readlink failed: %d\n", rc);
847                         rc = -EFAULT;
848                 } else {
849                         int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
850
851                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
852                                 rc -= 2;
853                         repbody->valid |= OBD_MD_LINKNAME;
854                         /* we need to report back size with NULL-terminator
855                          * because client expects that */
856                         repbody->eadatasize = rc + 1;
857                         if (repbody->eadatasize != reqbody->eadatasize)
858                                 CERROR("Read shorter symlink %d, expected %d\n",
859                                        rc, reqbody->eadatasize - 1);
860                         /* NULL terminate */
861                         ((char *)ma->ma_lmm)[rc] = 0;
862
863                         /* If the total CDEBUG() size is larger than a page,
864                          * it will print a warning to the console; avoid this
865                          * by printing just the last part of the symlink. */
866                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
867                                print_limit < rc ? "..." : "", print_limit,
868                                (char *)ma->ma_lmm + rc - print_limit, rc);
869                         rc = 0;
870                 }
871         }
872
873         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
874                 repbody->max_cookiesize = 0;
875                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
876                 repbody->valid |= OBD_MD_FLMODEASIZE;
877                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
878                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
879                        repbody->max_cookiesize);
880         }
881
882         if (exp_connect_rmtclient(info->mti_exp) &&
883             reqbody->valid & OBD_MD_FLRMTPERM) {
884                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
885
886                 /* mdt_getattr_lock only */
887                 rc = mdt_pack_remote_perm(info, o, buf);
888                 if (rc) {
889                         repbody->valid &= ~OBD_MD_FLRMTPERM;
890                         repbody->aclsize = 0;
891                         RETURN(rc);
892                 } else {
893                         repbody->valid |= OBD_MD_FLRMTPERM;
894                         repbody->aclsize = sizeof(struct mdt_remote_perm);
895                 }
896         }
897 #ifdef CONFIG_FS_POSIX_ACL
898         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
899                  (reqbody->valid & OBD_MD_FLACL)) {
900                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
901                 buffer->lb_len = req_capsule_get_size(pill,
902                                                       &RMF_ACL, RCL_SERVER);
903                 if (buffer->lb_len > 0) {
904                         rc = mo_xattr_get(env, next, buffer,
905                                           XATTR_NAME_ACL_ACCESS);
906                         if (rc < 0) {
907                                 if (rc == -ENODATA) {
908                                         repbody->aclsize = 0;
909                                         repbody->valid |= OBD_MD_FLACL;
910                                         rc = 0;
911                                 } else if (rc == -EOPNOTSUPP) {
912                                         rc = 0;
913                                 } else {
914                                         CERROR("error getting ACL: %d\n", rc);
915                                 }
916                         } else {
917                                 repbody->aclsize = rc;
918                                 repbody->valid |= OBD_MD_FLACL;
919                                 rc = 0;
920                         }
921                 }
922         }
923 #endif
924
925         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
926             info->mti_mdt->mdt_opts.mo_mds_capa &&
927             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
928                 struct lustre_capa *capa;
929
930                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
931                 LASSERT(capa);
932                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
933                 rc = mo_capa_get(env, next, capa, 0);
934                 if (rc)
935                         RETURN(rc);
936                 repbody->valid |= OBD_MD_FLMDSCAPA;
937         }
938
939 out:
940         if (rc == 0)
941                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
942
943         RETURN(rc);
944 }
945
946 static int mdt_renew_capa(struct mdt_thread_info *info)
947 {
948         struct mdt_object  *obj = info->mti_object;
949         struct mdt_body    *body;
950         struct lustre_capa *capa, *c;
951         int rc;
952         ENTRY;
953
954         /* if the object doesn't exist, or the server has disabled
955          * capabilities, return directly; the client will see that the
956          * OBD_MD_FLOSSCAPA flag is not set in body->valid.
957          */
958         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
959             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
960                 RETURN(0);
961
962         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
963         LASSERT(body != NULL);
964
965         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
966         LASSERT(c);
967
968         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
969         LASSERT(capa);
970
971         *capa = *c;
972         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
973         if (rc == 0)
974                 body->valid |= OBD_MD_FLOSSCAPA;
975         RETURN(rc);
976 }
977
978 static int mdt_getattr(struct mdt_thread_info *info)
979 {
980         struct mdt_object       *obj = info->mti_object;
981         struct req_capsule      *pill = info->mti_pill;
982         struct mdt_body         *reqbody;
983         struct mdt_body         *repbody;
984         mode_t                   mode;
985         int rc, rc2;
986         ENTRY;
987
988         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
989         LASSERT(reqbody);
990
991         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
992                 rc = req_capsule_server_pack(pill);
993                 if (unlikely(rc))
994                         RETURN(err_serious(rc));
995                 rc = mdt_renew_capa(info);
996                 GOTO(out_shrink, rc);
997         }
998
999         LASSERT(obj != NULL);
1000         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
1001
1002         mode = lu_object_attr(&obj->mot_obj.mo_lu);
1003
1004         /* old clients may not report the needed easize, so use the max value */
1005         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1006                              reqbody->eadatasize == 0 ?
1007                              info->mti_mdt->mdt_max_mdsize :
1008                              reqbody->eadatasize);
1009
1010         rc = req_capsule_server_pack(pill);
1011         if (unlikely(rc != 0))
1012                 RETURN(err_serious(rc));
1013
1014         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1015         LASSERT(repbody != NULL);
1016         repbody->eadatasize = 0;
1017         repbody->aclsize = 0;
1018
1019         if (reqbody->valid & OBD_MD_FLRMTPERM)
1020                 rc = mdt_init_ucred(info, reqbody);
1021         else
1022                 rc = mdt_check_ucred(info);
1023         if (unlikely(rc))
1024                 GOTO(out_shrink, rc);
1025
1026         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1027         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1028
1029         /*
1030          * Don't check the capability at all, because rename might do a getattr
1031          * for a remote object, and at that time no capability is available.
1032          */
1033         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
1034         rc = mdt_getattr_internal(info, obj, 0);
1035         if (reqbody->valid & OBD_MD_FLRMTPERM)
1036                 mdt_exit_ucred(info);
1037         EXIT;
1038 out_shrink:
1039         mdt_client_compatibility(info);
1040         rc2 = mdt_fix_reply(info);
1041         if (rc == 0)
1042                 rc = rc2;
1043         return rc;
1044 }
1045
1046 static int mdt_is_subdir(struct mdt_thread_info *info)
1047 {
1048         struct mdt_object     *o = info->mti_object;
1049         struct req_capsule    *pill = info->mti_pill;
1050         const struct mdt_body *body = info->mti_body;
1051         struct mdt_body       *repbody;
1052         int                    rc;
1053         ENTRY;
1054
1055         LASSERT(o != NULL);
1056
1057         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1058
1059         /*
1060          * We save the last checked parent fid in @repbody->fid1 for the
1061          * remote directory case.
1062          */
1063         LASSERT(fid_is_sane(&body->fid2));
1064         LASSERT(mdt_object_exists(o) > 0);
1065         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1066                            &body->fid2, &repbody->fid1);
1067         if (rc == 0 || rc == -EREMOTE)
1068                 repbody->valid |= OBD_MD_FLID;
1069
1070         RETURN(rc);
1071 }
1072
1073 static int mdt_raw_lookup(struct mdt_thread_info *info,
1074                           struct mdt_object *parent,
1075                           const struct lu_name *lname,
1076                           struct ldlm_reply *ldlm_rep)
1077 {
1078         struct md_object *next = mdt_object_child(info->mti_object);
1079         const struct mdt_body *reqbody = info->mti_body;
1080         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1081         struct mdt_body *repbody;
1082         int rc;
1083         ENTRY;
1084
1085         if (reqbody->valid != OBD_MD_FLID)
1086                 RETURN(0);
1087
1088         LASSERT(!info->mti_cross_ref);
1089
1090         /* Only get the fid of this object by name */
1091         fid_zero(child_fid);
1092         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1093                         &info->mti_spec);
1094 #if 0
1095         /* XXX is raw_lookup possible as intent operation? */
1096         if (rc != 0) {
1097                 if (rc == -ENOENT)
1098                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1099                 RETURN(rc);
1100         } else
1101                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1102
1103         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1104 #endif
1105         if (rc == 0) {
1106                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1107                 repbody->fid1 = *child_fid;
1108                 repbody->valid = OBD_MD_FLID;
1109         }
1110         RETURN(1);
1111 }
1112
1113 /*
1114  * The UPDATE lock should be taken against the parent and released before
1115  * exit; the child_bits lock should be taken against the child and returned
1116  * back: (1) a normal request should release the child lock;
1117  *       (2) an intent request will grant the lock to the client.
1118  */
1119 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1120                                  struct mdt_lock_handle *lhc,
1121                                  __u64 child_bits,
1122                                  struct ldlm_reply *ldlm_rep)
1123 {
1124         struct ptlrpc_request  *req       = mdt_info_req(info);
1125         struct mdt_body        *reqbody   = NULL;
1126         struct mdt_object      *parent    = info->mti_object;
1127         struct mdt_object      *child;
1128         struct md_object       *next      = mdt_object_child(parent);
1129         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1130         struct lu_name         *lname     = NULL;
1131         const char             *name      = NULL;
1132         int                     namelen   = 0;
1133         struct mdt_lock_handle *lhp       = NULL;
1134         struct ldlm_lock       *lock;
1135         struct ldlm_res_id     *res_id;
1136         int                     is_resent;
1137         int                     ma_need = 0;
1138         int                     rc;
1139
1140         ENTRY;
1141
1142         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1143         LASSERT(ergo(is_resent,
1144                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1145
1146         LASSERT(parent != NULL);
1147         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1148         if (name == NULL)
1149                 RETURN(err_serious(-EFAULT));
1150
1151         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1152                                        RCL_CLIENT) - 1;
1153         if (!info->mti_cross_ref) {
1154                 /*
1155                  * XXX: The check for "namelen == 0" is for getattr by fid
1156                  * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty name,
1157                  * that is, the name must contain at least one character and
1158                  * the terminating '\0'.
1159                  */
1160                 if (namelen == 0) {
1161                         reqbody = req_capsule_client_get(info->mti_pill,
1162                                                          &RMF_MDT_BODY);
1163                         if (unlikely(reqbody == NULL))
1164                                 RETURN(err_serious(-EFAULT));
1165
1166                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1167                                 RETURN(err_serious(-EINVAL));
1168
1169                         name = NULL;
1170                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1171                                "ldlm_rep = %p\n",
1172                                PFID(mdt_object_fid(parent)),
1173                                PFID(&reqbody->fid2), ldlm_rep);
1174                 } else {
1175                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1176                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1177                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1178                                name, ldlm_rep);
1179                 }
1180         }
1181         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1182
1183         rc = mdt_object_exists(parent);
1184         if (unlikely(rc == 0)) {
1185                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1186                                 &parent->mot_obj.mo_lu,
1187                                 "Parent doesn't exist!\n");
1188                 RETURN(-ESTALE);
1189         } else if (!info->mti_cross_ref) {
1190                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
1191                          PFID(mdt_object_fid(parent)));
1192         }
1193         if (lname) {
1194                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1195                 if (rc != 0) {
1196                         if (rc > 0)
1197                                 rc = 0;
1198                         RETURN(rc);
1199                 }
1200         }
1201
1202         if (info->mti_cross_ref) {
1203                 /* Only getattr on the child. Parent is on another node. */
1204                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1205                 child = parent;
1206                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1207                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1208
1209                 if (is_resent) {
1210                         /* Do not take lock for resent case. */
1211                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1212                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1213                                  lhc->mlh_reg_lh.cookie);
1214                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1215                                                 &lock->l_resource->lr_name));
1216                         LDLM_LOCK_PUT(lock);
1217                         rc = 0;
1218                 } else {
1219                         mdt_lock_handle_init(lhc);
1220                         mdt_lock_reg_init(lhc, LCK_PR);
1221
1222                         /*
1223                          * The object's name is on another MDS; no lookup lock
1224                          * is needed here, but an update lock is.
1225                          */
1226                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1227                         child_bits |= MDS_INODELOCK_UPDATE;
1228
1229                         rc = mdt_object_lock(info, child, lhc, child_bits,
1230                                              MDT_LOCAL_LOCK);
1231                 }
1232                 if (rc == 0) {
1233                         /* Finally, we can get attr for child. */
1234                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1235                                          BYPASS_CAPA);
1236                         rc = mdt_getattr_internal(info, child, 0);
1237                         if (unlikely(rc != 0))
1238                                 mdt_object_unlock(info, child, lhc, 1);
1239                 }
1240                 RETURN(rc);
1241         }
1242
1243         if (lname) {
1244                 /* step 1: lock parent only if parent is a directory */
1245                 if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
1246                         lhp = &info->mti_lh[MDT_LH_PARENT];
1247                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1248                         rc = mdt_object_lock(info, parent, lhp,
1249                                              MDS_INODELOCK_UPDATE,
1250                                              MDT_LOCAL_LOCK);
1251                         if (unlikely(rc != 0))
1252                                 RETURN(rc);
1253                 }
1254
1255                 /* step 2: lookup child's fid by name */
1256                 fid_zero(child_fid);
1257                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1258                                 &info->mti_spec);
1259
1260                 if (rc != 0) {
1261                         if (rc == -ENOENT)
1262                                 mdt_set_disposition(info, ldlm_rep,
1263                                                     DISP_LOOKUP_NEG);
1264                         GOTO(out_parent, rc);
1265                 } else
1266                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1267         } else {
1268                 *child_fid = reqbody->fid2;
1269                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1270         }
1271
1272         /*
1273          * step 3: find the child object by fid and lock it,
1274          *         regardless of whether it is local or remote.
1275          */
1276         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1277
1278         if (unlikely(IS_ERR(child)))
1279                 GOTO(out_parent, rc = PTR_ERR(child));
1280         if (is_resent) {
1281                 /* Do not take lock for resent case. */
1282                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1283                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1284                          lhc->mlh_reg_lh.cookie);
1285
1286                 res_id = &lock->l_resource->lr_name;
1287                 if (!fid_res_name_eq(mdt_object_fid(child),
1288                                     &lock->l_resource->lr_name)) {
1289                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1290                                                  &lock->l_resource->lr_name),
1291                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1292                                  (unsigned long)res_id->name[0],
1293                                  (unsigned long)res_id->name[1],
1294                                  (unsigned long)res_id->name[2],
1295                                  PFID(mdt_object_fid(parent)));
1296                           CWARN("Although resent, still did not get the child "
1297                                 "lock, parent: "DFID" child: "DFID"\n",
1298                                 PFID(mdt_object_fid(parent)),
1299                                 PFID(mdt_object_fid(child)));
1300                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1301                           LDLM_LOCK_PUT(lock);
1302                           GOTO(relock, 0);
1303                 }
1304                 LDLM_LOCK_PUT(lock);
1305                 rc = 0;
1306         } else {
1307 relock:
1308                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1309                 mdt_lock_handle_init(lhc);
1310                 if (child_bits == MDS_INODELOCK_LAYOUT)
1311                         mdt_lock_reg_init(lhc, LCK_CR);
1312                 else
1313                         mdt_lock_reg_init(lhc, LCK_PR);
1314
1315                 if (mdt_object_exists(child) == 0) {
1316                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1317                                         &child->mot_obj.mo_lu,
1318                                         "Object doesn't exist!\n");
1319                         GOTO(out_child, rc = -ENOENT);
1320                 }
1321
1322                 if (!(child_bits & MDS_INODELOCK_UPDATE)) {
1323                         struct md_attr *ma = &info->mti_attr;
1324
1325                         ma->ma_valid = 0;
1326                         ma->ma_need = MA_INODE;
1327                         rc = mdt_attr_get_complex(info, child, ma);
1328                         if (unlikely(rc != 0))
1329                                 GOTO(out_child, rc);
1330
1331                         /* layout lock is used only on regular files */
1332                         if ((ma->ma_valid & MA_INODE) &&
1333                             (ma->ma_attr.la_valid & LA_MODE) &&
1334                             !S_ISREG(ma->ma_attr.la_mode))
1335                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1336
1337                         /* If the file has not been changed for some time, we
1338                          * return not only a LOOKUP lock, but also an UPDATE
1339                          * lock, and this might save us an RPC on a later STAT.
1340                          * For directories, it also lets negative dentries
1341                          * start working for this dir. */
1342                         if (ma->ma_valid & MA_INODE &&
1343                             ma->ma_attr.la_valid & LA_CTIME &&
1344                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1345                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1346                                 child_bits |= MDS_INODELOCK_UPDATE;
1347                 }
1348
1349                 rc = mdt_object_lock(info, child, lhc, child_bits,
1350                                      MDT_CROSS_LOCK);
1351
1352                 if (unlikely(rc != 0))
1353                         GOTO(out_child, rc);
1354         }
1355
1356         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1357         /* Get MA_SOM attributes if update lock is given. */
1358         if (lock &&
1359             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1360             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1361                 ma_need = MA_SOM;
1362
1363         /* finally, we can get attr for child. */
1364         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1365         rc = mdt_getattr_internal(info, child, ma_need);
1366         if (unlikely(rc != 0)) {
1367                 mdt_object_unlock(info, child, lhc, 1);
1368         } else if (lock) {
1369                 /* Debugging code. */
1370                 res_id = &lock->l_resource->lr_name;
1371                 LDLM_DEBUG(lock, "Returning lock to client");
1372                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1373                                          &lock->l_resource->lr_name),
1374                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1375                          (unsigned long)res_id->name[0],
1376                          (unsigned long)res_id->name[1],
1377                          (unsigned long)res_id->name[2],
1378                          PFID(mdt_object_fid(child)));
1379                 mdt_pack_size2body(info, child);
1380         }
1381         if (lock)
1382                 LDLM_LOCK_PUT(lock);
1383
1384         EXIT;
1385 out_child:
1386         mdt_object_put(info->mti_env, child);
1387 out_parent:
1388         if (lhp)
1389                 mdt_object_unlock(info, parent, lhp, 1);
1390         return rc;
1391 }
1392
1393 /* normal handler: should release the child lock */
1394 static int mdt_getattr_name(struct mdt_thread_info *info)
1395 {
1396         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1397         struct mdt_body        *reqbody;
1398         struct mdt_body        *repbody;
1399         int rc, rc2;
1400         ENTRY;
1401
1402         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1403         LASSERT(reqbody != NULL);
1404         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1405         LASSERT(repbody != NULL);
1406
1407         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1408         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1409         repbody->eadatasize = 0;
1410         repbody->aclsize = 0;
1411
1412         rc = mdt_init_ucred(info, reqbody);
1413         if (unlikely(rc))
1414                 GOTO(out_shrink, rc);
1415
1416         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1417         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1418                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1419                 lhc->mlh_reg_lh.cookie = 0;
1420         }
1421         mdt_exit_ucred(info);
1422         EXIT;
1423 out_shrink:
1424         mdt_client_compatibility(info);
1425         rc2 = mdt_fix_reply(info);
1426         if (rc == 0)
1427                 rc = rc2;
1428         return rc;
1429 }
1430
1431 static const struct lu_device_operations mdt_lu_ops;
1432
1433 static int lu_device_is_mdt(struct lu_device *d)
1434 {
1435         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1436 }
1437
1438 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1439                          void *karg, void *uarg);
1440
1441 static int mdt_set_info(struct mdt_thread_info *info)
1442 {
1443         struct ptlrpc_request *req = mdt_info_req(info);
1444         char *key;
1445         void *val;
1446         int keylen, vallen, rc = 0;
1447         ENTRY;
1448
1449         rc = req_capsule_server_pack(info->mti_pill);
1450         if (rc)
1451                 RETURN(rc);
1452
1453         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1454         if (key == NULL) {
1455                 DEBUG_REQ(D_HA, req, "no set_info key");
1456                 RETURN(-EFAULT);
1457         }
1458
1459         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1460                                       RCL_CLIENT);
1461
1462         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1463         if (val == NULL) {
1464                 DEBUG_REQ(D_HA, req, "no set_info val");
1465                 RETURN(-EFAULT);
1466         }
1467
1468         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1469                                       RCL_CLIENT);
1470
1471         /* Swab any parts of val that need swabbing here */
1472         if (KEY_IS(KEY_READ_ONLY)) {
1473                 req->rq_status = 0;
1474                 lustre_msg_set_status(req->rq_repmsg, 0);
1475
1476                 cfs_spin_lock(&req->rq_export->exp_lock);
1477                 if (*(__u32 *)val)
1478                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1479                 else
1480                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1481                 cfs_spin_unlock(&req->rq_export->exp_lock);
1482
1483         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1484                 struct changelog_setinfo *cs =
1485                         (struct changelog_setinfo *)val;
1486                 if (vallen != sizeof(*cs)) {
1487                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1488                         RETURN(-EINVAL);
1489                 }
1490                 if (ptlrpc_req_need_swab(req)) {
1491                         __swab64s(&cs->cs_recno);
1492                         __swab32s(&cs->cs_id);
1493                 }
1494
1495                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1496                                    vallen, val, NULL);
1497                 lustre_msg_set_status(req->rq_repmsg, rc);
1498
1499         } else {
1500                 RETURN(-EINVAL);
1501         }
1502         RETURN(0);
1503 }
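
     /*
      * Illustrative example (hypothetical values): a KEY_CHANGELOG_CLEAR
      * request carries a struct changelog_setinfo as its value, e.g.
      *
      *      struct changelog_setinfo cs = {
      *              .cs_recno = 1234,  (clear records up to this index)
      *              .cs_id    = 2,     (changelog user id, e.g. "cl2")
      *      };
      *
      * which mdt_set_info() swabs if necessary and forwards to
      * mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR).
      */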
1504
1505 static int mdt_connect(struct mdt_thread_info *info)
1506 {
1507         int rc;
1508         struct ptlrpc_request *req;
1509
1510         req = mdt_info_req(info);
1511         rc = target_handle_connect(req);
1512         if (rc == 0) {
1513                 LASSERT(req->rq_export != NULL);
1514                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1515                 rc = mdt_init_sec_level(info);
1516                 if (rc == 0)
1517                         rc = mdt_init_idmap(info);
1518                 if (rc != 0)
1519                         obd_disconnect(class_export_get(req->rq_export));
1520         } else {
1521                 rc = err_serious(rc);
1522         }
1523         return rc;
1524 }
1525
1526 static int mdt_disconnect(struct mdt_thread_info *info)
1527 {
1528         int rc;
1529         ENTRY;
1530
1531         rc = target_handle_disconnect(mdt_info_req(info));
1532         if (rc)
1533                 rc = err_serious(rc);
1534         RETURN(rc);
1535 }
1536
1537 static int mdt_sendpage(struct mdt_thread_info *info,
1538                         struct lu_rdpg *rdpg, int nob)
1539 {
1540         struct ptlrpc_request   *req = mdt_info_req(info);
1541         struct obd_export       *exp = req->rq_export;
1542         struct ptlrpc_bulk_desc *desc;
1543         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1544         int                      tmpcount;
1545         int                      tmpsize;
1546         int                      i;
1547         int                      rc;
1548         ENTRY;
1549
1550         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1551                                     MDS_BULK_PORTAL);
1552         if (desc == NULL)
1553                 RETURN(-ENOMEM);
1554
1555         if (!(exp->exp_connect_flags & OBD_CONNECT_BRW_SIZE))
1556                 /* old client requires reply size in its PAGE_SIZE,
1557                  * which is rdpg->rp_count */
1558                 nob = rdpg->rp_count;
1559
1560         for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1561              i++, tmpcount -= tmpsize) {
1562                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1563                 ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
1564         }
1565
1566         LASSERT(desc->bd_nob == nob);
1567         rc = target_bulk_io(exp, desc, lwi);
1568         ptlrpc_free_bulk_pin(desc);
1569         RETURN(rc);
1570 }
1571
1572 #ifdef HAVE_SPLIT_SUPPORT
1573 /*
1574  * Retrieve directory entries from the page and insert them into the slave
1575  * object. This really belongs in the OSD layer, but since it will not be in
1576  * the final product, just do it here rather than define more md APIs for it.
1577  */
1578 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1579                               int size)
1580 {
1581         struct mdt_object *object = info->mti_object;
1582         struct lu_fid *lf = &info->mti_tmp_fid2;
1583         struct md_attr *ma = &info->mti_attr;
1584         struct lu_dirpage *dp;
1585         struct lu_dirent *ent;
1586         int rc = 0, offset = 0;
1587         ENTRY;
1588
1589         /* Make sure we have at least one entry. */
1590         if (size == 0)
1591                 RETURN(-EINVAL);
1592
1593         /*
1594          * Disable separate transactions for these name inserts, since a
1595          * single page may contain many entries.
1596          */
1597         info->mti_no_need_trans = 1;
1598         /*
1599          * When writing a directory page there is no need to update the
1600          * parent's ctime, and no permission check is done for name_insert.
1601          */
1602         ma->ma_attr.la_ctime = 0;
1603         ma->ma_attr.la_valid = LA_MODE;
1604         ma->ma_valid = MA_INODE;
1605
1606         cfs_kmap(page);
1607         dp = page_address(page);
1608         offset = (int)((char *)lu_dirent_start(dp) - (char *)dp);
1609
1610         for (ent = lu_dirent_start(dp); ent != NULL;
1611              ent = lu_dirent_next(ent)) {
1612                 struct lu_name *lname;
1613                 char *name;
1614
1615                 if (le16_to_cpu(ent->lde_namelen) == 0)
1616                         continue;
1617
1618                 fid_le_to_cpu(lf, &ent->lde_fid);
1619                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1620                         ma->ma_attr.la_mode = S_IFDIR;
1621                 else
1622                         ma->ma_attr.la_mode = 0;
1623                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1624                 if (name == NULL)
1625                         GOTO(out, rc = -ENOMEM);
1626
1627                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1628                 lname = mdt_name(info->mti_env, name,
1629                                  le16_to_cpu(ent->lde_namelen));
1630                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1631                 rc = mdo_name_insert(info->mti_env,
1632                                      md_object_next(&object->mot_obj),
1633                                      lname, lf, ma);
1634                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1635                 if (rc) {
1636                         CERROR("Can't insert %*.*s, rc %d\n",
1637                                le16_to_cpu(ent->lde_namelen),
1638                                le16_to_cpu(ent->lde_namelen),
1639                                ent->lde_name, rc);
1640                         GOTO(out, rc);
1641                 }
1642
1643                 offset += lu_dirent_size(ent);
1644                 if (offset >= size)
1645                         break;
1646         }
1647         EXIT;
1648 out:
1649         cfs_kunmap(page);
1650         return rc;
1651 }
1652
1653 static int mdt_bulk_timeout(void *data)
1654 {
1655         ENTRY;
1656
1657         CERROR("mdt bulk transfer timeout\n");
1658
1659         RETURN(1);
1660 }
1661
1662 static int mdt_writepage(struct mdt_thread_info *info)
1663 {
1664         struct ptlrpc_request   *req = mdt_info_req(info);
1665         struct mdt_body         *reqbody;
1666         struct l_wait_info      *lwi;
1667         struct ptlrpc_bulk_desc *desc;
1668         struct page             *page;
1669         int                rc;
1670         ENTRY;
1671
1672
1673         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1674         if (reqbody == NULL)
1675                 RETURN(err_serious(-EFAULT));
1676
1677         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1678         if (desc == NULL)
1679                 RETURN(err_serious(-ENOMEM));
1680
1681         /* allocate the page for the desc */
1682         page = cfs_alloc_page(CFS_ALLOC_STD);
1683         if (page == NULL)
1684                 GOTO(desc_cleanup, rc = -ENOMEM);
1685
1686         CDEBUG(D_INFO, "Received page offset %d size %d\n",
1687                (int)reqbody->size, (int)reqbody->nlink);
1688
1689         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1690                               (int)reqbody->nlink);
1691
1692         rc = sptlrpc_svc_prep_bulk(req, desc);
1693         if (rc != 0)
1694                 GOTO(cleanup_page, rc);
1695         /*
1696          * Check if client was evicted while we were doing i/o before touching
1697          * network.
1698          */
1699         OBD_ALLOC_PTR(lwi);
1700         if (!lwi)
1701                 GOTO(cleanup_page, rc = -ENOMEM);
1702
1703         if (desc->bd_export->exp_failed)
1704                 rc = -ENOTCONN;
1705         else
1706                 rc = ptlrpc_start_bulk_transfer(desc);
1707         if (rc == 0) {
1708                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
1709                                             mdt_bulk_timeout, desc);
1710                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1711                                   desc->bd_export->exp_failed, lwi);
1712                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1713                 if (rc == -ETIMEDOUT) {
1714                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1715                         ptlrpc_abort_bulk(desc);
1716                 } else if (desc->bd_export->exp_failed) {
1717                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1718                         rc = -ENOTCONN;
1719                         ptlrpc_abort_bulk(desc);
1720                 } else if (!desc->bd_success ||
1721                            desc->bd_nob_transferred != desc->bd_nob) {
1722                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1723                                   desc->bd_success ?
1724                                   "truncated" : "network error on",
1725                                   desc->bd_nob_transferred, desc->bd_nob);
1726                         /* XXX should this be a different errno? */
1727                         rc = -ETIMEDOUT;
1728                 }
1729         } else {
1730                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1731         }
1732         if (rc)
1733                 GOTO(cleanup_lwi, rc);
1734         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1735
1736 cleanup_lwi:
1737         OBD_FREE_PTR(lwi);
1738 cleanup_page:
1739         cfs_free_page(page);
1740 desc_cleanup:
1741         ptlrpc_free_bulk_pin(desc);
1742         RETURN(rc);
1743 }
1744 #endif
1745
1746 static int mdt_readpage(struct mdt_thread_info *info)
1747 {
1748         struct mdt_object *object = info->mti_object;
1749         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1750         struct mdt_body   *reqbody;
1751         struct mdt_body   *repbody;
1752         int                rc;
1753         int                i;
1754         ENTRY;
1755
1756         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1757                 RETURN(err_serious(-ENOMEM));
1758
1759         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1760         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1761         if (reqbody == NULL || repbody == NULL)
1762                 RETURN(err_serious(-EFAULT));
1763
1764         /*
1765          * Prepare @rdpg before calling the lower layers and doing the
1766          * transfer itself. Here reqbody->size contains the offset at which
1767          * to start reading and reqbody->nlink the number of bytes to read.
1768          */
1769         rdpg->rp_hash = reqbody->size;
1770         if (rdpg->rp_hash != reqbody->size) {
1771                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1772                        rdpg->rp_hash, reqbody->size);
1773                 RETURN(-EFAULT);
1774         }
1775
1776         rdpg->rp_attrs = reqbody->mode;
1777         if (info->mti_exp->exp_connect_flags & OBD_CONNECT_64BITHASH)
1778                 rdpg->rp_attrs |= LUDA_64BITHASH;
1779         rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
1780                                 PTLRPC_MAX_BRW_SIZE);
1781         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
1782                           CFS_PAGE_SHIFT;
1783         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1784         if (rdpg->rp_pages == NULL)
1785                 RETURN(-ENOMEM);
1786
1787         for (i = 0; i < rdpg->rp_npages; ++i) {
1788                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1789                 if (rdpg->rp_pages[i] == NULL)
1790                         GOTO(free_rdpg, rc = -ENOMEM);
1791         }
1792
1793         /* call lower layers to fill allocated pages with directory data */
1794         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1795         if (rc < 0)
1796                 GOTO(free_rdpg, rc);
1797
1798         /* send pages to client */
1799         rc = mdt_sendpage(info, rdpg, rc);
1800
1801         EXIT;
1802 free_rdpg:
1803
1804         for (i = 0; i < rdpg->rp_npages; i++)
1805                 if (rdpg->rp_pages[i] != NULL)
1806                         cfs_free_page(rdpg->rp_pages[i]);
1807         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1808
1809         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1810                 RETURN(0);
1811
1812         return rc;
1813 }
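
     /*
      * Illustrative arithmetic (assuming 4KB pages and
      * PTLRPC_MAX_BRW_SIZE >= 64KB): a readpage request with
      * reqbody->nlink = 65536 gives
      *
      *      rp_count  = min(65536, PTLRPC_MAX_BRW_SIZE) = 65536
      *      rp_npages = (65536 + 4096 - 1) >> 12        = 16
      *
      * so sixteen pages are allocated, filled by mo_readpage() and sent back
      * with mdt_sendpage().
      */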
1814
1815 static int mdt_reint_internal(struct mdt_thread_info *info,
1816                               struct mdt_lock_handle *lhc,
1817                               __u32 op)
1818 {
1819         struct req_capsule      *pill = info->mti_pill;
1820         struct mdt_body         *repbody;
1821         int                      rc = 0, rc2;
1822         ENTRY;
1823
1824
1825         rc = mdt_reint_unpack(info, op);
1826         if (rc != 0) {
1827                 CERROR("Can't unpack reint, rc %d\n", rc);
1828                 RETURN(err_serious(rc));
1829         }
1830
1831         /* for replay (no_create) lmm is not needed, client has it already */
1832         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1833                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1834                                      info->mti_rr.rr_eadatalen);
1835
1836         /* llog cookies are always 0, the field is kept for compatibility */
1837         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1838                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);
1839
1840         rc = req_capsule_server_pack(pill);
1841         if (rc != 0) {
1842                 CERROR("Can't pack response, rc %d\n", rc);
1843                 RETURN(err_serious(rc));
1844         }
1845
1846         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1847                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1848                 LASSERT(repbody);
1849                 repbody->eadatasize = 0;
1850                 repbody->aclsize = 0;
1851         }
1852
1853         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1854
1855         /* for replay no cookie / lmm is needed, the client already has them */
1856         if (info->mti_spec.no_create)
1857                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1858                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1859
1860         rc = mdt_init_ucred_reint(info);
1861         if (rc)
1862                 GOTO(out_shrink, rc);
1863
1864         rc = mdt_fix_attr_ucred(info, op);
1865         if (rc != 0)
1866                 GOTO(out_ucred, rc = err_serious(rc));
1867
1868         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1869                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1870                 GOTO(out_ucred, rc);
1871         }
1872         rc = mdt_reint_rec(info, lhc);
1873         EXIT;
1874 out_ucred:
1875         mdt_exit_ucred(info);
1876 out_shrink:
1877         mdt_client_compatibility(info);
1878         rc2 = mdt_fix_reply(info);
1879         if (rc == 0)
1880                 rc = rc2;
1881         return rc;
1882 }
1883
1884 static long mdt_reint_opcode(struct mdt_thread_info *info,
1885                              const struct req_format **fmt)
1886 {
1887         struct mdt_rec_reint *rec;
1888         long opc;
1889
1890         opc = err_serious(-EFAULT);
1891         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1892         if (rec != NULL) {
1893                 opc = rec->rr_opcode;
1894                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1895                 if (opc < REINT_MAX && fmt[opc] != NULL)
1896                         req_capsule_extend(info->mti_pill, fmt[opc]);
1897                 else {
1898                         CERROR("Unsupported opc: %ld\n", opc);
1899                         opc = err_serious(opc);
1900                 }
1901         }
1902         return opc;
1903 }
1904
1905 static int mdt_reint(struct mdt_thread_info *info)
1906 {
1907         long opc;
1908         int  rc;
1909
1910         static const struct req_format *reint_fmts[REINT_MAX] = {
1911                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1912                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1913                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1914                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1915                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1916                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1917                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1918         };
1919
1920         ENTRY;
1921
1922         opc = mdt_reint_opcode(info, reint_fmts);
1923         if (opc >= 0) {
1924                 /*
1925                  * No lock possible here from client to pass it to reint code
1926                  * path.
1927                  */
1928                 rc = mdt_reint_internal(info, NULL, opc);
1929         } else {
1930                 rc = opc;
1931         }
1932
1933         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1934         RETURN(rc);
1935 }
1936
1937 /* this should sync the whole device */
1938 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1939 {
1940         struct dt_device *dt = mdt->mdt_bottom;
1941         int rc;
1942         ENTRY;
1943
1944         rc = dt->dd_ops->dt_sync(env, dt);
1945         RETURN(rc);
1946 }
1947
1948 /* this should sync this object */
1949 static int mdt_object_sync(struct mdt_thread_info *info)
1950 {
1951         struct md_object *next;
1952         int rc;
1953         ENTRY;
1954
1955         if (!mdt_object_exists(info->mti_object)) {
1956                 CWARN("Non-existent object "DFID"!\n",
1957                       PFID(mdt_object_fid(info->mti_object)));
1958                 RETURN(-ESTALE);
1959         }
1960         next = mdt_object_child(info->mti_object);
1961         rc = mo_object_sync(info->mti_env, next);
1962
1963         RETURN(rc);
1964 }
1965
1966 static int mdt_sync(struct mdt_thread_info *info)
1967 {
1968         struct ptlrpc_request *req = mdt_info_req(info);
1969         struct req_capsule *pill = info->mti_pill;
1970         struct mdt_body *body;
1971         int rc;
1972         ENTRY;
1973
1974         /* The fid may be zero, so we req_capsule_set manually */
1975         req_capsule_set(pill, &RQF_MDS_SYNC);
1976
1977         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1978         if (body == NULL)
1979                 RETURN(err_serious(-EINVAL));
1980
1981         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1982                 RETURN(err_serious(-ENOMEM));
1983
1984         if (fid_seq(&body->fid1) == 0) {
1985                 /* sync the whole device */
1986                 rc = req_capsule_server_pack(pill);
1987                 if (rc == 0)
1988                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1989                 else
1990                         rc = err_serious(rc);
1991         } else {
1992                 /* sync an object */
1993                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1994                 if (rc == 0) {
1995                         rc = mdt_object_sync(info);
1996                         if (rc == 0) {
1997                                 const struct lu_fid *fid;
1998                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1999
2000                                 info->mti_attr.ma_need = MA_INODE;
2001                                 info->mti_attr.ma_valid = 0;
2002                                 rc = mdt_attr_get_complex(info, info->mti_object,
2003                                                           &info->mti_attr);
2004                                 if (rc == 0) {
2005                                         body = req_capsule_server_get(pill,
2006                                                                 &RMF_MDT_BODY);
2007                                         fid = mdt_object_fid(info->mti_object);
2008                                         mdt_pack_attr2body(info, body, la, fid);
2009                                 }
2010                         }
2011                 } else
2012                         rc = err_serious(rc);
2013         }
2014         if (rc == 0)
2015                 mdt_counter_incr(req, LPROC_MDT_SYNC);
2016
2017         RETURN(rc);
2018 }
2019
2020 /*
2021  * Quotacheck handler.
2022  * in-kernel quotacheck isn't supported any more.
2023  */
2024 static int mdt_quotacheck(struct mdt_thread_info *info)
2025 {
2026         struct obd_quotactl     *oqctl;
2027         int                      rc;
2028         ENTRY;
2029
2030         oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
2031         if (oqctl == NULL)
2032                 RETURN(err_serious(-EPROTO));
2033
2034         rc = req_capsule_server_pack(info->mti_pill);
2035         if (rc)
2036                 RETURN(err_serious(rc));
2037
2038         /* deprecated, not used any more */
2039         RETURN(-EOPNOTSUPP);
2040 }
2041
2042 /*
2043  * Handle quota control requests to consult current usage/limit, but also
2044  * to configure quota enforcement
2045  */
2046 static int mdt_quotactl(struct mdt_thread_info *info)
2047 {
2048         struct obd_export       *exp  = info->mti_exp;
2049         struct req_capsule      *pill = info->mti_pill;
2050         struct obd_quotactl     *oqctl, *repoqc;
2051         int                      id, rc;
2052         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2053         ENTRY;
2054
2055         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
2056         if (oqctl == NULL)
2057                 RETURN(err_serious(-EPROTO));
2058
2059         rc = req_capsule_server_pack(pill);
2060         if (rc)
2061                 RETURN(err_serious(rc));
2062
2063         switch (oqctl->qc_cmd) {
2064         case Q_QUOTACHECK:
2065         case LUSTRE_Q_INVALIDATE:
2066         case LUSTRE_Q_FINVALIDATE:
2067         case Q_QUOTAON:
2068         case Q_QUOTAOFF:
2069         case Q_INITQUOTA:
2070                 /* deprecated, not used any more */
2071                 RETURN(-EOPNOTSUPP);
2072                 /* master quotactl */
2073         case Q_GETINFO:
2074         case Q_SETINFO:
2075         case Q_SETQUOTA:
2076         case Q_GETQUOTA:
2077                 if (qmt == NULL)
2078                         RETURN(-EOPNOTSUPP);
2079                 /* slave quotactl */
2080         case Q_GETOINFO:
2081         case Q_GETOQUOTA:
2082                 break;
2083         default:
2084                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
2085                 RETURN(-EFAULT);
2086         }
2087
2088         /* map uid/gid for remote client */
2089         id = oqctl->qc_id;
2090         if (exp_connect_rmtclient(exp)) {
2091                 struct lustre_idmap_table *idmap;
2092
2093                 idmap = mdt_req2med(mdt_info_req(info))->med_idmap;
2094
2095                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
2096                              oqctl->qc_cmd != Q_GETINFO))
2097                         RETURN(-EPERM);
2098
2099                 if (oqctl->qc_type == USRQUOTA)
2100                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
2101                                                      oqctl->qc_id);
2102                 else if (oqctl->qc_type == GRPQUOTA)
2103                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
2104                                                      oqctl->qc_id);
2105                 else
2106                         RETURN(-EINVAL);
2107
2108                 if (id == CFS_IDMAP_NOTFOUND) {
2109                         CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
2110                         RETURN(-EACCES);
2111                 }
2112         }
2113
2114         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
2115         if (repoqc == NULL)
2116                 RETURN(err_serious(-EFAULT));
2117
2118         if (oqctl->qc_id != id)
2119                 swap(oqctl->qc_id, id);
2120
2121         switch (oqctl->qc_cmd) {
2122
2123         case Q_GETINFO:
2124         case Q_SETINFO:
2125         case Q_SETQUOTA:
2126         case Q_GETQUOTA:
2127                 /* forward quotactl request to QMT */
2128                 rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
2129                 break;
2130
2131         case Q_GETOINFO:
2132         case Q_GETOQUOTA:
2133                 /* slave quotactl */
2134                 rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
2135                                    oqctl);
2136                 break;
2137
2138         default:
2139                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
2140                 RETURN(-EFAULT);
2141         }
2142
2143         if (oqctl->qc_id != id)
2144                 swap(oqctl->qc_id, id);
2145
2146         *repoqc = *oqctl;
2147         RETURN(rc);
2148 }
2149
2150 /*
2151  * OBD PING and other handlers.
2152  */
2153 static int mdt_obd_ping(struct mdt_thread_info *info)
2154 {
2155         int rc;
2156         ENTRY;
2157
2158         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2159
2160         rc = target_handle_ping(mdt_info_req(info));
2161         if (rc < 0)
2162                 rc = err_serious(rc);
2163         RETURN(rc);
2164 }
2165
2166 /*
2167  * OBD_IDX_READ handler
2168  */
2169 static int mdt_obd_idx_read(struct mdt_thread_info *info)
2170 {
2171         struct mdt_device       *mdt = info->mti_mdt;
2172         struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
2173         struct idx_info         *req_ii, *rep_ii;
2174         int                      rc, i;
2175         ENTRY;
2176
2177         memset(rdpg, 0, sizeof(*rdpg));
2178         req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);
2179
2180         /* extract idx_info buffer from request & reply */
2181         req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
2182         if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
2183                 RETURN(err_serious(-EPROTO));
2184
2185         rc = req_capsule_server_pack(info->mti_pill);
2186         if (rc)
2187                 RETURN(err_serious(rc));
2188
2189         rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
2190         if (rep_ii == NULL)
2191                 RETURN(err_serious(-EFAULT));
2192         rep_ii->ii_magic = IDX_INFO_MAGIC;
2193
2194         /* extract hash to start with */
2195         rdpg->rp_hash = req_ii->ii_hash_start;
2196
2197         /* extract requested attributes */
2198         rdpg->rp_attrs = req_ii->ii_attrs;
2199
2200         /* check that fid packed in request is valid and supported */
2201         if (!fid_is_sane(&req_ii->ii_fid))
2202                 RETURN(-EINVAL);
2203         rep_ii->ii_fid = req_ii->ii_fid;
2204
2205         /* copy flags */
2206         rep_ii->ii_flags = req_ii->ii_flags;
2207
2208         /* compute number of pages to allocate, ii_count is the number of 4KB
2209          * containers */
2210         if (req_ii->ii_count <= 0)
2211                 GOTO(out, rc = -EFAULT);
2212         rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
2213                                PTLRPC_MAX_BRW_SIZE);
2214         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT;
2215
2216         /* allocate pages to store the containers */
2217         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2218         if (rdpg->rp_pages == NULL)
2219                 GOTO(out, rc = -ENOMEM);
2220         for (i = 0; i < rdpg->rp_npages; i++) {
2221                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
2222                 if (rdpg->rp_pages[i] == NULL)
2223                         GOTO(out, rc = -ENOMEM);
2224         }
2225
2226         /* populate pages with key/record pairs */
2227         rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
2228         if (rc < 0)
2229                 GOTO(out, rc);
2230
2231         LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
2232                  "asked %d > %d\n", rc, rdpg->rp_count);
2233
2234         /* send pages to client */
2235         rc = mdt_sendpage(info, rdpg, rc);
2236
2237         GOTO(out, rc);
2238 out:
2239         if (rdpg->rp_pages) {
2240                 for (i = 0; i < rdpg->rp_npages; i++)
2241                         if (rdpg->rp_pages[i])
2242                                 cfs_free_page(rdpg->rp_pages[i]);
2243                 OBD_FREE(rdpg->rp_pages,
2244                          rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2245         }
2246         return rc;
2247 }
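
     /*
      * Illustrative request layout (hypothetical client-side values; the
      * field names are the ones consumed by mdt_obd_idx_read() above): a
      * request reading eight 4KB containers of an index object from the
      * beginning would carry
      *
      *      struct idx_info ii = {
      *              .ii_magic      = IDX_INFO_MAGIC,
      *              .ii_fid        = fid,  (fid of the index object)
      *              .ii_hash_start = 0,
      *              .ii_attrs      = 0,
      *              .ii_flags      = 0,
      *              .ii_count      = 8,    (number of 4KB containers)
      *      };
      *
      * The handler then allocates rdpg->rp_npages pages, fills them through
      * dt_index_read() and bulk-sends them with mdt_sendpage().
      */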
2248
2249 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
2250 {
2251         return err_serious(-EOPNOTSUPP);
2252 }
2253
2254 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
2255 {
2256         return err_serious(-EOPNOTSUPP);
2257 }
2258
2259
2260 /*
2261  * LLOG handlers.
2262  */
2263
2264 /** clone llog ctxt from child (mdd)
2265  * This allows remote llog (replicator) access.
2266  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child,
2267  * where the context was originally set up, or we can handle them directly.
2268  * We choose the latter, but that means any llog contexts set up by the child
2269  * must be accessible by the mdt, so we clone the context into our context
2270  * list here.
2271  */
2272 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2273                                int idx)
2274 {
2275         struct md_device  *next = mdt->mdt_child;
2276         struct llog_ctxt *ctxt;
2277         int rc;
2278
2279         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2280                 return 0;
2281
2282         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2283         if (rc || ctxt == NULL) {
2284                 return 0;
2285         }
2286
2287         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2288         if (rc)
2289                 CERROR("Can't set mdt ctxt %d\n", rc);
2290
2291         return rc;
2292 }
2293
2294 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2295                                  struct mdt_device *mdt, int idx)
2296 {
2297         struct llog_ctxt *ctxt;
2298
2299         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2300         if (ctxt == NULL)
2301                 return 0;
2302         /* Put once for the get we just did, and once for the clone */
2303         llog_ctxt_put(ctxt);
2304         llog_ctxt_put(ctxt);
2305         return 0;
2306 }
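
     /*
      * Usage sketch (hypothetical call sites; the actual setup and cleanup
      * paths are outside this excerpt): the clone/unclone helpers are meant
      * to be paired during mdt start-up and teardown, e.g.
      *
      *      rc = mdt_llog_ctxt_clone(env, mdt, LLOG_CHANGELOG_ORIG_CTXT);
      *      if (rc)
      *              return rc;
      *      ...
      *      mdt_llog_ctxt_unclone(env, mdt, LLOG_CHANGELOG_ORIG_CTXT);
      *
      * so that llog RPCs arriving at the mdt find the child's context in the
      * mdt's own context list.
      */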
2307
2308 static int mdt_llog_create(struct mdt_thread_info *info)
2309 {
2310         int rc;
2311
2312         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2313         rc = llog_origin_handle_open(mdt_info_req(info));
2314         return (rc < 0 ? err_serious(rc) : rc);
2315 }
2316
2317 static int mdt_llog_destroy(struct mdt_thread_info *info)
2318 {
2319         int rc;
2320
2321         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2322         rc = llog_origin_handle_destroy(mdt_info_req(info));
2323         return (rc < 0 ? err_serious(rc) : rc);
2324 }
2325
2326 static int mdt_llog_read_header(struct mdt_thread_info *info)
2327 {
2328         int rc;
2329
2330         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2331         rc = llog_origin_handle_read_header(mdt_info_req(info));
2332         return (rc < 0 ? err_serious(rc) : rc);
2333 }
2334
2335 static int mdt_llog_next_block(struct mdt_thread_info *info)
2336 {
2337         int rc;
2338
2339         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2340         rc = llog_origin_handle_next_block(mdt_info_req(info));
2341         return (rc < 0 ? err_serious(rc) : rc);
2342 }
2343
2344 static int mdt_llog_prev_block(struct mdt_thread_info *info)
2345 {
2346         int rc;
2347
2348         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2349         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2350         return (rc < 0 ? err_serious(rc) : rc);
2351 }
2352
2353
2354 /*
2355  * DLM handlers.
2356  */
2357 static struct ldlm_callback_suite cbs = {
2358         .lcs_completion = ldlm_server_completion_ast,
2359         .lcs_blocking   = ldlm_server_blocking_ast,
2360         .lcs_glimpse    = ldlm_server_glimpse_ast
2361 };
2362
2363 static int mdt_enqueue(struct mdt_thread_info *info)
2364 {
2365         struct ptlrpc_request *req;
2366         int rc;
2367
2368         /*
2369          * info->mti_dlm_req already contains swapped and (if necessary)
2370          * converted dlm request.
2371          */
2372         LASSERT(info->mti_dlm_req != NULL);
2373
2374         req = mdt_info_req(info);
2375         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2376                                   req, info->mti_dlm_req, &cbs);
2377         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2378         return rc ? err_serious(rc) : req->rq_status;
2379 }
2380
2381 static int mdt_convert(struct mdt_thread_info *info)
2382 {
2383         int rc;
2384         struct ptlrpc_request *req;
2385
2386         LASSERT(info->mti_dlm_req);
2387         req = mdt_info_req(info);
2388         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2389         return rc ? err_serious(rc) : req->rq_status;
2390 }
2391
2392 static int mdt_bl_callback(struct mdt_thread_info *info)
2393 {
2394         CERROR("bl callbacks should not happen on MDS\n");
2395         LBUG();
2396         return err_serious(-EOPNOTSUPP);
2397 }
2398
2399 static int mdt_cp_callback(struct mdt_thread_info *info)
2400 {
2401         CERROR("cp callbacks should not happen on MDS\n");
2402         LBUG();
2403         return err_serious(-EOPNOTSUPP);
2404 }
2405
2406 /*
2407  * sec context handlers
2408  */
2409 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2410 {
2411         int rc;
2412
2413         rc = mdt_handle_idmap(info);
2414
2415         if (unlikely(rc)) {
2416                 struct ptlrpc_request *req = mdt_info_req(info);
2417                 __u32                  opc;
2418
2419                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2420                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2421                         sptlrpc_svc_ctx_invalidate(req);
2422         }
2423
2424         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2425
2426         return rc;
2427 }
2428
2429 /*
2430  * quota request handlers
2431  */
2432 static int mdt_quota_dqacq(struct mdt_thread_info *info)
2433 {
2434         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2435         int                      rc;
2436         ENTRY;
2437
2438         if (qmt == NULL)
2439                 RETURN(err_serious(-EOPNOTSUPP));
2440
2441         rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2442         RETURN(rc);
2443 }
2444
2445 static struct mdt_object *mdt_obj(struct lu_object *o)
2446 {
2447         LASSERT(lu_device_is_mdt(o->lo_dev));
2448         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2449 }
2450
2451 struct mdt_object *mdt_object_new(const struct lu_env *env,
2452                                   struct mdt_device *d,
2453                                   const struct lu_fid *f)
2454 {
2455         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2456         struct lu_object *o;
2457         struct mdt_object *m;
2458         ENTRY;
2459
2460         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2461         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2462         if (unlikely(IS_ERR(o)))
2463                 m = (struct mdt_object *)o;
2464         else
2465                 m = mdt_obj(o);
2466         RETURN(m);
2467 }
2468
2469 struct mdt_object *mdt_object_find(const struct lu_env *env,
2470                                    struct mdt_device *d,
2471                                    const struct lu_fid *f)
2472 {
2473         struct lu_object *o;
2474         struct mdt_object *m;
2475         ENTRY;
2476
2477         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2478         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2479         if (unlikely(IS_ERR(o)))
2480                 m = (struct mdt_object *)o;
2481         else
2482                 m = mdt_obj(o);
2483         RETURN(m);
2484 }
2485
2486 /**
2487  * Asynchronous commit for mdt device.
2488  *
2489  * Pass the asynchronous commit call down the MDS stack.
2490  *
2491  * \param env environment
2492  * \param mdt the mdt device
2493  */
2494 static void mdt_device_commit_async(const struct lu_env *env,
2495                                     struct mdt_device *mdt)
2496 {
2497         struct dt_device *dt = mdt->mdt_bottom;
2498         int rc;
2499
2500         rc = dt->dd_ops->dt_commit_async(env, dt);
2501         if (unlikely(rc != 0))
2502                 CWARN("async commit start failed with rc = %d\n", rc);
2503 }
2504
2505 /**
2506  * Mark the lock as "synchronous".
2507  *
2508  * Mark the lock to defer transaction commit to the unlock time.
2509  *
2510  * \param lock the lock to mark as "synchronous"
2511  *
2512  * \see mdt_is_lock_sync
2513  * \see mdt_save_lock
2514  */
2515 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2516 {
2517         lock->l_ast_data = (void*)1;
2518 }
2519
2520 /**
2521  * Check whether the lock is "synchronous" or not.
2522  *
2523  * \param lock the lock to check
2524  * \retval 1 the lock is "synchronous"
2525  * \retval 0 the lock isn't "synchronous"
2526  *
2527  * \see mdt_set_lock_sync
2528  * \see mdt_save_lock
2529  */
2530 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2531 {
2532         return lock->l_ast_data != NULL;
2533 }
2534
2535 /**
2536  * Blocking AST for mdt locks.
2537  *
2538  * Starts transaction commit if in case of COS lock conflict or
2539  * Starts a transaction commit in case of a COS lock conflict, or
2540  * defers such a commit to mdt_save_lock().
2541  * \param lock the lock which blocks a request or cancelling lock
2542  * \param desc unused
2543  * \param data unused
2544  * \param flag indicates whether this cancelling or blocking callback
2545  * \param flag indicates whether this is a cancelling or blocking callback
2546  * \see ldlm_blocking_ast_nocheck
2547  */
2548 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2549                      void *data, int flag)
2550 {
2551         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2552         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2553         int rc;
2554         ENTRY;
2555
2556         if (flag == LDLM_CB_CANCELING)
2557                 RETURN(0);
2558         lock_res_and_lock(lock);
2559         if (lock->l_blocking_ast != mdt_blocking_ast) {
2560                 unlock_res_and_lock(lock);
2561                 RETURN(0);
2562         }
2563         if (mdt_cos_is_enabled(mdt) &&
2564             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2565             lock->l_blocking_lock != NULL &&
2566             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2567                 mdt_set_lock_sync(lock);
2568         }
2569         rc = ldlm_blocking_ast_nocheck(lock);
2570
2571         /* There is no lock conflict if l_blocking_lock == NULL,
2572          * it indicates a blocking ast sent from ldlm_lock_decref_internal
2573          * when the last reference to a local lock was released */
2574         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2575                 struct lu_env env;
2576
2577                 rc = lu_env_init(&env, LCT_LOCAL);
2578                 if (unlikely(rc != 0))
2579                         CWARN("lu_env initialization failed with rc = %d, "
2580                               "cannot start asynchronous commit\n", rc);
2581                 else
2582                         mdt_device_commit_async(&env, mdt);
2583                 lu_env_fini(&env);
2584         }
2585         RETURN(rc);
2586 }
2587
2588 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2589                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2590 {
2591         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2592         ldlm_policy_data_t *policy = &info->mti_policy;
2593         struct ldlm_res_id *res_id = &info->mti_res_id;
2594         int rc;
2595         ENTRY;
2596
2597         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2598         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2599         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2600         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2601
2602         if (mdt_object_exists(o) < 0) {
2603                 if (locality == MDT_CROSS_LOCK) {
2604                         /* cross-ref object fix */
2605                         ibits &= ~MDS_INODELOCK_UPDATE;
2606                         ibits |= MDS_INODELOCK_LOOKUP;
2607                 } else {
2608                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2609                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2610                 }
2611                 /* No PDO lock on remote object */
2612                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2613         }
2614
2615         if (lh->mlh_type == MDT_PDO_LOCK) {
2616                 /* check for existence after the object is locked */
2617                 if (mdt_object_exists(o) == 0) {
2618                         /* Non-existent object shouldn't have PDO lock */
2619                         RETURN(-ESTALE);
2620                 } else {
2621                         /* Non-dir object shouldn't have PDO lock */
2622                         if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
2623                                 RETURN(-ENOTDIR);
2624                 }
2625         }
2626
2627         memset(policy, 0, sizeof(*policy));
2628         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2629
2630         /*
2631          * Take PDO lock on whole directory and build correct @res_id for lock
2632          * on part of directory.
2633          */
2634         if (lh->mlh_pdo_hash != 0) {
2635                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2636                 mdt_lock_pdo_mode(info, o, lh);
2637                 if (lh->mlh_pdo_mode != LCK_NL) {
2638                         /*
2639                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2640                          * is never going to be sent to client and we do not
2641                          * want it slowed down due to possible cancels.
2642                          */
2643                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2644                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2645                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2646                                           &info->mti_exp->exp_handle.h_cookie);
2647                         if (unlikely(rc))
2648                                 RETURN(rc);
2649                 }
2650
2651                 /*
2652                  * Finish res_id initialization by adding the name hash,
2653                  * marking the part of the directory being modified.
2654                  */
2655                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2656         }
2657
2658         policy->l_inodebits.bits = ibits;
2659
2660         /*
2661          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2662          * going to be sent to the client. If it is, the mdt_intent_policy()
2663          * path will fix it up and turn the FL_LOCAL flag off.
2664          */
2665         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2666                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2667                           &info->mti_exp->exp_handle.h_cookie);
2668         if (rc)
2669                 mdt_object_unlock(info, o, lh, 1);
2670         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2671                  lh->mlh_pdo_hash != 0 &&
2672                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2673                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2674         }
2675
2676         RETURN(rc);
2677 }
2678
2679 /**
2680  * Save a lock within request object.
2681  *
2682  * Keep the lock referenced until whether client ACK or transaction
2683  * Keep the lock referenced until either the client ACK or the transaction
2684  * commit happens, or release the lock immediately, depending on the input
2685  * parameters. If COS is on, a write lock is converted to a COS lock
2686  * before saving.
2687  *
2688  * \param info thread info object
2689  * \param mode lock mode
2690  * \param decref force immediate lock releasing
2691  */
2692 static
2693 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2694                    ldlm_mode_t mode, int decref)
2695 {
2696         ENTRY;
2697
2698         if (lustre_handle_is_used(h)) {
2699                 if (decref || !info->mti_has_trans ||
2700                     !(mode & (LCK_PW | LCK_EX))) {
2701                         mdt_fid_unlock(h, mode);
2702                 } else {
2703                         struct mdt_device *mdt = info->mti_mdt;
2704                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2705                         struct ptlrpc_request *req = mdt_info_req(info);
2706                         int no_ack = 0;
2707
2708                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2709                                  h->cookie);
2710                         CDEBUG(D_HA, "request = %p reply state = %p"
2711                                " transno = "LPD64"\n",
2712                                req, req->rq_reply_state, req->rq_transno);
2713                         if (mdt_cos_is_enabled(mdt)) {
2714                                 no_ack = 1;
2715                                 ldlm_lock_downgrade(lock, LCK_COS);
2716                                 mode = LCK_COS;
2717                         }
2718                         ptlrpc_save_lock(req, h, mode, no_ack);
2719                         if (mdt_is_lock_sync(lock)) {
2720                                 CDEBUG(D_HA, "found sync-lock,"
2721                                        " async commit started\n");
2722                                 mdt_device_commit_async(info->mti_env,
2723                                                         mdt);
2724                         }
2725                         LDLM_LOCK_PUT(lock);
2726                 }
2727                 h->cookie = 0ull;
2728         }
2729
2730         EXIT;
2731 }
2732
2733 /**
2734  * Unlock mdt object.
2735  * Immediately release the regular lock and the PDO lock, or save the
2736  * locks in the request and keep them referenced until client ACK or
2737  * transaction commit.
2738  * transaction commit.
2739  *
2740  * \param info thread info object
2741  * \param o mdt object
2742  * \param lh mdt lock handle referencing regular and PDO locks
2743  * \param decref force immediate lock releasing
2744  */
2745 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2746                        struct mdt_lock_handle *lh, int decref)
2747 {
2748         ENTRY;
2749
2750         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2751         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2752
2753         EXIT;
2754 }
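
     /*
      * Usage sketch (illustrative only; update_object() is a hypothetical
      * helper): a typical handler takes the lock, performs the modification
      * and unlocks, letting mdt_save_lock() decide whether to release the
      * lock immediately or keep it until commit/ACK:
      *
      *      struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_CHILD];
      *
      *      mdt_lock_reg_init(lh, LCK_PW);
      *      rc = mdt_object_lock(info, obj, lh, MDS_INODELOCK_UPDATE,
      *                           MDT_LOCAL_LOCK);
      *      if (rc == 0) {
      *              rc = update_object(info, obj);
      *              mdt_object_unlock(info, obj, lh, rc != 0);
      *      }
      */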
2755
2756 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2757                                         const struct lu_fid *f,
2758                                         struct mdt_lock_handle *lh,
2759                                         __u64 ibits)
2760 {
2761         struct mdt_object *o;
2762
2763         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2764         if (!IS_ERR(o)) {
2765                 int rc;
2766
2767                 rc = mdt_object_lock(info, o, lh, ibits,
2768                                      MDT_LOCAL_LOCK);
2769                 if (rc != 0) {
2770                         mdt_object_put(info->mti_env, o);
2771                         o = ERR_PTR(rc);
2772                 }
2773         }
2774         return o;
2775 }
2776
2777 void mdt_object_unlock_put(struct mdt_thread_info *info,
2778                            struct mdt_object *o,
2779                            struct mdt_lock_handle *lh,
2780                            int decref)
2781 {
2782         mdt_object_unlock(info, o, lh, decref);
2783         mdt_object_put(info->mti_env, o);
2784 }
2785
2786 static struct mdt_handler *mdt_handler_find(__u32 opc,
2787                                             struct mdt_opc_slice *supported)
2788 {
2789         struct mdt_opc_slice *s;
2790         struct mdt_handler   *h;
2791
2792         h = NULL;
2793         for (s = supported; s->mos_hs != NULL; s++) {
2794                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2795                         h = s->mos_hs + (opc - s->mos_opc_start);
2796                         if (likely(h->mh_opc != 0))
2797                                 LASSERTF(h->mh_opc == opc,
2798                                          "opcode mismatch %d != %d\n",
2799                                          h->mh_opc, opc);
2800                         else
2801                                 h = NULL; /* unsupported opc */
2802                         break;
2803                 }
2804         }
2805         return h;
2806 }
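
     /*
      * Lookup sketch (hypothetical slice table; the field names are those
      * used by mdt_handler_find() above): given
      *
      *      static struct mdt_opc_slice example_slices[] = {
      *              {
      *                      .mos_opc_start = MDS_GETATTR,
      *                      .mos_opc_end   = MDS_LAST_OPC,
      *                      .mos_hs        = example_handlers,
      *              },
      *              { .mos_hs = NULL }
      *      };
      *
      * mdt_handler_find(opc, example_slices) returns
      * &example_handlers[opc - MDS_GETATTR] when opc falls inside the range
      * and the entry's mh_opc matches, and NULL for unsupported opcodes.
      */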
2807
2808 static int mdt_lock_resname_compat(struct mdt_device *m,
2809                                    struct ldlm_request *req)
2810 {
2811         /* XXX something... later. */
2812         return 0;
2813 }
2814
2815 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2816 {
2817         /* XXX something... later. */
2818         return 0;
2819 }
2820
2821 /*
2822  * Generic code handling requests that have struct mdt_body passed in:
2823  *
2824  *  - extract mdt_body from request and save it in @info, if present;
2825  *
2826  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2827  *  @info;
2828  *
2829  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2830  *  the object actually exists on storage (lu_object_exists()).
2831  *
2832  */
2833 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2834 {
2835         const struct mdt_body    *body;
2836         struct mdt_object        *obj;
2837         const struct lu_env      *env;
2838         struct req_capsule       *pill;
2839         int                       rc;
2840         ENTRY;
2841
2842         env = info->mti_env;
2843         pill = info->mti_pill;
2844
2845         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2846         if (body == NULL)
2847                 RETURN(-EFAULT);
2848
2849         if (!(body->valid & OBD_MD_FLID))
2850                 RETURN(0);
2851
2852         if (!fid_is_sane(&body->fid1)) {
2853                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2854                 RETURN(-EINVAL);
2855         }
2856
2857         /*
2858          * Do not fetch the size or any capability fields before checking
2859          * that the request actually contains a capability. Some requests,
2860          * for instance MDS_IS_SUBDIR, do not.
2861          */
2862         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2863             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2864                 mdt_set_capainfo(info, 0, &body->fid1,
2865                                  req_capsule_client_get(pill, &RMF_CAPA1));
2866
2867         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2868         if (!IS_ERR(obj)) {
2869                 if ((flags & HABEO_CORPUS) &&
2870                     !mdt_object_exists(obj)) {
2871                         mdt_object_put(env, obj);
2872                         /* for capability renewal, ENOENT will be handled
2873                          * in mdt_renew_capa */
2874                         if (body->valid & OBD_MD_FLOSSCAPA)
2875                                 rc = 0;
2876                         else
2877                                 rc = -ENOENT;
2878                 } else {
2879                         info->mti_object = obj;
2880                         rc = 0;
2881                 }
2882         } else
2883                 rc = PTR_ERR(obj);
2884
2885         RETURN(rc);
2886 }
2887
2888 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2889 {
2890         struct req_capsule *pill = info->mti_pill;
2891         int rc;
2892         ENTRY;
2893
2894         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2895                 rc = mdt_body_unpack(info, flags);
2896         else
2897                 rc = 0;
2898
2899         if (rc == 0 && (flags & HABEO_REFERO)) {
2900                 /* Pack reply. */
2901                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2902                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2903                                              info->mti_body->eadatasize);
2904                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2905                         req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2906                                              RCL_SERVER, 0);
2907
2908                 rc = req_capsule_server_pack(pill);
2909         }
2910         RETURN(rc);
2911 }
2912
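/**
 * Push the current capability settings (enable flag, timeout, algorithm and
 * keys) down to the underlying md device.
 */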
2913 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2914 {
2915         struct md_device *next = m->mdt_child;
2916
2917         return next->md_ops->mdo_init_capa_ctxt(env, next,
2918                                                 m->mdt_opts.mo_mds_capa,
2919                                                 m->mdt_capa_timeout,
2920                                                 m->mdt_capa_alg,
2921                                                 m->mdt_capa_keys);
2922 }
2923
2924 /*
2925  * Invoke handler for this request opc. Also do necessary preprocessing
2926  * (according to handler ->mh_flags), and post-processing (setting of
2927  * ->last_{xid,committed}).
2928  */
2929 static int mdt_req_handle(struct mdt_thread_info *info,
2930                           struct mdt_handler *h, struct ptlrpc_request *req)
2931 {
2932         int   rc, serious = 0;
2933         __u32 flags;
2934
2935         ENTRY;
2936
2937         LASSERT(h->mh_act != NULL);
2938         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2939         LASSERT(current->journal_info == NULL);
2940
2941         /*
2942          * Check for the various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not put
2943          * the same checks into handlers like mdt_close(), mdt_reint(), etc.,
2944          * without talking to the mdt authors first. Checking the same thing
2945          * there again is useless, and returning a 0 error without packing a
2946          * reply is buggy: handlers either pack a reply or return an error.
2947          *
2948          * We return 0 here and do not send any reply, in order to emulate a
2949          * network failure; no reply is sent if any of the NET-related
2950          * fail_ids has occurred.
2951          */
2952         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2953                 RETURN(0);
2954
2955         rc = 0;
2956         flags = h->mh_flags;
2957         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2958
2959         if (h->mh_fmt != NULL) {
2960                 req_capsule_set(info->mti_pill, h->mh_fmt);
2961                 rc = mdt_unpack_req_pack_rep(info, flags);
2962         }
2963
2964         if (rc == 0 && flags & MUTABOR &&
2965             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2966                 /* should it be rq_status? */
2967                 rc = -EROFS;
2968
2969         if (rc == 0 && flags & HABEO_CLAVIS) {
2970                 struct ldlm_request *dlm_req;
2971
2972                 LASSERT(h->mh_fmt != NULL);
2973
2974                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2975                 if (dlm_req != NULL) {
2976                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2977                                         LDLM_IBITS &&
2978                                      dlm_req->lock_desc.l_policy_data.\
2979                                         l_inodebits.bits == 0)) {
2980                                 /*
2981                                  * An IBITS lock without inodebits makes no
2982                                  * sense and will oops later in ldlm. If the
2983                                  * client did not set the bits, do not assert.
2984                                  *
2985                                  * For the liblustre flock case, it may be zero.
2986                                  */
2987                                 rc = -EPROTO;
2988                         } else {
2989                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2990                                         rc = mdt_lock_resname_compat(
2991                                                                 info->mti_mdt,
2992                                                                 dlm_req);
2993                                 info->mti_dlm_req = dlm_req;
2994                         }
2995                 } else {
2996                         rc = -EFAULT;
2997                 }
2998         }
2999
3000         /* capability settings changed via /proc, reinitialize the capa ctxt */
3001         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
3002                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
3003                 info->mti_mdt->mdt_capa_conf = 0;
3004         }
3005
3006         if (likely(rc == 0)) {
3007                 /*
3008                  * Process the request; rc can fall into two classes:
3009                  * 1) errors in msg unpack/pack or other failures outside the
3010                  * operation itself. These are counted as serious errors;
3011                  * 2) errors during the fs operation itself, which belong in
3012                  * rq_status only.
3013                  */
3014                 rc = h->mh_act(info);
3015                 if (rc == 0 &&
3016                     !req->rq_no_reply && req->rq_reply_state == NULL) {
3017                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
3018                                   "pack reply and returned 0 error\n",
3019                                   h->mh_name);
3020                         LBUG();
3021                 }
3022                 serious = is_serious(rc);
3023                 rc = clear_serious(rc);
3024         } else
3025                 serious = 1;
3026
3027         req->rq_status = rc;
3028
3029         /*
3030          * ELDLM_* codes greater than 0, as well as all non-serious errors,
3031          * belong in rq_status only.
3032          */
3033         if (rc > 0 || !serious)
3034                 rc = 0;
3035
3036         LASSERT(current->journal_info == NULL);
3037
3038         if (rc == 0 && (flags & HABEO_CLAVIS) &&
3039             info->mti_mdt->mdt_opts.mo_compat_resname) {
3040                 struct ldlm_reply *dlmrep;
3041
3042                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3043                 if (dlmrep != NULL)
3044                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3045         }
3046
3047         /* If we're DISCONNECTing, the mdt_export_data is already freed */
3048         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3049                 target_committed_to_req(req);
3050
3051         if (unlikely(req_is_replay(req) &&
3052                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3053                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3054                 LBUG();
3055         }
3056
3057         target_send_reply(req, rc, info->mti_fail_id);
3058         RETURN(0);
3059 }
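
/*
 * For reference, a slot consumed by mdt_req_handle() above looks roughly as
 * sketched below.  This is not copied from the real handler tables in this
 * file; the initializer style and the MDS_GETATTR example are assumptions
 * based on how the mh_* fields are used:
 *
 *	static struct mdt_handler mdt_mds_ops[] = {
 *		...
 *		{
 *			.mh_name    = "mdt_getattr",
 *			.mh_opc     = MDS_GETATTR,
 *			.mh_flags   = HABEO_CORPUS | HABEO_REFERO,
 *			.mh_act     = mdt_getattr,
 *			.mh_fmt     = &RQF_MDS_GETATTR,
 *			.mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
 *		},
 *		...
 *	};
 */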
3060
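/**
 * Reset a lock handle to its neutral state: no lock type, zeroed cookies and
 * LCK_MINMODE for both the regular and the PDO handle.  mdt_lock_handle_fini()
 * below asserts that both handles have been released before reuse.
 */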
3061 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3062 {
3063         lh->mlh_type = MDT_NUL_LOCK;
3064         lh->mlh_reg_lh.cookie = 0ull;
3065         lh->mlh_reg_mode = LCK_MINMODE;
3066         lh->mlh_pdo_lh.cookie = 0ull;
3067         lh->mlh_pdo_mode = LCK_MINMODE;
3068 }
3069
3070 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3071 {
3072         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3073         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3074 }
3075
3076 /*
3077  * Initialize selected fields of struct mdt_thread_info. The remaining
3078  * fields are left uninitialized, because it is too expensive to zero out
3079  * the whole mdt_thread_info (> 1K) on each request arrival.
3080  */
3081 static void mdt_thread_info_init(struct ptlrpc_request *req,
3082                                  struct mdt_thread_info *info)
3083 {
3084         int i;
3085         struct md_capainfo *ci;
3086
3087         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3088         info->mti_pill = &req->rq_pill;
3089
3090         /* lock handle */
3091         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3092                 mdt_lock_handle_init(&info->mti_lh[i]);
3093
3094         /* mdt device: it can be NULL during CONNECT */
3095         if (req->rq_export) {
3096                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3097                 info->mti_exp = req->rq_export;
3098         } else
3099                 info->mti_mdt = NULL;
3100         info->mti_env = req->rq_svc_thread->t_env;
3101         ci = md_capainfo(info->mti_env);
3102         memset(ci, 0, sizeof *ci);
3103         if (req->rq_export) {
3104                 if (exp_connect_rmtclient(req->rq_export))
3105                         ci->mc_auth = LC_ID_CONVERT;
3106                 else if (req->rq_export->exp_connect_flags &
3107                          OBD_CONNECT_MDS_CAPA)
3108                         ci->mc_auth = LC_ID_PLAIN;
3109                 else
3110                         ci->mc_auth = LC_ID_NONE;
3111         }
3112
3113         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3114         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3115         info->mti_mos = NULL;
3116
3117         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3118         info->mti_body = NULL;
3119         info->mti_object = NULL;
3120         info->mti_dlm_req = NULL;
3121         info->mti_has_trans = 0;
3122         info->mti_cross_ref = 0;
3123         info->mti_opdata = 0;
3124         info->mti_big_lmm_used = 0;
3125
3126         /* Do not check for split by default. */
3127         info->mti_spec.sp_ck_split = 0;
3128         info->mti_spec.no_create = 0;
3129 }
3130
3131 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3132 {
3133         int i;
3134
3135         req_capsule_fini(info->mti_pill);
3136         if (info->mti_object != NULL) {
3137                 mdt_object_put(info->mti_env, info->mti_object);
3138                 info->mti_object = NULL;
3139         }
3140         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3141                 mdt_lock_handle_fini(&info->mti_lh[i]);
3142         info->mti_env = NULL;
3143 }
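
/*
 * Hypothetical per-request flow tying the pieces above together.  The real
 * dispatcher lives elsewhere in this file; thread-info storage, handler
 * table selection and error handling are simplified here:
 *
 *	info = ...per-thread struct mdt_thread_info...;
 *	mdt_thread_info_init(req, info);
 *	h = mdt_handler_find(lustre_msg_get_opc(req->rq_reqmsg), supported);
 *	if (h != NULL)
 *		rc = mdt_req_handle(info, h, req);
 *	mdt_thread_info_fini(info);
 */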
3144
3145 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3146                                        struct obd_device *obd, int *process)
3147 {
3148         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3149         case MDS_CONNECT: /* Should never get here; kept for completeness. */
3150         case OST_CONNECT: /* Should never get here; kept for completeness. */
3151         case MDS_DISCONNECT:
3152         case OST_DISCONNECT:
3153                 *process = 1;
3154                 RETURN(0);
3155
3156         case MDS_CLOSE:
3157         case MDS_DONE_WRITING:
3158         case MDS_SYNC: /* used in unmounting */
3159         case OBD_PING:
3160         case MDS_REINT:
3161         case SEQ_QUERY:
3162         case FLD_QUERY:
3163         case LDLM_ENQUEUE:
3164                 *process = target_queue_recovery_request(req, obd);
3165                 RETURN(0);
3166
3167         default:
3168                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3169                 *process = -EAGAIN;
3170                 RETURN(0);
3171         }
3172 }
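
/*
 * Sketch of how a recovery-time caller is expected to consume the \a process
 * out-parameter of mdt_filter_recovery_request() (the caller shown here is an
 * assumption, not the actual code):
 *
 *	int should_process;
 *
 *	rc = mdt_filter_recovery_request(req, obd, &should_process);
 *	if (rc != 0 || should_process == 0)
 *		return rc;                       // dropped or queued
 *	if (should_process < 0) {
 *		req->rq_status = should_process; // e.g. -EAGAIN
 *		return ptlrpc_error(req);        // reply with the error
 *	}
 *	// otherwise go on and handle the request
 */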
3173
3174 /*
3175  * Handle recovery. Return:
3176  *        +1: continue request processing;
3177  *       -ve: abort immediately with the given error code;
3178  *         0: send reply with error code in req->rq_status;
3179  */
3180 static int mdt_recovery(struct mdt_thread_info *info)
3181 {
3182         struct ptlrpc_request *req = mdt_info_req(info);
3183         struct obd_device *obd;
3184
3185         ENTRY;
3186
3187         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3188         case MDS_CONNECT:
3189         case SEC_CTX_INIT:
3190         case SEC_CTX_INIT_CONT:
3191         case SEC_CTX_FINI:
3192                 {
3193 #if 0
3194                         int rc;
3195
3196                         rc = mdt_handle_idmap(info);
3197                         if (rc)
3198                                 RETURN(rc);
3199                         else
3200 #endif
3201                                 RETURN(+1);
3202                 }
3203         }
3204
3205         if (unlikely(!class_connected_export(req->rq_export))) {
3206                 CERROR("operation %d on unconnected MDS from %s\n",
3207                        lustre_msg_get_opc(req->rq_reqmsg),
3208                        libcfs_id2str(req->rq_peer));
3209                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
3210                  * mds_A will get -ENOTCONN (especially ping requests),
3211                  * which makes mds_A start its deactivate timeout; when
3212                  * mds_A is cleaned up later, the cleanup process is
3213                  * suspended because the deactivate timeout is not zero.
3214                  */
3215                 req->rq_status = -ENOTCONN;
3216                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3217                 RETURN(0);
3218         }
3219
3220         /* sanity check: if the xid matches, the request must be marked as
3221          * resent or replayed */
3222         if (req_xid_is_last(req)) {
3223                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3224                       (MSG_RESENT | MSG_REPLAY))) {
3225                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3226                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3227                                   lustre_msg_get_flags(req->rq_reqmsg));
3228                         LBUG();
3229                         req->rq_status = -ENOTCONN;
3230       &n