Whamcloud - gitweb
LU-2730 mdt: fix erroneous LASSERT in mdt_reint_opcode
[fs/lustre-release.git] / lustre / mdt / mdt_handler.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * struct OBD_{ALLOC,FREE}*()
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #include <lustre_acl.h>
69 #include <lustre_param.h>
70 #include <lustre_quota.h>
71
/* Translation table: DLM (ldlm) lock mode -> MDL (metadata layer) lock mode,
 * used when passing lock requests down to the md layer. */
mdl_mode_t mdt_mdl_lock_modes[] = {
        [LCK_MINMODE] = MDL_MINMODE,
        [LCK_EX]      = MDL_EX,
        [LCK_PW]      = MDL_PW,
        [LCK_PR]      = MDL_PR,
        [LCK_CW]      = MDL_CW,
        [LCK_CR]      = MDL_CR,
        [LCK_NL]      = MDL_NL,
        [LCK_GROUP]   = MDL_GROUP
};
82
/* Inverse translation table: MDL (metadata layer) lock mode -> DLM lock mode.
 * Must stay consistent with mdt_mdl_lock_modes above. */
ldlm_mode_t mdt_dlm_lock_modes[] = {
        [MDL_MINMODE] = LCK_MINMODE,
        [MDL_EX]      = LCK_EX,
        [MDL_PW]      = LCK_PW,
        [MDL_PR]      = LCK_PR,
        [MDL_CW]      = LCK_CW,
        [MDL_CR]      = LCK_CR,
        [MDL_NL]      = LCK_NL,
        [MDL_GROUP]   = LCK_GROUP
};
93
/* Forward declarations for helpers defined later in this file. */
static struct mdt_device *mdt_dev(struct lu_device *d);
static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
                        struct getinfo_fid2path *fp);

static const struct lu_object_operations mdt_obj_ops;

/* Slab for MDT object allocation */
static cfs_mem_cache_t *mdt_object_kmem;

/* kmem cache descriptors registered at module init; the list is terminated
 * by an entry whose ckd_cache is NULL. */
static struct lu_kmem_descr mdt_caches[] = {
        {
                .ckd_cache = &mdt_object_kmem,
                .ckd_name  = "mdt_obj",
                .ckd_size  = sizeof(struct mdt_object)
        },
        {
                .ckd_cache = NULL /* sentinel */
        }
};
114
115 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
116 {
117         if (!rep)
118                 return 0;
119         return (rep->lock_policy_res1 & flag);
120 }
121
122 void mdt_clear_disposition(struct mdt_thread_info *info,
123                            struct ldlm_reply *rep, int flag)
124 {
125         if (info)
126                 info->mti_opdata &= ~flag;
127         if (rep)
128                 rep->lock_policy_res1 &= ~flag;
129 }
130
131 void mdt_set_disposition(struct mdt_thread_info *info,
132                          struct ldlm_reply *rep, int flag)
133 {
134         if (info)
135                 info->mti_opdata |= flag;
136         if (rep)
137                 rep->lock_policy_res1 |= flag;
138 }
139
140 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
141 {
142         lh->mlh_pdo_hash = 0;
143         lh->mlh_reg_mode = lm;
144         lh->mlh_rreg_mode = lm;
145         lh->mlh_type = MDT_REG_LOCK;
146 }
147
148 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
149                        const char *name, int namelen)
150 {
151         lh->mlh_reg_mode = lm;
152         lh->mlh_rreg_mode = lm;
153         lh->mlh_type = MDT_PDO_LOCK;
154
155         if (name != NULL && (name[0] != '\0')) {
156                 LASSERT(namelen > 0);
157                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
158         } else {
159                 LASSERT(namelen == 0);
160                 lh->mlh_pdo_hash = 0ull;
161         }
162 }
163
/**
 * Choose the PDO (whole-directory) lock mode to accompany the regular lock
 * mode already stored in \a lh, and record it in lh->mlh_pdo_mode.  The
 * lower md layer is consulted first and may dictate the mode outright.
 */
static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
                              struct mdt_lock_handle *lh)
{
        mdl_mode_t mode;
        ENTRY;

        /*
         * Any dir access needs couple of locks:
         *
         * 1) on part of dir we gonna take lookup/modify;
         *
         * 2) on whole dir to protect it from concurrent splitting and/or to
         * flush client's cache for readdir().
         *
         * so, for a given mode and object this routine decides what lock mode
         * to use for lock #2:
         *
         * 1) if caller's gonna lookup in dir then we need to protect dir from
         * being split only - LCK_CR
         *
         * 2) if caller's gonna modify dir then we need to protect dir from
         * being split and to flush cache - LCK_CW
         *
         * 3) if caller's gonna modify dir and that dir seems ready for
         * splitting then we need to protect it from any type of access
         * (lookup/modify/split) - LCK_EX --bzzz
         */

        LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
        LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);

        /*
         * Ask underlying level its opinion about preferable PDO lock mode
         * having access type passed as regular lock mode:
         *
         * - MDL_MINMODE means that lower layer does not want to specify lock
         * mode;
         *
         * - MDL_NL means that no PDO lock should be taken. This is used in some
         * cases. Say, for non-splittable directories no need to use PDO locks
         * at all.
         */
        mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
                             mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));

        if (mode != MDL_MINMODE) {
                /* the lower layer dictated a mode; translate it back to DLM */
                lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
        } else {
                /*
                 * Lower layer does not want to specify locking mode. We do it
                 * ourselves. No special protection is needed, just flush
                 * client's cache on modification and allow concurrent
                 * modification.
                 */
                switch (lh->mlh_reg_mode) {
                case LCK_EX:
                        lh->mlh_pdo_mode = LCK_EX;
                        break;
                case LCK_PR:
                        lh->mlh_pdo_mode = LCK_CR;
                        break;
                case LCK_PW:
                        lh->mlh_pdo_mode = LCK_CW;
                        break;
                default:
                        CERROR("Not expected lock type (0x%x)\n",
                               (int)lh->mlh_reg_mode);
                        LBUG();
                }
        }

        LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
        EXIT;
}
238
/**
 * Handle MDS_GETSTATUS: look up the root FID of this MDT and pack it into
 * the reply body; when MDS capabilities are enabled on both sides, also
 * pack a default capability for the root object.
 *
 * \retval 0 on success, negative errno otherwise (credential and packing
 *         failures are wrapped in err_serious()).
 */
int mdt_getstatus(struct mdt_thread_info *info)
{
        struct mdt_device *mdt  = info->mti_mdt;
        struct md_device  *next = mdt->mdt_child;
        struct mdt_body   *repbody;
        int                rc;

        ENTRY;

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
                RETURN(err_serious(-ENOMEM));

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        /* ask the underlying md layer for the root FID */
        rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
        if (rc != 0)
                RETURN(rc);

        repbody->valid |= OBD_MD_FLID;

        if (mdt->mdt_opts.mo_mds_capa &&
            exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
                struct mdt_object  *root;
                struct lustre_capa *capa;

                root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
                if (IS_ERR(root))
                        RETURN(PTR_ERR(root));

                capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
                rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
                                 0);
                mdt_object_put(info->mti_env, root);
                if (rc == 0)
                        repbody->valid |= OBD_MD_FLMDSCAPA;
        }

        RETURN(rc);
}
283
/**
 * Handle MDS_STATFS: return filesystem usage statistics to the client.
 * Results from the lower layer are cached in the mdt_device for up to
 * OBD_STATFS_CACHE_SECONDS to avoid hitting the backend on every request.
 */
int mdt_statfs(struct mdt_thread_info *info)
{
        struct ptlrpc_request           *req = mdt_info_req(info);
        struct md_device                *next = info->mti_mdt->mdt_child;
        struct ptlrpc_service_part      *svcpt;
        struct obd_statfs               *osfs;
        int                             rc;

        ENTRY;

        svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;

        /* This will trigger a watchdog timeout */
        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
                         (MDT_SERVICE_WATCHDOG_FACTOR *
                          at_get(&svcpt->scp_at_estimate)) + 1);

        rc = mdt_check_ucred(info);
        if (rc)
                RETURN(err_serious(rc));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
                RETURN(err_serious(-ENOMEM));

        osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
        if (!osfs)
                RETURN(-EPROTO);

        /** statfs information are cached in the mdt_device */
        if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
                               cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
                /** statfs data is too old, get up-to-date one */
                rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
                if (rc)
                        RETURN(rc);
                /* refresh the cache under the lock */
                spin_lock(&info->mti_mdt->mdt_osfs_lock);
                info->mti_mdt->mdt_osfs = *osfs;
                info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
                spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        } else {
                /** use cached statfs data */
                spin_lock(&info->mti_mdt->mdt_osfs_lock);
                *osfs = info->mti_mdt->mdt_osfs;
                spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        }

        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_STATFS);

        RETURN(rc);
}
335
/**
 * Pack SOM (Size-on-MDS) attributes into the reply.
 * Call under a DLM UPDATE lock.
 */
static void mdt_pack_size2body(struct mdt_thread_info *info,
                               struct mdt_object *mo)
{
        struct mdt_body *b;
        struct md_attr *ma = &info->mti_attr;

        LASSERT(ma->ma_attr.la_valid & LA_MODE);
        b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        /* Check if Size-on-MDS is supported, if this is a regular file,
         * if SOM is enabled on the object and if SOM cache exists and valid.
         * Otherwise do not pack Size-on-MDS attributes to the reply. */
        if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
            !S_ISREG(ma->ma_attr.la_mode) ||
            !mdt_object_is_som_enabled(mo) ||
            !(ma->ma_valid & MA_SOM))
                return;

        /* copy the cached SOM size/blocks into the reply body */
        b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        b->size = ma->ma_som->msd_size;
        b->blocks = ma->ma_som->msd_blocks;
}
362
363 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
364                         const struct lu_attr *attr, const struct lu_fid *fid)
365 {
366         struct md_attr *ma = &info->mti_attr;
367
368         LASSERT(ma->ma_valid & MA_INODE);
369
370         b->atime      = attr->la_atime;
371         b->mtime      = attr->la_mtime;
372         b->ctime      = attr->la_ctime;
373         b->mode       = attr->la_mode;
374         b->size       = attr->la_size;
375         b->blocks     = attr->la_blocks;
376         b->uid        = attr->la_uid;
377         b->gid        = attr->la_gid;
378         b->flags      = attr->la_flags;
379         b->nlink      = attr->la_nlink;
380         b->rdev       = attr->la_rdev;
381
382         /*XXX should pack the reply body according to lu_valid*/
383         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
384                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
385                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
386                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
387
388         if (!S_ISREG(attr->la_mode)) {
389                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
390         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
391                 /* means no objects are allocated on osts. */
392                 LASSERT(!(ma->ma_valid & MA_LOV));
393                 /* just ignore blocks occupied by extend attributes on MDS */
394                 b->blocks = 0;
395                 /* if no object is allocated on osts, the size on mds is valid. b=22272 */
396                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
397         }
398
399         if (fid) {
400                 b->fid1 = *fid;
401                 b->valid |= OBD_MD_FLID;
402                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
403                                 PFID(fid), b->nlink, b->mode, b->size);
404         }
405
406         if (info)
407                 mdt_body_reverse_idmap(info, b);
408
409         if (b->valid & OBD_MD_FLSIZE)
410                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
411                        PFID(fid), (unsigned long long)b->size);
412 }
413
414 static inline int mdt_body_has_lov(const struct lu_attr *la,
415                                    const struct mdt_body *body)
416 {
417         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
418                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
419 }
420
/**
 * Adjust the reply for clients that do not support the layout lock: such
 * clients cannot interpret lmm_layout_gen, so it is zeroed in the LOV EA
 * being returned for regular files.
 */
void mdt_client_compatibility(struct mdt_thread_info *info)
{
        struct mdt_body       *body;
        struct ptlrpc_request *req = mdt_info_req(info);
        struct obd_export     *exp = req->rq_export;
        struct md_attr        *ma = &info->mti_attr;
        struct lu_attr        *la = &ma->ma_attr;
        ENTRY;

        if (exp_connect_layout(exp))
                /* the client can deal with 16-bit lmm_stripe_count */
                RETURN_EXIT;

        body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);

        if (!mdt_body_has_lov(la, body))
                RETURN_EXIT;

        /* now we have a reply with a lov for a client not compatible with the
         * layout lock so we have to clean the layout generation number */
        if (S_ISREG(la->la_mode))
                ma->ma_lmm->lmm_layout_gen = 0;
        EXIT;
}
445
/**
 * Read xattr \a name of object \a o into the per-thread "big LMM" buffer,
 * growing the buffer (rounded up to a power of two) when the current one
 * is too small.
 *
 * \retval the xattr size (> 0) on success, negative errno on error.
 * Precondition: mti_big_lmm_used is clear; the caller sets it once it
 * decides to keep the returned data.
 */
static int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
                             char *name)
{
        const struct lu_env *env = info->mti_env;
        int rc;
        ENTRY;

        LASSERT(info->mti_big_lmm_used == 0);
        /* probe with a NULL buffer to learn the xattr size first */
        rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
        if (rc < 0)
                RETURN(rc);

        /* big_lmm may need to be grown */
        if (info->mti_big_lmmsize < rc) {
                int size = size_roundup_power2(rc);

                if (info->mti_big_lmmsize > 0) {
                        /* free old buffer */
                        LASSERT(info->mti_big_lmm);
                        OBD_FREE_LARGE(info->mti_big_lmm,
                                       info->mti_big_lmmsize);
                        info->mti_big_lmm = NULL;
                        info->mti_big_lmmsize = 0;
                }

                OBD_ALLOC_LARGE(info->mti_big_lmm, size);
                if (info->mti_big_lmm == NULL)
                        RETURN(-ENOMEM);
                info->mti_big_lmmsize = size;
        }
        LASSERT(info->mti_big_lmmsize >= rc);

        /* now fetch the value for real into the (possibly new) buffer */
        info->mti_buf.lb_buf = info->mti_big_lmm;
        info->mti_buf.lb_len = info->mti_big_lmmsize;
        rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);

        RETURN(rc);
}
484
/**
 * Fetch the LOV EA of \a o into ma->ma_lmm.  If the preallocated buffer is
 * too small (-ERANGE), retry through the growable big-LMM buffer and point
 * ma->ma_lmm at it instead.
 *
 * On success ma->ma_valid gains MA_LOV and ma->ma_lmm_size holds the EA
 * size; a missing EA (-ENODATA) is not an error.
 * \retval 0 on success or missing EA, negative errno otherwise.
 */
int mdt_attr_get_lov(struct mdt_thread_info *info,
                     struct mdt_object *o, struct md_attr *ma)
{
        struct md_object *next = mdt_object_child(o);
        struct lu_buf    *buf = &info->mti_buf;
        int rc;

        buf->lb_buf = ma->ma_lmm;
        buf->lb_len = ma->ma_lmm_size;
        rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
        if (rc > 0) {
                ma->ma_lmm_size = rc;
                ma->ma_valid |= MA_LOV;
                rc = 0;
        } else if (rc == -ENODATA) {
                /* no LOV EA */
                rc = 0;
        } else if (rc == -ERANGE) {
                /* EA larger than the preallocated buffer: re-read via the
                 * growable per-thread big-LMM buffer */
                rc = mdt_big_xattr_get(info, o, XATTR_NAME_LOV);
                if (rc > 0) {
                        info->mti_big_lmm_used = 1;
                        ma->ma_valid |= MA_LOV;
                        ma->ma_lmm = info->mti_big_lmm;
                        ma->ma_lmm_size = rc;
                        /* update mdt_max_mdsize so all clients
                         * will be aware about that */
                        if (info->mti_mdt->mdt_max_mdsize < rc)
                                info->mti_mdt->mdt_max_mdsize = rc;
                        rc = 0;
                }
        }

        return rc;
}
519
/**
 * Extract the parent FID of \a o from its link EA (first linkea entry).
 * A header written by a peer of the opposite endianness is byte-swapped
 * in place before validation.
 *
 * \retval 0 and \a pfid filled on success; negative errno when the linkea
 *         is absent, short, or malformed.
 */
int mdt_attr_get_pfid(struct mdt_thread_info *info,
                      struct mdt_object *o, struct lu_fid *pfid)
{
        struct lu_buf           *buf = &info->mti_buf;
        struct link_ea_header   *leh;
        struct link_ea_entry    *lee;
        int                      rc;
        ENTRY;

        buf->lb_buf = info->mti_big_lmm;
        buf->lb_len = info->mti_big_lmmsize;
        rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
                          buf, XATTR_NAME_LINK);
        /* ignore errors, MA_PFID won't be set and it is
         * up to the caller to treat this as an error */
        if (rc == -ERANGE || buf->lb_len == 0) {
                /* buffer absent or too small: (re)read via the growable
                 * big-LMM buffer */
                rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
                buf->lb_buf = info->mti_big_lmm;
                buf->lb_len = info->mti_big_lmmsize;
        }

        if (rc < 0)
                RETURN(rc);
        if (rc < sizeof(*leh)) {
                CERROR("short LinkEA on "DFID": rc = %d\n",
                       PFID(mdt_object_fid(o)), rc);
                RETURN(-ENODATA);
        }

        leh = (struct link_ea_header *) buf->lb_buf;
        lee = (struct link_ea_entry *)(leh + 1);
        /* header written in the other byte order? swab it in place */
        if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
                leh->leh_magic = LINK_EA_MAGIC;
                leh->leh_reccount = __swab32(leh->leh_reccount);
                leh->leh_len = __swab64(leh->leh_len);
        }
        if (leh->leh_magic != LINK_EA_MAGIC)
                RETURN(-EINVAL);
        if (leh->leh_reccount == 0)
                RETURN(-ENODATA);

        /* linkea FIDs are stored big-endian; convert to CPU byte order */
        memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
        fid_be_to_cpu(pfid, pfid);

        RETURN(0);
}
566
/**
 * Gather the attribute set requested in ma->ma_need for object \a o:
 * inode attributes (MA_INODE), parent FID from linkea (MA_PFID), LOV/LMV
 * striping EAs, SOM and HSM xattrs, and the default ACL for directories.
 *
 * ma->ma_valid is rebuilt from scratch; ma->ma_need is restored before
 * returning.  Missing optional EAs (-ENODATA) are not fatal.
 * \retval 0 on success, negative errno on a fatal error.
 */
int mdt_attr_get_complex(struct mdt_thread_info *info,
                         struct mdt_object *o, struct md_attr *ma)
{
        const struct lu_env *env = info->mti_env;
        struct md_object    *next = mdt_object_child(o);
        struct lu_buf       *buf = &info->mti_buf;
        u32                  mode = lu_object_attr(&next->mo_lu);
        int                  need = ma->ma_need;
        int                  rc = 0, rc2;
        ENTRY;

        ma->ma_valid = 0;

        if (need & MA_INODE) {
                /* mo_attr_get() looks at ma_need, so narrow it temporarily */
                ma->ma_need = MA_INODE;
                rc = mo_attr_get(env, next, ma);
                if (rc)
                        GOTO(out, rc);
                ma->ma_valid |= MA_INODE;
        }

        if (need & MA_PFID) {
                rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
                if (rc == 0)
                        ma->ma_valid |= MA_PFID;
                /* ignore this error, parent fid is not mandatory */
                rc = 0;
        }

        if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
                rc = mdt_attr_get_lov(info, o, ma);
                if (rc)
                        GOTO(out, rc);
        }

        if (need & MA_LMV && S_ISDIR(mode)) {
                buf->lb_buf = ma->ma_lmv;
                buf->lb_len = ma->ma_lmv_size;
                rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
                if (rc2 > 0) {
                        ma->ma_lmv_size = rc2;
                        ma->ma_valid |= MA_LMV;
                } else if (rc2 == -ENODATA) {
                        /* no LMV EA */
                        ma->ma_lmv_size = 0;
                } else
                        GOTO(out, rc = rc2);
        }

        if (need & MA_SOM && S_ISREG(mode)) {
                buf->lb_buf = info->mti_xattr_buf;
                buf->lb_len = sizeof(info->mti_xattr_buf);
                CLASSERT(sizeof(struct som_attrs) <=
                         sizeof(info->mti_xattr_buf));
                rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_SOM);
                /* lustre_buf2som() validates and unpacks the raw xattr */
                rc2 = lustre_buf2som(info->mti_xattr_buf, rc2, ma->ma_som);
                if (rc2 == 0)
                        ma->ma_valid |= MA_SOM;
                else if (rc2 < 0 && rc2 != -ENODATA)
                        GOTO(out, rc = rc2);
        }

        if (need & MA_HSM && S_ISREG(mode)) {
                buf->lb_buf = info->mti_xattr_buf;
                buf->lb_len = sizeof(info->mti_xattr_buf);
                CLASSERT(sizeof(struct hsm_attrs) <=
                         sizeof(info->mti_xattr_buf));
                rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM);
                rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm);
                if (rc2 == 0)
                        ma->ma_valid |= MA_HSM;
                else if (rc2 < 0 && rc2 != -ENODATA)
                        GOTO(out, rc = rc2);
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (need & MA_ACL_DEF && S_ISDIR(mode)) {
                buf->lb_buf = ma->ma_acl;
                buf->lb_len = ma->ma_acl_size;
                rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
                if (rc2 > 0) {
                        ma->ma_acl_size = rc2;
                        ma->ma_valid |= MA_ACL_DEF;
                } else if (rc2 == -ENODATA) {
                        /* no ACLs */
                        ma->ma_acl_size = 0;
                } else
                        GOTO(out, rc = rc2);
        }
#endif
out:
        /* restore the caller's request mask clobbered by the MA_INODE step */
        ma->ma_need = need;
        CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
               rc, ma->ma_valid, ma->ma_lmm);
        RETURN(rc);
}
663
664 static int mdt_getattr_internal(struct mdt_thread_info *info,
665                                 struct mdt_object *o, int ma_need)
666 {
667         struct md_object        *next = mdt_object_child(o);
668         const struct mdt_body   *reqbody = info->mti_body;
669         struct ptlrpc_request   *req = mdt_info_req(info);
670         struct md_attr          *ma = &info->mti_attr;
671         struct lu_attr          *la = &ma->ma_attr;
672         struct req_capsule      *pill = info->mti_pill;
673         const struct lu_env     *env = info->mti_env;
674         struct mdt_body         *repbody;
675         struct lu_buf           *buffer = &info->mti_buf;
676         int                     rc;
677         int                     is_root;
678         ENTRY;
679
680         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
681                 RETURN(err_serious(-ENOMEM));
682
683         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
684
685         ma->ma_valid = 0;
686
687         if (mdt_object_remote(o)) {
688                 /* This object is located on remote node.*/
689                 /* Return -EIO for old client */
690                 if (!mdt_is_dne_client(req->rq_export))
691                         GOTO(out, rc = -EIO);
692
693                 repbody->fid1 = *mdt_object_fid(o);
694                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
695                 GOTO(out, rc = 0);
696         }
697
698         buffer->lb_len = reqbody->eadatasize;
699         if (buffer->lb_len > 0)
700                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
701         else
702                 buffer->lb_buf = NULL;
703
704         /* If it is dir object and client require MEA, then we got MEA */
705         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
706             reqbody->valid & OBD_MD_MEA) {
707                 /* Assumption: MDT_MD size is enough for lmv size. */
708                 ma->ma_lmv = buffer->lb_buf;
709                 ma->ma_lmv_size = buffer->lb_len;
710                 ma->ma_need = MA_LMV | MA_INODE;
711         } else {
712                 ma->ma_lmm = buffer->lb_buf;
713                 ma->ma_lmm_size = buffer->lb_len;
714                 ma->ma_need = MA_LOV | MA_INODE | MA_HSM;
715         }
716
717         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
718             reqbody->valid & OBD_MD_FLDIREA  &&
719             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
720                 /* get default stripe info for this dir. */
721                 ma->ma_need |= MA_LOV_DEF;
722         }
723         ma->ma_need |= ma_need;
724         if (ma->ma_need & MA_SOM)
725                 ma->ma_som = &info->mti_u.som.data;
726
727         rc = mdt_attr_get_complex(info, o, ma);
728         if (unlikely(rc)) {
729                 CERROR("getattr error for "DFID": %d\n",
730                         PFID(mdt_object_fid(o)), rc);
731                 RETURN(rc);
732         }
733
734         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
735
736         /* the Lustre protocol supposes to return default striping
737          * on the user-visible root if explicitly requested */
738         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
739             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
740                 struct lu_fid      rootfid;
741                 struct mdt_object *root;
742                 struct mdt_device *mdt = info->mti_mdt;
743
744                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
745                 if (rc)
746                         RETURN(rc);
747                 root = mdt_object_find(env, mdt, &rootfid);
748                 if (IS_ERR(root))
749                         RETURN(PTR_ERR(root));
750                 rc = mdt_attr_get_lov(info, root, ma);
751                 mdt_object_put(info->mti_env, root);
752                 if (unlikely(rc)) {
753                         CERROR("getattr error for "DFID": %d\n",
754                                         PFID(mdt_object_fid(o)), rc);
755                         RETURN(rc);
756                 }
757         }
758
759         if (likely(ma->ma_valid & MA_INODE))
760                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
761         else
762                 RETURN(-EFAULT);
763
764         if (mdt_body_has_lov(la, reqbody)) {
765                 if (ma->ma_valid & MA_LOV) {
766                         LASSERT(ma->ma_lmm_size);
767                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
768                         repbody->eadatasize = ma->ma_lmm_size;
769                         if (S_ISDIR(la->la_mode))
770                                 repbody->valid |= OBD_MD_FLDIREA;
771                         else
772                                 repbody->valid |= OBD_MD_FLEASIZE;
773                 }
774                 if (ma->ma_valid & MA_LMV) {
775                         LASSERT(S_ISDIR(la->la_mode));
776                         repbody->eadatasize = ma->ma_lmv_size;
777                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
778                 }
779         } else if (S_ISLNK(la->la_mode) &&
780                    reqbody->valid & OBD_MD_LINKNAME) {
781                 buffer->lb_buf = ma->ma_lmm;
782                 /* eadatasize from client includes NULL-terminator, so
783                  * there is no need to read it */
784                 buffer->lb_len = reqbody->eadatasize - 1;
785                 rc = mo_readlink(env, next, buffer);
786                 if (unlikely(rc <= 0)) {
787                         CERROR("readlink failed: %d\n", rc);
788                         rc = -EFAULT;
789                 } else {
790                         int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
791
792                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
793                                 rc -= 2;
794                         repbody->valid |= OBD_MD_LINKNAME;
795                         /* we need to report back size with NULL-terminator
796                          * because client expects that */
797                         repbody->eadatasize = rc + 1;
798                         if (repbody->eadatasize != reqbody->eadatasize)
799                                 CERROR("Read shorter symlink %d, expected %d\n",
800                                        rc, reqbody->eadatasize - 1);
801                         /* NULL terminate */
802                         ((char *)ma->ma_lmm)[rc] = 0;
803
804                         /* If the total CDEBUG() size is larger than a page, it
805                          * will print a warning to the console, avoid this by
806                          * printing just the last part of the symlink. */
807                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
808                                print_limit < rc ? "..." : "", print_limit,
809                                (char *)ma->ma_lmm + rc - print_limit, rc);
810                         rc = 0;
811                 }
812         }
813
814         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
815                 repbody->max_cookiesize = 0;
816                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
817                 repbody->valid |= OBD_MD_FLMODEASIZE;
818                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
819                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
820                        repbody->max_cookiesize);
821         }
822
823         if (exp_connect_rmtclient(info->mti_exp) &&
824             reqbody->valid & OBD_MD_FLRMTPERM) {
825                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
826
827                 /* mdt_getattr_lock only */
828                 rc = mdt_pack_remote_perm(info, o, buf);
829                 if (rc) {
830                         repbody->valid &= ~OBD_MD_FLRMTPERM;
831                         repbody->aclsize = 0;
832                         RETURN(rc);
833                 } else {
834                         repbody->valid |= OBD_MD_FLRMTPERM;
835                         repbody->aclsize = sizeof(struct mdt_remote_perm);
836                 }
837         }
838 #ifdef CONFIG_FS_POSIX_ACL
839         else if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
840                  (reqbody->valid & OBD_MD_FLACL)) {
841                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
842                 buffer->lb_len = req_capsule_get_size(pill,
843                                                       &RMF_ACL, RCL_SERVER);
844                 if (buffer->lb_len > 0) {
845                         rc = mo_xattr_get(env, next, buffer,
846                                           XATTR_NAME_ACL_ACCESS);
847                         if (rc < 0) {
848                                 if (rc == -ENODATA) {
849                                         repbody->aclsize = 0;
850                                         repbody->valid |= OBD_MD_FLACL;
851                                         rc = 0;
852                                 } else if (rc == -EOPNOTSUPP) {
853                                         rc = 0;
854                                 } else {
855                                         CERROR("got acl size: %d\n", rc);
856                                 }
857                         } else {
858                                 repbody->aclsize = rc;
859                                 repbody->valid |= OBD_MD_FLACL;
860                                 rc = 0;
861                         }
862                 }
863         }
864 #endif
865
866         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
867             info->mti_mdt->mdt_opts.mo_mds_capa &&
868             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
869                 struct lustre_capa *capa;
870
871                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
872                 LASSERT(capa);
873                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
874                 rc = mo_capa_get(env, next, capa, 0);
875                 if (rc)
876                         RETURN(rc);
877                 repbody->valid |= OBD_MD_FLMDSCAPA;
878         }
879
880 out:
881         if (rc == 0)
882                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
883
884         RETURN(rc);
885 }
886
887 static int mdt_renew_capa(struct mdt_thread_info *info)
888 {
889         struct mdt_object  *obj = info->mti_object;
890         struct mdt_body    *body;
891         struct lustre_capa *capa, *c;
892         int rc;
893         ENTRY;
894
895         /* if object doesn't exist, or server has disabled capability,
896          * return directly, client will find body->valid OBD_MD_FLOSSCAPA
897          * flag not set.
898          */
899         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
900             !(exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA))
901                 RETURN(0);
902
903         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
904         LASSERT(body != NULL);
905
906         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
907         LASSERT(c);
908
909         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
910         LASSERT(capa);
911
912         *capa = *c;
913         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
914         if (rc == 0)
915                 body->valid |= OBD_MD_FLOSSCAPA;
916         RETURN(rc);
917 }
918
919 int mdt_getattr(struct mdt_thread_info *info)
920 {
921         struct mdt_object       *obj = info->mti_object;
922         struct req_capsule      *pill = info->mti_pill;
923         struct mdt_body         *reqbody;
924         struct mdt_body         *repbody;
925         mode_t                   mode;
926         int rc, rc2;
927         ENTRY;
928
929         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
930         LASSERT(reqbody);
931
932         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
933                 rc = req_capsule_server_pack(pill);
934                 if (unlikely(rc))
935                         RETURN(err_serious(rc));
936                 rc = mdt_renew_capa(info);
937                 GOTO(out_shrink, rc);
938         }
939
940         LASSERT(obj != NULL);
941         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
942
943         mode = lu_object_attr(&obj->mot_obj.mo_lu);
944
945         /* old clients may not report needed easize, use max value then */
946         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
947                              reqbody->eadatasize == 0 ?
948                              info->mti_mdt->mdt_max_mdsize :
949                              reqbody->eadatasize);
950
951         rc = req_capsule_server_pack(pill);
952         if (unlikely(rc != 0))
953                 RETURN(err_serious(rc));
954
955         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
956         LASSERT(repbody != NULL);
957         repbody->eadatasize = 0;
958         repbody->aclsize = 0;
959
960         if (reqbody->valid & OBD_MD_FLRMTPERM)
961                 rc = mdt_init_ucred(info, reqbody);
962         else
963                 rc = mdt_check_ucred(info);
964         if (unlikely(rc))
965                 GOTO(out_shrink, rc);
966
967         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
968
969         /*
970          * Don't check capability at all, because rename might getattr for
971          * remote obj, and at that time no capability is available.
972          */
973         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
974         rc = mdt_getattr_internal(info, obj, 0);
975         if (reqbody->valid & OBD_MD_FLRMTPERM)
976                 mdt_exit_ucred(info);
977         EXIT;
978 out_shrink:
979         mdt_client_compatibility(info);
980         rc2 = mdt_fix_reply(info);
981         if (rc == 0)
982                 rc = rc2;
983         return rc;
984 }
985
986 int mdt_is_subdir(struct mdt_thread_info *info)
987 {
988         struct mdt_object     *o = info->mti_object;
989         struct req_capsule    *pill = info->mti_pill;
990         const struct mdt_body *body = info->mti_body;
991         struct mdt_body       *repbody;
992         int                    rc;
993         ENTRY;
994
995         LASSERT(o != NULL);
996
997         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
998
999         /*
1000          * We save last checked parent fid to @repbody->fid1 for remote
1001          * directory case.
1002          */
1003         LASSERT(fid_is_sane(&body->fid2));
1004         LASSERT(mdt_object_exists(o) && !mdt_object_remote(o));
1005         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1006                            &body->fid2, &repbody->fid1);
1007         if (rc == 0 || rc == -EREMOTE)
1008                 repbody->valid |= OBD_MD_FLID;
1009
1010         RETURN(rc);
1011 }
1012
1013 int mdt_swap_layouts(struct mdt_thread_info *info)
1014 {
1015         struct ptlrpc_request   *req = mdt_info_req(info);
1016         struct obd_export       *exp = req->rq_export;
1017         struct mdt_object       *o1, *o2, *o;
1018         struct mdt_lock_handle  *lh1, *lh2;
1019         struct mdc_swap_layouts *msl;
1020         int                      rc;
1021         ENTRY;
1022
1023         /* client does not support layout lock, so layout swaping
1024          * is disabled.
1025          * FIXME: there is a problem for old clients which don't support
1026          * layout lock yet. If those clients have already opened the file
1027          * they won't be notified at all so that old layout may still be
1028          * used to do IO. This can be fixed after file release is landed by
1029          * doing exclusive open and taking full EX ibits lock. - Jinshan */
1030         if (!exp_connect_layout(exp))
1031                 RETURN(-EOPNOTSUPP);
1032
1033         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA1, RCL_CLIENT))
1034                 mdt_set_capainfo(info, 0, &info->mti_body->fid1,
1035                                  req_capsule_client_get(info->mti_pill,
1036                                                         &RMF_CAPA1));
1037
1038         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA2, RCL_CLIENT))
1039                 mdt_set_capainfo(info, 1, &info->mti_body->fid2,
1040                                  req_capsule_client_get(info->mti_pill,
1041                                                         &RMF_CAPA2));
1042
1043         o1 = info->mti_object;
1044         o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
1045                                 &info->mti_body->fid2);
1046         if (IS_ERR(o))
1047                 GOTO(out, rc = PTR_ERR(o));
1048
1049         if (mdt_object_remote(o) || !mdt_object_exists(o)) /* remote object */
1050                 GOTO(put, rc = -ENOENT);
1051
1052         rc = lu_fid_cmp(&info->mti_body->fid1, &info->mti_body->fid2);
1053         if (unlikely(rc == 0)) /* same file, you kidding me? no-op. */
1054                 GOTO(put, rc);
1055
1056         if (rc < 0)
1057                 swap(o1, o2);
1058
1059         /* permission check. Make sure the calling process having permission
1060          * to write both files. */
1061         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
1062                                 MAY_WRITE);
1063         if (rc < 0)
1064                 GOTO(put, rc);
1065
1066         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
1067                                 MAY_WRITE);
1068         if (rc < 0)
1069                 GOTO(put, rc);
1070
1071         msl = req_capsule_client_get(info->mti_pill, &RMF_SWAP_LAYOUTS);
1072         LASSERT(msl != NULL);
1073
1074         lh1 = &info->mti_lh[MDT_LH_NEW];
1075         mdt_lock_reg_init(lh1, LCK_EX);
1076         lh2 = &info->mti_lh[MDT_LH_OLD];
1077         mdt_lock_reg_init(lh2, LCK_EX);
1078
1079         rc = mdt_object_lock(info, o1, lh1, MDS_INODELOCK_LAYOUT,
1080                              MDT_LOCAL_LOCK);
1081         if (rc < 0)
1082                 GOTO(put, rc);
1083
1084         rc = mdt_object_lock(info, o2, lh2, MDS_INODELOCK_LAYOUT,
1085                              MDT_LOCAL_LOCK);
1086         if (rc < 0)
1087                 GOTO(unlock1, rc);
1088
1089         rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
1090                              mdt_object_child(o2), msl->msl_flags);
1091         GOTO(unlock2, rc);
1092 unlock2:
1093         mdt_object_unlock(info, o2, lh2, rc);
1094 unlock1:
1095         mdt_object_unlock(info, o1, lh1, rc);
1096 put:
1097         mdt_object_put(info->mti_env, o);
1098 out:
1099         RETURN(rc);
1100 }
1101
1102 static int mdt_raw_lookup(struct mdt_thread_info *info,
1103                           struct mdt_object *parent,
1104                           const struct lu_name *lname,
1105                           struct ldlm_reply *ldlm_rep)
1106 {
1107         struct md_object *next = mdt_object_child(info->mti_object);
1108         const struct mdt_body *reqbody = info->mti_body;
1109         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1110         struct mdt_body *repbody;
1111         int rc;
1112         ENTRY;
1113
1114         if (reqbody->valid != OBD_MD_FLID)
1115                 RETURN(0);
1116
1117         LASSERT(!info->mti_cross_ref);
1118
1119         /* Only got the fid of this obj by name */
1120         fid_zero(child_fid);
1121         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1122                         &info->mti_spec);
1123 #if 0
1124         /* XXX is raw_lookup possible as intent operation? */
1125         if (rc != 0) {
1126                 if (rc == -ENOENT)
1127                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1128                 RETURN(rc);
1129         } else
1130                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1131
1132         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1133 #endif
1134         if (rc == 0) {
1135                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1136                 repbody->fid1 = *child_fid;
1137                 repbody->valid = OBD_MD_FLID;
1138         }
1139         RETURN(1);
1140 }
1141
1142 /*
1143  * UPDATE lock should be taken against parent, and be release before exit;
1144  * child_bits lock should be taken against child, and be returned back:
1145  *            (1)normal request should release the child lock;
1146  *            (2)intent request will grant the lock to client.
1147  */
1148 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1149                                  struct mdt_lock_handle *lhc,
1150                                  __u64 child_bits,
1151                                  struct ldlm_reply *ldlm_rep)
1152 {
1153         struct ptlrpc_request  *req       = mdt_info_req(info);
1154         struct mdt_body        *reqbody   = NULL;
1155         struct mdt_object      *parent    = info->mti_object;
1156         struct mdt_object      *child;
1157         struct md_object       *next      = mdt_object_child(parent);
1158         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1159         struct lu_name         *lname     = NULL;
1160         const char             *name      = NULL;
1161         int                     namelen   = 0;
1162         struct mdt_lock_handle *lhp       = NULL;
1163         struct ldlm_lock       *lock;
1164         struct ldlm_res_id     *res_id;
1165         int                     is_resent;
1166         int                     ma_need = 0;
1167         int                     rc;
1168
1169         ENTRY;
1170
1171         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1172         LASSERT(ergo(is_resent,
1173                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1174
1175         LASSERT(parent != NULL);
1176         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1177         if (name == NULL)
1178                 RETURN(err_serious(-EFAULT));
1179
1180         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1181                                        RCL_CLIENT) - 1;
1182         if (!info->mti_cross_ref) {
1183                 /*
1184                  * XXX: Check for "namelen == 0" is for getattr by fid
1185                  * (OBD_CONNECT_ATTRFID), otherwise do not allow empty name,
1186                  * that is the name must contain at least one character and
1187                  * the terminating '\0'
1188                  */
1189                 if (namelen == 0) {
1190                         reqbody = req_capsule_client_get(info->mti_pill,
1191                                                          &RMF_MDT_BODY);
1192                         if (unlikely(reqbody == NULL))
1193                                 RETURN(err_serious(-EFAULT));
1194
1195                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1196                                 RETURN(err_serious(-EINVAL));
1197
1198                         name = NULL;
1199                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1200                                "ldlm_rep = %p\n",
1201                                PFID(mdt_object_fid(parent)),
1202                                PFID(&reqbody->fid2), ldlm_rep);
1203                 } else {
1204                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1205                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1206                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1207                                name, ldlm_rep);
1208                 }
1209         }
1210         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1211
1212         if (unlikely(!mdt_object_exists(parent))) {
1213                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1214                                 &parent->mot_obj.mo_lu,
1215                                 "Parent doesn't exist!\n");
1216                 RETURN(-ESTALE);
1217         } else if (!info->mti_cross_ref) {
1218                 LASSERTF(!mdt_object_remote(parent),
1219                          "Parent "DFID" is on remote server\n",
1220                          PFID(mdt_object_fid(parent)));
1221         }
1222         if (lname) {
1223                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1224                 if (rc != 0) {
1225                         if (rc > 0)
1226                                 rc = 0;
1227                         RETURN(rc);
1228                 }
1229         }
1230
1231         if (info->mti_cross_ref) {
1232                 /* Only getattr on the child. Parent is on another node. */
1233                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1234                 child = parent;
1235                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1236                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1237
1238                 if (is_resent) {
1239                         /* Do not take lock for resent case. */
1240                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1241                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1242                                  lhc->mlh_reg_lh.cookie);
1243                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1244                                                 &lock->l_resource->lr_name));
1245                         LDLM_LOCK_PUT(lock);
1246                         rc = 0;
1247                 } else {
1248                         mdt_lock_handle_init(lhc);
1249                         mdt_lock_reg_init(lhc, LCK_PR);
1250
1251                         /*
1252                          * Object's name is on another MDS, no lookup lock is
1253                          * needed here but update is.
1254                          */
1255                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1256                         child_bits |= MDS_INODELOCK_PERM | MDS_INODELOCK_UPDATE;
1257
1258                         rc = mdt_object_lock(info, child, lhc, child_bits,
1259                                              MDT_LOCAL_LOCK);
1260                 }
1261                 if (rc == 0) {
1262                         /* Finally, we can get attr for child. */
1263                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1264                                          BYPASS_CAPA);
1265                         rc = mdt_getattr_internal(info, child, 0);
1266                         if (unlikely(rc != 0))
1267                                 mdt_object_unlock(info, child, lhc, 1);
1268                 }
1269                 RETURN(rc);
1270         }
1271
1272         if (lname) {
1273                 /* step 1: lock parent only if parent is a directory */
1274                 if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
1275                         lhp = &info->mti_lh[MDT_LH_PARENT];
1276                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1277                         rc = mdt_object_lock(info, parent, lhp,
1278                                              MDS_INODELOCK_UPDATE,
1279                                              MDT_LOCAL_LOCK);
1280                         if (unlikely(rc != 0))
1281                                 RETURN(rc);
1282                 }
1283
1284                 /* step 2: lookup child's fid by name */
1285                 fid_zero(child_fid);
1286                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1287                                 &info->mti_spec);
1288
1289                 if (rc != 0) {
1290                         if (rc == -ENOENT)
1291                                 mdt_set_disposition(info, ldlm_rep,
1292                                                     DISP_LOOKUP_NEG);
1293                         GOTO(out_parent, rc);
1294                 } else
1295                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1296         } else {
1297                 *child_fid = reqbody->fid2;
1298                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1299         }
1300
1301         /*
1302          *step 3: find the child object by fid & lock it.
1303          *        regardless if it is local or remote.
1304          */
1305         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1306
1307         if (unlikely(IS_ERR(child)))
1308                 GOTO(out_parent, rc = PTR_ERR(child));
1309         if (is_resent) {
1310                 /* Do not take lock for resent case. */
1311                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1312                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1313                          lhc->mlh_reg_lh.cookie);
1314
1315                 res_id = &lock->l_resource->lr_name;
1316                 if (!fid_res_name_eq(mdt_object_fid(child),
1317                                     &lock->l_resource->lr_name)) {
1318                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1319                                                  &lock->l_resource->lr_name),
1320                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1321                                  (unsigned long)res_id->name[0],
1322                                  (unsigned long)res_id->name[1],
1323                                  (unsigned long)res_id->name[2],
1324                                  PFID(mdt_object_fid(parent)));
1325                           CWARN("Although resent, but still not get child lock"
1326                                 "parent:"DFID" child:"DFID"\n",
1327                                 PFID(mdt_object_fid(parent)),
1328                                 PFID(mdt_object_fid(child)));
1329                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1330                           LDLM_LOCK_PUT(lock);
1331                           GOTO(relock, 0);
1332                 }
1333                 LDLM_LOCK_PUT(lock);
1334                 rc = 0;
1335         } else {
1336                 bool try_layout = false;
1337
1338 relock:
1339                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1340                 mdt_lock_handle_init(lhc);
1341                 mdt_lock_reg_init(lhc, LCK_PR);
1342
1343                 if (!mdt_object_exists(child)) {
1344                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1345                                         &child->mot_obj.mo_lu,
1346                                         "Object doesn't exist!\n");
1347                         GOTO(out_child, rc = -ENOENT);
1348                 }
1349
1350                 if (!(child_bits & MDS_INODELOCK_UPDATE) &&
1351                       mdt_object_exists(child) && !mdt_object_remote(child)) {
1352                         struct md_attr *ma = &info->mti_attr;
1353
1354                         ma->ma_valid = 0;
1355                         ma->ma_need = MA_INODE;
1356                         rc = mdt_attr_get_complex(info, child, ma);
1357                         if (unlikely(rc != 0))
1358                                 GOTO(out_child, rc);
1359
1360                         /* If the file has not been changed for some time, we
1361                          * return not only a LOOKUP lock, but also an UPDATE
1362                          * lock and this might save us RPC on later STAT. For
1363                          * directories, it also let negative dentry starts
1364                          * working for this dir. */
1365                         if (ma->ma_valid & MA_INODE &&
1366                             ma->ma_attr.la_valid & LA_CTIME &&
1367                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1368                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1369                                 child_bits |= MDS_INODELOCK_UPDATE;
1370                 }
1371
1372                 /* layout lock must be granted in a best-effort way
1373                  * for IT operations */
1374                 LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT));
1375                 if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) &&
1376                     exp_connect_layout(info->mti_exp) &&
1377                     S_ISREG(lu_object_attr(&child->mot_obj.mo_lu)) &&
1378                     ldlm_rep != NULL) {
1379                         /* try to grant layout lock for regular file. */
1380                         try_layout = true;
1381                 }
1382
1383                 rc = 0;
1384                 if (try_layout) {
1385                         child_bits |= MDS_INODELOCK_LAYOUT;
1386                         /* try layout lock, it may fail to be granted due to
1387                          * contention at LOOKUP or UPDATE */
1388                         if (!mdt_object_lock_try(info, child, lhc, child_bits,
1389                                                  MDT_CROSS_LOCK)) {
1390                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1391                                 LASSERT(child_bits != 0);
1392                                 rc = mdt_object_lock(info, child, lhc,
1393                                                 child_bits, MDT_CROSS_LOCK);
1394                         } else {
1395                                 ma_need |= MA_LOV;
1396                         }
1397                 } else {
1398                         rc = mdt_object_lock(info, child, lhc, child_bits,
1399                                                 MDT_CROSS_LOCK);
1400                 }
1401                 if (unlikely(rc != 0))
1402                         GOTO(out_child, rc);
1403         }
1404
1405         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1406         /* Get MA_SOM attributes if update lock is given. */
1407         if (lock &&
1408             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1409             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1410                 ma_need |= MA_SOM;
1411
1412         /* finally, we can get attr for child. */
1413         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1414         rc = mdt_getattr_internal(info, child, ma_need);
1415         if (unlikely(rc != 0)) {
1416                 mdt_object_unlock(info, child, lhc, 1);
1417         } else if (lock) {
1418                 /* Debugging code. */
1419                 res_id = &lock->l_resource->lr_name;
1420                 LDLM_DEBUG(lock, "Returning lock to client");
1421                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1422                                          &lock->l_resource->lr_name),
1423                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1424                          (unsigned long)res_id->name[0],
1425                          (unsigned long)res_id->name[1],
1426                          (unsigned long)res_id->name[2],
1427                          PFID(mdt_object_fid(child)));
1428                 if (mdt_object_exists(child) && !mdt_object_remote(child))
1429                         mdt_pack_size2body(info, child);
1430         }
1431         if (lock)
1432                 LDLM_LOCK_PUT(lock);
1433
1434         EXIT;
1435 out_child:
1436         mdt_object_put(info->mti_env, child);
1437 out_parent:
1438         if (lhp)
1439                 mdt_object_unlock(info, parent, lhp, 1);
1440         return rc;
1441 }
1442
1443 /* normal handler: should release the child lock */
1444 int mdt_getattr_name(struct mdt_thread_info *info)
1445 {
1446         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1447         struct mdt_body        *reqbody;
1448         struct mdt_body        *repbody;
1449         int rc, rc2;
1450         ENTRY;
1451
1452         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1453         LASSERT(reqbody != NULL);
1454         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1455         LASSERT(repbody != NULL);
1456
1457         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1458         repbody->eadatasize = 0;
1459         repbody->aclsize = 0;
1460
1461         rc = mdt_init_ucred(info, reqbody);
1462         if (unlikely(rc))
1463                 GOTO(out_shrink, rc);
1464
1465         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1466         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1467                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1468                 lhc->mlh_reg_lh.cookie = 0;
1469         }
1470         mdt_exit_ucred(info);
1471         EXIT;
1472 out_shrink:
1473         mdt_client_compatibility(info);
1474         rc2 = mdt_fix_reply(info);
1475         if (rc == 0)
1476                 rc = rc2;
1477         return rc;
1478 }
1479
1480 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1481                          void *karg, void *uarg);
1482
/**
 * MDS_SET_INFO handler: update server-side state at a client's request.
 *
 * Supported keys:
 * - KEY_READ_ONLY:       toggle OBD_CONNECT_RDONLY on the client's export
 * - KEY_CHANGELOG_CLEAR: purge changelog records via mdt_iocontrol()
 *
 * Any other key is rejected with -EINVAL.
 *
 * \retval 0 on success, negative errno on failure
 */
int mdt_set_info(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        char *key;
        void *val;
        int keylen, vallen, rc = 0;
        ENTRY;

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(rc);

        key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
        if (key == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info key");
                RETURN(-EFAULT);
        }

        keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);

        val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
        if (val == NULL) {
                DEBUG_REQ(D_HA, req, "no set_info val");
                RETURN(-EFAULT);
        }

        vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
                                      RCL_CLIENT);

        /* Swab any part of val you need to here */
        if (KEY_IS(KEY_READ_ONLY)) {
                req->rq_status = 0;
                lustre_msg_set_status(req->rq_repmsg, 0);

                /* flip the flag under exp_lock so concurrent readers of the
                 * connect flags always see a consistent value */
                spin_lock(&req->rq_export->exp_lock);
                if (*(__u32 *)val)
                        *exp_connect_flags_ptr(req->rq_export) |=
                                OBD_CONNECT_RDONLY;
                else
                        *exp_connect_flags_ptr(req->rq_export) &=
                                ~OBD_CONNECT_RDONLY;
                spin_unlock(&req->rq_export->exp_lock);

        } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
                struct changelog_setinfo *cs =
                        (struct changelog_setinfo *)val;
                /* payload must be exactly one changelog_setinfo record */
                if (vallen != sizeof(*cs)) {
                        CERROR("Bad changelog_clear setinfo size %d\n", vallen);
                        RETURN(-EINVAL);
                }
                if (ptlrpc_req_need_swab(req)) {
                        __swab64s(&cs->cs_recno);
                        __swab32s(&cs->cs_id);
                }

                rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
                                   vallen, val, NULL);
                lustre_msg_set_status(req->rq_repmsg, rc);

        } else {
                RETURN(-EINVAL);
        }
        RETURN(0);
}
1548
/**
 * Top-level handler for MDT connection requests.
 *
 * Delegates the generic connect work to target_handle_connect(), then sets
 * up the request security level and the identity mapping for the new
 * export, and finally publishes the negotiated connect flags.  On failure
 * after the export has been created, the half-made connection is torn
 * down with obd_disconnect().
 *
 * \retval 0 on success, negative errno on failure
 */
int mdt_connect(struct mdt_thread_info *info)
{
        int rc;
        struct obd_connect_data *reply;
        struct obd_export *exp;
        struct ptlrpc_request *req = mdt_info_req(info);

        rc = target_handle_connect(req);
        if (rc != 0)
                return err_serious(rc);

        LASSERT(req->rq_export != NULL);
        info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
        rc = mdt_init_sec_level(info);
        if (rc != 0) {
                /* class_export_get() balances the put done inside
                 * obd_disconnect() */
                obd_disconnect(class_export_get(req->rq_export));
                return rc;
        }

        /* To avoid exposing partially initialized connection flags, changes up
         * to this point have been staged in reply->ocd_connect_flags. Now that
         * connection handling has completed successfully, atomically update
         * the connect flags in the shared export data structure. LU-1623 */
        reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
        exp = req->rq_export;
        spin_lock(&exp->exp_lock);
        *exp_connect_flags_ptr(exp) = reply->ocd_connect_flags;
        spin_unlock(&exp->exp_lock);

        rc = mdt_init_idmap(info);
        if (rc != 0)
                obd_disconnect(class_export_get(req->rq_export));

        return rc;
}
1587
1588 int mdt_disconnect(struct mdt_thread_info *info)
1589 {
1590         int rc;
1591         ENTRY;
1592
1593         rc = target_handle_disconnect(mdt_info_req(info));
1594         if (rc)
1595                 rc = err_serious(rc);
1596         RETURN(rc);
1597 }
1598
/**
 * Transfer the pages collected in \a rdpg to the client with a bulk PUT.
 *
 * \param info request thread context
 * \param rdpg descriptor holding the pages to send
 * \param nob  number of bytes to transfer
 *
 * \retval 0 on success, negative errno on failure
 */
static int mdt_sendpage(struct mdt_thread_info *info,
                        struct lu_rdpg *rdpg, int nob)
{
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
        int                      tmpcount;
        int                      tmpsize;
        int                      i;
        int                      rc;
        ENTRY;

        desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
                                    MDS_BULK_PORTAL);
        if (desc == NULL)
                RETURN(-ENOMEM);

        if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
                /* old client requires reply size in it's PAGE_SIZE,
                 * which is rdpg->rp_count */
                nob = rdpg->rp_count;

        /* attach just enough pages to the descriptor to cover nob bytes;
         * the final page may be partial */
        for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
             i++, tmpcount -= tmpsize) {
                tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
                ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
        }

        LASSERT(desc->bd_nob == nob);
        rc = target_bulk_io(exp, desc, lwi);
        ptlrpc_free_bulk_pin(desc);
        RETURN(rc);
}
1633
/**
 * MDS_READPAGE handler: read directory pages and bulk them to the client.
 *
 * Allocates a temporary page array, asks the lower layers to fill it with
 * directory entries, and ships the result with mdt_sendpage().  The pages
 * are always freed before returning, whatever the outcome.
 *
 * \retval >= 0 on success, negative errno on failure
 */
int mdt_readpage(struct mdt_thread_info *info)
{
        struct mdt_object *object = info->mti_object;
        struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct mdt_body   *reqbody;
        struct mdt_body   *repbody;
        int                rc;
        int                i;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
                RETURN(err_serious(-ENOMEM));

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL || repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /*
         * prepare @rdpg before calling lower layers and transfer itself. Here
         * reqbody->size contains offset of where to start to read and
         * reqbody->nlink contains number bytes to read.
         */
        rdpg->rp_hash = reqbody->size;
        /* detect truncation in case rp_hash is narrower than size */
        if (rdpg->rp_hash != reqbody->size) {
                CERROR("Invalid hash: "LPX64" != "LPX64"\n",
                       rdpg->rp_hash, reqbody->size);
                RETURN(-EFAULT);
        }

        rdpg->rp_attrs = reqbody->mode;
        if (exp_connect_flags(info->mti_exp) & OBD_CONNECT_64BITHASH)
                rdpg->rp_attrs |= LUDA_64BITHASH;
        /* never transfer more than the negotiated bulk I/O size */
        rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
                                exp_max_brw_size(info->mti_exp));
        rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
                          CFS_PAGE_SHIFT;
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
        if (rdpg->rp_pages == NULL)
                RETURN(-ENOMEM);

        for (i = 0; i < rdpg->rp_npages; ++i) {
                rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(free_rdpg, rc = -ENOMEM);
        }

        /* call lower layers to fill allocated pages with directory data */
        rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
        if (rc < 0)
                GOTO(free_rdpg, rc);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg, rc);

        EXIT;
free_rdpg:

        /* release whatever subset of pages was successfully allocated */
        for (i = 0; i < rdpg->rp_npages; i++)
                if (rdpg->rp_pages[i] != NULL)
                        cfs_free_page(rdpg->rp_pages[i]);
        OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);

        /* fault injection: pretend success after dropping the reply */
        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
                RETURN(0);

        return rc;
}
1702
/**
 * Common path for all reintegration (metadata-modifying) operations.
 *
 * Unpacks the reint record, sizes and packs the reply, sets up user
 * credentials, handles resent requests, and finally dispatches to
 * mdt_reint_rec().  The reply is shrunk to its real size on every exit
 * path via the out_shrink label.
 *
 * \param info request thread context
 * \param lhc  lock handle passed down to the reint code (may be NULL)
 * \param op   REINT_* opcode
 *
 * \retval 0 on success, negative errno on failure
 */
static int mdt_reint_internal(struct mdt_thread_info *info,
                              struct mdt_lock_handle *lhc,
                              __u32 op)
{
        struct req_capsule      *pill = info->mti_pill;
        struct mdt_body         *repbody;
        int                      rc = 0, rc2;
        ENTRY;


        rc = mdt_reint_unpack(info, op);
        if (rc != 0) {
                CERROR("Can't unpack reint, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        /* for replay (no_create) lmm is not needed, client has it already */
        if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
                                     info->mti_rr.rr_eadatalen);

        /* llog cookies are always 0, the field is kept for compatibility */
        if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
                req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);

        rc = req_capsule_server_pack(pill);
        if (rc != 0) {
                CERROR("Can't pack response, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
                repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
                LASSERT(repbody);
                repbody->eadatasize = 0;
                repbody->aclsize = 0;
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);

        /* for replay no cookie / lmm is needed, because the client has
         * them already */
        if (info->mti_spec.no_create)
                if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
                        req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);

        rc = mdt_init_ucred_reint(info);
        if (rc)
                GOTO(out_shrink, rc);

        rc = mdt_fix_attr_ucred(info, op);
        if (rc != 0)
                GOTO(out_ucred, rc = err_serious(rc));

        /* a resent request is answered with the reconstructed reply */
        if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
                rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
                GOTO(out_ucred, rc);
        }
        rc = mdt_reint_rec(info, lhc);
        EXIT;
out_ucred:
        mdt_exit_ucred(info);
out_shrink:
        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}
1771
1772 static long mdt_reint_opcode(struct mdt_thread_info *info,
1773                              const struct req_format **fmt)
1774 {
1775         struct mdt_rec_reint *rec;
1776         long opc;
1777
1778         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1779         if (rec != NULL) {
1780                 opc = rec->rr_opcode;
1781                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opt = %ld", opc);
1782                 if (opc < REINT_MAX && fmt[opc] != NULL)
1783                         req_capsule_extend(info->mti_pill, fmt[opc]);
1784                 else {
1785                         CERROR("%s: Unsupported opcode '%ld' from client '%s': "
1786                                "rc = %d\n", mdt_obd_name(info->mti_mdt), opc,
1787                                info->mti_mdt->mdt_ldlm_client->cli_name,
1788                                -EFAULT);
1789                         opc = err_serious(-EFAULT);
1790                 }
1791         } else {
1792                 opc = err_serious(-EFAULT);
1793         }
1794         return opc;
1795 }
1796
1797 int mdt_reint(struct mdt_thread_info *info)
1798 {
1799         long opc;
1800         int  rc;
1801
1802         static const struct req_format *reint_fmts[REINT_MAX] = {
1803                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1804                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1805                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1806                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1807                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1808                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1809                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR,
1810                 [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK
1811         };
1812
1813         ENTRY;
1814
1815         opc = mdt_reint_opcode(info, reint_fmts);
1816         if (opc >= 0) {
1817                 /*
1818                  * No lock possible here from client to pass it to reint code
1819                  * path.
1820                  */
1821                 rc = mdt_reint_internal(info, NULL, opc);
1822         } else {
1823                 rc = opc;
1824         }
1825
1826         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1827         RETURN(rc);
1828 }
1829
1830 /* this should sync the whole device */
1831 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1832 {
1833         struct dt_device *dt = mdt->mdt_bottom;
1834         int rc;
1835         ENTRY;
1836
1837         rc = dt->dd_ops->dt_sync(env, dt);
1838         RETURN(rc);
1839 }
1840
1841 /* this should sync this object */
1842 static int mdt_object_sync(struct mdt_thread_info *info)
1843 {
1844         struct md_object *next;
1845         int rc;
1846         ENTRY;
1847
1848         if (!mdt_object_exists(info->mti_object)) {
1849                 CWARN("Non existing object  "DFID"!\n",
1850                       PFID(mdt_object_fid(info->mti_object)));
1851                 RETURN(-ESTALE);
1852         }
1853         next = mdt_object_child(info->mti_object);
1854         rc = mo_object_sync(info->mti_env, next);
1855
1856         RETURN(rc);
1857 }
1858
/**
 * MDS_SYNC handler.
 *
 * A zero fid sequence in the request body means "sync the whole device";
 * otherwise the single named object is synced and its fresh attributes
 * are packed into the reply body.
 *
 * \retval 0 on success, negative errno on failure
 */
int mdt_sync(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req = mdt_info_req(info);
        struct req_capsule *pill = info->mti_pill;
        struct mdt_body *body;
        int rc;
        ENTRY;

        /* The fid may be zero, so we req_capsule_set manually */
        req_capsule_set(pill, &RQF_MDS_SYNC);

        body = req_capsule_client_get(pill, &RMF_MDT_BODY);
        if (body == NULL)
                RETURN(err_serious(-EINVAL));

        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
                RETURN(err_serious(-ENOMEM));

        if (fid_seq(&body->fid1) == 0) {
                /* sync the whole device */
                rc = req_capsule_server_pack(pill);
                if (rc == 0)
                        rc = mdt_device_sync(info->mti_env, info->mti_mdt);
                else
                        rc = err_serious(rc);
        } else {
                /* sync an object */
                rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
                if (rc == 0) {
                        rc = mdt_object_sync(info);
                        if (rc == 0) {
                                const struct lu_fid *fid;
                                struct lu_attr *la = &info->mti_attr.ma_attr;

                                /* refresh attributes after the sync so the
                                 * client gets up-to-date metadata back */
                                info->mti_attr.ma_need = MA_INODE;
                                info->mti_attr.ma_valid = 0;
                                rc = mdt_attr_get_complex(info, info->mti_object,
                                                          &info->mti_attr);
                                if (rc == 0) {
                                        body = req_capsule_server_get(pill,
                                                                &RMF_MDT_BODY);
                                        fid = mdt_object_fid(info->mti_object);
                                        mdt_pack_attr2body(info, body, la, fid);
                                }
                        }
                } else
                        rc = err_serious(rc);
        }
        if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_SYNC);

        RETURN(rc);
}
1912
1913 /*
1914  * Quotacheck handler.
1915  * in-kernel quotacheck isn't supported any more.
1916  */
1917 int mdt_quotacheck(struct mdt_thread_info *info)
1918 {
1919         struct obd_quotactl     *oqctl;
1920         int                      rc;
1921         ENTRY;
1922
1923         oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
1924         if (oqctl == NULL)
1925                 RETURN(err_serious(-EPROTO));
1926
1927         rc = req_capsule_server_pack(info->mti_pill);
1928         if (rc)
1929                 RETURN(err_serious(rc));
1930
1931         /* deprecated, not used any more */
1932         RETURN(-EOPNOTSUPP);
1933 }
1934
/*
 * Handle quota control requests to consult current usage/limit, but also
 * to configure quota enforcement
 */
int mdt_quotactl(struct mdt_thread_info *info)
{
        struct obd_export       *exp  = info->mti_exp;
        struct req_capsule      *pill = info->mti_pill;
        struct obd_quotactl     *oqctl, *repoqc;
        int                      id, rc;
        struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
        ENTRY;

        oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
        if (oqctl == NULL)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(pill);
        if (rc)
                RETURN(err_serious(rc));

        /* validate the command before doing any work */
        switch (oqctl->qc_cmd) {
        case Q_QUOTACHECK:
        case LUSTRE_Q_INVALIDATE:
        case LUSTRE_Q_FINVALIDATE:
        case Q_QUOTAON:
        case Q_QUOTAOFF:
        case Q_INITQUOTA:
                /* deprecated, not used any more */
                RETURN(-EOPNOTSUPP);
                /* master quotactl */
        case Q_GETINFO:
        case Q_SETINFO:
        case Q_SETQUOTA:
        case Q_GETQUOTA:
                if (qmt == NULL)
                        RETURN(-EOPNOTSUPP);
                /* fallthrough: master commands share the checks below */
                /* slave quotactl */
        case Q_GETOINFO:
        case Q_GETOQUOTA:
                break;
        default:
                CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        /* map uid/gid for remote client */
        id = oqctl->qc_id;
        if (exp_connect_rmtclient(exp)) {
                struct lustre_idmap_table *idmap;

                idmap = mdt_req2med(mdt_info_req(info))->med_idmap;

                /* remote clients may only query, never set */
                if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
                             oqctl->qc_cmd != Q_GETINFO))
                        RETURN(-EPERM);

                if (oqctl->qc_type == USRQUOTA)
                        id = lustre_idmap_lookup_uid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else if (oqctl->qc_type == GRPQUOTA)
                        id = lustre_idmap_lookup_gid(NULL, idmap, 0,
                                                     oqctl->qc_id);
                else
                        RETURN(-EINVAL);

                if (id == CFS_IDMAP_NOTFOUND) {
                        CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
                        RETURN(-EACCES);
                }
        }

        repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
        if (repoqc == NULL)
                RETURN(err_serious(-EFAULT));

        /* temporarily substitute the mapped id for the request */
        if (oqctl->qc_id != id)
                swap(oqctl->qc_id, id);

        switch (oqctl->qc_cmd) {

        case Q_GETINFO:
        case Q_SETINFO:
        case Q_SETQUOTA:
        case Q_GETQUOTA:
                /* forward quotactl request to QMT */
                rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
                break;

        case Q_GETOINFO:
        case Q_GETOQUOTA:
                /* slave quotactl */
                rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
                                   oqctl);
                break;

        default:
                CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        /* restore the original (unmapped) id before replying */
        if (oqctl->qc_id != id)
                swap(oqctl->qc_id, id);

        *repoqc = *oqctl;
        RETURN(rc);
}
2042
2043 /*
2044  * OBD PING and other handlers.
2045  */
2046 int mdt_obd_ping(struct mdt_thread_info *info)
2047 {
2048         int rc;
2049         ENTRY;
2050
2051         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2052
2053         rc = target_handle_ping(mdt_info_req(info));
2054         if (rc < 0)
2055                 rc = err_serious(rc);
2056         RETURN(rc);
2057 }
2058
2059 /*
2060  * OBD_IDX_READ handler
2061  */
/**
 * OBD_IDX_READ handler: read index (key/record) pages on behalf of a
 * client and transfer them via bulk.
 *
 * Pages are allocated per request and always released through the single
 * `out` label, whether the read succeeded or not.
 *
 * \retval >= 0 on success, negative errno on failure
 */
int mdt_obd_idx_read(struct mdt_thread_info *info)
{
        struct mdt_device       *mdt = info->mti_mdt;
        struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
        struct idx_info         *req_ii, *rep_ii;
        int                      rc, i;
        ENTRY;

        memset(rdpg, 0, sizeof(*rdpg));
        req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);

        /* extract idx_info buffer from request & reply */
        req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
        if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
                RETURN(err_serious(-EPROTO));

        rc = req_capsule_server_pack(info->mti_pill);
        if (rc)
                RETURN(err_serious(rc));

        rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
        if (rep_ii == NULL)
                RETURN(err_serious(-EFAULT));
        rep_ii->ii_magic = IDX_INFO_MAGIC;

        /* extract hash to start with */
        rdpg->rp_hash = req_ii->ii_hash_start;

        /* extract requested attributes */
        rdpg->rp_attrs = req_ii->ii_attrs;

        /* check that fid packed in request is valid and supported */
        if (!fid_is_sane(&req_ii->ii_fid))
                RETURN(-EINVAL);
        rep_ii->ii_fid = req_ii->ii_fid;

        /* copy flags */
        rep_ii->ii_flags = req_ii->ii_flags;

        /* compute number of pages to allocate, ii_count is the number of 4KB
         * containers */
        if (req_ii->ii_count <= 0)
                GOTO(out, rc = -EFAULT);
        rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
                               exp_max_brw_size(info->mti_exp));
        rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT;

        /* allocate pages to store the containers */
        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        if (rdpg->rp_pages == NULL)
                GOTO(out, rc = -ENOMEM);
        for (i = 0; i < rdpg->rp_npages; i++) {
                rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(out, rc = -ENOMEM);
        }

        /* populate pages with key/record pairs */
        rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
        if (rc < 0)
                GOTO(out, rc);

        LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
                 "asked %d > %d\n", rc, rdpg->rp_count);

        /* send pages to client */
        rc = mdt_sendpage(info, rdpg, rc);

        GOTO(out, rc);
out:
        /* free only the pages that were actually allocated */
        if (rdpg->rp_pages) {
                for (i = 0; i < rdpg->rp_npages; i++)
                        if (rdpg->rp_pages[i])
                                cfs_free_page(rdpg->rp_pages[i]);
                OBD_FREE(rdpg->rp_pages,
                         rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
        }
        return rc;
}
2141
/* Log-cancel RPCs are not serviced by the MDT; reject them outright. */
int mdt_obd_log_cancel(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}
2146
/* Quota-check callbacks are not serviced by the MDT; reject them. */
int mdt_obd_qc_callback(struct mdt_thread_info *info)
{
        return err_serious(-EOPNOTSUPP);
}
2151
2152 /*
2153  * LLOG handlers.
2154  */
2155
2156 /** clone llog ctxt from child (mdd)
2157  * This allows remote llog (replicator) access.
2158  * We can either pass all llog RPCs (eg mdt_llog_create) on to child where the
2159  * context was originally set up, or we can handle them directly.
2160  * I choose the latter, but that means I need any llog
2161  * contexts set up by child to be accessable by the mdt.  So we clone the
2162  * context into our context list here.
2163  */
2164 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2165                                int idx)
2166 {
2167         struct md_device  *next = mdt->mdt_child;
2168         struct llog_ctxt *ctxt;
2169         int rc;
2170
2171         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2172                 return 0;
2173
2174         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2175         if (rc || ctxt == NULL) {
2176                 return 0;
2177         }
2178
2179         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2180         if (rc)
2181                 CERROR("Can't set mdt ctxt %d\n", rc);
2182
2183         return rc;
2184 }
2185
2186 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2187                                  struct mdt_device *mdt, int idx)
2188 {
2189         struct llog_ctxt *ctxt;
2190
2191         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2192         if (ctxt == NULL)
2193                 return 0;
2194         /* Put once for the get we just did, and once for the clone */
2195         llog_ctxt_put(ctxt);
2196         llog_ctxt_put(ctxt);
2197         return 0;
2198 }
2199
2200 int mdt_llog_create(struct mdt_thread_info *info)
2201 {
2202         int rc;
2203
2204         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2205         rc = llog_origin_handle_open(mdt_info_req(info));
2206         return (rc < 0 ? err_serious(rc) : rc);
2207 }
2208
2209 int mdt_llog_destroy(struct mdt_thread_info *info)
2210 {
2211         int rc;
2212
2213         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2214         rc = llog_origin_handle_destroy(mdt_info_req(info));
2215         return (rc < 0 ? err_serious(rc) : rc);
2216 }
2217
2218 int mdt_llog_read_header(struct mdt_thread_info *info)
2219 {
2220         int rc;
2221
2222         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2223         rc = llog_origin_handle_read_header(mdt_info_req(info));
2224         return (rc < 0 ? err_serious(rc) : rc);
2225 }
2226
2227 int mdt_llog_next_block(struct mdt_thread_info *info)
2228 {
2229         int rc;
2230
2231         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2232         rc = llog_origin_handle_next_block(mdt_info_req(info));
2233         return (rc < 0 ? err_serious(rc) : rc);
2234 }
2235
2236 int mdt_llog_prev_block(struct mdt_thread_info *info)
2237 {
2238         int rc;
2239
2240         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2241         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2242         return (rc < 0 ? err_serious(rc) : rc);
2243 }
2244
2245
2246 /*
2247  * DLM handlers.
2248  */
2249
/* AST callback suite installed for locks granted by the MDT; uses the
 * stock server-side completion/blocking/glimpse handlers. */
static struct ldlm_callback_suite cbs = {
        .lcs_completion = ldlm_server_completion_ast,
        .lcs_blocking   = ldlm_server_blocking_ast,
        .lcs_glimpse    = ldlm_server_glimpse_ast
};
2255
/**
 * LDLM_ENQUEUE handler: grant or queue a DLM lock for the client.
 *
 * \retval 0 or req->rq_status on success, serious error otherwise
 */
int mdt_enqueue(struct mdt_thread_info *info)
{
        struct ptlrpc_request *req;
        int rc;

        /*
         * info->mti_dlm_req already contains swapped and (if necessary)
         * converted dlm request.
         */
        LASSERT(info->mti_dlm_req != NULL);

        req = mdt_info_req(info);
        rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
                                  req, info->mti_dlm_req, &cbs);
        info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
        return rc ? err_serious(rc) : req->rq_status;
}
2273
2274 int mdt_convert(struct mdt_thread_info *info)
2275 {
2276         int rc;
2277         struct ptlrpc_request *req;
2278
2279         LASSERT(info->mti_dlm_req);
2280         req = mdt_info_req(info);
2281         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2282         return rc ? err_serious(rc) : req->rq_status;
2283 }
2284
/* The MDS is a lock server, never a client: receiving a blocking
 * callback here is a fatal logic error, hence LBUG(). */
int mdt_bl_callback(struct mdt_thread_info *info)
{
        CERROR("bl callbacks should not happen on MDS\n");
        LBUG();
        return err_serious(-EOPNOTSUPP);
}
2291
/* Completion callbacks are client-side only; reaching this on the MDS
 * is a fatal logic error, hence LBUG(). */
int mdt_cp_callback(struct mdt_thread_info *info)
{
        CERROR("cp callbacks should not happen on MDS\n");
        LBUG();
        return err_serious(-EOPNOTSUPP);
}
2298
2299 /*
2300  * sec context handlers
2301  */
2302 int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2303 {
2304         int rc;
2305
2306         rc = mdt_handle_idmap(info);
2307
2308         if (unlikely(rc)) {
2309                 struct ptlrpc_request *req = mdt_info_req(info);
2310                 __u32                  opc;
2311
2312                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2313                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2314                         sptlrpc_svc_ctx_invalidate(req);
2315         }
2316
2317         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2318
2319         return rc;
2320 }
2321
2322 /*
2323  * quota request handlers
2324  */
2325 int mdt_quota_dqacq(struct mdt_thread_info *info)
2326 {
2327         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2328         int                      rc;
2329         ENTRY;
2330
2331         if (qmt == NULL)
2332                 RETURN(err_serious(-EOPNOTSUPP));
2333
2334         rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2335         RETURN(rc);
2336 }
2337
/* Convert a generic lu_object into its enclosing mdt_object. */
static struct mdt_object *mdt_obj(struct lu_object *o)
{
        LASSERT(lu_device_is_mdt(o->lo_dev));
        return container_of0(o, struct mdt_object, mot_obj.mo_lu);
}
2343
2344 struct mdt_object *mdt_object_new(const struct lu_env *env,
2345                                   struct mdt_device *d,
2346                                   const struct lu_fid *f)
2347 {
2348         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2349         struct lu_object *o;
2350         struct mdt_object *m;
2351         ENTRY;
2352
2353         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2354         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2355         if (unlikely(IS_ERR(o)))
2356                 m = (struct mdt_object *)o;
2357         else
2358                 m = mdt_obj(o);
2359         RETURN(m);
2360 }
2361
2362 struct mdt_object *mdt_object_find(const struct lu_env *env,
2363                                    struct mdt_device *d,
2364                                    const struct lu_fid *f)
2365 {
2366         struct lu_object *o;
2367         struct mdt_object *m;
2368         ENTRY;
2369
2370         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2371         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2372         if (unlikely(IS_ERR(o)))
2373                 m = (struct mdt_object *)o;
2374         else
2375                 m = mdt_obj(o);
2376         RETURN(m);
2377 }
2378
2379 /**
2380  * Asyncronous commit for mdt device.
2381  *
2382  * Pass asynchonous commit call down the MDS stack.
2383  *
2384  * \param env environment
2385  * \param mdt the mdt device
2386  */
2387 static void mdt_device_commit_async(const struct lu_env *env,
2388                                     struct mdt_device *mdt)
2389 {
2390         struct dt_device *dt = mdt->mdt_bottom;
2391         int rc;
2392
2393         rc = dt->dd_ops->dt_commit_async(env, dt);
2394         if (unlikely(rc != 0))
2395                 CWARN("async commit start failed with rc = %d", rc);
2396 }
2397
2398 /**
2399  * Mark the lock as "synchonous".
2400  *
2401  * Mark the lock to deffer transaction commit to the unlock time.
2402  *
2403  * \param lock the lock to mark as "synchonous"
2404  *
2405  * \see mdt_is_lock_sync
2406  * \see mdt_save_lock
2407  */
2408 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2409 {
2410         lock->l_ast_data = (void*)1;
2411 }
2412
2413 /**
2414  * Check whehter the lock "synchonous" or not.
2415  *
2416  * \param lock the lock to check
2417  * \retval 1 the lock is "synchonous"
2418  * \retval 0 the lock isn't "synchronous"
2419  *
2420  * \see mdt_set_lock_sync
2421  * \see mdt_save_lock
2422  */
2423 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2424 {
2425         return lock->l_ast_data != NULL;
2426 }
2427
2428 /**
2429  * Blocking AST for mdt locks.
2430  *
2431  * Starts transaction commit if in case of COS lock conflict or
2432  * deffers such a commit to the mdt_save_lock.
2433  *
2434  * \param lock the lock which blocks a request or cancelling lock
2435  * \param desc unused
2436  * \param data unused
2437  * \param flag indicates whether this cancelling or blocking callback
2438  * \retval 0
2439  * \see ldlm_blocking_ast_nocheck
2440  */
2441 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2442                      void *data, int flag)
2443 {
2444         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2445         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2446         int rc;
2447         ENTRY;
2448
2449         if (flag == LDLM_CB_CANCELING)
2450                 RETURN(0);
2451         lock_res_and_lock(lock);
2452         if (lock->l_blocking_ast != mdt_blocking_ast) {
2453                 unlock_res_and_lock(lock);
2454                 RETURN(0);
2455         }
2456         if (mdt_cos_is_enabled(mdt) &&
2457             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2458             lock->l_blocking_lock != NULL &&
2459             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2460                 mdt_set_lock_sync(lock);
2461         }
2462         rc = ldlm_blocking_ast_nocheck(lock);
2463
2464         /* There is no lock conflict if l_blocking_lock == NULL,
2465          * it indicates a blocking ast sent from ldlm_lock_decref_internal
2466          * when the last reference to a local lock was released */
2467         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2468                 struct lu_env env;
2469
2470                 rc = lu_env_init(&env, LCT_LOCAL);
2471                 if (unlikely(rc != 0))
2472                         CWARN("lu_env initialization failed with rc = %d,"
2473                               "cannot start asynchronous commit\n", rc);
2474                 else
2475                         mdt_device_commit_async(&env, mdt);
2476                 lu_env_fini(&env);
2477         }
2478         RETURN(rc);
2479 }
2480
/* Blocking AST used for locks this MDT holds on a remote MDT (cross-MDT
 * metadata locks).  On a blocking callback the local lock is cancelled so
 * the remote server can grant the conflicting request; on a cancelling
 * callback there is nothing to do beyond logging. */
int mdt_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                        void *data, int flag)
{
        struct lustre_handle lockh;
        int               rc;

        switch (flag) {
        case LDLM_CB_BLOCKING:
                ldlm_lock2handle(lock, &lockh);
                rc = ldlm_cli_cancel(&lockh);
                if (rc < 0) {
                        CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
                        RETURN(rc);
                }
                break;
        case LDLM_CB_CANCELING:
                LDLM_DEBUG(lock, "Revoke remote lock\n");
                break;
        default:
                /* No other callback flavors exist; anything else is a bug. */
                LBUG();
        }
        RETURN(0);
}
2504
/* Take an inodebits lock on an object whose master is another MDT, by
 * enqueueing through the object's child device (OSP) rather than the local
 * namespace.  Only UPDATE-bearing lock requests are expected here.
 *
 * \param mti   request processing context (supplies einfo/policy scratch)
 * \param o     the remote object to lock
 * \param lh    lock handle to fill on success
 * \param mode  requested lock mode
 * \param ibits inodebits to lock; must include MDS_INODELOCK_UPDATE
 * \retval 0 on success, negative errno on failure
 */
int mdt_remote_object_lock(struct mdt_thread_info *mti,
                           struct mdt_object *o, struct lustre_handle *lh,
                           ldlm_mode_t mode, __u64 ibits)
{
        struct ldlm_enqueue_info *einfo = &mti->mti_einfo;
        ldlm_policy_data_t *policy = &mti->mti_policy;
        int rc = 0;
        ENTRY;

        LASSERT(mdt_object_remote(o));

        LASSERT((ibits & MDS_INODELOCK_UPDATE));

        memset(einfo, 0, sizeof(*einfo));
        einfo->ei_type = LDLM_IBITS;
        einfo->ei_mode = mode;
        /* Cross-MDT locks are cancelled through mdt_md_blocking_ast. */
        einfo->ei_cb_bl = mdt_md_blocking_ast;
        einfo->ei_cb_cp = ldlm_completion_ast;

        memset(policy, 0, sizeof(*policy));
        policy->l_inodebits.bits = ibits;

        rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo,
                            policy);
        RETURN(rc);
}
2531
/* Core lock helper: take the DLM lock(s) described by \a lh on object \a o.
 *
 * Handles three cases: plain regular locks, PDO (parallel directory
 * operation) locks where a whole-directory lock is taken first and the
 * name-hash part of the res_id is then filled in, and locks on remote
 * objects where only the LOOKUP bit may be taken locally.
 *
 * \param info     request processing context
 * \param o        object to lock
 * \param lh       lock handle(s); mode/type must already be set
 * \param ibits    inodebits to lock
 * \param nonblock if true, enqueue with LDLM_FL_BLOCK_NOWAIT and fail
 *                 rather than wait on a conflict
 * \param locality MDT_LOCAL_LOCK or MDT_CROSS_LOCK
 * \retval 0 on success, negative errno on failure
 */
static int mdt_object_lock0(struct mdt_thread_info *info, struct mdt_object *o,
                            struct mdt_lock_handle *lh, __u64 ibits,
                            bool nonblock, int locality)
{
        struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
        ldlm_policy_data_t *policy = &info->mti_policy;
        struct ldlm_res_id *res_id = &info->mti_res_id;
        __u64 dlmflags;
        int rc;
        ENTRY;

        LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
        LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
        LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
        LASSERT(lh->mlh_type != MDT_NUL_LOCK);

        if (mdt_object_remote(o)) {
                /* The local MDT may only guard name lookup for a remote
                 * object; UPDATE/PERM belong to the object's master MDT. */
                if (locality == MDT_CROSS_LOCK) {
                        ibits &= ~(MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM);
                        ibits |= MDS_INODELOCK_LOOKUP;
                } else {
                        LASSERTF(!(ibits &
                                  (MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM)),
                                "%s: wrong bit "LPX64" for remote obj "DFID"\n",
                                mdt_obd_name(info->mti_mdt), ibits,
                                PFID(mdt_object_fid(o)));
                        LASSERT(ibits & MDS_INODELOCK_LOOKUP);
                }
                /* No PDO lock on remote object */
                LASSERT(lh->mlh_type != MDT_PDO_LOCK);
        }

        if (lh->mlh_type == MDT_PDO_LOCK) {
                /* check for exists after object is locked */
                if (mdt_object_exists(o) == 0) {
                        /* Non-existent object shouldn't have PDO lock */
                        RETURN(-ESTALE);
                } else {
                        /* Non-dir object shouldn't have PDO lock */
                        if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
                                RETURN(-ENOTDIR);
                }
        }

        memset(policy, 0, sizeof(*policy));
        fid_build_reg_res_name(mdt_object_fid(o), res_id);

        dlmflags = LDLM_FL_ATOMIC_CB;
        if (nonblock)
                dlmflags |= LDLM_FL_BLOCK_NOWAIT;

        /*
         * Take PDO lock on whole directory and build correct @res_id for lock
         * on part of directory.
         */
        if (lh->mlh_pdo_hash != 0) {
                LASSERT(lh->mlh_type == MDT_PDO_LOCK);
                mdt_lock_pdo_mode(info, o, lh);
                if (lh->mlh_pdo_mode != LCK_NL) {
                        /*
                         * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
                         * is never going to be sent to client and we do not
                         * want it slowed down due to possible cancels.
                         */
                        policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
                        rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
                                          policy, res_id, dlmflags,
                                          &info->mti_exp->exp_handle.h_cookie);
                        if (unlikely(rc))
                                RETURN(rc);
                }

                /*
                 * Finish res_id initializing by name hash marking part of
                 * directory which is taking modification.
                 */
                res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
        }

        policy->l_inodebits.bits = ibits;

        /*
         * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
         * going to be sent to client. If it is - mdt_intent_policy() path will
         * fix it up and turn FL_LOCAL flag off.
         */
        rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
                          res_id, LDLM_FL_LOCAL_ONLY | dlmflags,
                          &info->mti_exp->exp_handle.h_cookie);
        if (rc)
                /* Drop the PDO lock (if any) taken above; decref=1 releases
                 * immediately instead of saving in the request. */
                mdt_object_unlock(info, o, lh, 1);
        else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
                 lh->mlh_pdo_hash != 0 &&
                 (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
                /* Fault-injection hook for PDO lock testing. */
                OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
        }

        RETURN(rc);
}
2631
/* Blocking variant of mdt_object_lock0(): wait for conflicting locks. */
int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
                    struct mdt_lock_handle *lh, __u64 ibits, int locality)
{
        return mdt_object_lock0(info, o, lh, ibits, false, locality);
}
2637
2638 int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *o,
2639                         struct mdt_lock_handle *lh, __u64 ibits, int locality)
2640 {
2641         struct mdt_lock_handle tmp = *lh;
2642         int rc;
2643
2644         rc = mdt_object_lock0(info, o, &tmp, ibits, true, locality);
2645         if (rc == 0)
2646                 *lh = tmp;
2647
2648         return rc == 0;
2649 }
2650
2651 /**
2652  * Save a lock within request object.
2653  *
2654  * Keep the lock referenced until whether client ACK or transaction
2655  * commit happens or release the lock immediately depending on input
2656  * parameters. If COS is ON, a write lock is converted to COS lock
2657  * before saving.
2658  *
2659  * \param info thead info object
2660  * \param h lock handle
2661  * \param mode lock mode
2662  * \param decref force immediate lock releasing
2663  */
2664 static
2665 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2666                    ldlm_mode_t mode, int decref)
2667 {
2668         ENTRY;
2669
2670         if (lustre_handle_is_used(h)) {
2671                 if (decref || !info->mti_has_trans ||
2672                     !(mode & (LCK_PW | LCK_EX))){
2673                         mdt_fid_unlock(h, mode);
2674                 } else {
2675                         struct mdt_device *mdt = info->mti_mdt;
2676                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2677                         struct ptlrpc_request *req = mdt_info_req(info);
2678                         int no_ack = 0;
2679
2680                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2681                                  h->cookie);
2682                         CDEBUG(D_HA, "request = %p reply state = %p"
2683                                " transno = "LPD64"\n",
2684                                req, req->rq_reply_state, req->rq_transno);
2685                         if (mdt_cos_is_enabled(mdt)) {
2686                                 no_ack = 1;
2687                                 ldlm_lock_downgrade(lock, LCK_COS);
2688                                 mode = LCK_COS;
2689                         }
2690                         ptlrpc_save_lock(req, h, mode, no_ack);
2691                         if (mdt_is_lock_sync(lock)) {
2692                                 CDEBUG(D_HA, "found sync-lock,"
2693                                        " async commit started\n");
2694                                 mdt_device_commit_async(info->mti_env,
2695                                                         mdt);
2696                         }
2697                         LDLM_LOCK_PUT(lock);
2698                 }
2699                 h->cookie = 0ull;
2700         }
2701
2702         EXIT;
2703 }
2704
2705 /**
2706  * Unlock mdt object.
2707  *
2708  * Immeditely release the regular lock and the PDO lock or save the
2709  * lock in reqeuest and keep them referenced until client ACK or
2710  * transaction commit.
2711  *
2712  * \param info thread info object
2713  * \param o mdt object
2714  * \param lh mdt lock handle referencing regular and PDO locks
2715  * \param decref force immediate lock releasing
2716  */
2717 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2718                        struct mdt_lock_handle *lh, int decref)
2719 {
2720         ENTRY;
2721
2722         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2723         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2724
2725         if (lustre_handle_is_used(&lh->mlh_rreg_lh))
2726                 ldlm_lock_decref(&lh->mlh_rreg_lh, lh->mlh_rreg_mode);
2727
2728         EXIT;
2729 }
2730
2731 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2732                                         const struct lu_fid *f,
2733                                         struct mdt_lock_handle *lh,
2734                                         __u64 ibits)
2735 {
2736         struct mdt_object *o;
2737
2738         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2739         if (!IS_ERR(o)) {
2740                 int rc;
2741
2742                 rc = mdt_object_lock(info, o, lh, ibits,
2743                                      MDT_LOCAL_LOCK);
2744                 if (rc != 0) {
2745                         mdt_object_put(info->mti_env, o);
2746                         o = ERR_PTR(rc);
2747                 }
2748         }
2749         return o;
2750 }
2751
2752 void mdt_object_unlock_put(struct mdt_thread_info * info,
2753                            struct mdt_object * o,
2754                            struct mdt_lock_handle *lh,
2755                            int decref)
2756 {
2757         mdt_object_unlock(info, o, lh, decref);
2758         mdt_object_put(info->mti_env, o);
2759 }
2760
2761 struct mdt_handler *mdt_handler_find(__u32 opc, struct mdt_opc_slice *supported)
2762 {
2763         struct mdt_opc_slice *s;
2764         struct mdt_handler   *h;
2765
2766         h = NULL;
2767         for (s = supported; s->mos_hs != NULL; s++) {
2768                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2769                         h = s->mos_hs + (opc - s->mos_opc_start);
2770                         if (likely(h->mh_opc != 0))
2771                                 LASSERTF(h->mh_opc == opc,
2772                                          "opcode mismatch %d != %d\n",
2773                                          h->mh_opc, opc);
2774                         else
2775                                 h = NULL; /* unsupported opc */
2776                         break;
2777                 }
2778         }
2779         return h;
2780 }
2781
/* Placeholder for converting lock resource names from old clients when
 * mo_compat_resname is set; currently a no-op. */
static int mdt_lock_resname_compat(struct mdt_device *m,
                                   struct ldlm_request *req)
{
        /* XXX something... later. */
        return 0;
}
2788
/* Placeholder for the reply-side counterpart of mdt_lock_resname_compat();
 * currently a no-op. */
static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
{
        /* XXX something... later. */
        return 0;
}
2794
2795 /*
2796  * Generic code handling requests that have struct mdt_body passed in:
2797  *
2798  *  - extract mdt_body from request and save it in @info, if present;
2799  *
2800  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2801  *  @info;
2802  *
2803  *  - if HABEO_CORPUS flag is set for this request type check whether object
2804  *  actually exists on storage (lu_object_exists()).
2805  *
2806  */
2807 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2808 {
2809         const struct mdt_body    *body;
2810         struct mdt_object        *obj;
2811         const struct lu_env      *env;
2812         struct req_capsule       *pill;
2813         int                       rc;
2814         ENTRY;
2815
2816         env = info->mti_env;
2817         pill = info->mti_pill;
2818
2819         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2820         if (body == NULL)
2821                 RETURN(-EFAULT);
2822
2823         if (!(body->valid & OBD_MD_FLID))
2824                 RETURN(0);
2825
2826         if (!fid_is_sane(&body->fid1)) {
2827                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2828                 RETURN(-EINVAL);
2829         }
2830
2831         /*
2832          * Do not get size or any capa fields before we check that request
2833          * contains capa actually. There are some requests which do not, for
2834          * instance MDS_IS_SUBDIR.
2835          */
2836         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2837             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2838                 mdt_set_capainfo(info, 0, &body->fid1,
2839                                  req_capsule_client_get(pill, &RMF_CAPA1));
2840
2841         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2842         if (!IS_ERR(obj)) {
2843                 if ((flags & HABEO_CORPUS) &&
2844                     !mdt_object_exists(obj)) {
2845                         mdt_object_put(env, obj);
2846                         /* for capability renew ENOENT will be handled in
2847                          * mdt_renew_capa */
2848                         if (body->valid & OBD_MD_FLOSSCAPA)
2849                                 rc = 0;
2850                         else
2851                                 rc = -ENOENT;
2852                 } else {
2853                         info->mti_object = obj;
2854                         rc = 0;
2855                 }
2856         } else
2857                 rc = PTR_ERR(obj);
2858
2859         RETURN(rc);
2860 }
2861
2862 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2863 {
2864         struct req_capsule *pill = info->mti_pill;
2865         int rc;
2866         ENTRY;
2867
2868         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2869                 rc = mdt_body_unpack(info, flags);
2870         else
2871                 rc = 0;
2872
2873         if (rc == 0 && (flags & HABEO_REFERO)) {
2874                 /* Pack reply. */
2875                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2876                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2877                                              info->mti_body->eadatasize);
2878                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2879                         req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2880                                              RCL_SERVER, 0);
2881
2882                 rc = req_capsule_server_pack(pill);
2883         }
2884         RETURN(rc);
2885 }
2886
/* Push the current capability settings (enabled flag, timeout, algorithm,
 * keys) down to the child md device.  Called when capability configuration
 * changes (see mdt_capa_conf handling in mdt_req_handle()). */
static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
{
        struct md_device *next = m->mdt_child;

        return next->md_ops->mdo_init_capa_ctxt(env, next,
                                                m->mdt_opts.mo_mds_capa,
                                                m->mdt_capa_timeout,
                                                m->mdt_capa_alg,
                                                m->mdt_capa_keys);
}
2897
2898 /*
2899  * Invoke handler for this request opc. Also do necessary preprocessing
2900  * (according to handler ->mh_flags), and post-processing (setting of
2901  * ->last_{xid,committed}).
2902  */
2903 static int mdt_req_handle(struct mdt_thread_info *info,
2904                           struct mdt_handler *h, struct ptlrpc_request *req)
2905 {
2906         int   rc, serious = 0;
2907         __u32 flags;
2908
2909         ENTRY;
2910
2911         LASSERT(h->mh_act != NULL);
2912         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2913         LASSERT(current->journal_info == NULL);
2914
2915         /*
2916          * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2917          * to put same checks into handlers like mdt_close(), mdt_reint(),
2918          * etc., without talking to mdt authors first. Checking same thing
2919          * there again is useless and returning 0 error without packing reply
2920          * is buggy! Handlers either pack reply or return error.
2921          *
2922          * We return 0 here and do not send any reply in order to emulate
2923          * network failure. Do not send any reply in case any of NET related
2924          * fail_id has occured.
2925          */
2926         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2927                 RETURN(0);
2928
2929         rc = 0;
2930         flags = h->mh_flags;
2931         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2932
2933         if (h->mh_fmt != NULL) {
2934                 req_capsule_set(info->mti_pill, h->mh_fmt);
2935                 rc = mdt_unpack_req_pack_rep(info, flags);
2936         }
2937
2938         if (rc == 0 && flags & MUTABOR &&
2939             exp_connect_flags(req->rq_export) & OBD_CONNECT_RDONLY)
2940                 /* should it be rq_status? */
2941                 rc = -EROFS;
2942
2943         if (rc == 0 && flags & HABEO_CLAVIS) {
2944                 struct ldlm_request *dlm_req;
2945
2946                 LASSERT(h->mh_fmt != NULL);
2947
2948                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2949                 if (dlm_req != NULL) {
2950                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2951                                         LDLM_IBITS &&
2952                                      dlm_req->lock_desc.l_policy_data.\
2953                                         l_inodebits.bits == 0)) {
2954                                 /*
2955                                  * Lock without inodebits makes no sense and
2956                                  * will oops later in ldlm. If client miss to
2957                                  * set such bits, do not trigger ASSERTION.
2958                                  *
2959                                  * For liblustre flock case, it maybe zero.
2960                                  */
2961                                 rc = -EPROTO;
2962                         } else {
2963                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2964                                         rc = mdt_lock_resname_compat(
2965                                                                 info->mti_mdt,
2966                                                                 dlm_req);
2967                                 info->mti_dlm_req = dlm_req;
2968                         }
2969                 } else {
2970                         rc = -EFAULT;
2971                 }
2972         }
2973
2974         /* capability setting changed via /proc, needs reinitialize ctxt */
2975         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2976                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2977                 info->mti_mdt->mdt_capa_conf = 0;
2978         }
2979
2980         if (likely(rc == 0)) {
2981                 /*
2982                  * Process request, there can be two types of rc:
2983                  * 1) errors with msg unpack/pack, other failures outside the
2984                  * operation itself. This is counted as serious errors;
2985                  * 2) errors during fs operation, should be placed in rq_status
2986                  * only
2987                  */
2988                 rc = h->mh_act(info);
2989                 if (rc == 0 &&
2990                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2991                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2992                                   "pack reply and returned 0 error\n",
2993                                   h->mh_name);
2994                         LBUG();
2995                 }
2996                 serious = is_serious(rc);
2997                 rc = clear_serious(rc);
2998         } else
2999                 serious = 1;
3000
3001         req->rq_status = rc;
3002
3003         /*
3004          * ELDLM_* codes which > 0 should be in rq_status only as well as
3005          * all non-serious errors.
3006          */
3007         if (rc > 0 || !serious)
3008                 rc = 0;
3009
3010         LASSERT(current->journal_info == NULL);
3011
3012         if (rc == 0 && (flags & HABEO_CLAVIS) &&
3013             info->mti_mdt->mdt_opts.mo_compat_resname) {
3014                 struct ldlm_reply *dlmrep;
3015
3016                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3017                 if (dlmrep != NULL)
3018                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3019         }
3020
3021         /* If we're DISCONNECTing, the mdt_export_data is already freed */
3022         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3023                 target_committed_to_req(req);
3024
3025         if (unlikely(req_is_replay(req) &&
3026                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3027                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3028                 LBUG();
3029         }
3030
3031         target_send_reply(req, rc, info->mti_fail_id);
3032         RETURN(0);
3033 }
3034
/* Reset a lock handle to its empty state: no type, and unused cookies with
 * MINMODE for the regular, PDO and remote-regular lock slots. */
void mdt_lock_handle_init(struct mdt_lock_handle *lh)
{
        lh->mlh_type = MDT_NUL_LOCK;
        lh->mlh_reg_lh.cookie = 0ull;
        lh->mlh_reg_mode = LCK_MINMODE;
        lh->mlh_pdo_lh.cookie = 0ull;
        lh->mlh_pdo_mode = LCK_MINMODE;
        lh->mlh_rreg_lh.cookie = 0ull;
        lh->mlh_rreg_mode = LCK_MINMODE;
}
3045
/* Verify that both local lock slots were released before the handle is
 * discarded; leaking a referenced lock here would be a bug. */
void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
{
        LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
        LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
}
3051
3052 /*
3053  * Initialize fields of struct mdt_thread_info. Other fields are left in
3054  * uninitialized state, because it's too expensive to zero out whole
3055  * mdt_thread_info (> 1K) on each request arrival.
3056  */
3057 static void mdt_thread_info_init(struct ptlrpc_request *req,
3058                                  struct mdt_thread_info *info)
3059 {
3060         int i;
3061
3062         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3063         info->mti_pill = &req->rq_pill;
3064
3065         /* lock handle */
3066         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3067                 mdt_lock_handle_init(&info->mti_lh[i]);
3068
3069         /* mdt device: it can be NULL while CONNECT */
3070         if (req->rq_export) {
3071                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3072                 info->mti_exp = req->rq_export;
3073         } else
3074                 info->mti_mdt = NULL;
3075         info->mti_env = req->rq_svc_thread->t_env;
3076         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3077         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3078         info->mti_mos = NULL;
3079
3080         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3081         info->mti_body = NULL;
3082         info->mti_object = NULL;
3083         info->mti_dlm_req = NULL;
3084         info->mti_has_trans = 0;
3085         info->mti_cross_ref = 0;
3086         info->mti_opdata = 0;
3087         info->mti_big_lmm_used = 0;
3088
3089         /* To not check for split by default. */
3090         info->mti_spec.no_create = 0;
3091         info->mti_spec.sp_rm_entry = 0;
3092 }
3093
/* Tear down per-request state set up by mdt_thread_info_init() /
 * mdt_body_unpack(): release the capsule, drop the object reference (if
 * one was taken), and check all lock handles were released. */
static void mdt_thread_info_fini(struct mdt_thread_info *info)
{
        int i;

        req_capsule_fini(info->mti_pill);
        if (info->mti_object != NULL) {
                mdt_object_put(info->mti_env, info->mti_object);
                info->mti_object = NULL;
        }
        for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
                mdt_lock_handle_fini(&info->mti_lh[i]);
        info->mti_env = NULL;
}
3107
3108 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3109                                        struct obd_device *obd, int *process)
3110 {
3111         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3112         case MDS_CONNECT: /* This will never get here, but for completeness. */
3113         case OST_CONNECT: /* This will never get here, but for completeness. */
3114         case MDS_DISCONNECT:
3115         case OST_DISCONNECT:
3116         case OBD_IDX_READ:
3117                *process = 1;
3118                RETURN(0);
3119
3120         case MDS_CLOSE:
3121         case MDS_DONE_WRITING:
3122         case MDS_SYNC: /* used in unmounting */
3123         case OBD_PING:
3124         case MDS_REINT:
3125         case SEQ_QUERY:
3126         case FLD_QUERY:
3127         case LDLM_ENQUEUE:
3128                 *process = target_queue_recovery_request(req, obd);
3129                 RETURN(0);
3130
3131         default:
3132                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3133                 *process = -EAGAIN;
3134                 RETURN(0);
3135         }
3136 }
3137
3138 /*
3139  * Handle recovery. Return:
3140  *        +1: continue request processing;
3141  *       -ve: abort immediately with the given error code;
3142  *         0: send reply with error code in req->rq_status;
3143  */
3144 static int mdt_recovery(struct mdt_thread_info *info)
3145 {
3146         struct ptlrpc_request *req = mdt_info_req(info);
3147         struct obd_device *obd;
3148
3149         ENTRY;
3150
3151         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3152         case MDS_CONNECT:
3153         case SEC_CTX_INIT:
3154         case SEC_CTX_INIT_CONT:
3155         case SEC_CTX_FINI:
3156                 {
3157 #if 0
3158                         int rc;
3159
3160                         rc = mdt_handle_idmap(info);
3161                         if (rc)
3162                                 RETURN(rc);
3163                         else
3164 #endif
3165                                 RETURN(+1);
3166                 }
3167         }
3168
3169         if (unlikely(!class_connected_export(req->rq_export))) {
3170                 CDEBUG(D_HA, "operation %d on unconnected MDS from %s\n",
3171                        lustre_msg_get_opc(req->rq_reqmsg),
3172                        libcfs_id2str(req->rq_peer));
3173                 /* FIXME: For CMD cleanup, when mds_B stop, the req from
3174                  * mds_A will get -ENOTCONN(especially for ping req),
3175                  * which will cause that mds_A deactive timeout, then when
3176                  * mds_A cleanup, the cleanup process will be suspended since
3177                  * deactive timeout is not zero.
3178                  */
3179                 req->rq_status = -ENOTCONN;
3180                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3181                 RETURN(0);
3182         }
3183
3184         /* sanity check: if the xid matches, the request must be marked as a
3185          * resent or replayed */
3186         if (req_xid_is_last(req)) {
3187                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3188                       (MSG_RESENT | MSG_REPLAY))) {
3189                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3190                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3191                                   lustre_msg_get_flags(req->rq_reqmsg));
3192                         LBUG();
3193                         req->rq_status = -ENOTCONN;
3194                         RETURN(-ENOTCONN);
3195                 }
3196         }
3197
3198         /* else: note the opposite is not always true; a RESENT req after a
3199          * failover will usually not match the last_xid, since it was likely
3200          * never committed. A REPLAYed request will almost never match the
3201          * last xid, however it could for a committed, but still retained,
3202          * open. */
3203
3204         obd = req->rq_export->exp_obd;
3205
3206         /* Check for aborted recovery... */
3207         if (unlikely(obd->obd_recovering)) {
3208                 int rc;
3209                 int should_process;
3210                 DEBUG_REQ(D_INFO, req, "Got new replay");
3211                 rc = mdt_filter_recovery_request(req, obd, &should_process);
3212                 if (rc != 0 || !should_process)
3213                         RETURN(rc);
3214                 else if (should_process < 0) {
3215                         req->rq_status = should_process;
3216                         rc = ptlrpc_error(req);
3217                         RETURN(rc);
3218                 }
3219         }
3220         RETURN(+1);
3221 }
3222
3223 static int mdt_msg_check_version(struct lustre_msg *msg)
3224 {
3225