1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * struct OBD_{ALLOC,FREE}*()
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #include <lustre_acl.h>
69 #include <lustre_param.h>
70 #include <lustre_quota.h>
71
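/*
 * Translation tables between LDLM lock modes (LCK_*) and mdl_mode_t values
 * (MDL_*).  The two arrays below are mutual inverses; they back the
 * mdt_mdl_mode2dlm_mode()/mdt_dlm_mode2mdl_mode() conversions used, for
 * example, by mdt_lock_pdo_mode() below.
 */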
72 mdl_mode_t mdt_mdl_lock_modes[] = {
73         [LCK_MINMODE] = MDL_MINMODE,
74         [LCK_EX]      = MDL_EX,
75         [LCK_PW]      = MDL_PW,
76         [LCK_PR]      = MDL_PR,
77         [LCK_CW]      = MDL_CW,
78         [LCK_CR]      = MDL_CR,
79         [LCK_NL]      = MDL_NL,
80         [LCK_GROUP]   = MDL_GROUP
81 };
82
83 ldlm_mode_t mdt_dlm_lock_modes[] = {
84         [MDL_MINMODE] = LCK_MINMODE,
85         [MDL_EX]      = LCK_EX,
86         [MDL_PW]      = LCK_PW,
87         [MDL_PR]      = LCK_PR,
88         [MDL_CW]      = LCK_CW,
89         [MDL_CR]      = LCK_CR,
90         [MDL_NL]      = LCK_NL,
91         [MDL_GROUP]   = LCK_GROUP
92 };
93
94 static struct mdt_device *mdt_dev(struct lu_device *d);
95 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
96 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
97                         struct getinfo_fid2path *fp);
98
99 static const struct lu_object_operations mdt_obj_ops;
100
101 /* Slab for MDT object allocation */
102 static cfs_mem_cache_t *mdt_object_kmem;
103
104 static struct lu_kmem_descr mdt_caches[] = {
105         {
106                 .ckd_cache = &mdt_object_kmem,
107                 .ckd_name  = "mdt_obj",
108                 .ckd_size  = sizeof(struct mdt_object)
109         },
110         {
111                 .ckd_cache = NULL
112         }
113 };
114
115 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
116 {
117         if (!rep)
118                 return 0;
119         return (rep->lock_policy_res1 & flag);
120 }
121
122 void mdt_clear_disposition(struct mdt_thread_info *info,
123                            struct ldlm_reply *rep, int flag)
124 {
125         if (info)
126                 info->mti_opdata &= ~flag;
127         if (rep)
128                 rep->lock_policy_res1 &= ~flag;
129 }
130
131 void mdt_set_disposition(struct mdt_thread_info *info,
132                          struct ldlm_reply *rep, int flag)
133 {
134         if (info)
135                 info->mti_opdata |= flag;
136         if (rep)
137                 rep->lock_policy_res1 |= flag;
138 }
139
140 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
141 {
142         lh->mlh_pdo_hash = 0;
143         lh->mlh_reg_mode = lm;
144         lh->mlh_rreg_mode = lm;
145         lh->mlh_type = MDT_REG_LOCK;
146 }
147
148 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
149                        const char *name, int namelen)
150 {
151         lh->mlh_reg_mode = lm;
152         lh->mlh_rreg_mode = lm;
153         lh->mlh_type = MDT_PDO_LOCK;
154
155         if (name != NULL && (name[0] != '\0')) {
156                 LASSERT(namelen > 0);
157                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
158         } else {
159                 LASSERT(namelen == 0);
160                 lh->mlh_pdo_hash = 0ull;
161         }
162 }
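
/*
 * Note on the PDO hash above: PDO ("parallel directory operations") locks
 * appear to key part of the lock on the hash of the entry name, so that
 * operations on different names in the same directory need not conflict;
 * when no name is given the hash is simply left at 0.
 */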
163
164 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
165                               struct mdt_lock_handle *lh)
166 {
167         mdl_mode_t mode;
168         ENTRY;
169
170         /*
171          * Any dir access needs a couple of locks:
172          *
173          * 1) on the part of the dir we are going to look up or modify;
174          *
175          * 2) on the whole dir to protect it from concurrent splitting and/or
176          * to flush the client's cache for readdir().
177          *
178          * So, for a given mode and object this routine decides what lock mode
179          * to use for lock #2:
180          *
181          * 1) if the caller is going to look up in the dir, we only need to
182          * protect the dir from being split - LCK_CR;
183          *
184          * 2) if the caller is going to modify the dir, we need to protect it
185          * from being split and to flush the cache - LCK_CW;
186          *
187          * 3) if the caller is going to modify the dir and that dir seems ready
188          * for splitting, we need to protect it from any type of access
189          * (lookup/modify/split) - LCK_EX. --bzzz
190          */
191
192         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
193         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
194
195         /*
196          * Ask the underlying layer its opinion about the preferable PDO lock
197          * mode, with the access type passed as the regular lock mode:
198          *
199          * - MDL_MINMODE means that the lower layer does not want to specify a
200          * lock mode;
201          *
202          * - MDL_NL means that no PDO lock should be taken. This is used in some
203          * cases, e.g. for non-splittable directories there is no need to use
204          * PDO locks at all.
205          */
206         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
207                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
208
209         if (mode != MDL_MINMODE) {
210                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
211         } else {
212                 /*
213                  * The lower layer does not want to specify a locking mode. We
214                  * do it ourselves. No special protection is needed, just flush
215                  * the client's cache on modification and allow concurrent
216                  * modification.
217                  */
218                 switch (lh->mlh_reg_mode) {
219                 case LCK_EX:
220                         lh->mlh_pdo_mode = LCK_EX;
221                         break;
222                 case LCK_PR:
223                         lh->mlh_pdo_mode = LCK_CR;
224                         break;
225                 case LCK_PW:
226                         lh->mlh_pdo_mode = LCK_CW;
227                         break;
228                 default:
229                         CERROR("Unexpected lock type (0x%x)\n",
230                                (int)lh->mlh_reg_mode);
231                         LBUG();
232                 }
233         }
234
235         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
236         EXIT;
237 }
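
/*
 * For example, with the default mapping above, a modification taken under
 * LCK_PW on the name maps to LCK_CW on the whole directory, which (per the
 * comment in mdt_lock_pdo_mode()) flushes the clients' readdir() cache while
 * still allowing concurrent modifications of other entries.
 */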
238
239 int mdt_getstatus(struct mdt_thread_info *info)
240 {
241         struct mdt_device *mdt  = info->mti_mdt;
242         struct md_device  *next = mdt->mdt_child;
243         struct mdt_body   *repbody;
244         int                rc;
245
246         ENTRY;
247
248         rc = mdt_check_ucred(info);
249         if (rc)
250                 RETURN(err_serious(rc));
251
252         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
253                 RETURN(err_serious(-ENOMEM));
254
255         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
256         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
257         if (rc != 0)
258                 RETURN(rc);
259
260         repbody->valid |= OBD_MD_FLID;
261
262         if (mdt->mdt_opts.mo_mds_capa &&
263             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
264                 struct mdt_object  *root;
265                 struct lustre_capa *capa;
266
267                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
268                 if (IS_ERR(root))
269                         RETURN(PTR_ERR(root));
270
271                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
272                 LASSERT(capa);
273                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
274                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
275                                  0);
276                 mdt_object_put(info->mti_env, root);
277                 if (rc == 0)
278                         repbody->valid |= OBD_MD_FLMDSCAPA;
279         }
280
281         RETURN(rc);
282 }
283
284 int mdt_statfs(struct mdt_thread_info *info)
285 {
286         struct ptlrpc_request           *req = mdt_info_req(info);
287         struct md_device                *next = info->mti_mdt->mdt_child;
288         struct ptlrpc_service_part      *svcpt;
289         struct obd_statfs               *osfs;
290         int                             rc;
291
292         ENTRY;
293
294         svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;
295
296         /* This will trigger a watchdog timeout */
297         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
298                          (MDT_SERVICE_WATCHDOG_FACTOR *
299                           at_get(&svcpt->scp_at_estimate)) + 1);
300
301         rc = mdt_check_ucred(info);
302         if (rc)
303                 RETURN(err_serious(rc));
304
305         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
306                 RETURN(err_serious(-ENOMEM));
307
308         osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
309         if (!osfs)
310                 RETURN(-EPROTO);
311
312         /** statfs information is cached in the mdt_device */
313         if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
314                                cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
315                 /** statfs data is too old, get an up-to-date copy */
316                 rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
317                 if (rc)
318                         RETURN(rc);
319                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
320                 info->mti_mdt->mdt_osfs = *osfs;
321                 info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
322                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
323         } else {
324                 /** use cached statfs data */
325                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
326                 *osfs = info->mti_mdt->mdt_osfs;
327                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
328         }
329
330         if (rc == 0)
331                 mdt_counter_incr(req, LPROC_MDT_STATFS);
332
333         RETURN(rc);
334 }
335
336 /**
337  * Pack SOM attributes into the reply.
338  * Call under a DLM UPDATE lock.
339  */
340 static void mdt_pack_size2body(struct mdt_thread_info *info,
341                                struct mdt_object *mo)
342 {
343         struct mdt_body *b;
344         struct md_attr *ma = &info->mti_attr;
345
346         LASSERT(ma->ma_attr.la_valid & LA_MODE);
347         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
348
349         /* Check if Size-on-MDS is supported, if this is a regular file,
350          * if SOM is enabled on the object, and if the SOM cache exists and is
351          * valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
352         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
353             !S_ISREG(ma->ma_attr.la_mode) ||
354             !mdt_object_is_som_enabled(mo) ||
355             !(ma->ma_valid & MA_SOM))
356                 return;
357
358         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
359         b->size = ma->ma_som->msd_size;
360         b->blocks = ma->ma_som->msd_blocks;
361 }
362
363 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
364                         const struct lu_attr *attr, const struct lu_fid *fid)
365 {
366         struct md_attr *ma = &info->mti_attr;
367
368         LASSERT(ma->ma_valid & MA_INODE);
369
370         b->atime      = attr->la_atime;
371         b->mtime      = attr->la_mtime;
372         b->ctime      = attr->la_ctime;
373         b->mode       = attr->la_mode;
374         b->size       = attr->la_size;
375         b->blocks     = attr->la_blocks;
376         b->uid        = attr->la_uid;
377         b->gid        = attr->la_gid;
378         b->flags      = attr->la_flags;
379         b->nlink      = attr->la_nlink;
380         b->rdev       = attr->la_rdev;
381
382         /* XXX: should pack the reply body according to lu_valid */
383         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
384                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
385                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
386                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
387
388         if (!S_ISREG(attr->la_mode)) {
389                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
390         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
391                 /* this means no objects are allocated on OSTs. */
392                 LASSERT(!(ma->ma_valid & MA_LOV));
393                 /* just ignore blocks occupied by extended attributes on MDS */
394                 b->blocks = 0;
395                 /* if no object is allocated on OSTs, the size on the MDS is valid. b=22272 */
396                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
397         }
398
399         if (fid) {
400                 b->fid1 = *fid;
401                 b->valid |= OBD_MD_FLID;
402                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
403                                 PFID(fid), b->nlink, b->mode, b->size);
404         }
405
406         if (info)
407                 mdt_body_reverse_idmap(info, b);
408
409         if (b->valid & OBD_MD_FLSIZE)
410                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
411                        PFID(fid), (unsigned long long)b->size);
412 }
413
414 static inline int mdt_body_has_lov(const struct lu_attr *la,
415                                    const struct mdt_body *body)
416 {
417         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
418                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
419 }
420
421 void mdt_client_compatibility(struct mdt_thread_info *info)
422 {
423         struct mdt_body       *body;
424         struct ptlrpc_request *req = mdt_info_req(info);
425         struct obd_export     *exp = req->rq_export;
426         struct md_attr        *ma = &info->mti_attr;
427         struct lu_attr        *la = &ma->ma_attr;
428         ENTRY;
429
430         if (exp_connect_layout(exp))
431                 /* the client can deal with 16-bit lmm_stripe_count */
432                 RETURN_EXIT;
433
434         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
435
436         if (!mdt_body_has_lov(la, body))
437                 RETURN_EXIT;
438
439         /* now we have a reply with a LOV EA for a client that is not layout
440          * lock capable, so we have to clear the layout generation number */
441         if (S_ISREG(la->la_mode))
442                 ma->ma_lmm->lmm_layout_gen = 0;
443         EXIT;
444 }
445
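/*
 * Read a potentially large xattr into the per-thread info->mti_big_lmm
 * buffer: probe the value size first with LU_BUF_NULL, grow the buffer to
 * the next power of two if it is too small, then fetch the value for real.
 */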
446 static int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
447                              char *name)
448 {
449         const struct lu_env *env = info->mti_env;
450         int rc;
451         ENTRY;
452
453         LASSERT(info->mti_big_lmm_used == 0);
454         rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
455         if (rc < 0)
456                 RETURN(rc);
457
458         /* big_lmm may need to be grown */
459         if (info->mti_big_lmmsize < rc) {
460                 int size = size_roundup_power2(rc);
461
462                 if (info->mti_big_lmmsize > 0) {
463                         /* free old buffer */
464                         LASSERT(info->mti_big_lmm);
465                         OBD_FREE_LARGE(info->mti_big_lmm,
466                                        info->mti_big_lmmsize);
467                         info->mti_big_lmm = NULL;
468                         info->mti_big_lmmsize = 0;
469                 }
470
471                 OBD_ALLOC_LARGE(info->mti_big_lmm, size);
472                 if (info->mti_big_lmm == NULL)
473                         RETURN(-ENOMEM);
474                 info->mti_big_lmmsize = size;
475         }
476         LASSERT(info->mti_big_lmmsize >= rc);
477
478         info->mti_buf.lb_buf = info->mti_big_lmm;
479         info->mti_buf.lb_len = info->mti_big_lmmsize;
480         rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);
481
482         RETURN(rc);
483 }
484
485 int mdt_attr_get_lov(struct mdt_thread_info *info,
486                      struct mdt_object *o, struct md_attr *ma)
487 {
488         struct md_object *next = mdt_object_child(o);
489         struct lu_buf    *buf = &info->mti_buf;
490         int rc;
491
492         buf->lb_buf = ma->ma_lmm;
493         buf->lb_len = ma->ma_lmm_size;
494         rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
495         if (rc > 0) {
496                 ma->ma_lmm_size = rc;
497                 ma->ma_valid |= MA_LOV;
498                 rc = 0;
499         } else if (rc == -ENODATA) {
500                 /* no LOV EA */
501                 rc = 0;
502         } else if (rc == -ERANGE) {
503                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LOV);
504                 if (rc > 0) {
505                         info->mti_big_lmm_used = 1;
506                         ma->ma_valid |= MA_LOV;
507                         ma->ma_lmm = info->mti_big_lmm;
508                         ma->ma_lmm_size = rc;
509                         /* update mdt_max_mdsize so all clients
510                          * will be aware of it */
511                         if (info->mti_mdt->mdt_max_mdsize < rc)
512                                 info->mti_mdt->mdt_max_mdsize = rc;
513                         rc = 0;
514                 }
515         }
516
517         return rc;
518 }
519
520 int mdt_attr_get_pfid(struct mdt_thread_info *info,
521                       struct mdt_object *o, struct lu_fid *pfid)
522 {
523         struct lu_buf           *buf = &info->mti_buf;
524         struct link_ea_header   *leh;
525         struct link_ea_entry    *lee;
526         int                      rc;
527         ENTRY;
528
529         buf->lb_buf = info->mti_big_lmm;
530         buf->lb_len = info->mti_big_lmmsize;
531         rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
532                           buf, XATTR_NAME_LINK);
533         /* ignore errors; MA_PFID won't be set and it is
534          * up to the caller to treat this as an error */
535         if (rc == -ERANGE || buf->lb_len == 0) {
536                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
537                 buf->lb_buf = info->mti_big_lmm;
538                 buf->lb_len = info->mti_big_lmmsize;
539         }
540
541         if (rc < 0)
542                 RETURN(rc);
543         if (rc < sizeof(*leh)) {
544                 CERROR("short LinkEA on "DFID": rc = %d\n",
545                        PFID(mdt_object_fid(o)), rc);
546                 RETURN(-ENODATA);
547         }
548
549         leh = (struct link_ea_header *) buf->lb_buf;
550         lee = (struct link_ea_entry *)(leh + 1);
551         if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
552                 leh->leh_magic = LINK_EA_MAGIC;
553                 leh->leh_reccount = __swab32(leh->leh_reccount);
554                 leh->leh_len = __swab64(leh->leh_len);
555         }
556         if (leh->leh_magic != LINK_EA_MAGIC)
557                 RETURN(-EINVAL);
558         if (leh->leh_reccount == 0)
559                 RETURN(-ENODATA);
560
561         memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
562         fid_be_to_cpu(pfid, pfid);
563
564         RETURN(0);
565 }
566
567 int mdt_attr_get_complex(struct mdt_thread_info *info,
568                          struct mdt_object *o, struct md_attr *ma)
569 {
570         const struct lu_env *env = info->mti_env;
571         struct md_object    *next = mdt_object_child(o);
572         struct lu_buf       *buf = &info->mti_buf;
573         u32                  mode = lu_object_attr(&next->mo_lu);
574         int                  need = ma->ma_need;
575         int                  rc = 0, rc2;
576         ENTRY;
577
578         ma->ma_valid = 0;
579
580         if (need & MA_INODE) {
581                 ma->ma_need = MA_INODE;
582                 rc = mo_attr_get(env, next, ma);
583                 if (rc)
584                         GOTO(out, rc);
585                 ma->ma_valid |= MA_INODE;
586         }
587
588         if (need & MA_PFID) {
589                 rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
590                 if (rc == 0)
591                         ma->ma_valid |= MA_PFID;
592                 /* ignore this error, parent fid is not mandatory */
593                 rc = 0;
594         }
595
596         if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
597                 rc = mdt_attr_get_lov(info, o, ma);
598                 if (rc)
599                         GOTO(out, rc);
600         }
601
602         if (need & MA_LMV && S_ISDIR(mode)) {
603                 buf->lb_buf = ma->ma_lmv;
604                 buf->lb_len = ma->ma_lmv_size;
605                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
606                 if (rc2 > 0) {
607                         ma->ma_lmv_size = rc2;
608                         ma->ma_valid |= MA_LMV;
609                 } else if (rc2 == -ENODATA) {
610                         /* no LMV EA */
611                         ma->ma_lmv_size = 0;
612                 } else
613                         GOTO(out, rc = rc2);
614         }
615
616         if (need & MA_SOM && S_ISREG(mode)) {
617                 buf->lb_buf = info->mti_xattr_buf;
618                 buf->lb_len = sizeof(info->mti_xattr_buf);
619                 CLASSERT(sizeof(struct som_attrs) <=
620                          sizeof(info->mti_xattr_buf));
621                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_SOM);
622                 rc2 = lustre_buf2som(info->mti_xattr_buf, rc2, ma->ma_som);
623                 if (rc2 == 0)
624                         ma->ma_valid |= MA_SOM;
625                 else if (rc2 < 0 && rc2 != -ENODATA)
626                         GOTO(out, rc = rc2);
627         }
628
629         if (need & MA_HSM && S_ISREG(mode)) {
630                 buf->lb_buf = info->mti_xattr_buf;
631                 buf->lb_len = sizeof(info->mti_xattr_buf);
632                 CLASSERT(sizeof(struct hsm_attrs) <=
633                          sizeof(info->mti_xattr_buf));
634                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM);
635                 rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm);
636                 if (rc2 == 0)
637                         ma->ma_valid |= MA_HSM;
638                 else if (rc2 < 0 && rc2 != -ENODATA)
639                         GOTO(out, rc = rc2);
640         }
641
642 #ifdef CONFIG_FS_POSIX_ACL
643         if (need & MA_ACL_DEF && S_ISDIR(mode)) {
644                 buf->lb_buf = ma->ma_acl;
645                 buf->lb_len = ma->ma_acl_size;
646                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
647                 if (rc2 > 0) {
648                         ma->ma_acl_size = rc2;
649                         ma->ma_valid |= MA_ACL_DEF;
650                 } else if (rc2 == -ENODATA) {
651                         /* no ACLs */
652                         ma->ma_acl_size = 0;
653                 } else
654                         GOTO(out, rc = rc2);
655         }
656 #endif
657 out:
658         ma->ma_need = need;
659         CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
660                rc, ma->ma_valid, ma->ma_lmm);
661         RETURN(rc);
662 }
663
664 static int mdt_getattr_internal(struct mdt_thread_info *info,
665                                 struct mdt_object *o, int ma_need)
666 {
667         struct md_object        *next = mdt_object_child(o);
668         const struct mdt_body   *reqbody = info->mti_body;
669         struct ptlrpc_request   *req = mdt_info_req(info);
670         struct md_attr          *ma = &info->mti_attr;
671         struct lu_attr          *la = &ma->ma_attr;
672         struct req_capsule      *pill = info->mti_pill;
673         const struct lu_env     *env = info->mti_env;
674         struct mdt_body         *repbody;
675         struct lu_buf           *buffer = &info->mti_buf;
676         int                     rc;
677         int                     is_root;
678         ENTRY;
679
680         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
681                 RETURN(err_serious(-ENOMEM));
682
683         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
684
685         ma->ma_valid = 0;
686
687         if (mdt_object_remote(o)) {
688                 /* This object is located on a remote node. */
689                 /* Return -EIO for an old client */
690                 if (!mdt_is_dne_client(req->rq_export))
691                         GOTO(out, rc = -EIO);
692
693                 repbody->fid1 = *mdt_object_fid(o);
694                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
695                 GOTO(out, rc = 0);
696         }
697
698         buffer->lb_len = reqbody->eadatasize;
699         if (buffer->lb_len > 0)
700                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
701         else
702                 buffer->lb_buf = NULL;
703
704         /* If it is a dir object and the client requested MEA, then get the MEA */
705         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
706             reqbody->valid & OBD_MD_MEA) {
707                 /* Assumption: MDT_MD size is enough for lmv size. */
708                 ma->ma_lmv = buffer->lb_buf;
709                 ma->ma_lmv_size = buffer->lb_len;
710                 ma->ma_need = MA_LMV | MA_INODE;
711         } else {
712                 ma->ma_lmm = buffer->lb_buf;
713                 ma->ma_lmm_size = buffer->lb_len;
714                 ma->ma_need = MA_LOV | MA_INODE | MA_HSM;
715         }
716
717         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
718             reqbody->valid & OBD_MD_FLDIREA  &&
719             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
720                 /* get default stripe info for this dir. */
721                 ma->ma_need |= MA_LOV_DEF;
722         }
723         ma->ma_need |= ma_need;
724         if (ma->ma_need & MA_SOM)
725                 ma->ma_som = &info->mti_u.som.data;
726
727         rc = mdt_attr_get_complex(info, o, ma);
728         if (unlikely(rc)) {
729                 CERROR("getattr error for "DFID": %d\n",
730                         PFID(mdt_object_fid(o)), rc);
731                 RETURN(rc);
732         }
733
734         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
735
736         /* the Lustre protocol is supposed to return the default striping
737          * on the user-visible root if explicitly requested */
738         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
739             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
740                 struct lu_fid      rootfid;
741                 struct mdt_object *root;
742                 struct mdt_device *mdt = info->mti_mdt;
743
744                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
745                 if (rc)
746                         RETURN(rc);
747                 root = mdt_object_find(env, mdt, &rootfid);
748                 if (IS_ERR(root))
749                         RETURN(PTR_ERR(root));
750                 rc = mdt_attr_get_lov(info, root, ma);
751                 mdt_object_put(info->mti_env, root);
752                 if (unlikely(rc)) {
753                         CERROR("getattr error for "DFID": %d\n",
754                                         PFID(mdt_object_fid(o)), rc);
755                         RETURN(rc);
756                 }
757         }
758
759         if (likely(ma->ma_valid & MA_INODE))
760                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
761         else
762                 RETURN(-EFAULT);
763
764         if (mdt_body_has_lov(la, reqbody)) {
765                 if (ma->ma_valid & MA_LOV) {
766                         LASSERT(ma->ma_lmm_size);
767                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
768                         repbody->eadatasize = ma->ma_lmm_size;
769                         if (S_ISDIR(la->la_mode))
770                                 repbody->valid |= OBD_MD_FLDIREA;
771                         else
772                                 repbody->valid |= OBD_MD_FLEASIZE;
773                 }
774                 if (ma->ma_valid & MA_LMV) {
775                         LASSERT(S_ISDIR(la->la_mode));
776                         repbody->eadatasize = ma->ma_lmv_size;
777                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
778                 }
779         } else if (S_ISLNK(la->la_mode) &&
780                    reqbody->valid & OBD_MD_LINKNAME) {
781                 buffer->lb_buf = ma->ma_lmm;
782                 /* eadatasize from the client includes the NULL terminator, so
783                  * there is no need to read it */
784                 buffer->lb_len = reqbody->eadatasize - 1;
785                 rc = mo_readlink(env, next, buffer);
786                 if (unlikely(rc <= 0)) {
787                         CERROR("readlink failed: %d\n", rc);
788                         rc = -EFAULT;
789                 } else {
790                         int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
791
792                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
793                                 rc -= 2;
794                         repbody->valid |= OBD_MD_LINKNAME;
795                         /* we need to report back the size including the
796                          * NULL terminator because the client expects that */
797                         repbody->eadatasize = rc + 1;
798                         if (repbody->eadatasize != reqbody->eadatasize)
799                                 CERROR("Read shorter symlink %d, expected %d\n",
800                                        rc, reqbody->eadatasize - 1);
801                         /* NULL terminate */
802                         ((char *)ma->ma_lmm)[rc] = 0;
803
804                         /* If the total CDEBUG() size is larger than a page, it
805                          * will print a warning to the console; avoid this by
806                          * printing just the last part of the symlink. */
807                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
808                                print_limit < rc ? "..." : "", print_limit,
809                                (char *)ma->ma_lmm + rc - print_limit, rc);
810                         rc = 0;
811                 }
812         }
813
814         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
815                 repbody->max_cookiesize = 0;
816                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
817                 repbody->valid |= OBD_MD_FLMODEASIZE;
818                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
819                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
820                        repbody->max_cookiesize);
821         }
822
823         if (exp_connect_rmtclient(info->mti_exp) &&
824             reqbody->valid & OBD_MD_FLRMTPERM) {
825                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
826
827                 /* mdt_getattr_lock only */
828                 rc = mdt_pack_remote_perm(info, o, buf);
829                 if (rc) {
830                         repbody->valid &= ~OBD_MD_FLRMTPERM;
831                         repbody->aclsize = 0;
832                         RETURN(rc);
833                 } else {
834                         repbody->valid |= OBD_MD_FLRMTPERM;
835                         repbody->aclsize = sizeof(struct mdt_remote_perm);
836                 }
837         }
838 #ifdef CONFIG_FS_POSIX_ACL
839         else if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
840                  (reqbody->valid & OBD_MD_FLACL)) {
841                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
842                 buffer->lb_len = req_capsule_get_size(pill,
843                                                       &RMF_ACL, RCL_SERVER);
844                 if (buffer->lb_len > 0) {
845                         rc = mo_xattr_get(env, next, buffer,
846                                           XATTR_NAME_ACL_ACCESS);
847                         if (rc < 0) {
848                                 if (rc == -ENODATA) {
849                                         repbody->aclsize = 0;
850                                         repbody->valid |= OBD_MD_FLACL;
851                                         rc = 0;
852                                 } else if (rc == -EOPNOTSUPP) {
853                                         rc = 0;
854                                 } else {
855                                         CERROR("got acl size: %d\n", rc);
856                                 }
857                         } else {
858                                 repbody->aclsize = rc;
859                                 repbody->valid |= OBD_MD_FLACL;
860                                 rc = 0;
861                         }
862                 }
863         }
864 #endif
865
866         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
867             info->mti_mdt->mdt_opts.mo_mds_capa &&
868             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
869                 struct lustre_capa *capa;
870
871                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
872                 LASSERT(capa);
873                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
874                 rc = mo_capa_get(env, next, capa, 0);
875                 if (rc)
876                         RETURN(rc);
877                 repbody->valid |= OBD_MD_FLMDSCAPA;
878         }
879
880 out:
881         if (rc == 0)
882                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
883
884         RETURN(rc);
885 }
886
887 static int mdt_renew_capa(struct mdt_thread_info *info)
888 {
889         struct mdt_object  *obj = info->mti_object;
890         struct mdt_body    *body;
891         struct lustre_capa *capa, *c;
892         int rc;
893         ENTRY;
894
895         /* if the object doesn't exist, or the server has disabled capabilities,
896          * return directly; the client will find the OBD_MD_FLOSSCAPA flag
897          * not set in body->valid.
898          */
899         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
900             !(exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA))
901                 RETURN(0);
902
903         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
904         LASSERT(body != NULL);
905
906         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
907         LASSERT(c);
908
909         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
910         LASSERT(capa);
911
912         *capa = *c;
913         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
914         if (rc == 0)
915                 body->valid |= OBD_MD_FLOSSCAPA;
916         RETURN(rc);
917 }
918
919 int mdt_getattr(struct mdt_thread_info *info)
920 {
921         struct mdt_object       *obj = info->mti_object;
922         struct req_capsule      *pill = info->mti_pill;
923         struct mdt_body         *reqbody;
924         struct mdt_body         *repbody;
925         mode_t                   mode;
926         int rc, rc2;
927         ENTRY;
928
929         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
930         LASSERT(reqbody);
931
932         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
933                 rc = req_capsule_server_pack(pill);
934                 if (unlikely(rc))
935                         RETURN(err_serious(rc));
936                 rc = mdt_renew_capa(info);
937                 GOTO(out_shrink, rc);
938         }
939
940         LASSERT(obj != NULL);
941         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
942
943         mode = lu_object_attr(&obj->mot_obj.mo_lu);
944
945         /* old clients may not report the needed easize, so use the max value */
946         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
947                              reqbody->eadatasize == 0 ?
948                              info->mti_mdt->mdt_max_mdsize :
949                              reqbody->eadatasize);
950
951         rc = req_capsule_server_pack(pill);
952         if (unlikely(rc != 0))
953                 RETURN(err_serious(rc));
954
955         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
956         LASSERT(repbody != NULL);
957         repbody->eadatasize = 0;
958         repbody->aclsize = 0;
959
960         if (reqbody->valid & OBD_MD_FLRMTPERM)
961                 rc = mdt_init_ucred(info, reqbody);
962         else
963                 rc = mdt_check_ucred(info);
964         if (unlikely(rc))
965                 GOTO(out_shrink, rc);
966
967         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
968
969         /*
970          * Don't check the capability at all, because rename might do a getattr
971          * on a remote object, and at that time no capability is available.
972          */
973         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
974         rc = mdt_getattr_internal(info, obj, 0);
975         if (reqbody->valid & OBD_MD_FLRMTPERM)
976                 mdt_exit_ucred(info);
977         EXIT;
978 out_shrink:
979         mdt_client_compatibility(info);
980         rc2 = mdt_fix_reply(info);
981         if (rc == 0)
982                 rc = rc2;
983         return rc;
984 }
985
986 int mdt_is_subdir(struct mdt_thread_info *info)
987 {
988         struct mdt_object     *o = info->mti_object;
989         struct req_capsule    *pill = info->mti_pill;
990         const struct mdt_body *body = info->mti_body;
991         struct mdt_body       *repbody;
992         int                    rc;
993         ENTRY;
994
995         LASSERT(o != NULL);
996
997         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
998
999         /*
1000          * We save the last checked parent fid in @repbody->fid1 for the
1001          * remote directory case.
1002          */
1003         LASSERT(fid_is_sane(&body->fid2));
1004         LASSERT(mdt_object_exists(o) && !mdt_object_remote(o));
1005         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1006                            &body->fid2, &repbody->fid1);
1007         if (rc == 0 || rc == -EREMOTE)
1008                 repbody->valid |= OBD_MD_FLID;
1009
1010         RETURN(rc);
1011 }
1012
1013 int mdt_swap_layouts(struct mdt_thread_info *info)
1014 {
1015         struct ptlrpc_request   *req = mdt_info_req(info);
1016         struct obd_export       *exp = req->rq_export;
1017         struct mdt_object       *o1, *o2, *o;
1018         struct mdt_lock_handle  *lh1, *lh2;
1019         struct mdc_swap_layouts *msl;
1020         int                      rc;
1021         ENTRY;
1022
1023         /* the client does not support the layout lock, so layout swapping
1024          * is disabled.
1025          * FIXME: there is a problem for old clients which don't support the
1026          * layout lock yet. If those clients have already opened the file,
1027          * they won't be notified at all, so the old layout may still be
1028          * used to do IO. This can be fixed after file release is landed, by
1029          * doing an exclusive open and taking the full EX ibits lock. - Jinshan */
1030         if (!exp_connect_layout(exp))
1031                 RETURN(-EOPNOTSUPP);
1032
1033         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA1, RCL_CLIENT))
1034                 mdt_set_capainfo(info, 0, &info->mti_body->fid1,
1035                                  req_capsule_client_get(info->mti_pill,
1036                                                         &RMF_CAPA1));
1037
1038         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA2, RCL_CLIENT))
1039                 mdt_set_capainfo(info, 1, &info->mti_body->fid2,
1040                                  req_capsule_client_get(info->mti_pill,
1041                                                         &RMF_CAPA2));
1042
1043         o1 = info->mti_object;
1044         o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
1045                                 &info->mti_body->fid2);
1046         if (IS_ERR(o))
1047                 GOTO(out, rc = PTR_ERR(o));
1048
1049         if (mdt_object_remote(o) || !mdt_object_exists(o)) /* remote object */
1050                 GOTO(put, rc = -ENOENT);
1051
1052         rc = lu_fid_cmp(&info->mti_body->fid1, &info->mti_body->fid2);
1053         if (unlikely(rc == 0)) /* same file, you kidding me? no-op. */
1054                 GOTO(put, rc);
1055
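        /* Order the two objects by FID, presumably so that the two layout
         * locks below are always taken in a consistent order and concurrent
         * swaps cannot deadlock against each other. */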
1056         if (rc < 0)
1057                 swap(o1, o2);
1058
1059         /* permission check: make sure the calling process has permission
1060          * to write both files. */
1061         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
1062                                 MAY_WRITE);
1063         if (rc < 0)
1064                 GOTO(put, rc);
1065
1066         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
1067                                 MAY_WRITE);
1068         if (rc < 0)
1069                 GOTO(put, rc);
1070
1071         msl = req_capsule_client_get(info->mti_pill, &RMF_SWAP_LAYOUTS);
1072         LASSERT(msl != NULL);
1073
1074         lh1 = &info->mti_lh[MDT_LH_NEW];
1075         mdt_lock_reg_init(lh1, LCK_EX);
1076         lh2 = &info->mti_lh[MDT_LH_OLD];
1077         mdt_lock_reg_init(lh2, LCK_EX);
1078
1079         rc = mdt_object_lock(info, o1, lh1, MDS_INODELOCK_LAYOUT,
1080                              MDT_LOCAL_LOCK);
1081         if (rc < 0)
1082                 GOTO(put, rc);
1083
1084         rc = mdt_object_lock(info, o2, lh2, MDS_INODELOCK_LAYOUT,
1085                              MDT_LOCAL_LOCK);
1086         if (rc < 0)
1087                 GOTO(unlock1, rc);
1088
1089         rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
1090                              mdt_object_child(o2), msl->msl_flags);
1091         GOTO(unlock2, rc);
1092 unlock2:
1093         mdt_object_unlock(info, o2, lh2, rc);
1094 unlock1:
1095         mdt_object_unlock(info, o1, lh1, rc);
1096 put:
1097         mdt_object_put(info->mti_env, o);
1098 out:
1099         RETURN(rc);
1100 }
1101
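/*
 * Handle a "raw" lookup, where the client asked only for the FID
 * (reqbody->valid == OBD_MD_FLID).  Returns 0 when this is not such a
 * request and the caller should continue with the full getattr path, and a
 * positive value once the lookup has been handled here.
 */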
1102 static int mdt_raw_lookup(struct mdt_thread_info *info,
1103                           struct mdt_object *parent,
1104                           const struct lu_name *lname,
1105                           struct ldlm_reply *ldlm_rep)
1106 {
1107         struct md_object *next = mdt_object_child(info->mti_object);
1108         const struct mdt_body *reqbody = info->mti_body;
1109         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1110         struct mdt_body *repbody;
1111         int rc;
1112         ENTRY;
1113
1114         if (reqbody->valid != OBD_MD_FLID)
1115                 RETURN(0);
1116
1117         LASSERT(!info->mti_cross_ref);
1118
1119         /* Only get the fid of this object by name */
1120         fid_zero(child_fid);
1121         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1122                         &info->mti_spec);
1123 #if 0
1124         /* XXX is raw_lookup possible as intent operation? */
1125         if (rc != 0) {
1126                 if (rc == -ENOENT)
1127                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1128                 RETURN(rc);
1129         } else
1130                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1131
1132         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1133 #endif
1134         if (rc == 0) {
1135                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1136                 repbody->fid1 = *child_fid;
1137                 repbody->valid = OBD_MD_FLID;
1138         }
1139         RETURN(1);
1140 }
1141
1142 /*
1143  * The UPDATE lock should be taken against the parent and released before exit;
1144  * the child_bits lock should be taken against the child and returned back:
1145  *            (1) a normal request should release the child lock;
1146  *            (2) an intent request will grant the lock to the client.
1147  */
1148 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1149                                  struct mdt_lock_handle *lhc,
1150                                  __u64 child_bits,
1151                                  struct ldlm_reply *ldlm_rep)
1152 {
1153         struct ptlrpc_request  *req       = mdt_info_req(info);
1154         struct mdt_body        *reqbody   = NULL;
1155         struct mdt_object      *parent    = info->mti_object;
1156         struct mdt_object      *child;
1157         struct md_object       *next      = mdt_object_child(parent);
1158         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1159         struct lu_name         *lname     = NULL;
1160         const char             *name      = NULL;
1161         int                     namelen   = 0;
1162         struct mdt_lock_handle *lhp       = NULL;
1163         struct ldlm_lock       *lock;
1164         struct ldlm_res_id     *res_id;
1165         int                     is_resent;
1166         int                     ma_need = 0;
1167         int                     rc;
1168
1169         ENTRY;
1170
1171         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1172         LASSERT(ergo(is_resent,
1173                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1174
1175         LASSERT(parent != NULL);
1176         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1177         if (name == NULL)
1178                 RETURN(err_serious(-EFAULT));
1179
1180         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1181                                        RCL_CLIENT) - 1;
1182         if (!info->mti_cross_ref) {
1183                 /*
1184                  * XXX: the check for "namelen == 0" is for getattr by fid
1185                  * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty name,
1186                  * that is, the name must contain at least one character and
1187                  * the terminating '\0'.
1188                  */
1189                 if (namelen == 0) {
1190                         reqbody = req_capsule_client_get(info->mti_pill,
1191                                                          &RMF_MDT_BODY);
1192                         if (unlikely(reqbody == NULL))
1193                                 RETURN(err_serious(-EFAULT));
1194
1195                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1196                                 RETURN(err_serious(-EINVAL));
1197
1198                         name = NULL;
1199                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1200                                "ldlm_rep = %p\n",
1201                                PFID(mdt_object_fid(parent)),
1202                                PFID(&reqbody->fid2), ldlm_rep);
1203                 } else {
1204                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1205                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1206                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1207                                name, ldlm_rep);
1208                 }
1209         }
1210         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1211
1212         if (unlikely(!mdt_object_exists(parent))) {
1213                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1214                                 &parent->mot_obj.mo_lu,
1215                                 "Parent doesn't exist!\n");
1216                 RETURN(-ESTALE);
1217         } else if (!info->mti_cross_ref) {
1218                 LASSERTF(!mdt_object_remote(parent),
1219                          "Parent "DFID" is on remote server\n",
1220                          PFID(mdt_object_fid(parent)));
1221         }
1222         if (lname) {
1223                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1224                 if (rc != 0) {
1225                         if (rc > 0)
1226                                 rc = 0;
1227                         RETURN(rc);
1228                 }
1229         }
1230
1231         if (info->mti_cross_ref) {
1232                 /* Only getattr on the child. Parent is on another node. */
1233                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1234                 child = parent;
1235                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1236                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1237
1238                 if (is_resent) {
1239                         /* Do not take lock for resent case. */
1240                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1241                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1242                                  lhc->mlh_reg_lh.cookie);
1243                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1244                                                 &lock->l_resource->lr_name));
1245                         LDLM_LOCK_PUT(lock);
1246                         rc = 0;
1247                 } else {
1248                         mdt_lock_handle_init(lhc);
1249                         mdt_lock_reg_init(lhc, LCK_PR);
1250
1251                         /*
1252                          * The object's name is on another MDS, so no lookup
1253                          * lock is needed here, but an update lock is.
1254                          */
1255                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1256                         child_bits |= MDS_INODELOCK_PERM | MDS_INODELOCK_UPDATE;
1257
1258                         rc = mdt_object_lock(info, child, lhc, child_bits,
1259                                              MDT_LOCAL_LOCK);
1260                 }
1261                 if (rc == 0) {
1262                         /* Finally, we can get attr for child. */
1263                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1264                                          BYPASS_CAPA);
1265                         rc = mdt_getattr_internal(info, child, 0);
1266                         if (unlikely(rc != 0))
1267                                 mdt_object_unlock(info, child, lhc, 1);
1268                 }
1269                 RETURN(rc);
1270         }
1271
1272         if (lname) {
1273                 /* step 1: lock parent only if parent is a directory */
1274                 if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
1275                         lhp = &info->mti_lh[MDT_LH_PARENT];
1276                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1277                         rc = mdt_object_lock(info, parent, lhp,
1278                                              MDS_INODELOCK_UPDATE,
1279                                              MDT_LOCAL_LOCK);
1280                         if (unlikely(rc != 0))
1281                                 RETURN(rc);
1282                 }
1283
1284                 /* step 2: lookup child's fid by name */
1285                 fid_zero(child_fid);
1286                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1287                                 &info->mti_spec);
1288
1289                 if (rc != 0) {
1290                         if (rc == -ENOENT)
1291                                 mdt_set_disposition(info, ldlm_rep,
1292                                                     DISP_LOOKUP_NEG);
1293                         GOTO(out_parent, rc);
1294                 } else
1295                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1296         } else {
1297                 *child_fid = reqbody->fid2;
1298                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1299         }
1300
1301         /*
1302          * step 3: find the child object by fid and lock it,
1303          *         regardless of whether it is local or remote.
1304          */
1305         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1306
1307         if (unlikely(IS_ERR(child)))
1308                 GOTO(out_parent, rc = PTR_ERR(child));
1309         if (is_resent) {
1310                 /* Do not take lock for resent case. */
1311                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1312                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1313                          lhc->mlh_reg_lh.cookie);
1314
1315                 res_id = &lock->l_resource->lr_name;
1316                 if (!fid_res_name_eq(mdt_object_fid(child),
1317                                     &lock->l_resource->lr_name)) {
1318                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1319                                                  &lock->l_resource->lr_name),
1320                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1321                                  (unsigned long)res_id->name[0],
1322                                  (unsigned long)res_id->name[1],
1323                                  (unsigned long)res_id->name[2],
1324                                  PFID(mdt_object_fid(parent)));
1325                           CWARN("Although resent, still could not get child lock, "
1326                                 "parent: "DFID" child: "DFID"\n",
1327                                 PFID(mdt_object_fid(parent)),
1328                                 PFID(mdt_object_fid(child)));
1329                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1330                           LDLM_LOCK_PUT(lock);
1331                           GOTO(relock, 0);
1332                 }
1333                 LDLM_LOCK_PUT(lock);
1334                 rc = 0;
1335         } else {
1336                 bool try_layout = false;
1337
1338 relock:
1339                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1340                 mdt_lock_handle_init(lhc);
1341                 mdt_lock_reg_init(lhc, LCK_PR);
1342
1343                 if (!mdt_object_exists(child)) {
1344                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1345                                         &child->mot_obj.mo_lu,
1346                                         "Object doesn't exist!\n");
1347                         GOTO(out_child, rc = -ENOENT);
1348                 }
1349
1350                 if (!(child_bits & MDS_INODELOCK_UPDATE) &&
1351                       mdt_object_exists(child) && !mdt_object_remote(child)) {
1352                         struct md_attr *ma = &info->mti_attr;
1353
1354                         ma->ma_valid = 0;
1355                         ma->ma_need = MA_INODE;
1356                         rc = mdt_attr_get_complex(info, child, ma);
1357                         if (unlikely(rc != 0))
1358                                 GOTO(out_child, rc);
1359
1360                         /* If the file has not been changed for some time, we
1361                          * return not only a LOOKUP lock, but also an UPDATE
1362                          * lock, which might save us an RPC on a later STAT. For
1363                          * directories, it also lets negative dentries start
1364                          * working for this directory. */
1365                         if (ma->ma_valid & MA_INODE &&
1366                             ma->ma_attr.la_valid & LA_CTIME &&
1367                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1368                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1369                                 child_bits |= MDS_INODELOCK_UPDATE;
1370                 }
1371
1372                 /* layout lock must be granted in a best-effort way
1373                  * for IT operations */
1374                 LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT));
1375                 if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) &&
1376                     exp_connect_layout(info->mti_exp) &&
1377                     S_ISREG(lu_object_attr(&child->mot_obj.mo_lu)) &&
1378                     ldlm_rep != NULL) {
1379                         /* try to grant layout lock for regular file. */
1380                         try_layout = true;
1381                 }
1382
1383                 rc = 0;
1384                 if (try_layout) {
1385                         child_bits |= MDS_INODELOCK_LAYOUT;
1386                         /* try layout lock, it may fail to be granted due to
1387                          * contention at LOOKUP or UPDATE */
1388                         if (!mdt_object_lock_try(info, child, lhc, child_bits,
1389                                                  MDT_CROSS_LOCK)) {
1390                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1391                                 LASSERT(child_bits != 0);
1392                                 rc = mdt_object_lock(info, child, lhc,
1393                                                 child_bits, MDT_CROSS_LOCK);
1394                         } else {
1395                                 ma_need |= MA_LOV;
1396                         }
1397                 } else {
1398                         rc = mdt_object_lock(info, child, lhc, child_bits,
1399                                                 MDT_CROSS_LOCK);
1400                 }
1401                 if (unlikely(rc != 0))
1402                         GOTO(out_child, rc);
1403         }
1404
1405         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1406         /* Get MA_SOM attributes if update lock is given. */
1407         if (lock &&
1408             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1409             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1410                 ma_need |= MA_SOM;
1411
1412         /* finally, we can get attr for child. */
1413         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1414         rc = mdt_getattr_internal(info, child, ma_need);
1415         if (unlikely(rc != 0)) {
1416                 mdt_object_unlock(info, child, lhc, 1);
1417         } else if (lock) {
1418                 /* Debugging code. */
1419                 res_id = &lock->l_resource->lr_name;
1420                 LDLM_DEBUG(lock, "Returning lock to client");
1421                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1422                                          &lock->l_resource->lr_name),
1423                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1424                          (unsigned long)res_id->name[0],
1425                          (unsigned long)res_id->name[1],
1426                          (unsigned long)res_id->name[2],
1427                          PFID(mdt_object_fid(child)));
1428                 if (mdt_object_exists(child) && !mdt_object_remote(child))
1429                         mdt_pack_size2body(info, child);
1430         }
1431         if (lock)
1432                 LDLM_LOCK_PUT(lock);
1433
1434         EXIT;
1435 out_child:
1436         mdt_object_put(info->mti_env, child);
1437 out_parent:
1438         if (lhp)
1439                 mdt_object_unlock(info, parent, lhp, 1);
1440         return rc;
1441 }
1442
1443 /* normal handler: should release the child lock */
1444 int mdt_getattr_name(struct mdt_thread_info *info)
1445 {
1446         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1447         struct mdt_body        *reqbody;
1448         struct mdt_body        *repbody;
1449         int rc, rc2;
1450         ENTRY;
1451
1452         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1453         LASSERT(reqbody != NULL);
1454         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1455         LASSERT(repbody != NULL);
1456
1457         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1458         repbody->eadatasize = 0;
1459         repbody->aclsize = 0;
1460
1461         rc = mdt_init_ucred(info, reqbody);
1462         if (unlikely(rc))
1463                 GOTO(out_shrink, rc);
1464
1465         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1466         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1467                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1468                 lhc->mlh_reg_lh.cookie = 0;
1469         }
1470         mdt_exit_ucred(info);
1471         EXIT;
1472 out_shrink:
1473         mdt_client_compatibility(info);
1474         rc2 = mdt_fix_reply(info);
1475         if (rc == 0)
1476                 rc = rc2;
1477         return rc;
1478 }
1479
1480 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1481                          void *karg, void *uarg);
1482
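/**
 * Handler for MDS_SET_INFO requests.
 *
 * Supported keys are KEY_READ_ONLY, which toggles OBD_CONNECT_RDONLY on the
 * export, and KEY_CHANGELOG_CLEAR, which is forwarded to mdt_iocontrol().
 */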
1483 int mdt_set_info(struct mdt_thread_info *info)
1484 {
1485         struct ptlrpc_request *req = mdt_info_req(info);
1486         char *key;
1487         void *val;
1488         int keylen, vallen, rc = 0;
1489         ENTRY;
1490
1491         rc = req_capsule_server_pack(info->mti_pill);
1492         if (rc)
1493                 RETURN(rc);
1494
1495         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1496         if (key == NULL) {
1497                 DEBUG_REQ(D_HA, req, "no set_info key");
1498                 RETURN(-EFAULT);
1499         }
1500
1501         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1502                                       RCL_CLIENT);
1503
1504         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1505         if (val == NULL) {
1506                 DEBUG_REQ(D_HA, req, "no set_info val");
1507                 RETURN(-EFAULT);
1508         }
1509
1510         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1511                                       RCL_CLIENT);
1512
1513         /* Swab any part of val that needs swabbing here */
1514         if (KEY_IS(KEY_READ_ONLY)) {
1515                 req->rq_status = 0;
1516                 lustre_msg_set_status(req->rq_repmsg, 0);
1517
1518                 spin_lock(&req->rq_export->exp_lock);
1519                 if (*(__u32 *)val)
1520                         *exp_connect_flags_ptr(req->rq_export) |=
1521                                 OBD_CONNECT_RDONLY;
1522                 else
1523                         *exp_connect_flags_ptr(req->rq_export) &=
1524                                 ~OBD_CONNECT_RDONLY;
1525                 spin_unlock(&req->rq_export->exp_lock);
1526
1527         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1528                 struct changelog_setinfo *cs =
1529                         (struct changelog_setinfo *)val;
1530                 if (vallen != sizeof(*cs)) {
1531                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1532                         RETURN(-EINVAL);
1533                 }
1534                 if (ptlrpc_req_need_swab(req)) {
1535                         __swab64s(&cs->cs_recno);
1536                         __swab32s(&cs->cs_id);
1537                 }
1538
1539                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1540                                    vallen, val, NULL);
1541                 lustre_msg_set_status(req->rq_repmsg, rc);
1542
1543         } else {
1544                 RETURN(-EINVAL);
1545         }
1546         RETURN(0);
1547 }
1548
1549 /**
1550  * Top-level handler for MDT connection requests.
1551  */
1552 int mdt_connect(struct mdt_thread_info *info)
1553 {
1554         int rc;
1555         struct obd_connect_data *reply;
1556         struct obd_export *exp;
1557         struct ptlrpc_request *req = mdt_info_req(info);
1558
1559         rc = target_handle_connect(req);
1560         if (rc != 0)
1561                 return err_serious(rc);
1562
1563         LASSERT(req->rq_export != NULL);
1564         info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1565         rc = mdt_init_sec_level(info);
1566         if (rc != 0) {
1567                 obd_disconnect(class_export_get(req->rq_export));
1568                 return rc;
1569         }
1570
1571         /* To avoid exposing partially initialized connection flags, changes up
1572          * to this point have been staged in reply->ocd_connect_flags. Now that
1573          * connection handling has completed successfully, atomically update
1574          * the connect flags in the shared export data structure. LU-1623 */
1575         reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
1576         exp = req->rq_export;
1577         spin_lock(&exp->exp_lock);
1578         *exp_connect_flags_ptr(exp) = reply->ocd_connect_flags;
1579         spin_unlock(&exp->exp_lock);
1580
1581         rc = mdt_init_idmap(info);
1582         if (rc != 0)
1583                 obd_disconnect(class_export_get(req->rq_export));
1584
1585         return rc;
1586 }
1587
1588 int mdt_disconnect(struct mdt_thread_info *info)
1589 {
1590         int rc;
1591         ENTRY;
1592
1593         rc = target_handle_disconnect(mdt_info_req(info));
1594         if (rc)
1595                 rc = err_serious(rc);
1596         RETURN(rc);
1597 }
1598
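/**
 * Transfer the pages collected in \a rdpg to the client via a bulk PUT on
 * MDS_BULK_PORTAL.
 *
 * \a nob is the total number of bytes to send; for old clients without
 * OBD_CONNECT_BRW_SIZE it is overridden by rdpg->rp_count.
 */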
1599 static int mdt_sendpage(struct mdt_thread_info *info,
1600                         struct lu_rdpg *rdpg, int nob)
1601 {
1602         struct ptlrpc_request   *req = mdt_info_req(info);
1603         struct obd_export       *exp = req->rq_export;
1604         struct ptlrpc_bulk_desc *desc;
1605         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1606         int                      tmpcount;
1607         int                      tmpsize;
1608         int                      i;
1609         int                      rc;
1610         ENTRY;
1611
1612         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
1613                                     MDS_BULK_PORTAL);
1614         if (desc == NULL)
1615                 RETURN(-ENOMEM);
1616
1617         if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
1618                 /* an old client requires the reply size in its PAGE_SIZE,
1619                  * which is rdpg->rp_count */
1620                 nob = rdpg->rp_count;
1621
1622         for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1623              i++, tmpcount -= tmpsize) {
1624                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1625                 ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
1626         }
1627
1628         LASSERT(desc->bd_nob == nob);
1629         rc = target_bulk_io(exp, desc, lwi);
1630         ptlrpc_free_bulk_pin(desc);
1631         RETURN(rc);
1632 }
1633
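/**
 * Handler for MDS_READPAGE requests.
 *
 * Allocates pages, asks the lower layers to fill them with directory data
 * via mo_readpage(), and sends the result to the client with mdt_sendpage().
 */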
1634 int mdt_readpage(struct mdt_thread_info *info)
1635 {
1636         struct mdt_object *object = info->mti_object;
1637         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1638         struct mdt_body   *reqbody;
1639         struct mdt_body   *repbody;
1640         int                rc;
1641         int                i;
1642         ENTRY;
1643
1644         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1645                 RETURN(err_serious(-ENOMEM));
1646
1647         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1648         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1649         if (reqbody == NULL || repbody == NULL)
1650                 RETURN(err_serious(-EFAULT));
1651
1652         /*
1653          * prepare @rdpg before calling lower layers and starting the transfer.
1654          * Here reqbody->size contains the offset at which to start reading and
1655          * reqbody->nlink contains the number of bytes to read.
1656          */
1657         rdpg->rp_hash = reqbody->size;
1658         if (rdpg->rp_hash != reqbody->size) {
1659                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1660                        rdpg->rp_hash, reqbody->size);
1661                 RETURN(-EFAULT);
1662         }
1663
1664         rdpg->rp_attrs = reqbody->mode;
1665         if (exp_connect_flags(info->mti_exp) & OBD_CONNECT_64BITHASH)
1666                 rdpg->rp_attrs |= LUDA_64BITHASH;
1667         rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
1668                                 exp_max_brw_size(info->mti_exp));
1669         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
1670                           CFS_PAGE_SHIFT;
1671         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1672         if (rdpg->rp_pages == NULL)
1673                 RETURN(-ENOMEM);
1674
1675         for (i = 0; i < rdpg->rp_npages; ++i) {
1676                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1677                 if (rdpg->rp_pages[i] == NULL)
1678                         GOTO(free_rdpg, rc = -ENOMEM);
1679         }
1680
1681         /* call lower layers to fill allocated pages with directory data */
1682         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1683         if (rc < 0)
1684                 GOTO(free_rdpg, rc);
1685
1686         /* send pages to client */
1687         rc = mdt_sendpage(info, rdpg, rc);
1688
1689         EXIT;
1690 free_rdpg:
1691
1692         for (i = 0; i < rdpg->rp_npages; i++)
1693                 if (rdpg->rp_pages[i] != NULL)
1694                         cfs_free_page(rdpg->rp_pages[i]);
1695         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1696
1697         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1698                 RETURN(0);
1699
1700         return rc;
1701 }
1702
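/**
 * Common part of the reint handlers.
 *
 * Unpacks the reint record, packs the reply, sets up user credentials,
 * handles resent requests and finally calls mdt_reint_rec() to perform the
 * actual metadata update.
 */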
1703 static int mdt_reint_internal(struct mdt_thread_info *info,
1704                               struct mdt_lock_handle *lhc,
1705                               __u32 op)
1706 {
1707         struct req_capsule      *pill = info->mti_pill;
1708         struct mdt_body         *repbody;
1709         int                      rc = 0, rc2;
1710         ENTRY;
1711
1712
1713         rc = mdt_reint_unpack(info, op);
1714         if (rc != 0) {
1715                 CERROR("Can't unpack reint, rc %d\n", rc);
1716                 RETURN(err_serious(rc));
1717         }
1718
1719         /* for replay (no_create) lmm is not needed, client has it already */
1720         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1721                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1722                                      info->mti_rr.rr_eadatalen);
1723
1724         /* llog cookies are always 0, the field is kept for compatibility */
1725         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1726                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);
1727
1728         rc = req_capsule_server_pack(pill);
1729         if (rc != 0) {
1730                 CERROR("Can't pack response, rc %d\n", rc);
1731                 RETURN(err_serious(rc));
1732         }
1733
1734         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1735                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1736                 LASSERT(repbody);
1737                 repbody->eadatasize = 0;
1738                 repbody->aclsize = 0;
1739         }
1740
1741         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1742
1743         /* for replay no cookie / lmm is needed, because the client already has them */
1744         if (info->mti_spec.no_create)
1745                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1746                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1747
1748         rc = mdt_init_ucred_reint(info);
1749         if (rc)
1750                 GOTO(out_shrink, rc);
1751
1752         rc = mdt_fix_attr_ucred(info, op);
1753         if (rc != 0)
1754                 GOTO(out_ucred, rc = err_serious(rc));
1755
1756         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1757                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1758                 GOTO(out_ucred, rc);
1759         }
1760         rc = mdt_reint_rec(info, lhc);
1761         EXIT;
1762 out_ucred:
1763         mdt_exit_ucred(info);
1764 out_shrink:
1765         mdt_client_compatibility(info);
1766         rc2 = mdt_fix_reply(info);
1767         if (rc == 0)
1768                 rc = rc2;
1769         return rc;
1770 }
1771
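/**
 * Extract the reint opcode from the request and extend the request capsule
 * with the matching format from \a fmt. Returns the opcode on success, or an
 * error wrapped with err_serious() if the record is missing or the opcode is
 * unsupported.
 */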
1772 static long mdt_reint_opcode(struct mdt_thread_info *info,
1773                              const struct req_format **fmt)
1774 {
1775         struct mdt_rec_reint *rec;
1776         long opc;
1777
1778         opc = err_serious(-EFAULT);
1779         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1780         if (rec != NULL) {
1781                 opc = rec->rr_opcode;
1782                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1783                 if (opc < REINT_MAX && fmt[opc] != NULL)
1784                         req_capsule_extend(info->mti_pill, fmt[opc]);
1785                 else {
1786                         CERROR("Unsupported opc: %ld\n", opc);
1787                         opc = err_serious(opc);
1788                 }
1789         }
1790         return opc;
1791 }
1792
1793 int mdt_reint(struct mdt_thread_info *info)
1794 {
1795         long opc;
1796         int  rc;
1797
1798         static const struct req_format *reint_fmts[REINT_MAX] = {
1799                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1800                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1801                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1802                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1803                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1804                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1805                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR,
1806                 [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK
1807         };
1808
1809         ENTRY;
1810
1811         opc = mdt_reint_opcode(info, reint_fmts);
1812         if (opc >= 0) {
1813                 /*
1814                  * No lock possible here from client to pass it to reint code
1815                  * path.
1816                  */
1817                 rc = mdt_reint_internal(info, NULL, opc);
1818         } else {
1819                 rc = opc;
1820         }
1821
1822         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1823         RETURN(rc);
1824 }
1825
1826 /* this should sync the whole device */
1827 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1828 {
1829         struct dt_device *dt = mdt->mdt_bottom;
1830         int rc;
1831         ENTRY;
1832
1833         rc = dt->dd_ops->dt_sync(env, dt);
1834         RETURN(rc);
1835 }
1836
1837 /* this should sync this object */
1838 static int mdt_object_sync(struct mdt_thread_info *info)
1839 {
1840         struct md_object *next;
1841         int rc;
1842         ENTRY;
1843
1844         if (!mdt_object_exists(info->mti_object)) {
1845                 CWARN("Non-existent object "DFID"!\n",
1846                       PFID(mdt_object_fid(info->mti_object)));
1847                 RETURN(-ESTALE);
1848         }
1849         next = mdt_object_child(info->mti_object);
1850         rc = mo_object_sync(info->mti_env, next);
1851
1852         RETURN(rc);
1853 }
1854
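/**
 * Handler for MDS_SYNC requests.
 *
 * A zero fid in the request body means "sync the whole device"; otherwise
 * only the object referenced by the fid is synced and its attributes are
 * packed into the reply.
 */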
1855 int mdt_sync(struct mdt_thread_info *info)
1856 {
1857         struct ptlrpc_request *req = mdt_info_req(info);
1858         struct req_capsule *pill = info->mti_pill;
1859         struct mdt_body *body;
1860         int rc;
1861         ENTRY;
1862
1863         /* The fid may be zero, so we call req_capsule_set() manually */
1864         req_capsule_set(pill, &RQF_MDS_SYNC);
1865
1866         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1867         if (body == NULL)
1868                 RETURN(err_serious(-EINVAL));
1869
1870         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1871                 RETURN(err_serious(-ENOMEM));
1872
1873         if (fid_seq(&body->fid1) == 0) {
1874                 /* sync the whole device */
1875                 rc = req_capsule_server_pack(pill);
1876                 if (rc == 0)
1877                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1878                 else
1879                         rc = err_serious(rc);
1880         } else {
1881                 /* sync an object */
1882                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1883                 if (rc == 0) {
1884                         rc = mdt_object_sync(info);
1885                         if (rc == 0) {
1886                                 const struct lu_fid *fid;
1887                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1888
1889                                 info->mti_attr.ma_need = MA_INODE;
1890                                 info->mti_attr.ma_valid = 0;
1891                                 rc = mdt_attr_get_complex(info, info->mti_object,
1892                                                           &info->mti_attr);
1893                                 if (rc == 0) {
1894                                         body = req_capsule_server_get(pill,
1895                                                                 &RMF_MDT_BODY);
1896                                         fid = mdt_object_fid(info->mti_object);
1897                                         mdt_pack_attr2body(info, body, la, fid);
1898                                 }
1899                         }
1900                 } else
1901                         rc = err_serious(rc);
1902         }
1903         if (rc == 0)
1904                 mdt_counter_incr(req, LPROC_MDT_SYNC);
1905
1906         RETURN(rc);
1907 }
1908
1909 /*
1910  * Quotacheck handler.
1911  * In-kernel quotacheck isn't supported any more.
1912  */
1913 int mdt_quotacheck(struct mdt_thread_info *info)
1914 {
1915         struct obd_quotactl     *oqctl;
1916         int                      rc;
1917         ENTRY;
1918
1919         oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
1920         if (oqctl == NULL)
1921                 RETURN(err_serious(-EPROTO));
1922
1923         rc = req_capsule_server_pack(info->mti_pill);
1924         if (rc)
1925                 RETURN(err_serious(rc));
1926
1927         /* deprecated, not used any more */
1928         RETURN(-EOPNOTSUPP);
1929 }
1930
1931 /*
1932  * Handle quota control requests to consult current usage/limit, but also
1933  * to configure quota enforcement
1934  */
1935 int mdt_quotactl(struct mdt_thread_info *info)
1936 {
1937         struct obd_export       *exp  = info->mti_exp;
1938         struct req_capsule      *pill = info->mti_pill;
1939         struct obd_quotactl     *oqctl, *repoqc;
1940         int                      id, rc;
1941         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
1942         ENTRY;
1943
1944         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1945         if (oqctl == NULL)
1946                 RETURN(err_serious(-EPROTO));
1947
1948         rc = req_capsule_server_pack(pill);
1949         if (rc)
1950                 RETURN(err_serious(rc));
1951
1952         switch (oqctl->qc_cmd) {
1953         case Q_QUOTACHECK:
1954         case LUSTRE_Q_INVALIDATE:
1955         case LUSTRE_Q_FINVALIDATE:
1956         case Q_QUOTAON:
1957         case Q_QUOTAOFF:
1958         case Q_INITQUOTA:
1959                 /* deprecated, not used any more */
1960                 RETURN(-EOPNOTSUPP);
1961                 /* master quotactl */
1962         case Q_GETINFO:
1963         case Q_SETINFO:
1964         case Q_SETQUOTA:
1965         case Q_GETQUOTA:
1966                 if (qmt == NULL)
1967                         RETURN(-EOPNOTSUPP);
1968                 /* slave quotactl */
1969         case Q_GETOINFO:
1970         case Q_GETOQUOTA:
1971                 break;
1972         default:
1973                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
1974                 RETURN(-EFAULT);
1975         }
1976
1977         /* map uid/gid for remote client */
1978         id = oqctl->qc_id;
1979         if (exp_connect_rmtclient(exp)) {
1980                 struct lustre_idmap_table *idmap;
1981
1982                 idmap = mdt_req2med(mdt_info_req(info))->med_idmap;
1983
1984                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1985                              oqctl->qc_cmd != Q_GETINFO))
1986                         RETURN(-EPERM);
1987
1988                 if (oqctl->qc_type == USRQUOTA)
1989                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1990                                                      oqctl->qc_id);
1991                 else if (oqctl->qc_type == GRPQUOTA)
1992                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1993                                                      oqctl->qc_id);
1994                 else
1995                         RETURN(-EINVAL);
1996
1997                 if (id == CFS_IDMAP_NOTFOUND) {
1998                         CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
1999                         RETURN(-EACCES);
2000                 }
2001         }
2002
2003         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
2004         if (repoqc == NULL)
2005                 RETURN(err_serious(-EFAULT));
2006
2007         if (oqctl->qc_id != id)
2008                 swap(oqctl->qc_id, id);
2009
2010         switch (oqctl->qc_cmd) {
2011
2012         case Q_GETINFO:
2013         case Q_SETINFO:
2014         case Q_SETQUOTA:
2015         case Q_GETQUOTA:
2016                 /* forward quotactl request to QMT */
2017                 rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
2018                 break;
2019
2020         case Q_GETOINFO:
2021         case Q_GETOQUOTA:
2022                 /* slave quotactl */
2023                 rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
2024                                    oqctl);
2025                 break;
2026
2027         default:
2028                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
2029                 RETURN(-EFAULT);
2030         }
2031
2032         if (oqctl->qc_id != id)
2033                 swap(oqctl->qc_id, id);
2034
2035         *repoqc = *oqctl;
2036         RETURN(rc);
2037 }
2038
2039 /*
2040  * OBD PING and other handlers.
2041  */
2042 int mdt_obd_ping(struct mdt_thread_info *info)
2043 {
2044         int rc;
2045         ENTRY;
2046
2047         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2048
2049         rc = target_handle_ping(mdt_info_req(info));
2050         if (rc < 0)
2051                 rc = err_serious(rc);
2052         RETURN(rc);
2053 }
2054
2055 /*
2056  * OBD_IDX_READ handler
2057  */
2058 int mdt_obd_idx_read(struct mdt_thread_info *info)
2059 {
2060         struct mdt_device       *mdt = info->mti_mdt;
2061         struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
2062         struct idx_info         *req_ii, *rep_ii;
2063         int                      rc, i;
2064         ENTRY;
2065
2066         memset(rdpg, 0, sizeof(*rdpg));
2067         req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);
2068
2069         /* extract idx_info buffer from request & reply */
2070         req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
2071         if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
2072                 RETURN(err_serious(-EPROTO));
2073
2074         rc = req_capsule_server_pack(info->mti_pill);
2075         if (rc)
2076                 RETURN(err_serious(rc));
2077
2078         rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
2079         if (rep_ii == NULL)
2080                 RETURN(err_serious(-EFAULT));
2081         rep_ii->ii_magic = IDX_INFO_MAGIC;
2082
2083         /* extract hash to start with */
2084         rdpg->rp_hash = req_ii->ii_hash_start;
2085
2086         /* extract requested attributes */
2087         rdpg->rp_attrs = req_ii->ii_attrs;
2088
2089         /* check that fid packed in request is valid and supported */
2090         if (!fid_is_sane(&req_ii->ii_fid))
2091                 RETURN(-EINVAL);
2092         rep_ii->ii_fid = req_ii->ii_fid;
2093
2094         /* copy flags */
2095         rep_ii->ii_flags = req_ii->ii_flags;
2096
2097         /* compute number of pages to allocate, ii_count is the number of 4KB
2098          * containers */
2099         if (req_ii->ii_count <= 0)
2100                 GOTO(out, rc = -EFAULT);
2101         rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
2102                                exp_max_brw_size(info->mti_exp));
2103         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT;
2104
2105         /* allocate pages to store the containers */
2106         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2107         if (rdpg->rp_pages == NULL)
2108                 GOTO(out, rc = -ENOMEM);
2109         for (i = 0; i < rdpg->rp_npages; i++) {
2110                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
2111                 if (rdpg->rp_pages[i] == NULL)
2112                         GOTO(out, rc = -ENOMEM);
2113         }
2114
2115         /* populate pages with key/record pairs */
2116         rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
2117         if (rc < 0)
2118                 GOTO(out, rc);
2119
2120         LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
2121                  "asked %d > %d\n", rc, rdpg->rp_count);
2122
2123         /* send pages to client */
2124         rc = mdt_sendpage(info, rdpg, rc);
2125
2126         GOTO(out, rc);
2127 out:
2128         if (rdpg->rp_pages) {
2129                 for (i = 0; i < rdpg->rp_npages; i++)
2130                         if (rdpg->rp_pages[i])
2131                                 cfs_free_page(rdpg->rp_pages[i]);
2132                 OBD_FREE(rdpg->rp_pages,
2133                          rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2134         }
2135         return rc;
2136 }
2137
2138 int mdt_obd_log_cancel(struct mdt_thread_info *info)
2139 {
2140         return err_serious(-EOPNOTSUPP);
2141 }
2142
2143 int mdt_obd_qc_callback(struct mdt_thread_info *info)
2144 {
2145         return err_serious(-EOPNOTSUPP);
2146 }
2147
2148 /*
2149  * LLOG handlers.
2150  */
2151
2152 /** clone llog ctxt from child (mdd)
2153  * This allows remote llog (replicator) access.
2154  * We can either pass all llog RPCs (eg mdt_llog_create) on to child where the
2155  * context was originally set up, or we can handle them directly.
2156  * I choose the latter, but that means I need any llog
2157  * contexts set up by the child to be accessible by the mdt.  So we clone the
2158  * context into our context list here.
2159  */
2160 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2161                                int idx)
2162 {
2163         struct md_device  *next = mdt->mdt_child;
2164         struct llog_ctxt *ctxt;
2165         int rc;
2166
2167         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2168                 return 0;
2169
2170         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2171         if (rc || ctxt == NULL) {
2172                 return 0;
2173         }
2174
2175         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2176         if (rc)
2177                 CERROR("Can't set mdt ctxt %d\n", rc);
2178
2179         return rc;
2180 }
2181
2182 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2183                                  struct mdt_device *mdt, int idx)
2184 {
2185         struct llog_ctxt *ctxt;
2186
2187         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2188         if (ctxt == NULL)
2189                 return 0;
2190         /* Put once for the get we just did, and once for the clone */
2191         llog_ctxt_put(ctxt);
2192         llog_ctxt_put(ctxt);
2193         return 0;
2194 }
2195
2196 int mdt_llog_create(struct mdt_thread_info *info)
2197 {
2198         int rc;
2199
2200         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2201         rc = llog_origin_handle_open(mdt_info_req(info));
2202         return (rc < 0 ? err_serious(rc) : rc);
2203 }
2204
2205 int mdt_llog_destroy(struct mdt_thread_info *info)
2206 {
2207         int rc;
2208
2209         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2210         rc = llog_origin_handle_destroy(mdt_info_req(info));
2211         return (rc < 0 ? err_serious(rc) : rc);
2212 }
2213
2214 int mdt_llog_read_header(struct mdt_thread_info *info)
2215 {
2216         int rc;
2217
2218         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2219         rc = llog_origin_handle_read_header(mdt_info_req(info));
2220         return (rc < 0 ? err_serious(rc) : rc);
2221 }
2222
2223 int mdt_llog_next_block(struct mdt_thread_info *info)
2224 {
2225         int rc;
2226
2227         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2228         rc = llog_origin_handle_next_block(mdt_info_req(info));
2229         return (rc < 0 ? err_serious(rc) : rc);
2230 }
2231
2232 int mdt_llog_prev_block(struct mdt_thread_info *info)
2233 {
2234         int rc;
2235
2236         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2237         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2238         return (rc < 0 ? err_serious(rc) : rc);
2239 }
2240
2241
2242 /*
2243  * DLM handlers.
2244  */
2245
2246 static struct ldlm_callback_suite cbs = {
2247         .lcs_completion = ldlm_server_completion_ast,
2248         .lcs_blocking   = ldlm_server_blocking_ast,
2249         .lcs_glimpse    = ldlm_server_glimpse_ast
2250 };
2251
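/**
 * Handler for LDLM_ENQUEUE requests: pass the already unpacked dlm request to
 * ldlm_handle_enqueue0() together with the MDT callback suite above.
 */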
2252 int mdt_enqueue(struct mdt_thread_info *info)
2253 {
2254         struct ptlrpc_request *req;
2255         int rc;
2256
2257         /*
2258          * info->mti_dlm_req already contains swapped and (if necessary)
2259          * converted dlm request.
2260          */
2261         LASSERT(info->mti_dlm_req != NULL);
2262
2263         req = mdt_info_req(info);
2264         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2265                                   req, info->mti_dlm_req, &cbs);
2266         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2267         return rc ? err_serious(rc) : req->rq_status;
2268 }
2269
2270 int mdt_convert(struct mdt_thread_info *info)
2271 {
2272         int rc;
2273         struct ptlrpc_request *req;
2274
2275         LASSERT(info->mti_dlm_req);
2276         req = mdt_info_req(info);
2277         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2278         return rc ? err_serious(rc) : req->rq_status;
2279 }
2280
2281 int mdt_bl_callback(struct mdt_thread_info *info)
2282 {
2283         CERROR("bl callbacks should not happen on MDS\n");
2284         LBUG();
2285         return err_serious(-EOPNOTSUPP);
2286 }
2287
2288 int mdt_cp_callback(struct mdt_thread_info *info)
2289 {
2290         CERROR("cp callbacks should not happen on MDS\n");
2291         LBUG();
2292         return err_serious(-EOPNOTSUPP);
2293 }
2294
2295 /*
2296  * sec context handlers
2297  */
2298 int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2299 {
2300         int rc;
2301
2302         rc = mdt_handle_idmap(info);
2303
2304         if (unlikely(rc)) {
2305                 struct ptlrpc_request *req = mdt_info_req(info);
2306                 __u32                  opc;
2307
2308                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2309                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2310                         sptlrpc_svc_ctx_invalidate(req);
2311         }
2312
2313         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2314
2315         return rc;
2316 }
2317
2318 /*
2319  * quota request handlers
2320  */
2321 int mdt_quota_dqacq(struct mdt_thread_info *info)
2322 {
2323         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2324         int                      rc;
2325         ENTRY;
2326
2327         if (qmt == NULL)
2328                 RETURN(err_serious(-EOPNOTSUPP));
2329
2330         rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2331         RETURN(rc);
2332 }
2333
2334 static struct mdt_object *mdt_obj(struct lu_object *o)
2335 {
2336         LASSERT(lu_device_is_mdt(o->lo_dev));
2337         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2338 }
2339
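/**
 * Allocate an mdt object for fid \a f, passing LOC_F_NEW to lu_object_find()
 * to mark it as a brand new object.
 */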
2340 struct mdt_object *mdt_object_new(const struct lu_env *env,
2341                                   struct mdt_device *d,
2342                                   const struct lu_fid *f)
2343 {
2344         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2345         struct lu_object *o;
2346         struct mdt_object *m;
2347         ENTRY;
2348
2349         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2350         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2351         if (unlikely(IS_ERR(o)))
2352                 m = (struct mdt_object *)o;
2353         else
2354                 m = mdt_obj(o);
2355         RETURN(m);
2356 }
2357
2358 struct mdt_object *mdt_object_find(const struct lu_env *env,
2359                                    struct mdt_device *d,
2360                                    const struct lu_fid *f)
2361 {
2362         struct lu_object *o;
2363         struct mdt_object *m;
2364         ENTRY;
2365
2366         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2367         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2368         if (unlikely(IS_ERR(o)))
2369                 m = (struct mdt_object *)o;
2370         else
2371                 m = mdt_obj(o);
2372         RETURN(m);
2373 }
2374
2375 /**
2376  * Asynchronous commit for mdt device.
2377  *
2378  * Pass the asynchronous commit call down the MDS stack.
2379  *
2380  * \param env environment
2381  * \param mdt the mdt device
2382  */
2383 static void mdt_device_commit_async(const struct lu_env *env,
2384                                     struct mdt_device *mdt)
2385 {
2386         struct dt_device *dt = mdt->mdt_bottom;
2387         int rc;
2388
2389         rc = dt->dd_ops->dt_commit_async(env, dt);
2390         if (unlikely(rc != 0))
2391                 CWARN("async commit start failed with rc = %d\n", rc);
2392 }
2393
2394 /**
2395  * Mark the lock as "synchronous".
2396  *
2397  * Mark the lock to defer transaction commit to the unlock time.
2398  *
2399  * \param lock the lock to mark as "synchronous"
2400  *
2401  * \see mdt_is_lock_sync
2402  * \see mdt_save_lock
2403  */
2404 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2405 {
2406         lock->l_ast_data = (void*)1;
2407 }
2408
2409 /**
2410  * Check whether the lock is "synchronous" or not.
2411  *
2412  * \param lock the lock to check
2413  * \retval 1 the lock is "synchronous"
2414  * \retval 0 the lock isn't "synchronous"
2415  *
2416  * \see mdt_set_lock_sync
2417  * \see mdt_save_lock
2418  */
2419 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2420 {
2421         return lock->l_ast_data != NULL;
2422 }
2423
2424 /**
2425  * Blocking AST for mdt locks.
2426  *
2427  * Starts a transaction commit in case of a COS lock conflict, or
2428  * defers such a commit to mdt_save_lock().
2429  *
2430  * \param lock the lock which blocks a request or cancelling lock
2431  * \param desc unused
2432  * \param data unused
2433  * \param flag indicates whether this is a cancelling or blocking callback
2434  * \retval 0
2435  * \see ldlm_blocking_ast_nocheck
2436  */
2437 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2438                      void *data, int flag)
2439 {
2440         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2441         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2442         int rc;
2443         ENTRY;
2444
2445         if (flag == LDLM_CB_CANCELING)
2446                 RETURN(0);
2447         lock_res_and_lock(lock);
2448         if (lock->l_blocking_ast != mdt_blocking_ast) {
2449                 unlock_res_and_lock(lock);
2450                 RETURN(0);
2451         }
2452         if (mdt_cos_is_enabled(mdt) &&
2453             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2454             lock->l_blocking_lock != NULL &&
2455             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2456                 mdt_set_lock_sync(lock);
2457         }
2458         rc = ldlm_blocking_ast_nocheck(lock);
2459
2460         /* There is no lock conflict if l_blocking_lock == NULL; it
2461          * indicates a blocking ast sent from ldlm_lock_decref_internal
2462          * when the last reference to a local lock was released */
2463         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2464                 struct lu_env env;
2465
2466                 rc = lu_env_init(&env, LCT_LOCAL);
2467                 if (unlikely(rc != 0))
2468                         CWARN("lu_env initialization failed with rc = %d, "
2469                               "cannot start asynchronous commit\n", rc);
2470                 else
2471                         mdt_device_commit_async(&env, mdt);
2472                 lu_env_fini(&env);
2473         }
2474         RETURN(rc);
2475 }
2476
2477 int mdt_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2478                         void *data, int flag)
2479 {
2480         struct lustre_handle lockh;
2481         int               rc;
2482
2483         switch (flag) {
2484         case LDLM_CB_BLOCKING:
2485                 ldlm_lock2handle(lock, &lockh);
2486                 rc = ldlm_cli_cancel(&lockh);
2487                 if (rc < 0) {
2488                         CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2489                         RETURN(rc);
2490                 }
2491                 break;
2492         case LDLM_CB_CANCELING:
2493                 LDLM_DEBUG(lock, "Revoke remote lock\n");
2494                 break;
2495         default:
2496                 LBUG();
2497         }
2498         RETURN(0);
2499 }
2500
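/**
 * Take an inodebits lock on a remote object by forwarding the enqueue to the
 * lower md layer via mo_object_lock(); MDS_INODELOCK_UPDATE must be among the
 * requested bits.
 */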
2501 int mdt_remote_object_lock(struct mdt_thread_info *mti,
2502                            struct mdt_object *o, struct lustre_handle *lh,
2503                            ldlm_mode_t mode, __u64 ibits)
2504 {
2505         struct ldlm_enqueue_info *einfo = &mti->mti_einfo;
2506         ldlm_policy_data_t *policy = &mti->mti_policy;
2507         int rc = 0;
2508         ENTRY;
2509
2510         LASSERT(mdt_object_remote(o));
2511
2512         LASSERT((ibits & MDS_INODELOCK_UPDATE));
2513
2514         memset(einfo, 0, sizeof(*einfo));
2515         einfo->ei_type = LDLM_IBITS;
2516         einfo->ei_mode = mode;
2517         einfo->ei_cb_bl = mdt_md_blocking_ast;
2518         einfo->ei_cb_cp = ldlm_completion_ast;
2519
2520         memset(policy, 0, sizeof(*policy));
2521         policy->l_inodebits.bits = ibits;
2522
2523         rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo,
2524                             policy);
2525         RETURN(rc);
2526 }
2527
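/**
 * Core object locking routine used by mdt_object_lock() and
 * mdt_object_lock_try().
 *
 * Adjusts the requested inodebits for remote objects, optionally takes a PDO
 * lock on the whole directory first, and then enqueues the regular ibits
 * lock, non-blocking if \a nonblock is set.
 */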
2528 static int mdt_object_lock0(struct mdt_thread_info *info, struct mdt_object *o,
2529                             struct mdt_lock_handle *lh, __u64 ibits,
2530                             bool nonblock, int locality)
2531 {
2532         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2533         ldlm_policy_data_t *policy = &info->mti_policy;
2534         struct ldlm_res_id *res_id = &info->mti_res_id;
2535         __u64 dlmflags;
2536         int rc;
2537         ENTRY;
2538
2539         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2540         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2541         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2542         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2543
2544         if (mdt_object_remote(o)) {
2545                 if (locality == MDT_CROSS_LOCK) {
2546                         ibits &= ~(MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM);
2547                         ibits |= MDS_INODELOCK_LOOKUP;
2548                 } else {
2549                         LASSERTF(!(ibits &
2550                                   (MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM)),
2551                                 "%s: wrong bit "LPX64" for remote obj "DFID"\n",
2552                                 mdt_obd_name(info->mti_mdt), ibits,
2553                                 PFID(mdt_object_fid(o)));
2554                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2555                 }
2556                 /* No PDO lock on remote object */
2557                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2558         }
2559
2560         if (lh->mlh_type == MDT_PDO_LOCK) {
2561                 /* check for existence after the object is locked */
2562                 if (mdt_object_exists(o) == 0) {
2563                         /* Non-existent object shouldn't have PDO lock */
2564                         RETURN(-ESTALE);
2565                 } else {
2566                         /* Non-dir object shouldn't have PDO lock */
2567                         if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
2568                                 RETURN(-ENOTDIR);
2569                 }
2570         }
2571
2572         memset(policy, 0, sizeof(*policy));
2573         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2574
2575         dlmflags = LDLM_FL_ATOMIC_CB;
2576         if (nonblock)
2577                 dlmflags |= LDLM_FL_BLOCK_NOWAIT;
2578
2579         /*
2580          * Take PDO lock on whole directory and build correct @res_id for lock
2581          * on part of directory.
2582          */
2583         if (lh->mlh_pdo_hash != 0) {
2584                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2585                 mdt_lock_pdo_mode(info, o, lh);
2586                 if (lh->mlh_pdo_mode != LCK_NL) {
2587                         /*
2588                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2589                          * is never going to be sent to client and we do not
2590                          * want it slowed down due to possible cancels.
2591                          */
2592                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2593                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2594                                           policy, res_id, dlmflags,
2595                                           &info->mti_exp->exp_handle.h_cookie);
2596                         if (unlikely(rc))
2597                                 RETURN(rc);
2598                 }
2599
2600                 /*
2601                  * Finish res_id initialization by adding the name hash that
2602                  * marks the part of the directory being modified.
2603                  */
2604                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2605         }
2606
2607         policy->l_inodebits.bits = ibits;
2608
2609         /*
2610          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2611          * going to be sent to the client. If it is, the mdt_intent_policy()
2612          * path will fix it up and turn the FL_LOCAL flag off.
2613          */
2614         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2615                           res_id, LDLM_FL_LOCAL_ONLY | dlmflags,
2616                           &info->mti_exp->exp_handle.h_cookie);
2617         if (rc)
2618                 mdt_object_unlock(info, o, lh, 1);
2619         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2620                  lh->mlh_pdo_hash != 0 &&
2621                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2622                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2623         }
2624
2625         RETURN(rc);
2626 }
2627
2628 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2629                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2630 {
2631         return mdt_object_lock0(info, o, lh, ibits, false, locality);
2632 }
2633
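/**
 * Non-blocking variant of mdt_object_lock(): try to take the lock without
 * waiting and return 1 on success, 0 if the lock could not be granted
 * immediately. \a lh is only updated when the lock is obtained.
 */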
2634 int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *o,
2635                         struct mdt_lock_handle *lh, __u64 ibits, int locality)
2636 {
2637         struct mdt_lock_handle tmp = *lh;
2638         int rc;
2639
2640         rc = mdt_object_lock0(info, o, &tmp, ibits, true, locality);
2641         if (rc == 0)
2642                 *lh = tmp;
2643
2644         return rc == 0;
2645 }
2646
2647 /**
2648  * Save a lock within request object.
2649  *
2650  * Keep the lock referenced until either client ACK or transaction
2651  * commit happens, or release the lock immediately, depending on input
2652  * parameters. If COS is ON, a write lock is converted to a COS lock
2653  * before saving.
2654  *
2655  * \param info thread info object
2656  * \param h lock handle
2657  * \param mode lock mode
2658  * \param decref force immediate lock releasing
2659  */
2660 static
2661 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2662                    ldlm_mode_t mode, int decref)
2663 {
2664         ENTRY;
2665
2666         if (lustre_handle_is_used(h)) {
2667                 if (decref || !info->mti_has_trans ||
2668                     !(mode & (LCK_PW | LCK_EX))){
2669                         mdt_fid_unlock(h, mode);
2670                 } else {
2671                         struct mdt_device *mdt = info->mti_mdt;
2672                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2673                         struct ptlrpc_request *req = mdt_info_req(info);
2674                         int no_ack = 0;
2675
2676                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2677                                  h->cookie);
2678                         CDEBUG(D_HA, "request = %p reply state = %p"
2679                                " transno = "LPD64"\n",
2680                                req, req->rq_reply_state, req->rq_transno);
2681                         if (mdt_cos_is_enabled(mdt)) {
2682                                 no_ack = 1;
2683                                 ldlm_lock_downgrade(lock, LCK_COS);
2684                                 mode = LCK_COS;
2685                         }
2686                         ptlrpc_save_lock(req, h, mode, no_ack);
2687                         if (mdt_is_lock_sync(lock)) {
2688                                 CDEBUG(D_HA, "found sync-lock,"
2689                                        " async commit started\n");
2690                                 mdt_device_commit_async(info->mti_env,
2691                                                         mdt);
2692                         }
2693                         LDLM_LOCK_PUT(lock);
2694                 }
2695                 h->cookie = 0ull;
2696         }
2697
2698         EXIT;
2699 }
2700
2701 /**
2702  * Unlock mdt object.
2703  *
2704  * Immediately release the regular lock and the PDO lock, or save the
2705  * locks in the request and keep them referenced until client ACK or
2706  * transaction commit.
2707  *
2708  * \param info thread info object
2709  * \param o mdt object
2710  * \param lh mdt lock handle referencing regular and PDO locks
2711  * \param decref force immediate lock releasing
2712  */
2713 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2714                        struct mdt_lock_handle *lh, int decref)
2715 {
2716         ENTRY;
2717
2718         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2719         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2720
2721         if (lustre_handle_is_used(&lh->mlh_rreg_lh))
2722                 ldlm_lock_decref(&lh->mlh_rreg_lh, lh->mlh_rreg_mode);
2723
2724         EXIT;
2725 }
2726
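/**
 * Find the object for fid \a f and lock it with the bits in \a ibits.
 * On failure the object reference is dropped and an ERR_PTR is returned.
 */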
2727 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2728                                         const struct lu_fid *f,
2729                                         struct mdt_lock_handle *lh,
2730                                         __u64 ibits)
2731 {
2732         struct mdt_object *o;
2733
2734         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2735         if (!IS_ERR(o)) {
2736                 int rc;
2737
2738                 rc = mdt_object_lock(info, o, lh, ibits,
2739                                      MDT_LOCAL_LOCK);
2740                 if (rc != 0) {
2741                         mdt_object_put(info->mti_env, o);
2742                         o = ERR_PTR(rc);
2743                 }
2744         }
2745         return o;
2746 }
2747
2748 void mdt_object_unlock_put(struct mdt_thread_info * info,
2749                            struct mdt_object * o,
2750                            struct mdt_lock_handle *lh,
2751                            int decref)
2752 {
2753         mdt_object_unlock(info, o, lh, decref);
2754         mdt_object_put(info->mti_env, o);
2755 }
2756
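/**
 * Look up the handler for opcode \a opc in the \a supported slice table.
 * Returns NULL if the opcode falls outside all slices or is not supported.
 */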
2757 struct mdt_handler *mdt_handler_find(__u32 opc, struct mdt_opc_slice *supported)
2758 {
2759         struct mdt_opc_slice *s;
2760         struct mdt_handler   *h;
2761
2762         h = NULL;
2763         for (s = supported; s->mos_hs != NULL; s++) {
2764                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2765                         h = s->mos_hs + (opc - s->mos_opc_start);
2766                         if (likely(h->mh_opc != 0))
2767                                 LASSERTF(h->mh_opc == opc,
2768                                          "opcode mismatch %d != %d\n",
2769                                          h->mh_opc, opc);
2770                         else
2771                                 h = NULL; /* unsupported opc */
2772                         break;
2773                 }
2774         }
2775         return h;
2776 }
2777
2778 static int mdt_lock_resname_compat(struct mdt_device *m,
2779                                    struct ldlm_request *req)
2780 {
2781         /* XXX something... later. */
2782         return 0;
2783 }
2784
2785 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2786 {
2787         /* XXX something... later. */
2788         return 0;
2789 }
2790
2791 /*
2792  * Generic code handling requests that have struct mdt_body passed in:
2793  *
2794  *  - extract mdt_body from request and save it in @info, if present;
2795  *
2796  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2797  *  @info;
2798  *
2799  *  - if HABEO_CORPUS flag is set for this request type check whether object
2800  *  actually exists on storage (lu_object_exists()).
2801  *
2802  */
2803 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2804 {
2805         const struct mdt_body    *body;
2806         struct mdt_object        *obj;
2807         const struct lu_env      *env;
2808         struct req_capsule       *pill;
2809         int                       rc;
2810         ENTRY;
2811
2812         env = info->mti_env;
2813         pill = info->mti_pill;
2814
2815         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2816         if (body == NULL)
2817                 RETURN(-EFAULT);
2818
2819         if (!(body->valid & OBD_MD_FLID))
2820                 RETURN(0);
2821
2822         if (!fid_is_sane(&body->fid1)) {
2823                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2824                 RETURN(-EINVAL);
2825         }
2826
2827         /*
2828          * Do not fetch the size or any capa fields before checking that the
2829          * request actually contains a capability. Some requests do not, for
2830          * instance MDS_IS_SUBDIR.
2831          */
2832         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2833             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2834                 mdt_set_capainfo(info, 0, &body->fid1,
2835                                  req_capsule_client_get(pill, &RMF_CAPA1));
2836
2837         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2838         if (!IS_ERR(obj)) {
2839                 if ((flags & HABEO_CORPUS) &&
2840                     !mdt_object_exists(obj)) {
2841                         mdt_object_put(env, obj);
2842                         /* for capability renewal, ENOENT will be handled
2843                          * in mdt_renew_capa() */
2844                         if (body->valid & OBD_MD_FLOSSCAPA)
2845                                 rc = 0;
2846                         else
2847                                 rc = -ENOENT;
2848                 } else {
2849                         info->mti_object = obj;
2850                         rc = 0;
2851                 }
2852         } else
2853                 rc = PTR_ERR(obj);
2854
2855         RETURN(rc);
2856 }
2857
2858 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2859 {
2860         struct req_capsule *pill = info->mti_pill;
2861         int rc;
2862         ENTRY;
2863
2864         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2865                 rc = mdt_body_unpack(info, flags);
2866         else
2867                 rc = 0;
2868
2869         if (rc == 0 && (flags & HABEO_REFERO)) {
2870                 /* Pack reply. */
2871                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2872                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2873                                              info->mti_body->eadatasize);
2874                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2875                         req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2876                                              RCL_SERVER, 0);
2877
2878                 rc = req_capsule_server_pack(pill);
2879         }
2880         RETURN(rc);
2881 }
2882
2883 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2884 {
2885         struct md_device *next = m->mdt_child;
2886
2887         return next->md_ops->mdo_init_capa_ctxt(env, next,
2888                                                 m->mdt_opts.mo_mds_capa,
2889                                                 m->mdt_capa_timeout,
2890                                                 m->mdt_capa_alg,
2891                                                 m->mdt_capa_keys);
2892 }
2893
2894 /*
2895  * Invoke handler for this request opc. Also do necessary preprocessing
2896  * (according to handler ->mh_flags), and post-processing (setting of
2897  * ->last_{xid,committed}).
2898  */
2899 static int mdt_req_handle(struct mdt_thread_info *info,
2900                           struct mdt_handler *h, struct ptlrpc_request *req)
2901 {
2902         int   rc, serious = 0;
2903         __u32 flags;
2904
2905         ENTRY;
2906
2907         LASSERT(h->mh_act != NULL);
2908         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2909         LASSERT(current->journal_info == NULL);
2910
2911         /*
2912          * Check for the various OBD_FAIL_$PREF_$OPC_NET codes. Do _not_ add
2913          * the same checks to handlers like mdt_close(), mdt_reint(), etc.,
2914          * without talking to the mdt authors first. Checking the same thing
2915          * there again is useless, and returning a 0 error without packing a
2916          * reply is buggy! Handlers either pack a reply or return an error.
2917          *
2918          * We return 0 here and send no reply in order to emulate a network
2919          * failure. Do not send any reply when any NET-related fail_id has
2920          * occurred.
2921          */
2922         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2923                 RETURN(0);
2924
2925         rc = 0;
2926         flags = h->mh_flags;
2927         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2928
2929         if (h->mh_fmt != NULL) {
2930                 req_capsule_set(info->mti_pill, h->mh_fmt);
2931                 rc = mdt_unpack_req_pack_rep(info, flags);
2932         }
2933
2934         if (rc == 0 && flags & MUTABOR &&
2935             exp_connect_flags(req->rq_export) & OBD_CONNECT_RDONLY)
2936                 /* should it be rq_status? */
2937                 rc = -EROFS;
2938
2939         if (rc == 0 && flags & HABEO_CLAVIS) {
2940                 struct ldlm_request *dlm_req;
2941
2942                 LASSERT(h->mh_fmt != NULL);
2943
2944                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2945                 if (dlm_req != NULL) {
2946                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2947                                         LDLM_IBITS &&
2948                                      dlm_req->lock_desc.l_policy_data.\
2949                                         l_inodebits.bits == 0)) {
2950                                 /*
2951                                  * A lock without inodebits makes no sense
2952                                  * and will oops later in ldlm. If the client
2953                                  * failed to set such bits, don't assert.
2954                                  *
2955                                  * For the liblustre flock case, it may be 0.
2956                                  */
2957                                 rc = -EPROTO;
2958                         } else {
2959                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2960                                         rc = mdt_lock_resname_compat(
2961                                                                 info->mti_mdt,
2962                                                                 dlm_req);
2963                                 info->mti_dlm_req = dlm_req;
2964                         }
2965                 } else {
2966                         rc = -EFAULT;
2967                 }
2968         }
2969
2970         /* capability settings changed via /proc; reinitialize the capa ctxt */
2971         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2972                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2973                 info->mti_mdt->mdt_capa_conf = 0;
2974         }
2975
2976         if (likely(rc == 0)) {
2977                 /*
2978                  * Process the request. There can be two types of rc:
2979                  * 1) errors with msg unpack/pack, or other failures outside
2980                  * the operation itself: these are counted as serious errors;
2981                  * 2) errors during the fs operation, which should be placed
2982                  * in rq_status only.
2983                  */
2984                 rc = h->mh_act(info);
2985                 if (rc == 0 &&
2986                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2987                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2988                                   "pack reply and returned 0 error\n",
2989                                   h->mh_name);
2990                         LBUG();
2991                 }
2992                 serious = is_serious(rc);
2993                 rc = clear_serious(rc);
2994         } else
2995                 serious = 1;
2996
2997         req->rq_status = rc;
2998
2999         /*
3000          * ELDLM_* codes greater than 0 belong in rq_status only, as do all
3001          * non-serious errors.
3002          */
3003         if (rc > 0 || !serious)
3004                 rc = 0;
3005
3006         LASSERT(current->journal_info == NULL);
3007
3008         if (rc == 0 && (flags & HABEO_CLAVIS) &&
3009             info->mti_mdt->mdt_opts.mo_compat_resname) {
3010                 struct ldlm_reply *dlmrep;
3011
3012                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3013                 if (dlmrep != NULL)
3014                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3015         }
3016
3017         /* If we're DISCONNECTing, the mdt_export_data is already freed */
3018         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3019                 target_committed_to_req(req);
3020
3021         if (unlikely(req_is_replay(req) &&
3022                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3023                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3024                 LBUG();
3025         }
3026
3027         target_send_reply(req, rc, info->mti_fail_id);
3028         RETURN(0);
3029 }
3030
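/*
 * Sketch of the handler contract enforced above (illustrative only; mdt_foo
 * and its use of err_serious() are assumptions, not code from this file): a
 * handler must either pack a reply or return an error, and failures outside
 * the fs operation itself are flagged as "serious" so they do not end up in
 * rq_status:
 *
 *      static int mdt_foo(struct mdt_thread_info *info)
 *      {
 *              int rc;
 *
 *              rc = req_capsule_server_pack(info->mti_pill);
 *              if (rc != 0)
 *                      RETURN(err_serious(rc));   <- unpack/pack failure
 *              rc = ... perform the fs operation ...;
 *              RETURN(rc);                        <- ends up in rq_status
 *      }
 */
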
3031 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3032 {
3033         lh->mlh_type = MDT_NUL_LOCK;
3034         lh->mlh_reg_lh.cookie = 0ull;
3035         lh->mlh_reg_mode = LCK_MINMODE;
3036         lh->mlh_pdo_lh.cookie = 0ull;
3037         lh->mlh_pdo_mode = LCK_MINMODE;
3038         lh->mlh_rreg_lh.cookie = 0ull;
3039         lh->mlh_rreg_mode = LCK_MINMODE;
3040 }
3041
3042 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3043 {
3044         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3045         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3046 }
3047
3048 /*
3049  * Initialize the commonly used fields of struct mdt_thread_info. The other
3050  * fields are left uninitialized, because it is too expensive to zero out the
3051  * whole mdt_thread_info (> 1K) on each request arrival.
3052  */
3053 static void mdt_thread_info_init(struct ptlrpc_request *req,
3054                                  struct mdt_thread_info *info)
3055 {
3056         int i;
3057
3058         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3059         info->mti_pill = &req->rq_pill;
3060
3061         /* lock handle */
3062         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3063                 mdt_lock_handle_init(&info->mti_lh[i]);
3064
3065         /* the mdt device can be NULL during CONNECT */
3066         if (req->rq_export) {
3067                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3068                 info->mti_exp = req->rq_export;
3069         } else
3070                 info->mti_mdt = NULL;
3071         info->mti_env = req->rq_svc_thread->t_env;
3072         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3073         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3074         info->mti_mos = NULL;
3075
3076         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3077         info->mti_body = NULL;
3078         info->mti_object = NULL;
3079         info->mti_dlm_req = NULL;
3080         info->mti_has_trans = 0;
3081         info->mti_cross_ref = 0;
3082         info->mti_opdata = 0;
3083         info->mti_big_lmm_used = 0;
3084
3085         /* Do not check for split by default. */
3086         info->mti_spec.no_create = 0;
3087         info->mti_spec.sp_rm_entry = 0;
3088 }
3089
3090 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3091 {
3092         int i;
3093
3094         req_capsule_fini(info->mti_pill);
3095         if (info->mti_object != NULL) {
3096                 mdt_object_put(info->mti_env, info->mti_object);
3097                 info->mti_object = NULL;
3098         }
3099         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3100                 mdt_lock_handle_fini(&info->mti_lh[i]);
3101         info->mti_env = NULL;
3102 }
3103
3104 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3105                                        struct obd_device *obd, int *process)
3106 {
3107         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3108         case MDS_CONNECT: /* This will never get here, but for completeness. */
3109         case OST_CONNECT: /* This will never get here, but for completeness. */
3110         case MDS_DISCONNECT:
3111         case OST_DISCONNECT:
3112         case OBD_IDX_READ:
3113                *process = 1;
3114                RETURN(0);
3115
3116         case MDS_CLOSE:
3117         case MDS_DONE_WRITING:
3118         case MDS_SYNC: /* used in unmounting */
3119         case OBD_PING:
3120         case MDS_REINT:
3121         case SEQ_QUERY:
3122         case FLD_QUERY:
3123         case LDLM_ENQUEUE:
3124                 *process = target_queue_recovery_request(req, obd);
3125                 RETURN(0);
3126
3127         default:
3128                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3129                 *process = -EAGAIN;
3130                 RETURN(0);
3131         }
3132 }
3133
3134 /*
3135  * Handle recovery. Return:
3136  *        +1: continue request processing;
3137  *       -ve: abort immediately with the given error code;
3138  *         0: send reply with error code in req->rq_status;
3139  */
3140 static int mdt_recovery(struct mdt_thread_info *info)
3141 {
3142         struct ptlrpc_request *req = mdt_info_req(info);
3143         struct obd_device *obd;
3144
3145         ENTRY;
3146
3147         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3148         case MDS_CONNECT:
3149         case SEC_CTX_INIT:
3150         case SEC_CTX_INIT_CONT:
3151         case SEC_CTX_FINI:
3152                 {
3153 #if 0
3154                         int rc;
3155
3156                         rc = mdt_handle_idmap(info);
3157                         if (rc)
3158                                 RETURN(rc);
3159                         else
3160 #endif
3161                                 RETURN(+1);
3162                 }
3163         }
3164
3165         if (unlikely(!class_connected_export(req->rq_export))) {
3166                 CDEBUG(D_HA, "operation %d on unconnected MDS from %s\n",
3167                        lustre_msg_get_opc(req->rq_reqmsg),
3168                        libcfs_id2str(req->rq_peer));
3169                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
3170                  * mds_A get -ENOTCONN (especially ping requests), which
3171                  * starts the mds_A deactivation timeout; then, when mds_A
3172                  * cleans up, the cleanup process is suspended because the
3173                  * deactivation timeout is not zero.
3174                  */
3175                 req->rq_status = -ENOTCONN;
3176                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3177                 RETURN(0);
3178         }
3179
3180         /* sanity check: if the xid matches, the request must be marked as
3181          * resent or replayed */
3182         if (req_xid_is_last(req)) {
3183                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3184                       (MSG_RESENT | MSG_REPLAY))) {
3185                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3186                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3187                                   lustre_msg_get_flags(req->rq_reqmsg));
3188                         LBUG();
3189                         req->rq_status = -ENOTCONN;
3190                         RETURN(-ENOTCONN);
3191                 }
3192         }
3193
3194         /* else: note the opposite is not always true; a RESENT req after a
3195          * failover will usually not match the last_xid, since it was likely
3196          * never committed. A REPLAYed request will almost never match the
3197          * last xid, however it could for a committed, but still retained,
3198          * open. */
3199
3200         obd = req->rq_export->exp_obd;
3201
3202         /* Check for aborted recovery... */
3203         if (unlikely(obd->obd_recovering)) {
3204                 int rc;
3205                 int should_process;
3206                 DEBUG_REQ(D_INFO, req, "Got new replay");
3207                 rc = mdt_filter_recovery_request(req, obd, &should_process);
3208                 if (rc != 0 || !should_process)
3209                         RETURN(rc);
3210                 else if (should_process < 0) {
3211                         req->rq_status = should_process;
3212                         rc = ptlrpc_error(req);
3213                         RETURN(rc);
3214                 }
3215         }
3216         RETURN(+1);
3217 }
3218
3219 static int mdt_msg_check_version(struct lustre_msg *msg)
3220 {
3221         int rc;
3222
3223         switch (lustre_msg_get_opc(msg)) {
3224         case MDS_CONNECT:
3225         case MDS_DISCONNECT:
3226         case OBD_PING:
3227         case SEC_CTX_INIT:
3228         case SEC_CTX_INIT_CONT:
3229         case SEC_CTX_FINI:
3230         case OBD_IDX_READ:
3231                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
3232                 if (rc)
3233                         CERROR("bad opc %u version %08x, expecting %08x\n",
3234                                lustre_msg_get_opc(msg),
3235                                lustre_msg_get_version(msg),
3236                                LUSTRE_OBD_VERSION);
3237                 break;
3238         case MDS_GETSTATUS:
3239         case MDS_GETATTR:
3240         case MDS_GETATTR_NAME:
3241         case MDS_STATFS:
3242         case MDS_READPAGE:
3243         case MDS_WRITEPAGE:
3244         case MDS_IS_SUBDIR:
3245         case MDS_REINT:
3246         case MDS_CLOSE:
3247         case MDS_DONE_WRITING:
3248         case MDS_PIN:
3249         case MDS_SYNC:
3250         case MDS_GETXATTR:
3251         case MDS_SETXATTR:
3252         case MDS_SET_INFO:
3253         case MDS_GET_INFO:
3254         case MDS_HSM_PROGRESS:
3255         case MDS_HSM_REQUEST:
3256         case MDS_HSM_CT_REGISTER:
3257         case MDS_HSM_CT_UNREGISTER:
3258         case MDS_HSM_STATE_GET:
3259         case MDS_HSM_STATE_SET:
3260         case MDS_HSM_ACTION:
3261         case MDS_QUOTACHECK:
3262         case MDS_QUOTACTL:
3263         case UPDATE_OBJ:
3264         case MDS_SWAP_LAYOUTS:
3265         case QUOTA_DQACQ:
3266         case QUOTA_DQREL:
3267         case SEQ_QUERY:
3268         case FLD_QUERY:
3269                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
3270                 if (rc)
3271                         CERROR("bad opc %u version %08x, expecting %08x\n",
3272                                lustre_msg_get_opc(msg),
3273                                lustre_msg_get_version(msg),
3274                                LUSTRE_MDS_VERSION);
3275                 break;
3276         case LDLM_ENQUEUE:
3277         case LDLM_CONVERT:
3278         case LDLM_BL_CALLBACK:
3279         case LDLM_CP_CALLBACK:
3280                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
3281                 if (rc)
3282                         CERROR("bad opc %u version %08x, expecting %08x\n",
3283                                lustre_msg_get_opc(msg),
3284                                lustre_msg_get_version(msg),
3285                                LUSTRE_DLM_VERSION);
3286                 break;
3287         case OBD_LOG_CANCEL:
3288         case LLOG_ORIGIN_HANDLE_CREATE:
3289         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
3290         case LLOG_ORIGIN_HANDLE_READ_HEADER:
3291         case LLOG_ORIGIN_HANDLE_CLOSE:
3292         case LLOG_ORIGIN_HANDLE_DESTROY:
3293         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
3294         case LLOG_CATINFO:
3295                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
3296                 if (rc)
3297                         CERROR("bad opc %u version %08x, expecting %08x\n",
3298                                lustre_msg_get_opc(msg),
3299                                lustre_msg_get_version(msg),
3300                                LUSTRE_LOG_VERSION);
3301                 break;
3302         default:
3303                 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
3304                 rc = -ENOTSUPP;
3305         }
3306         return rc;
3307 }
3308
3309 static int mdt_handle0(struct ptlrpc_request *req,
3310                        struct mdt_thread_info *info,
3311                        struct mdt_opc_slice *supported)
3312 {
3313         struct mdt_handler *h;
3314         struct lustre_msg  *msg;
3315         int                 rc;
3316
3317         ENTRY;
3318
3319         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
3320                 RETURN(0);
3321
3322         LASSERT(current->journal_info == NULL);
3323
3324         msg = req->rq_reqmsg;
3325         rc = mdt_msg_check_version(msg);
3326         if (likely(rc == 0)) {
3327                 rc = mdt_recovery(info);
3328                 if (likely(rc == +1)) {
3329                         h = mdt_handler_find(lustre_msg_get_opc(msg),
3330                                              supported);
3331                         if (likely(h != NULL)) {
3332                                 rc = mdt_req_handle(info, h, req);
3333                         } else {
3334                                 CERROR("Unsupported opc: 0x%x\n",
3335                                        lustre_msg_get_opc(msg));
3336                                 req->rq_status = -ENOTSUPP;
3337                                 rc = ptlrpc_error(req);
3338                                 RETURN(rc);
3339                         }
3340                 }
3341         } else
3342                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
3343         RETURN(rc);
3344 }
3345
3346 /*
3347  * MDT handler function, called by a ptlrpc service thread when a request arrives.
3348  *
3349  * XXX common "target" functionality should be factored into a separate
3350  * module shared by mdt, ost and stand-alone services like fld.
3351  */
3352 int mdt_handle_common(struct ptlrpc_request *req,
3353                       struct mdt_opc_slice *supported)
3354 {
3355         struct lu_env          *env;
3356         struct mdt_thread_info *info;
3357         int                     rc;
3358         ENTRY;
3359
3360         env = req->rq_svc_thread->t_env;
3361         /* Refill (initialize) the context (mdt_thread_info) in case it is
3362          * not initialized yet. This usually happens at startup: once the MDS
3363          * (ptlrpc threads) has started, it gets the first CONNECT request
3364          * before mdt_thread_info has been initialized. */
3365         lu_env_refill(env);
3366         LASSERT(env != NULL);
3367         LASSERT(env->le_ses != NULL);
3368         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
3369         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3370         LASSERT(info != NULL);
3371
3372         mdt_thread_info_init(req, info);
3373
3374         rc = mdt_handle0(req, info, supported);
3375
3376         mdt_thread_info_fini(info);
3377         RETURN(rc);
3378 }
3379
3380 /*
3381  * This is called from the recovery code as the handler for _all_ RPC types,
3382  * including FLD and SEQ.
3383  */
3384 int mdt_recovery_handle(struct ptlrpc_request *req)
3385 {
3386         int rc;
3387         ENTRY;
3388
3389         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3390         case FLD_QUERY:
3391                 rc = mdt_handle_common(req, mdt_fld_handlers);
3392                 break;
3393         case SEQ_QUERY:
3394                 rc = mdt_handle_common(req, mdt_seq_handlers);
3395                 break;
3396         default:
3397                 rc = mdt_handle_common(req, mdt_regular_handlers);
3398                 break;
3399         }
3400
3401         RETURN(rc);
3402 }
3403
3404 enum mdt_it_code {
3405         MDT_IT_OPEN,
3406         MDT_IT_OCREAT,
3407         MDT_IT_CREATE,
3408         MDT_IT_GETATTR,
3409         MDT_IT_READDIR,
3410         MDT_IT_LOOKUP,
3411         MDT_IT_UNLINK,
3412         MDT_IT_TRUNC,
3413         MDT_IT_GETXATTR,
3414         MDT_IT_LAYOUT,
3415         MDT_IT_QUOTA,
3416         MDT_IT_NR
3417 };
3418
3419 static int mdt_intent_getattr(enum mdt_it_code opcode,
3420                               struct mdt_thread_info *info,
3421                               struct ldlm_lock **,
3422                               __u64);
3423 static int mdt_intent_layout(enum mdt_it_code opcode,
3424                              struct mdt_thread_info *info,
3425                              struct ldlm_lock **,
3426                              __u64);
3427 static int mdt_intent_reint(enum mdt_it_code opcode,
3428                             struct mdt_thread_info *info,
3429                             struct ldlm_lock **,
3430                             __u64);
3431
3432 static struct mdt_it_flavor {
3433         const struct req_format *it_fmt;
3434         __u32                    it_flags;
3435         int                    (*it_act)(enum mdt_it_code ,
3436                                          struct mdt_thread_info *,
3437                                          struct ldlm_lock **,
3438                                          __u64);
3439         long                     it_reint;
3440 } mdt_it_flavor[] = {
3441         [MDT_IT_OPEN]     = {
3442                 .it_fmt   = &RQF_LDLM_INTENT,
3443                 /*.it_flags = HABEO_REFERO,*/
3444                 .it_flags = 0,
3445                 .it_act   = mdt_intent_reint,
3446                 .it_reint = REINT_OPEN
3447         },
3448         [MDT_IT_OCREAT]   = {
3449                 .it_fmt   = &RQF_LDLM_INTENT,
3450                 .it_flags = MUTABOR,
3451                 .it_act   = mdt_intent_reint,
3452                 .it_reint = REINT_OPEN
3453         },
3454         [MDT_IT_CREATE]   = {
3455                 .it_fmt   = &RQF_LDLM_INTENT,
3456                 .it_flags = MUTABOR,
3457                 .it_act   = mdt_intent_reint,
3458                 .it_reint = REINT_CREATE
3459         },
3460         [MDT_IT_GETATTR]  = {
3461                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3462                 .it_flags = HABEO_REFERO,
3463                 .it_act   = mdt_intent_getattr
3464         },
3465         [MDT_IT_READDIR]  = {
3466                 .it_fmt   = NULL,
3467                 .it_flags = 0,
3468                 .it_act   = NULL
3469         },
3470         [MDT_IT_LOOKUP]   = {
3471                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3472                 .it_flags = HABEO_REFERO,
3473                 .it_act   = mdt_intent_getattr
3474         },
3475         [MDT_IT_UNLINK]   = {
3476                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3477                 .it_flags = MUTABOR,
3478                 .it_act   = NULL,
3479                 .it_reint = REINT_UNLINK
3480         },
3481         [MDT_IT_TRUNC]    = {
3482                 .it_fmt   = NULL,
3483                 .it_flags = MUTABOR,
3484                 .it_act   = NULL
3485         },
3486         [MDT_IT_GETXATTR] = {
3487                 .it_fmt   = NULL,
3488                 .it_flags = 0,
3489                 .it_act   = NULL
3490         },
3491         [MDT_IT_LAYOUT] = {
3492                 .it_fmt   = &RQF_LDLM_INTENT_LAYOUT,
3493                 .it_flags = 0,
3494                 .it_act   = mdt_intent_layout
3495         }
3496 };
3497
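/*
 * Illustrative note (a sketch of how this table is meant to be consumed; the
 * actual intent dispatcher is not shown here): entries are indexed by
 * enum mdt_it_code, so a dispatcher would do roughly
 *
 *      struct mdt_it_flavor *flv = &mdt_it_flavor[opc];
 *
 *      if (flv->it_fmt != NULL)
 *              req_capsule_extend(info->mti_pill, flv->it_fmt);
 *      if (flv->it_act != NULL)
 *              rc = flv->it_act(opc, info, lockp, flags);
 *
 * with it_flags feeding the same HABEO_* and MUTABOR checks used by
 * mdt_req_handle(), and it_reint selecting the reint opcode for intents that
 * are handled through mdt_intent_reint().
 */
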
3498 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3499                             struct ldlm_lock **lockp,
3500                             struct ldlm_lock *new_lock,
3501                             struct mdt_lock_handle *lh,
3502                             __u64 flags)
3503 {
3504         struct ptlrpc_request  *req = mdt_info_req(info);
3505         struct ldlm_lock       *lock = *lockp;
3506
3507         /*
3508          * Get a new lock only when a possible resend did not find an
3509          * existing lock.
3510          */
3511         if (new_lock == NULL)
3512                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3513
3514         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3515                 lh->mlh_reg_lh.cookie = 0;
3516                 RETURN(0);
3517         }
3518
3519         LASSERTF(new_lock != NULL,
3520                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3521
3522         /*
3523          * If we've already given this lock to a client once, then we should
3524          * have no readers or writers.  Otherwise, we should have one reader
3525          * _or_ writer ref (which will be zeroed below) before returning the
3526          * lock to a client.
3527          */
3528         if (new_lock->l_export == req->rq_export) {
3529                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3530         } else {
3531                 LASSERT(new_lock->l_export == NULL);
3532                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3533         }
3534
3535         *lockp = new_lock;
3536
3537         if (new_lock->l_export == req->rq_export) {
3538                 /*
3539                  * Already gave this to the client, which means that we
3540                  * reconstructed a reply.
3541                  */
3542                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3543                         MSG_RESENT);
3544                 lh->mlh_reg_lh.cookie = 0;
3545                 RETURN(ELDLM_LOCK_REPLACED);
3546         }
3547
3548         /*
3549          * Fixup the lock to be given to the client.
3550          */
3551         lock_res_and_lock(new_lock);
3552         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3553          * possible blocking AST. */
3554         while (new_lock->l_readers > 0) {
3555                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3556                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3557                 new_lock->l_readers--;
3558         }
3559         while (new_lock->l_writers > 0) {
3560                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3561                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3562                 new_lock->l_writers--;
3563         }
3564
3565         new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3566         new_lock->l_blocking_ast = lock->l_blocking_ast;
3567         new_lock->l_completion_ast = lock->l_completion_ast;
3568         new_lock->l_remote_handle = lock->l_remote_handle;
3569         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3570
3571         unlock_res_and_lock(new_lock);
3572
3573         cfs_hash_add(new_lock->l_export->exp_lock_hash,
3574                      &new_lock->l_remote_handle,
3575                      &new_lock->l_exp_hash);
3576
3577         LDLM_LOCK_RELEASE(new_lock);
3578         lh->mlh_reg_lh.cookie = 0;
3579
3580         RETURN(ELDLM_LOCK_REPLACED);
3581 }
3582
3583 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3584                                     struct ldlm_lock *new_lock,
3585                                     struct ldlm_lock **old_lock,
3586                                     struct mdt_lock_handle *lh)
3587 {
3588         struct ptlrpc_request  *req = mdt_info_req(info);
3589         struct obd_export      *exp = req->rq_export;
3590         struct lustre_handle    remote_hdl;
3591         struct ldlm_request    *dlmreq;
3592         struct ldlm_lock       *lock;
3593
3594         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3595                 return;
3596
3597         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3598         remote_hdl = dlmreq->lock_handle[0];
3599
3600         /* In the function below, .hs_keycmp resolves to
3601          * ldlm_export_lock_keycmp() */
3602         /* coverity[overrun-buffer-val] */
3603         lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3604         if (lock) {
3605                 if (lock != new_lock) {
3606                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3607                         lh->mlh_reg_mode = lock->l_granted_mode;
3608
3609                         LDLM_DEBUG(lock, "Restoring lock cookie");
3610                         DEBUG_REQ(D_DLMTRACE, req,
3611                                   "restoring lock cookie "LPX64,
3612                                   lh->mlh_reg_lh.cookie);
3613                         if (old_lock)
3614                                 *old_lock = LDLM_LOCK_GET(lock);
3615                         cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3616                         return;
3617                 }
3618
3619                 cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3620         }
3621
3622         /*
3623          * If the xid matches, then we know this is a resent request, and we
3624          * allow it. (It is probably an OPEN, for which we don't send a lock.)
3625          */
3626         if (req_xid_is_last(req))
3627                 return;
3628
3629         /*
3630          * This remote handle isn't enqueued, so we never received or processed
3631          * this request.  Clear MSG_RESENT, because it can be handled like any
3632          * normal request now.
3633          */
3634         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3635
3636         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3637                   remote_hdl.cookie);
3638 }
3639
3640 static int mdt_intent_getattr(enum mdt_it_code opcode,
3641                               struct mdt_thread_info *info,
3642                               struct ldlm_lock **lockp,
3643                               __u64 flags)
3644 {
3645         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3646         struct ldlm_lock       *new_lock = NULL;
3647         __u64                   child_bits;
3648         struct ldlm_reply      *ldlm_rep;
3649         struct ptlrpc_request  *req;
3650         struct mdt_body        *reqbody;
3651         struct mdt_body        *repbody;
3652         int                     rc, rc2;
3653         ENTRY;
3654
3655         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
3656         LASSERT(reqbody);
3657
3658         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
3659         LASSERT(repbody);
3660
3661         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
3662         repbody->eadatasize = 0;
3663         repbody->aclsize = 0;
3664
3665         switch (opcode) {
3666         case MDT_IT_LOOKUP:
3667                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM;
3668                 break;
3669         case MDT_IT_GETATTR:
3670                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
3671                              MDS_INODELOCK_PERM;
3672                 break;
3673         default:
3674                 CERROR("Unsupported intent (%d)\n", opcode);
3675                 GOTO(out_shrink, rc = -EINVAL);
3676         }
3677
3678         rc = mdt_init_ucred(info, reqbody);
3679         if (rc)
3680                 GOTO(out_shrink, rc);
3681
3682         req = info->mti_pill->rc_req;
3683         ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3684         mdt_set_disposition(info, ldlm_rep, DISP_IT_EXECD);
3685
3686         /* Get lock from request for possible resent case. */
3687         mdt_intent_fixup_resent(info, *lockp, &new_lock, lhc);
3688
3689         ldlm_rep->lock_policy_res2 =
3690                 mdt_getattr_name_lock(info, lhc, child_bits, ldlm_rep);
3691
3692         if (mdt_get_disposition(ldlm_rep, DISP_LOOKUP_NEG))
3693                 ldlm_rep->lock_policy_res2 = 0;
3694         if (!mdt_get_disposition(ldlm_rep, DISP_LOOKUP_POS) ||
3695             ldlm_rep->lock_policy_res2) {
3696                 lhc->mlh_reg_lh.cookie = 0ull;
3697                 GOTO(out_ucred, rc = ELDLM_LOCK_ABORTED);
3698         }
3699
3700         rc = mdt_intent_lock_replace(info, lockp, new_lock, lhc, flags);
3701         EXIT;
3702 out_ucred:
3703         mdt_exit_ucred(info);
3704 out_shrink:
3705         mdt_client_compatibility(info);
3706         rc2 = mdt_fix_reply(info);
3707         if (rc == 0)
3708                 rc = rc2;
3709         return rc;
3710 }
3711
3712 static int mdt_intent_layout(enum mdt_it_code opcode,
3713                              struct mdt_thread_info *info,
3714                              struct ldlm_lock **lockp,
3715                              __u64 flags)
3716 {
3717         struct layout_intent *layout;
3718         int rc;
3719         ENTRY;
3720
3721         if (opcode != MDT_IT_LAYOUT) {
3722                 CERROR("%s: Unknown intent (%d)\n",
3723                         info->mti_exp->exp_obd->obd_name, opcode);
3724                 RETURN(-EINVAL);
3725         }
3726
3727         (*lockp)->l_lvb_type = LVB_T_LAYOUT;
3728         req_capsule_set_size(info->mti_pill, &RMF_DLM_LVB, RCL_SERVER,
3729                         ldlm_lvbo_size(*lockp));
3730         rc = req_capsule_server_pack(info->mti_pill);
3731         if (rc != 0)
3732                 RETURN(-EINVAL);
3733
3734         layout = req_capsule_client_get(info->mti_pill, &RMF_LAYOUT_INTENT);
3735         LASSERT(layout != NULL);
3736         if (layout->li_opc == LAYOUT_INTENT_ACCESS)
3737                 /* return to normal ldlm handling */
3738                 RETURN(0);