LU-3253 mdt: use lu_object/device in mdt_object/device
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * struct OBD_{ALLOC,FREE}*()
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #include <lustre_acl.h>
69 #include <lustre_param.h>
70 #include <lustre_quota.h>
71 #include <lustre_linkea.h>
72 #include <lustre_lfsck.h>
73
74 mdl_mode_t mdt_mdl_lock_modes[] = {
75         [LCK_MINMODE] = MDL_MINMODE,
76         [LCK_EX]      = MDL_EX,
77         [LCK_PW]      = MDL_PW,
78         [LCK_PR]      = MDL_PR,
79         [LCK_CW]      = MDL_CW,
80         [LCK_CR]      = MDL_CR,
81         [LCK_NL]      = MDL_NL,
82         [LCK_GROUP]   = MDL_GROUP
83 };
84
85 ldlm_mode_t mdt_dlm_lock_modes[] = {
86         [MDL_MINMODE] = LCK_MINMODE,
87         [MDL_EX]      = LCK_EX,
88         [MDL_PW]      = LCK_PW,
89         [MDL_PR]      = LCK_PR,
90         [MDL_CW]      = LCK_CW,
91         [MDL_CR]      = LCK_CR,
92         [MDL_NL]      = LCK_NL,
93         [MDL_GROUP]   = LCK_GROUP
94 };
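
/*
 * A minimal, illustrative sketch (not the real helpers): the two tables
 * above are assumed to back the mdt_dlm_mode2mdl_mode() and
 * mdt_mdl_mode2dlm_mode() conversions used below (e.g. in
 * mdt_lock_pdo_mode()).  The example_* names are hypothetical; the real
 * helpers live elsewhere in the MDT code.
 */
#if 0
static inline mdl_mode_t example_dlm2mdl(ldlm_mode_t mode)
{
	/* index the LCK_* -> MDL_* table; bounds checking is assumed to be
	 * done by the caller in this sketch */
	return mdt_mdl_lock_modes[mode];
}

static inline ldlm_mode_t example_mdl2dlm(mdl_mode_t mode)
{
	/* index the MDL_* -> LCK_* table */
	return mdt_dlm_lock_modes[mode];
}
#endif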
95
96 static struct mdt_device *mdt_dev(struct lu_device *d);
97 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
98
99 static const struct lu_object_operations mdt_obj_ops;
100
101 /* Slab for MDT object allocation */
102 static struct kmem_cache *mdt_object_kmem;
103
104 static struct lu_kmem_descr mdt_caches[] = {
105         {
106                 .ckd_cache = &mdt_object_kmem,
107                 .ckd_name  = "mdt_obj",
108                 .ckd_size  = sizeof(struct mdt_object)
109         },
110         {
111                 .ckd_cache = NULL
112         }
113 };
114
115 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
116 {
117         if (!rep)
118                 return 0;
119         return (rep->lock_policy_res1 & flag);
120 }
121
122 void mdt_clear_disposition(struct mdt_thread_info *info,
123                            struct ldlm_reply *rep, int flag)
124 {
125         if (info)
126                 info->mti_opdata &= ~flag;
127         if (rep)
128                 rep->lock_policy_res1 &= ~flag;
129 }
130
131 void mdt_set_disposition(struct mdt_thread_info *info,
132                          struct ldlm_reply *rep, int flag)
133 {
134         if (info)
135                 info->mti_opdata |= flag;
136         if (rep)
137                 rep->lock_policy_res1 |= flag;
138 }
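
/*
 * Illustrative only: how the disposition helpers above are typically used
 * by the intent handlers later in this file.  A lookup is first marked as
 * executed, then as positive or negative depending on its result.  The
 * wrapper function name is hypothetical.
 */
#if 0
static void example_record_lookup(struct mdt_thread_info *info,
				  struct ldlm_reply *ldlm_rep, int rc)
{
	mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
	if (rc == 0)
		mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
	else if (rc == -ENOENT)
		mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);

	/* later code can test what was recorded in the reply */
	if (mdt_get_disposition(ldlm_rep, DISP_LOOKUP_NEG))
		CDEBUG(D_INODE, "negative lookup recorded\n");
}
#endif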
139
140 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
141 {
142         lh->mlh_pdo_hash = 0;
143         lh->mlh_reg_mode = lm;
144         lh->mlh_rreg_mode = lm;
145         lh->mlh_type = MDT_REG_LOCK;
146 }
147
148 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
149                        const char *name, int namelen)
150 {
151         lh->mlh_reg_mode = lm;
152         lh->mlh_rreg_mode = lm;
153         lh->mlh_type = MDT_PDO_LOCK;
154
155         if (name != NULL && (name[0] != '\0')) {
156                 LASSERT(namelen > 0);
157                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
158                 /* XXX Workaround for LU-2856
159                  * Zero is a valid return value of full_name_hash, but several
160                  * users of mlh_pdo_hash assume a non-zero hash value. We
161                  * therefore map zero onto an arbitrary, but consistent
162                  * value (1) to avoid problems further down the road. */
163                 if (unlikely(!lh->mlh_pdo_hash))
164                         lh->mlh_pdo_hash = 1;
165         } else {
166                 LASSERT(namelen == 0);
167                 lh->mlh_pdo_hash = 0ull;
168         }
169 }
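
/*
 * Illustrative only: typical use of the two initializers above, mirroring
 * callers further down in this file -- a plain regular lock handle (e.g.
 * for layout swap) and a PDO lock handle hashed by the name being looked
 * up under a parent directory.
 */
#if 0
	struct mdt_lock_handle *lh  = &info->mti_lh[MDT_LH_NEW];
	struct mdt_lock_handle *lhp = &info->mti_lh[MDT_LH_PARENT];

	mdt_lock_reg_init(lh, LCK_EX);			/* regular lock */
	mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);	/* PDO lock */
#endif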
170
171 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
172                               struct mdt_lock_handle *lh)
173 {
174         mdl_mode_t mode;
175         ENTRY;
176
177         /*
178          * Any dir access needs a couple of locks:
179          *
180          * 1) on the part of the dir we are going to look up in or modify;
181          *
182          * 2) on the whole dir to protect it from concurrent splitting and/or
183          * to flush the client's cache for readdir().
184          *
185          * So, for a given mode and object this routine decides what lock mode
186          * to use for lock #2:
187          *
188          * 1) if the caller is going to look up in the dir, then we only need
189          * to protect it from being split - LCK_CR
190          *
191          * 2) if the caller is going to modify the dir, then we need to protect
192          * it from being split and to flush the cache - LCK_CW
193          *
194          * 3) if the caller is going to modify the dir and that dir seems ready
195          * for splitting, then we need to protect it from any type of access
196          * (lookup/modify/split) - LCK_EX --bzzz
197          */
198
199         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
200         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
201
202         /*
203          * Ask the underlying layer its opinion about the preferable PDO lock
204          * mode, with the access type passed as the regular lock mode:
205          *
206          * - MDL_MINMODE means that the lower layer does not want to specify a
207          * lock mode;
208          *
209          * - MDL_NL means that no PDO lock should be taken. This is used in some
210          * cases; say, for non-splittable directories there is no need to use
211          * PDO locks at all.
212          */
213         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
214                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
215
216         if (mode != MDL_MINMODE) {
217                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
218         } else {
219                 /*
220                  * The lower layer does not want to specify a locking mode. We
221                  * do it ourselves. No special protection is needed, just flush
222                  * the client's cache on modification and allow concurrent
223                  * modification.
224                  */
225                 switch (lh->mlh_reg_mode) {
226                 case LCK_EX:
227                         lh->mlh_pdo_mode = LCK_EX;
228                         break;
229                 case LCK_PR:
230                         lh->mlh_pdo_mode = LCK_CR;
231                         break;
232                 case LCK_PW:
233                         lh->mlh_pdo_mode = LCK_CW;
234                         break;
235                 default:
236                         CERROR("Unexpected lock type (0x%x)\n",
237                                (int)lh->mlh_reg_mode);
238                         LBUG();
239                 }
240         }
241
242         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
243         EXIT;
244 }
245
246 int mdt_getstatus(struct mdt_thread_info *info)
247 {
248         struct mdt_device       *mdt  = info->mti_mdt;
249         struct mdt_body         *repbody;
250         int                     rc;
251         ENTRY;
252
253         rc = mdt_check_ucred(info);
254         if (rc)
255                 RETURN(err_serious(rc));
256
257         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
258                 RETURN(err_serious(-ENOMEM));
259
260         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
261         repbody->fid1 = mdt->mdt_md_root_fid;
262         repbody->valid |= OBD_MD_FLID;
263
264         if (mdt->mdt_opts.mo_mds_capa &&
265             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
266                 struct mdt_object  *root;
267                 struct lustre_capa *capa;
268
269                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
270                 if (IS_ERR(root))
271                         RETURN(PTR_ERR(root));
272
273                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
274                 LASSERT(capa);
275                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
276                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
277                                  0);
278                 mdt_object_put(info->mti_env, root);
279                 if (rc == 0)
280                         repbody->valid |= OBD_MD_FLMDSCAPA;
281         }
282
283         RETURN(rc);
284 }
285
286 int mdt_statfs(struct mdt_thread_info *info)
287 {
288         struct ptlrpc_request           *req = mdt_info_req(info);
289         struct md_device                *next = info->mti_mdt->mdt_child;
290         struct ptlrpc_service_part      *svcpt;
291         struct obd_statfs               *osfs;
292         int                             rc;
293
294         ENTRY;
295
296         svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;
297
298         /* This will trigger a watchdog timeout */
299         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
300                          (MDT_SERVICE_WATCHDOG_FACTOR *
301                           at_get(&svcpt->scp_at_estimate)) + 1);
302
303         rc = mdt_check_ucred(info);
304         if (rc)
305                 RETURN(err_serious(rc));
306
307         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
308                 RETURN(err_serious(-ENOMEM));
309
310         osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
311         if (!osfs)
312                 RETURN(-EPROTO);
313
314         /** statfs information is cached in the mdt_device */
315         if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
316                                cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
317                 /** statfs data is too old, get up-to-date one */
318                 rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
319                 if (rc)
320                         RETURN(rc);
321                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
322                 info->mti_mdt->mdt_osfs = *osfs;
323                 info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
324                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
325         } else {
326                 /** use cached statfs data */
327                 spin_lock(&info->mti_mdt->mdt_osfs_lock);
328                 *osfs = info->mti_mdt->mdt_osfs;
329                 spin_unlock(&info->mti_mdt->mdt_osfs_lock);
330         }
331
332         if (rc == 0)
333                 mdt_counter_incr(req, LPROC_MDT_STATFS);
334
335         RETURN(rc);
336 }
337
338 /**
339  * Pack SOM attributes into the reply.
340  * Call under a DLM UPDATE lock.
341  */
342 static void mdt_pack_size2body(struct mdt_thread_info *info,
343                                struct mdt_object *mo)
344 {
345         struct mdt_body *b;
346         struct md_attr *ma = &info->mti_attr;
347
348         LASSERT(ma->ma_attr.la_valid & LA_MODE);
349         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
350
351         /* Check if Size-on-MDS is supported, if this is a regular file,
352          * if SOM is enabled on the object, and if the SOM cache exists and is
353          * valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
354         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
355             !S_ISREG(ma->ma_attr.la_mode) ||
356             !mdt_object_is_som_enabled(mo) ||
357             !(ma->ma_valid & MA_SOM))
358                 return;
359
360         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
361         b->size = ma->ma_som->msd_size;
362         b->blocks = ma->ma_som->msd_blocks;
363 }
364
365 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
366                         const struct lu_attr *attr, const struct lu_fid *fid)
367 {
368         struct md_attr *ma = &info->mti_attr;
369
370         LASSERT(ma->ma_valid & MA_INODE);
371
372         b->atime      = attr->la_atime;
373         b->mtime      = attr->la_mtime;
374         b->ctime      = attr->la_ctime;
375         b->mode       = attr->la_mode;
376         b->size       = attr->la_size;
377         b->blocks     = attr->la_blocks;
378         b->uid        = attr->la_uid;
379         b->gid        = attr->la_gid;
380         b->flags      = attr->la_flags;
381         b->nlink      = attr->la_nlink;
382         b->rdev       = attr->la_rdev;
383
384         /* XXX: should pack the reply body according to attr->la_valid */
385         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
386                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
387                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
388                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
389
390         if (!S_ISREG(attr->la_mode)) {
391                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
392         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
393                 /* means no objects are allocated on OSTs */
394                 LASSERT(!(ma->ma_valid & MA_LOV));
395                 /* just ignore blocks occupied by extended attributes on MDS */
396                 b->blocks = 0;
397                 /* if no object is allocated on OSTs, the size on the MDS is valid. b=22272 */
398                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
399         }
400
401         if (fid) {
402                 b->fid1 = *fid;
403                 b->valid |= OBD_MD_FLID;
404                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
405                                 PFID(fid), b->nlink, b->mode, b->size);
406         }
407
408         if (info)
409                 mdt_body_reverse_idmap(info, b);
410
411         if (b->valid & OBD_MD_FLSIZE)
412                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
413                        PFID(fid), (unsigned long long)b->size);
414 }
415
416 static inline int mdt_body_has_lov(const struct lu_attr *la,
417                                    const struct mdt_body *body)
418 {
419         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
420                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
421 }
422
423 void mdt_client_compatibility(struct mdt_thread_info *info)
424 {
425         struct mdt_body       *body;
426         struct ptlrpc_request *req = mdt_info_req(info);
427         struct obd_export     *exp = req->rq_export;
428         struct md_attr        *ma = &info->mti_attr;
429         struct lu_attr        *la = &ma->ma_attr;
430         ENTRY;
431
432         if (exp_connect_layout(exp))
433                 /* the client can deal with 16-bit lmm_stripe_count */
434                 RETURN_EXIT;
435
436         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
437
438         if (!mdt_body_has_lov(la, body))
439                 RETURN_EXIT;
440
441         /* now we have a reply with a LOV for a client not compatible with the
442          * layout lock, so we have to clear the layout generation number */
443         if (S_ISREG(la->la_mode))
444                 ma->ma_lmm->lmm_layout_gen = 0;
445         EXIT;
446 }
447
448 static int mdt_big_xattr_get(struct mdt_thread_info *info, struct mdt_object *o,
449                              char *name)
450 {
451         const struct lu_env *env = info->mti_env;
452         int rc;
453         ENTRY;
454
455         LASSERT(info->mti_big_lmm_used == 0);
456         rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL, name);
457         if (rc < 0)
458                 RETURN(rc);
459
460         /* big_lmm may need to be grown */
461         if (info->mti_big_lmmsize < rc) {
462                 int size = size_roundup_power2(rc);
463
464                 if (info->mti_big_lmmsize > 0) {
465                         /* free old buffer */
466                         LASSERT(info->mti_big_lmm);
467                         OBD_FREE_LARGE(info->mti_big_lmm,
468                                        info->mti_big_lmmsize);
469                         info->mti_big_lmm = NULL;
470                         info->mti_big_lmmsize = 0;
471                 }
472
473                 OBD_ALLOC_LARGE(info->mti_big_lmm, size);
474                 if (info->mti_big_lmm == NULL)
475                         RETURN(-ENOMEM);
476                 info->mti_big_lmmsize = size;
477         }
478         LASSERT(info->mti_big_lmmsize >= rc);
479
480         info->mti_buf.lb_buf = info->mti_big_lmm;
481         info->mti_buf.lb_len = info->mti_big_lmmsize;
482         rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf, name);
483
484         RETURN(rc);
485 }
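
/*
 * Illustrative only: the two-step pattern implemented above -- probe the
 * xattr size with LU_BUF_NULL, make sure a large enough buffer exists,
 * then fetch the value for real.  "some_buffer" is a hypothetical,
 * suitably sized buffer standing in for info->mti_big_lmm.
 */
#if 0
	int size = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL,
				XATTR_NAME_LOV);	/* needed size or -errno */
	if (size > 0) {
		info->mti_buf.lb_buf = some_buffer;	/* >= size bytes */
		info->mti_buf.lb_len = size;
		size = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf,
				    XATTR_NAME_LOV);	/* actual data */
	}
#endif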
486
487 int mdt_attr_get_lov(struct mdt_thread_info *info,
488                      struct mdt_object *o, struct md_attr *ma)
489 {
490         struct md_object *next = mdt_object_child(o);
491         struct lu_buf    *buf = &info->mti_buf;
492         int rc;
493
494         buf->lb_buf = ma->ma_lmm;
495         buf->lb_len = ma->ma_lmm_size;
496         rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
497         if (rc > 0) {
498                 ma->ma_lmm_size = rc;
499                 ma->ma_valid |= MA_LOV;
500                 rc = 0;
501         } else if (rc == -ENODATA) {
502                 /* no LOV EA */
503                 rc = 0;
504         } else if (rc == -ERANGE) {
505                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LOV);
506                 if (rc > 0) {
507                         info->mti_big_lmm_used = 1;
508                         ma->ma_valid |= MA_LOV;
509                         ma->ma_lmm = info->mti_big_lmm;
510                         ma->ma_lmm_size = rc;
511                         /* update mdt_max_mdsize so all clients
512                          * will be aware of that */
513                         if (info->mti_mdt->mdt_max_mdsize < rc)
514                                 info->mti_mdt->mdt_max_mdsize = rc;
515                         rc = 0;
516                 }
517         }
518
519         return rc;
520 }
521
522 int mdt_attr_get_pfid(struct mdt_thread_info *info,
523                       struct mdt_object *o, struct lu_fid *pfid)
524 {
525         struct lu_buf           *buf = &info->mti_buf;
526         struct link_ea_header   *leh;
527         struct link_ea_entry    *lee;
528         int                      rc;
529         ENTRY;
530
531         buf->lb_buf = info->mti_big_lmm;
532         buf->lb_len = info->mti_big_lmmsize;
533         rc = mo_xattr_get(info->mti_env, mdt_object_child(o),
534                           buf, XATTR_NAME_LINK);
535         /* ignore errors, MA_PFID won't be set and it is
536          * up to the caller to treat this as an error */
537         if (rc == -ERANGE || buf->lb_len == 0) {
538                 rc = mdt_big_xattr_get(info, o, XATTR_NAME_LINK);
539                 buf->lb_buf = info->mti_big_lmm;
540                 buf->lb_len = info->mti_big_lmmsize;
541         }
542
543         if (rc < 0)
544                 RETURN(rc);
545         if (rc < sizeof(*leh)) {
546                 CERROR("short LinkEA on "DFID": rc = %d\n",
547                        PFID(mdt_object_fid(o)), rc);
548                 RETURN(-ENODATA);
549         }
550
551         leh = (struct link_ea_header *) buf->lb_buf;
552         lee = (struct link_ea_entry *)(leh + 1);
553         if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
554                 leh->leh_magic = LINK_EA_MAGIC;
555                 leh->leh_reccount = __swab32(leh->leh_reccount);
556                 leh->leh_len = __swab64(leh->leh_len);
557         }
558         if (leh->leh_magic != LINK_EA_MAGIC)
559                 RETURN(-EINVAL);
560         if (leh->leh_reccount == 0)
561                 RETURN(-ENODATA);
562
563         memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
564         fid_be_to_cpu(pfid, pfid);
565
566         RETURN(0);
567 }
568
569 int mdt_attr_get_complex(struct mdt_thread_info *info,
570                          struct mdt_object *o, struct md_attr *ma)
571 {
572         const struct lu_env *env = info->mti_env;
573         struct md_object    *next = mdt_object_child(o);
574         struct lu_buf       *buf = &info->mti_buf;
575         u32                  mode = lu_object_attr(&next->mo_lu);
576         int                  need = ma->ma_need;
577         int                  rc = 0, rc2;
578         ENTRY;
579
580         ma->ma_valid = 0;
581
582         if (need & MA_INODE) {
583                 ma->ma_need = MA_INODE;
584                 rc = mo_attr_get(env, next, ma);
585                 if (rc)
586                         GOTO(out, rc);
587                 ma->ma_valid |= MA_INODE;
588         }
589
590         if (need & MA_PFID) {
591                 rc = mdt_attr_get_pfid(info, o, &ma->ma_pfid);
592                 if (rc == 0)
593                         ma->ma_valid |= MA_PFID;
594                 /* ignore this error, parent fid is not mandatory */
595                 rc = 0;
596         }
597
598         if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
599                 rc = mdt_attr_get_lov(info, o, ma);
600                 if (rc)
601                         GOTO(out, rc);
602         }
603
604         if (need & MA_LMV && S_ISDIR(mode)) {
605                 buf->lb_buf = ma->ma_lmv;
606                 buf->lb_len = ma->ma_lmv_size;
607                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
608                 if (rc2 > 0) {
609                         ma->ma_lmv_size = rc2;
610                         ma->ma_valid |= MA_LMV;
611                 } else if (rc2 == -ENODATA) {
612                         /* no LMV EA */
613                         ma->ma_lmv_size = 0;
614                 } else
615                         GOTO(out, rc = rc2);
616         }
617
618         if (need & MA_SOM && S_ISREG(mode)) {
619                 buf->lb_buf = info->mti_xattr_buf;
620                 buf->lb_len = sizeof(info->mti_xattr_buf);
621                 CLASSERT(sizeof(struct som_attrs) <=
622                          sizeof(info->mti_xattr_buf));
623                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_SOM);
624                 rc2 = lustre_buf2som(info->mti_xattr_buf, rc2, ma->ma_som);
625                 if (rc2 == 0)
626                         ma->ma_valid |= MA_SOM;
627                 else if (rc2 < 0 && rc2 != -ENODATA)
628                         GOTO(out, rc = rc2);
629         }
630
631         if (need & MA_HSM && S_ISREG(mode)) {
632                 buf->lb_buf = info->mti_xattr_buf;
633                 buf->lb_len = sizeof(info->mti_xattr_buf);
634                 CLASSERT(sizeof(struct hsm_attrs) <=
635                          sizeof(info->mti_xattr_buf));
636                 rc2 = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_HSM);
637                 rc2 = lustre_buf2hsm(info->mti_xattr_buf, rc2, &ma->ma_hsm);
638                 if (rc2 == 0)
639                         ma->ma_valid |= MA_HSM;
640                 else if (rc2 < 0 && rc2 != -ENODATA)
641                         GOTO(out, rc = rc2);
642         }
643
644 #ifdef CONFIG_FS_POSIX_ACL
645         if (need & MA_ACL_DEF && S_ISDIR(mode)) {
646                 buf->lb_buf = ma->ma_acl;
647                 buf->lb_len = ma->ma_acl_size;
648                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
649                 if (rc2 > 0) {
650                         ma->ma_acl_size = rc2;
651                         ma->ma_valid |= MA_ACL_DEF;
652                 } else if (rc2 == -ENODATA) {
653                         /* no ACLs */
654                         ma->ma_acl_size = 0;
655                 } else
656                         GOTO(out, rc = rc2);
657         }
658 #endif
659 out:
660         ma->ma_need = need;
661         CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
662                rc, ma->ma_valid, ma->ma_lmm);
663         RETURN(rc);
664 }
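
/*
 * Illustrative only: the usual calling convention for
 * mdt_attr_get_complex(), as seen in the getattr paths below -- select the
 * needed attribute groups in ma_need, call, then check which groups were
 * actually filled in via ma_valid.
 */
#if 0
	struct md_attr *ma = &info->mti_attr;

	ma->ma_valid = 0;
	ma->ma_need  = MA_INODE | MA_LOV;
	rc = mdt_attr_get_complex(info, o, ma);
	if (rc == 0 && ma->ma_valid & MA_LOV)
		/* LOV EA is available in ma->ma_lmm / ma->ma_lmm_size */
		mdt_dump_lmm(D_INFO, ma->ma_lmm);
#endif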
665
666 static int mdt_getattr_internal(struct mdt_thread_info *info,
667                                 struct mdt_object *o, int ma_need)
668 {
669         struct md_object        *next = mdt_object_child(o);
670         const struct mdt_body   *reqbody = info->mti_body;
671         struct ptlrpc_request   *req = mdt_info_req(info);
672         struct md_attr          *ma = &info->mti_attr;
673         struct lu_attr          *la = &ma->ma_attr;
674         struct req_capsule      *pill = info->mti_pill;
675         const struct lu_env     *env = info->mti_env;
676         struct mdt_body         *repbody;
677         struct lu_buf           *buffer = &info->mti_buf;
678         int                     rc;
679         int                     is_root;
680         ENTRY;
681
682         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
683                 RETURN(err_serious(-ENOMEM));
684
685         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
686
687         ma->ma_valid = 0;
688
689         if (mdt_object_remote(o)) {
690                 /* This object is located on a remote node. */
691                 /* Return -EIO for old clients. */
692                 if (!mdt_is_dne_client(req->rq_export))
693                         GOTO(out, rc = -EIO);
694
695                 repbody->fid1 = *mdt_object_fid(o);
696                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
697                 GOTO(out, rc = 0);
698         }
699
700         buffer->lb_len = reqbody->eadatasize;
701         if (buffer->lb_len > 0)
702                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
703         else
704                 buffer->lb_buf = NULL;
705
706         /* If it is a dir object and the client requires MEA, then we return MEA */
707         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
708             reqbody->valid & OBD_MD_MEA) {
709                 /* Assumption: MDT_MD size is enough for lmv size. */
710                 ma->ma_lmv = buffer->lb_buf;
711                 ma->ma_lmv_size = buffer->lb_len;
712                 ma->ma_need = MA_LMV | MA_INODE;
713         } else {
714                 ma->ma_lmm = buffer->lb_buf;
715                 ma->ma_lmm_size = buffer->lb_len;
716                 ma->ma_need = MA_LOV | MA_INODE | MA_HSM;
717         }
718
719         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
720             reqbody->valid & OBD_MD_FLDIREA  &&
721             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
722                 /* get default stripe info for this dir. */
723                 ma->ma_need |= MA_LOV_DEF;
724         }
725         ma->ma_need |= ma_need;
726         if (ma->ma_need & MA_SOM)
727                 ma->ma_som = &info->mti_u.som.data;
728
729         rc = mdt_attr_get_complex(info, o, ma);
730         if (unlikely(rc)) {
731                 CERROR("%s: getattr error for "DFID": rc = %d\n",
732                        mdt_obd_name(info->mti_mdt),
733                        PFID(mdt_object_fid(o)), rc);
734                 RETURN(rc);
735         }
736
737         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
738
739         /* the Lustre protocol expects default striping to be returned
740          * for the user-visible root if explicitly requested */
741         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
742             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
743                 struct lu_fid      rootfid;
744                 struct mdt_object *root;
745                 struct mdt_device *mdt = info->mti_mdt;
746
747                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
748                 if (rc)
749                         RETURN(rc);
750                 root = mdt_object_find(env, mdt, &rootfid);
751                 if (IS_ERR(root))
752                         RETURN(PTR_ERR(root));
753                 rc = mdt_attr_get_lov(info, root, ma);
754                 mdt_object_put(info->mti_env, root);
755                 if (unlikely(rc)) {
756                         CERROR("%s: getattr error for "DFID": rc = %d\n",
757                                mdt_obd_name(info->mti_mdt),
758                                PFID(mdt_object_fid(o)), rc);
759                         RETURN(rc);
760                 }
761         }
762
763         if (likely(ma->ma_valid & MA_INODE))
764                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
765         else
766                 RETURN(-EFAULT);
767
768         if (mdt_body_has_lov(la, reqbody)) {
769                 if (ma->ma_valid & MA_LOV) {
770                         LASSERT(ma->ma_lmm_size);
771                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
772                         repbody->eadatasize = ma->ma_lmm_size;
773                         if (S_ISDIR(la->la_mode))
774                                 repbody->valid |= OBD_MD_FLDIREA;
775                         else
776                                 repbody->valid |= OBD_MD_FLEASIZE;
777                 }
778                 if (ma->ma_valid & MA_LMV) {
779                         LASSERT(S_ISDIR(la->la_mode));
780                         repbody->eadatasize = ma->ma_lmv_size;
781                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
782                 }
783         } else if (S_ISLNK(la->la_mode) &&
784                    reqbody->valid & OBD_MD_LINKNAME) {
785                 buffer->lb_buf = ma->ma_lmm;
786                 /* eadatasize from client includes NULL-terminator, so
787                  * there is no need to read it */
788                 buffer->lb_len = reqbody->eadatasize - 1;
789                 rc = mo_readlink(env, next, buffer);
790                 if (unlikely(rc <= 0)) {
791                         CERROR("%s: readlink failed for "DFID": rc = %d\n",
792                                mdt_obd_name(info->mti_mdt),
793                                PFID(mdt_object_fid(o)), rc);
794                         rc = -EFAULT;
795                 } else {
796                         int print_limit = min_t(int, PAGE_CACHE_SIZE - 128, rc);
797
798                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
799                                 rc -= 2;
800                         repbody->valid |= OBD_MD_LINKNAME;
801                         /* we need to report back size with NULL-terminator
802                          * because client expects that */
803                         repbody->eadatasize = rc + 1;
804                         if (repbody->eadatasize != reqbody->eadatasize)
805                                 CDEBUG(D_INODE, "%s: Read shorter symlink %d "
806                                        "on "DFID ", expected %d\n",
807                                        mdt_obd_name(info->mti_mdt),
808                                        rc, PFID(mdt_object_fid(o)),
809                                        reqbody->eadatasize - 1);
810                         /* NULL terminate */
811                         ((char *)ma->ma_lmm)[rc] = 0;
812
813                         /* If the total CDEBUG() size is larger than a page, it
814                          * will print a warning to the console; avoid this by
815                          * printing just the last part of the symlink. */
816                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
817                                print_limit < rc ? "..." : "", print_limit,
818                                (char *)ma->ma_lmm + rc - print_limit, rc);
819                         rc = 0;
820                 }
821         }
822
823         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
824                 repbody->max_cookiesize = 0;
825                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
826                 repbody->valid |= OBD_MD_FLMODEASIZE;
827                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
828                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
829                        repbody->max_cookiesize);
830         }
831
832         if (exp_connect_rmtclient(info->mti_exp) &&
833             reqbody->valid & OBD_MD_FLRMTPERM) {
834                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
835
836                 /* mdt_getattr_lock only */
837                 rc = mdt_pack_remote_perm(info, o, buf);
838                 if (rc) {
839                         repbody->valid &= ~OBD_MD_FLRMTPERM;
840                         repbody->aclsize = 0;
841                         RETURN(rc);
842                 } else {
843                         repbody->valid |= OBD_MD_FLRMTPERM;
844                         repbody->aclsize = sizeof(struct mdt_remote_perm);
845                 }
846         }
847 #ifdef CONFIG_FS_POSIX_ACL
848         else if ((exp_connect_flags(req->rq_export) & OBD_CONNECT_ACL) &&
849                  (reqbody->valid & OBD_MD_FLACL)) {
850                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
851                 buffer->lb_len = req_capsule_get_size(pill,
852                                                       &RMF_ACL, RCL_SERVER);
853                 if (buffer->lb_len > 0) {
854                         rc = mo_xattr_get(env, next, buffer,
855                                           XATTR_NAME_ACL_ACCESS);
856                         if (rc < 0) {
857                                 if (rc == -ENODATA) {
858                                         repbody->aclsize = 0;
859                                         repbody->valid |= OBD_MD_FLACL;
860                                         rc = 0;
861                                 } else if (rc == -EOPNOTSUPP) {
862                                         rc = 0;
863                                 } else {
864                                         CERROR("%s: unable to read "DFID
865                                                " ACL: rc = %d\n",
866                                                mdt_obd_name(info->mti_mdt),
867                                                PFID(mdt_object_fid(o)), rc);
868                                 }
869                         } else {
870                                 repbody->aclsize = rc;
871                                 repbody->valid |= OBD_MD_FLACL;
872                                 rc = 0;
873                         }
874                 }
875         }
876 #endif
877
878         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
879             info->mti_mdt->mdt_opts.mo_mds_capa &&
880             exp_connect_flags(info->mti_exp) & OBD_CONNECT_MDS_CAPA) {
881                 struct lustre_capa *capa;
882
883                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
884                 LASSERT(capa);
885                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
886                 rc = mo_capa_get(env, next, capa, 0);
887                 if (rc)
888                         RETURN(rc);
889                 repbody->valid |= OBD_MD_FLMDSCAPA;
890         }
891
892 out:
893         if (rc == 0)
894                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
895
896         RETURN(rc);
897 }
898
899 static int mdt_renew_capa(struct mdt_thread_info *info)
900 {
901         struct mdt_object  *obj = info->mti_object;
902         struct mdt_body    *body;
903         struct lustre_capa *capa, *c;
904         int rc;
905         ENTRY;
906
907         /* if the object doesn't exist, or the server has disabled capabilities,
908          * return directly; the client will find the OBD_MD_FLOSSCAPA flag not
909          * set in body->valid.
910          */
911         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
912             !(exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA))
913                 RETURN(0);
914
915         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
916         LASSERT(body != NULL);
917
918         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
919         LASSERT(c);
920
921         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
922         LASSERT(capa);
923
924         *capa = *c;
925         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
926         if (rc == 0)
927                 body->valid |= OBD_MD_FLOSSCAPA;
928         RETURN(rc);
929 }
930
931 int mdt_getattr(struct mdt_thread_info *info)
932 {
933         struct mdt_object       *obj = info->mti_object;
934         struct req_capsule      *pill = info->mti_pill;
935         struct mdt_body         *reqbody;
936         struct mdt_body         *repbody;
937         mode_t                   mode;
938         int rc, rc2;
939         ENTRY;
940
941         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
942         LASSERT(reqbody);
943
944         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
945                 rc = req_capsule_server_pack(pill);
946                 if (unlikely(rc))
947                         RETURN(err_serious(rc));
948                 rc = mdt_renew_capa(info);
949                 GOTO(out_shrink, rc);
950         }
951
952         LASSERT(obj != NULL);
953         LASSERT(lu_object_assert_exists(&obj->mot_obj));
954
955         mode = lu_object_attr(&obj->mot_obj);
956
957         /* old clients may not report the needed easize, so use the max value */
958         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
959                              reqbody->eadatasize == 0 ?
960                              info->mti_mdt->mdt_max_mdsize :
961                              reqbody->eadatasize);
962
963         rc = req_capsule_server_pack(pill);
964         if (unlikely(rc != 0))
965                 RETURN(err_serious(rc));
966
967         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
968         LASSERT(repbody != NULL);
969         repbody->eadatasize = 0;
970         repbody->aclsize = 0;
971
972         if (reqbody->valid & OBD_MD_FLRMTPERM)
973                 rc = mdt_init_ucred(info, reqbody);
974         else
975                 rc = mdt_check_ucred(info);
976         if (unlikely(rc))
977                 GOTO(out_shrink, rc);
978
979         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
980
981         /*
982          * Don't check the capability at all, because rename might do a getattr
983          * on a remote object, and at that time no capability is available.
984          */
985         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
986         rc = mdt_getattr_internal(info, obj, 0);
987         if (reqbody->valid & OBD_MD_FLRMTPERM)
988                 mdt_exit_ucred(info);
989         EXIT;
990 out_shrink:
991         mdt_client_compatibility(info);
992         rc2 = mdt_fix_reply(info);
993         if (rc == 0)
994                 rc = rc2;
995         return rc;
996 }
997
998 int mdt_is_subdir(struct mdt_thread_info *info)
999 {
1000         struct mdt_object     *o = info->mti_object;
1001         struct req_capsule    *pill = info->mti_pill;
1002         const struct mdt_body *body = info->mti_body;
1003         struct mdt_body       *repbody;
1004         int                    rc;
1005         ENTRY;
1006
1007         LASSERT(o != NULL);
1008
1009         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1010
1011         /*
1012          * We save the last checked parent fid in @repbody->fid1 for the
1013          * remote directory case.
1014          */
1015         LASSERT(fid_is_sane(&body->fid2));
1016         LASSERT(mdt_object_exists(o) && !mdt_object_remote(o));
1017         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1018                            &body->fid2, &repbody->fid1);
1019         if (rc == 0 || rc == -EREMOTE)
1020                 repbody->valid |= OBD_MD_FLID;
1021
1022         RETURN(rc);
1023 }
1024
1025 int mdt_swap_layouts(struct mdt_thread_info *info)
1026 {
1027         struct ptlrpc_request   *req = mdt_info_req(info);
1028         struct obd_export       *exp = req->rq_export;
1029         struct mdt_object       *o1, *o2, *o;
1030         struct mdt_lock_handle  *lh1, *lh2;
1031         struct mdc_swap_layouts *msl;
1032         int                      rc;
1033         ENTRY;
1034
1035         /* client does not support the layout lock, so layout swapping
1036          * is disabled.
1037          * FIXME: there is a problem for old clients which don't support
1038          * the layout lock yet. If those clients have already opened the file,
1039          * they won't be notified at all, so the old layout may still be
1040          * used to do I/O. This can be fixed after file release is landed by
1041          * doing an exclusive open and taking the full EX ibits lock. - Jinshan */
1042         if (!exp_connect_layout(exp))
1043                 RETURN(-EOPNOTSUPP);
1044
1045         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA1, RCL_CLIENT))
1046                 mdt_set_capainfo(info, 0, &info->mti_body->fid1,
1047                                  req_capsule_client_get(info->mti_pill,
1048                                                         &RMF_CAPA1));
1049
1050         if (req_capsule_get_size(info->mti_pill, &RMF_CAPA2, RCL_CLIENT))
1051                 mdt_set_capainfo(info, 1, &info->mti_body->fid2,
1052                                  req_capsule_client_get(info->mti_pill,
1053                                                         &RMF_CAPA2));
1054
1055         o1 = info->mti_object;
1056         o = o2 = mdt_object_find(info->mti_env, info->mti_mdt,
1057                                 &info->mti_body->fid2);
1058         if (IS_ERR(o))
1059                 GOTO(out, rc = PTR_ERR(o));
1060
1061         if (mdt_object_remote(o) || !mdt_object_exists(o)) /* remote or missing */
1062                 GOTO(put, rc = -ENOENT);
1063
1064         rc = lu_fid_cmp(&info->mti_body->fid1, &info->mti_body->fid2);
1065         if (unlikely(rc == 0)) /* same file, you kidding me? no-op. */
1066                 GOTO(put, rc);
1067
1068         if (rc < 0)
1069                 swap(o1, o2);
1070
1071         /* permission check. Make sure the calling process has permission
1072          * to write both files. */
1073         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
1074                                 MAY_WRITE);
1075         if (rc < 0)
1076                 GOTO(put, rc);
1077
1078         rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
1079                                 MAY_WRITE);
1080         if (rc < 0)
1081                 GOTO(put, rc);
1082
1083         msl = req_capsule_client_get(info->mti_pill, &RMF_SWAP_LAYOUTS);
1084         if (msl == NULL)
1085                 GOTO(put, rc = -EPROTO);
1086
1087         lh1 = &info->mti_lh[MDT_LH_NEW];
1088         mdt_lock_reg_init(lh1, LCK_EX);
1089         lh2 = &info->mti_lh[MDT_LH_OLD];
1090         mdt_lock_reg_init(lh2, LCK_EX);
1091
1092         rc = mdt_object_lock(info, o1, lh1, MDS_INODELOCK_LAYOUT,
1093                              MDT_LOCAL_LOCK);
1094         if (rc < 0)
1095                 GOTO(put, rc);
1096
1097         rc = mdt_object_lock(info, o2, lh2, MDS_INODELOCK_LAYOUT,
1098                              MDT_LOCAL_LOCK);
1099         if (rc < 0)
1100                 GOTO(unlock1, rc);
1101
1102         rc = mo_swap_layouts(info->mti_env, mdt_object_child(o1),
1103                              mdt_object_child(o2), msl->msl_flags);
1104         GOTO(unlock2, rc);
1105 unlock2:
1106         mdt_object_unlock(info, o2, lh2, rc);
1107 unlock1:
1108         mdt_object_unlock(info, o1, lh1, rc);
1109 put:
1110         mdt_object_put(info->mti_env, o);
1111 out:
1112         RETURN(rc);
1113 }
1114
1115 static int mdt_raw_lookup(struct mdt_thread_info *info,
1116                           struct mdt_object *parent,
1117                           const struct lu_name *lname,
1118                           struct ldlm_reply *ldlm_rep)
1119 {
1120         struct md_object *next = mdt_object_child(info->mti_object);
1121         const struct mdt_body *reqbody = info->mti_body;
1122         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1123         struct mdt_body *repbody;
1124         int rc;
1125         ENTRY;
1126
1127         if (reqbody->valid != OBD_MD_FLID)
1128                 RETURN(0);
1129
1130         LASSERT(!info->mti_cross_ref);
1131
1132         /* Only get the fid of this object by name */
1133         fid_zero(child_fid);
1134         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1135                         &info->mti_spec);
1136 #if 0
1137         /* XXX is raw_lookup possible as intent operation? */
1138         if (rc != 0) {
1139                 if (rc == -ENOENT)
1140                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1141                 RETURN(rc);
1142         } else
1143                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1144
1145         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1146 #endif
1147         if (rc == 0) {
1148                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1149                 repbody->fid1 = *child_fid;
1150                 repbody->valid = OBD_MD_FLID;
1151         }
1152         RETURN(1);
1153 }
1154
1155 /*
1156  * The UPDATE lock should be taken against the parent and released before exit;
1157  * the child_bits lock should be taken against the child and returned back:
1158  *            (1) a normal request should release the child lock;
1159  *            (2) an intent request will grant the lock to the client.
1160  */
1161 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1162                                  struct mdt_lock_handle *lhc,
1163                                  __u64 child_bits,
1164                                  struct ldlm_reply *ldlm_rep)
1165 {
1166         struct ptlrpc_request  *req       = mdt_info_req(info);
1167         struct mdt_body        *reqbody   = NULL;
1168         struct mdt_object      *parent    = info->mti_object;
1169         struct mdt_object      *child;
1170         struct md_object       *next      = mdt_object_child(parent);
1171         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1172         struct lu_name         *lname     = NULL;
1173         const char             *name      = NULL;
1174         int                     namelen   = 0;
1175         struct mdt_lock_handle *lhp       = NULL;
1176         struct ldlm_lock       *lock;
1177         struct ldlm_res_id     *res_id;
1178         int                     is_resent;
1179         int                     ma_need = 0;
1180         int                     rc;
1181
1182         ENTRY;
1183
1184         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1185         LASSERT(ergo(is_resent,
1186                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1187
1188         LASSERT(parent != NULL);
1189         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1190         if (name == NULL)
1191                 RETURN(err_serious(-EFAULT));
1192
1193         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1194                                        RCL_CLIENT) - 1;
1195         if (!info->mti_cross_ref) {
1196                 /*
1197                  * XXX: The check for "namelen == 0" is for getattr by fid
1198                  * (OBD_CONNECT_ATTRFID); otherwise do not allow an empty name,
1199                  * that is, the name must contain at least one character plus
1200                  * the terminating '\0'.
1201                  */
1202                 if (namelen == 0) {
1203                         reqbody = req_capsule_client_get(info->mti_pill,
1204                                                          &RMF_MDT_BODY);
1205                         if (unlikely(reqbody == NULL))
1206                                 RETURN(err_serious(-EFAULT));
1207
1208                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1209                                 RETURN(err_serious(-EINVAL));
1210
1211                         name = NULL;
1212                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1213                                "ldlm_rep = %p\n",
1214                                PFID(mdt_object_fid(parent)),
1215                                PFID(&reqbody->fid2), ldlm_rep);
1216                 } else {
1217                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1218                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1219                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1220                                name, ldlm_rep);
1221                 }
1222         }
1223         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1224
1225         if (unlikely(!mdt_object_exists(parent))) {
1226                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1227                                 &parent->mot_obj,
1228                                 "Parent doesn't exist!\n");
1229                 RETURN(-ESTALE);
1230         } else if (!info->mti_cross_ref) {
1231                 LASSERTF(!mdt_object_remote(parent),
1232                          "Parent "DFID" is on remote server\n",
1233                          PFID(mdt_object_fid(parent)));
1234         }
1235         if (lname) {
1236                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1237                 if (rc != 0) {
1238                         if (rc > 0)
1239                                 rc = 0;
1240                         RETURN(rc);
1241                 }
1242         }
1243
1244         if (info->mti_cross_ref) {
1245                 /* Only getattr on the child. Parent is on another node. */
1246                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1247                 child = parent;
1248                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1249                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1250
1251                 if (is_resent) {
1252                         /* Do not take lock for resent case. */
1253                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1254                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1255                                  lhc->mlh_reg_lh.cookie);
1256                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1257                                                 &lock->l_resource->lr_name));
1258                         LDLM_LOCK_PUT(lock);
1259                         rc = 0;
1260                 } else {
1261                         mdt_lock_handle_init(lhc);
1262                         mdt_lock_reg_init(lhc, LCK_PR);
1263
1264                         /*
1265                          * The object's name is on another MDS; no lookup lock
1266                          * is needed here, but an update lock is.
1267                          */
1268                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1269                         child_bits |= MDS_INODELOCK_PERM | MDS_INODELOCK_UPDATE;
1270
1271                         rc = mdt_object_lock(info, child, lhc, child_bits,
1272                                              MDT_LOCAL_LOCK);
1273                 }
1274                 if (rc == 0) {
1275                         /* Finally, we can get attr for child. */
1276                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1277                                          BYPASS_CAPA);
1278                         rc = mdt_getattr_internal(info, child, 0);
1279                         if (unlikely(rc != 0))
1280                                 mdt_object_unlock(info, child, lhc, 1);
1281                 }
1282                 RETURN(rc);
1283         }
1284
1285         if (lname) {
1286                 /* step 1: lock parent only if parent is a directory */
1287                 if (S_ISDIR(lu_object_attr(&parent->mot_obj))) {
1288                         lhp = &info->mti_lh[MDT_LH_PARENT];
1289                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1290                         rc = mdt_object_lock(info, parent, lhp,
1291                                              MDS_INODELOCK_UPDATE,
1292                                              MDT_LOCAL_LOCK);
1293                         if (unlikely(rc != 0))
1294                                 RETURN(rc);
1295                 }
1296
1297                 /* step 2: lookup child's fid by name */
1298                 fid_zero(child_fid);
1299                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1300                                 &info->mti_spec);
1301
1302                 if (rc != 0) {
1303                         if (rc == -ENOENT)
1304                                 mdt_set_disposition(info, ldlm_rep,
1305                                                     DISP_LOOKUP_NEG);
1306                         GOTO(out_parent, rc);
1307                 } else
1308                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1309         } else {
1310                 *child_fid = reqbody->fid2;
1311                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1312         }
1313
1314         /*
1315          * step 3: find the child object by fid and lock it,
1316          *         regardless of whether it is local or remote.
1317          */
1318         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1319
1320         if (unlikely(IS_ERR(child)))
1321                 GOTO(out_parent, rc = PTR_ERR(child));
1322         if (is_resent) {
1323                 /* Do not take lock for resent case. */
1324                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1325                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1326                          lhc->mlh_reg_lh.cookie);
1327
1328                 res_id = &lock->l_resource->lr_name;
1329                 if (!fid_res_name_eq(mdt_object_fid(child),
1330                                     &lock->l_resource->lr_name)) {
1331                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1332                                                  &lock->l_resource->lr_name),
1333                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1334                                  (unsigned long)res_id->name[0],
1335                                  (unsigned long)res_id->name[1],
1336                                  (unsigned long)res_id->name[2],
1337                                  PFID(mdt_object_fid(parent)));
1338                           CWARN("Although resent, still did not get child lock, "
1339                                 "parent: "DFID" child: "DFID"\n",
1340                                 PFID(mdt_object_fid(parent)),
1341                                 PFID(mdt_object_fid(child)));
1342                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1343                           LDLM_LOCK_PUT(lock);
1344                           GOTO(relock, 0);
1345                 }
1346                 LDLM_LOCK_PUT(lock);
1347                 rc = 0;
1348         } else {
1349                 bool try_layout = false;
1350
1351 relock:
1352                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1353                 mdt_lock_handle_init(lhc);
1354                 mdt_lock_reg_init(lhc, LCK_PR);
1355
1356                 if (!mdt_object_exists(child)) {
1357                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1358                                         &child->mot_obj,
1359                                         "Object doesn't exist!\n");
1360                         GOTO(out_child, rc = -ENOENT);
1361                 }
1362
1363                 if (!(child_bits & MDS_INODELOCK_UPDATE) &&
1364                       mdt_object_exists(child) && !mdt_object_remote(child)) {
1365                         struct md_attr *ma = &info->mti_attr;
1366
1367                         ma->ma_valid = 0;
1368                         ma->ma_need = MA_INODE;
1369                         rc = mdt_attr_get_complex(info, child, ma);
1370                         if (unlikely(rc != 0))
1371                                 GOTO(out_child, rc);
1372
1373                         /* If the file has not been changed for some time, we
1374                          * return not only a LOOKUP lock, but also an UPDATE
1375                          * lock, and this might save us an RPC on a later STAT.
1376                          * For directories, it also lets negative dentries
1377                          * start working for this dir. */
1378                         if (ma->ma_valid & MA_INODE &&
1379                             ma->ma_attr.la_valid & LA_CTIME &&
1380                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1381                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1382                                 child_bits |= MDS_INODELOCK_UPDATE;
1383                 }
1384
1385                 /* layout lock must be granted in a best-effort way
1386                  * for IT operations */
1387                 LASSERT(!(child_bits & MDS_INODELOCK_LAYOUT));
1388                 if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_GETATTR) &&
1389                     exp_connect_layout(info->mti_exp) &&
1390                     S_ISREG(lu_object_attr(&child->mot_obj)) &&
1391                     ldlm_rep != NULL) {
1392                         /* try to grant layout lock for regular file. */
1393                         try_layout = true;
1394                 }
1395
1396                 rc = 0;
1397                 if (try_layout) {
1398                         child_bits |= MDS_INODELOCK_LAYOUT;
1399                         /* try layout lock, it may fail to be granted due to
1400                          * contention at LOOKUP or UPDATE */
1401                         if (!mdt_object_lock_try(info, child, lhc, child_bits,
1402                                                  MDT_CROSS_LOCK)) {
1403                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1404                                 LASSERT(child_bits != 0);
1405                                 rc = mdt_object_lock(info, child, lhc,
1406                                                 child_bits, MDT_CROSS_LOCK);
1407                         } else {
1408                                 ma_need |= MA_LOV;
1409                         }
1410                 } else {
1411                         rc = mdt_object_lock(info, child, lhc, child_bits,
1412                                                 MDT_CROSS_LOCK);
1413                 }
1414                 if (unlikely(rc != 0))
1415                         GOTO(out_child, rc);
1416         }
1417
1418         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1419         /* Get MA_SOM attributes if update lock is given. */
1420         if (lock &&
1421             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1422             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1423                 ma_need |= MA_SOM;
1424
1425         /* finally, we can get attr for child. */
1426         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1427         rc = mdt_getattr_internal(info, child, ma_need);
1428         if (unlikely(rc != 0)) {
1429                 mdt_object_unlock(info, child, lhc, 1);
1430         } else if (lock) {
1431                 /* Debugging code. */
1432                 res_id = &lock->l_resource->lr_name;
1433                 LDLM_DEBUG(lock, "Returning lock to client");
1434                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1435                                          &lock->l_resource->lr_name),
1436                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1437                          (unsigned long)res_id->name[0],
1438                          (unsigned long)res_id->name[1],
1439                          (unsigned long)res_id->name[2],
1440                          PFID(mdt_object_fid(child)));
1441                 if (mdt_object_exists(child) && !mdt_object_remote(child))
1442                         mdt_pack_size2body(info, child);
1443         }
1444         if (lock)
1445                 LDLM_LOCK_PUT(lock);
1446
1447         EXIT;
1448 out_child:
1449         mdt_object_put(info->mti_env, child);
1450 out_parent:
1451         if (lhp)
1452                 mdt_object_unlock(info, parent, lhp, 1);
1453         return rc;
1454 }
1455
1456 /* normal handler: should release the child lock */
1457 int mdt_getattr_name(struct mdt_thread_info *info)
1458 {
1459         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1460         struct mdt_body        *reqbody;
1461         struct mdt_body        *repbody;
1462         int rc, rc2;
1463         ENTRY;
1464
1465         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1466         LASSERT(reqbody != NULL);
1467         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1468         LASSERT(repbody != NULL);
1469
1470         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1471         repbody->eadatasize = 0;
1472         repbody->aclsize = 0;
1473
1474         rc = mdt_init_ucred(info, reqbody);
1475         if (unlikely(rc))
1476                 GOTO(out_shrink, rc);
1477
1478         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1479         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1480                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1481                 lhc->mlh_reg_lh.cookie = 0;
1482         }
1483         mdt_exit_ucred(info);
1484         EXIT;
1485 out_shrink:
1486         mdt_client_compatibility(info);
1487         rc2 = mdt_fix_reply(info);
1488         if (rc == 0)
1489                 rc = rc2;
1490         return rc;
1491 }
1492
1493 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1494                          void *karg, void *uarg);
1495
1496 int mdt_set_info(struct mdt_thread_info *info)
1497 {
1498         struct ptlrpc_request *req = mdt_info_req(info);
1499         char *key;
1500         void *val;
1501         int keylen, vallen, rc = 0;
1502         ENTRY;
1503
1504         rc = req_capsule_server_pack(info->mti_pill);
1505         if (rc)
1506                 RETURN(rc);
1507
1508         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1509         if (key == NULL) {
1510                 DEBUG_REQ(D_HA, req, "no set_info key");
1511                 RETURN(-EFAULT);
1512         }
1513
1514         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1515                                       RCL_CLIENT);
1516
1517         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1518         if (val == NULL) {
1519                 DEBUG_REQ(D_HA, req, "no set_info val");
1520                 RETURN(-EFAULT);
1521         }
1522
1523         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1524                                       RCL_CLIENT);
1525
1526         /* Swab any part of val you need to here */
1527         if (KEY_IS(KEY_READ_ONLY)) {
1528                 req->rq_status = 0;
1529                 lustre_msg_set_status(req->rq_repmsg, 0);
1530
1531                 spin_lock(&req->rq_export->exp_lock);
1532                 if (*(__u32 *)val)
1533                         *exp_connect_flags_ptr(req->rq_export) |=
1534                                 OBD_CONNECT_RDONLY;
1535                 else
1536                         *exp_connect_flags_ptr(req->rq_export) &=
1537                                 ~OBD_CONNECT_RDONLY;
1538                 spin_unlock(&req->rq_export->exp_lock);
1539
1540         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1541                 struct changelog_setinfo *cs =
1542                         (struct changelog_setinfo *)val;
1543                 if (vallen != sizeof(*cs)) {
1544                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1545                         RETURN(-EINVAL);
1546                 }
1547                 if (ptlrpc_req_need_swab(req)) {
1548                         __swab64s(&cs->cs_recno);
1549                         __swab32s(&cs->cs_id);
1550                 }
1551
1552                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1553                                    vallen, val, NULL);
1554                 lustre_msg_set_status(req->rq_repmsg, rc);
1555
1556         } else {
1557                 RETURN(-EINVAL);
1558         }
1559         RETURN(0);
1560 }
1561
1562 /**
1563  * Top-level handler for MDT connection requests.
1564  */
1565 int mdt_connect(struct mdt_thread_info *info)
1566 {
1567         int rc;
1568         struct obd_connect_data *reply;
1569         struct obd_export *exp;
1570         struct ptlrpc_request *req = mdt_info_req(info);
1571
1572         rc = target_handle_connect(req);
1573         if (rc != 0)
1574                 return err_serious(rc);
1575
1576         LASSERT(req->rq_export != NULL);
1577         info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1578         rc = mdt_init_sec_level(info);
1579         if (rc != 0) {
1580                 obd_disconnect(class_export_get(req->rq_export));
1581                 return rc;
1582         }
1583
1584         /* To avoid exposing partially initialized connection flags, changes up
1585          * to this point have been staged in reply->ocd_connect_flags. Now that
1586          * connection handling has completed successfully, atomically update
1587          * the connect flags in the shared export data structure. LU-1623 */
1588         reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
1589         exp = req->rq_export;
1590         spin_lock(&exp->exp_lock);
1591         *exp_connect_flags_ptr(exp) = reply->ocd_connect_flags;
1592         spin_unlock(&exp->exp_lock);
1593
1594         rc = mdt_init_idmap(info);
1595         if (rc != 0)
1596                 obd_disconnect(class_export_get(req->rq_export));
1597
1598         return rc;
1599 }
1600
1601 int mdt_disconnect(struct mdt_thread_info *info)
1602 {
1603         int rc;
1604         ENTRY;
1605
1606         rc = target_handle_disconnect(mdt_info_req(info));
1607         if (rc)
1608                 rc = err_serious(rc);
1609         RETURN(rc);
1610 }
1611
1612 static int mdt_sendpage(struct mdt_thread_info *info,
1613                         struct lu_rdpg *rdpg, int nob)
1614 {
1615         struct ptlrpc_request   *req = mdt_info_req(info);
1616         struct obd_export       *exp = req->rq_export;
1617         struct ptlrpc_bulk_desc *desc;
1618         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1619         int                      tmpcount;
1620         int                      tmpsize;
1621         int                      i;
1622         int                      rc;
1623         ENTRY;
1624
1625         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, 1, BULK_PUT_SOURCE,
1626                                     MDS_BULK_PORTAL);
1627         if (desc == NULL)
1628                 RETURN(-ENOMEM);
1629
1630         if (!(exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE))
1631                 /* old client requires reply size in its PAGE_SIZE,
1632                  * which is rdpg->rp_count */
1633                 nob = rdpg->rp_count;
1634
1635         for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1636              i++, tmpcount -= tmpsize) {
1637                 tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
1638                 ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
1639         }
1640
1641         LASSERT(desc->bd_nob == nob);
1642         rc = target_bulk_io(exp, desc, lwi);
1643         ptlrpc_free_bulk_pin(desc);
1644         RETURN(rc);
1645 }
1646
1647 int mdt_readpage(struct mdt_thread_info *info)
1648 {
1649         struct mdt_object *object = info->mti_object;
1650         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1651         struct mdt_body   *reqbody;
1652         struct mdt_body   *repbody;
1653         int                rc;
1654         int                i;
1655         ENTRY;
1656
1657         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1658                 RETURN(err_serious(-ENOMEM));
1659
1660         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1661         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1662         if (reqbody == NULL || repbody == NULL)
1663                 RETURN(err_serious(-EFAULT));
1664
1665         /*
1666          * prepare @rdpg before calling lower layers and transferring it. Here
1667          * reqbody->size contains the offset at which to start reading and
1668          * reqbody->nlink contains the number of bytes to read.
1669          */
1670         rdpg->rp_hash = reqbody->size;
1671         if (rdpg->rp_hash != reqbody->size) {
1672                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1673                        rdpg->rp_hash, reqbody->size);
1674                 RETURN(-EFAULT);
1675         }
1676
1677         rdpg->rp_attrs = reqbody->mode;
1678         if (exp_connect_flags(info->mti_exp) & OBD_CONNECT_64BITHASH)
1679                 rdpg->rp_attrs |= LUDA_64BITHASH;
1680         rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
1681                                 exp_max_brw_size(info->mti_exp));
1682         rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
1683                           PAGE_CACHE_SHIFT;
1684         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1685         if (rdpg->rp_pages == NULL)
1686                 RETURN(-ENOMEM);
1687
1688         for (i = 0; i < rdpg->rp_npages; ++i) {
1689                 rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
1690                 if (rdpg->rp_pages[i] == NULL)
1691                         GOTO(free_rdpg, rc = -ENOMEM);
1692         }
1693
1694         /* call lower layers to fill allocated pages with directory data */
1695         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1696         if (rc < 0)
1697                 GOTO(free_rdpg, rc);
1698
1699         /* send pages to client */
1700         rc = mdt_sendpage(info, rdpg, rc);
1701
1702         EXIT;
1703 free_rdpg:
1704
1705         for (i = 0; i < rdpg->rp_npages; i++)
1706                 if (rdpg->rp_pages[i] != NULL)
1707                         __free_page(rdpg->rp_pages[i]);
1708         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1709
1710         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1711                 RETURN(0);
1712
1713         return rc;
1714 }
1715
1716 static int mdt_reint_internal(struct mdt_thread_info *info,
1717                               struct mdt_lock_handle *lhc,
1718                               __u32 op)
1719 {
1720         struct req_capsule      *pill = info->mti_pill;
1721         struct mdt_body         *repbody;
1722         int                      rc = 0, rc2;
1723         ENTRY;
1724
1725
1726         rc = mdt_reint_unpack(info, op);
1727         if (rc != 0) {
1728                 CERROR("Can't unpack reint, rc %d\n", rc);
1729                 RETURN(err_serious(rc));
1730         }
1731
1732         /* for replay (no_create) lmm is not needed, client has it already */
1733         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1734                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1735                                      info->mti_rr.rr_eadatalen);
1736
1737         /* llog cookies are always 0, the field is kept for compatibility */
1738         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1739                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER, 0);
1740
1741         rc = req_capsule_server_pack(pill);
1742         if (rc != 0) {
1743                 CERROR("Can't pack response, rc %d\n", rc);
1744                 RETURN(err_serious(rc));
1745         }
1746
1747         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1748                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1749                 LASSERT(repbody);
1750                 repbody->eadatasize = 0;
1751                 repbody->aclsize = 0;
1752         }
1753
1754         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1755
1756         /* for replay no cookie / lmm is needed, because the client already has it */
1757         if (info->mti_spec.no_create)
1758                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1759                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1760
1761         rc = mdt_init_ucred_reint(info);
1762         if (rc)
1763                 GOTO(out_shrink, rc);
1764
1765         rc = mdt_fix_attr_ucred(info, op);
1766         if (rc != 0)
1767                 GOTO(out_ucred, rc = err_serious(rc));
1768
1769         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1770                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1771                 GOTO(out_ucred, rc);
1772         }
1773         rc = mdt_reint_rec(info, lhc);
1774         EXIT;
1775 out_ucred:
1776         mdt_exit_ucred(info);
1777 out_shrink:
1778         mdt_client_compatibility(info);
1779         rc2 = mdt_fix_reply(info);
1780         if (rc == 0)
1781                 rc = rc2;
1782         return rc;
1783 }
1784
1785 static long mdt_reint_opcode(struct mdt_thread_info *info,
1786                              const struct req_format **fmt)
1787 {
1788         struct mdt_rec_reint *rec;
1789         long opc;
1790
1791         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1792         if (rec != NULL) {
1793                 opc = rec->rr_opcode;
1794                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1795                 if (opc < REINT_MAX && fmt[opc] != NULL)
1796                         req_capsule_extend(info->mti_pill, fmt[opc]);
1797                 else {
1798                         CERROR("%s: Unsupported opcode '%ld' from client '%s': "
1799                                "rc = %d\n", mdt_obd_name(info->mti_mdt), opc,
1800                                info->mti_mdt->mdt_ldlm_client->cli_name,
1801                                -EFAULT);
1802                         opc = err_serious(-EFAULT);
1803                 }
1804         } else {
1805                 opc = err_serious(-EFAULT);
1806         }
1807         return opc;
1808 }
1809
1810 int mdt_reint(struct mdt_thread_info *info)
1811 {
1812         long opc;
1813         int  rc;
1814
1815         static const struct req_format *reint_fmts[REINT_MAX] = {
1816                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1817                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1818                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1819                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1820                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1821                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1822                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR,
1823                 [REINT_RMENTRY] = &RQF_MDS_REINT_UNLINK
1824         };
1825
1826         ENTRY;
1827
1828         opc = mdt_reint_opcode(info, reint_fmts);
1829         if (opc >= 0) {
1830                 /*
1831                  * No lock is possible here from the client to pass to the
1832                  * reint code path.
1833                  */
1834                 rc = mdt_reint_internal(info, NULL, opc);
1835         } else {
1836                 rc = opc;
1837         }
1838
1839         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1840         RETURN(rc);
1841 }
1842
1843 /* this should sync the whole device */
1844 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1845 {
1846         struct dt_device *dt = mdt->mdt_bottom;
1847         int rc;
1848         ENTRY;
1849
1850         rc = dt->dd_ops->dt_sync(env, dt);
1851         RETURN(rc);
1852 }
1853
1854 /* this should sync this object */
1855 static int mdt_object_sync(struct mdt_thread_info *info)
1856 {
1857         struct md_object *next;
1858         int rc;
1859         ENTRY;
1860
1861         if (!mdt_object_exists(info->mti_object)) {
1862                 CWARN("Non-existent object "DFID"!\n",
1863                       PFID(mdt_object_fid(info->mti_object)));
1864                 RETURN(-ESTALE);
1865         }
1866         next = mdt_object_child(info->mti_object);
1867         rc = mo_object_sync(info->mti_env, next);
1868
1869         RETURN(rc);
1870 }
1871
1872 int mdt_sync(struct mdt_thread_info *info)
1873 {
1874         struct ptlrpc_request *req = mdt_info_req(info);
1875         struct req_capsule *pill = info->mti_pill;
1876         struct mdt_body *body;
1877         int rc;
1878         ENTRY;
1879
1880         /* The fid may be zero, so we call req_capsule_set() manually */
1881         req_capsule_set(pill, &RQF_MDS_SYNC);
1882
1883         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1884         if (body == NULL)
1885                 RETURN(err_serious(-EINVAL));
1886
1887         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1888                 RETURN(err_serious(-ENOMEM));
1889
1890         if (fid_seq(&body->fid1) == 0) {
1891                 /* sync the whole device */
1892                 rc = req_capsule_server_pack(pill);
1893                 if (rc == 0)
1894                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1895                 else
1896                         rc = err_serious(rc);
1897         } else {
1898                 /* sync an object */
1899                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1900                 if (rc == 0) {
1901                         rc = mdt_object_sync(info);
1902                         if (rc == 0) {
1903                                 const struct lu_fid *fid;
1904                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1905
1906                                 info->mti_attr.ma_need = MA_INODE;
1907                                 info->mti_attr.ma_valid = 0;
1908                                 rc = mdt_attr_get_complex(info, info->mti_object,
1909                                                           &info->mti_attr);
1910                                 if (rc == 0) {
1911                                         body = req_capsule_server_get(pill,
1912                                                                 &RMF_MDT_BODY);
1913                                         fid = mdt_object_fid(info->mti_object);
1914                                         mdt_pack_attr2body(info, body, la, fid);
1915                                 }
1916                         }
1917                 } else
1918                         rc = err_serious(rc);
1919         }
1920         if (rc == 0)
1921                 mdt_counter_incr(req, LPROC_MDT_SYNC);
1922
1923         RETURN(rc);
1924 }
1925
1926 /*
1927  * Quotacheck handler.
1928  * In-kernel quotacheck isn't supported any more.
1929  */
1930 int mdt_quotacheck(struct mdt_thread_info *info)
1931 {
1932         struct obd_quotactl     *oqctl;
1933         int                      rc;
1934         ENTRY;
1935
1936         oqctl = req_capsule_client_get(info->mti_pill, &RMF_OBD_QUOTACTL);
1937         if (oqctl == NULL)
1938                 RETURN(err_serious(-EPROTO));
1939
1940         rc = req_capsule_server_pack(info->mti_pill);
1941         if (rc)
1942                 RETURN(err_serious(rc));
1943
1944         /* deprecated, not used any more */
1945         RETURN(-EOPNOTSUPP);
1946 }
1947
1948 /*
1949  * Handle quota control requests to consult current usage/limit, but also
1950  * to configure quota enforcement
1951  */
1952 int mdt_quotactl(struct mdt_thread_info *info)
1953 {
1954         struct obd_export       *exp  = info->mti_exp;
1955         struct req_capsule      *pill = info->mti_pill;
1956         struct obd_quotactl     *oqctl, *repoqc;
1957         int                      id, rc;
1958         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
1959         ENTRY;
1960
1961         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1962         if (oqctl == NULL)
1963                 RETURN(err_serious(-EPROTO));
1964
1965         rc = req_capsule_server_pack(pill);
1966         if (rc)
1967                 RETURN(err_serious(rc));
1968
1969         switch (oqctl->qc_cmd) {
1970         case Q_QUOTACHECK:
1971         case LUSTRE_Q_INVALIDATE:
1972         case LUSTRE_Q_FINVALIDATE:
1973         case Q_QUOTAON:
1974         case Q_QUOTAOFF:
1975         case Q_INITQUOTA:
1976                 /* deprecated, not used any more */
1977                 RETURN(-EOPNOTSUPP);
1978                 /* master quotactl */
1979         case Q_GETINFO:
1980         case Q_SETINFO:
1981         case Q_SETQUOTA:
1982         case Q_GETQUOTA:
1983                 if (qmt == NULL)
1984                         RETURN(-EOPNOTSUPP);
1985                 /* slave quotactl */
1986         case Q_GETOINFO:
1987         case Q_GETOQUOTA:
1988                 break;
1989         default:
1990                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
1991                 RETURN(-EFAULT);
1992         }
1993
1994         /* map uid/gid for remote client */
1995         id = oqctl->qc_id;
1996         if (exp_connect_rmtclient(exp)) {
1997                 struct lustre_idmap_table *idmap;
1998
1999                 idmap = mdt_req2med(mdt_info_req(info))->med_idmap;
2000
2001                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
2002                              oqctl->qc_cmd != Q_GETINFO))
2003                         RETURN(-EPERM);
2004
2005                 if (oqctl->qc_type == USRQUOTA)
2006                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
2007                                                      oqctl->qc_id);
2008                 else if (oqctl->qc_type == GRPQUOTA)
2009                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
2010                                                      oqctl->qc_id);
2011                 else
2012                         RETURN(-EINVAL);
2013
2014                 if (id == CFS_IDMAP_NOTFOUND) {
2015                         CDEBUG(D_QUOTA, "no mapping for id %u\n", oqctl->qc_id);
2016                         RETURN(-EACCES);
2017                 }
2018         }
2019
2020         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
2021         if (repoqc == NULL)
2022                 RETURN(err_serious(-EFAULT));
2023
2024         if (oqctl->qc_id != id)
2025                 swap(oqctl->qc_id, id);
2026
2027         switch (oqctl->qc_cmd) {
2028
2029         case Q_GETINFO:
2030         case Q_SETINFO:
2031         case Q_SETQUOTA:
2032         case Q_GETQUOTA:
2033                 /* forward quotactl request to QMT */
2034                 rc = qmt_hdls.qmth_quotactl(info->mti_env, qmt, oqctl);
2035                 break;
2036
2037         case Q_GETOINFO:
2038         case Q_GETOQUOTA:
2039                 /* slave quotactl */
2040                 rc = lquotactl_slv(info->mti_env, info->mti_mdt->mdt_bottom,
2041                                    oqctl);
2042                 break;
2043
2044         default:
2045                 CERROR("Unsupported quotactl command: %d\n", oqctl->qc_cmd);
2046                 RETURN(-EFAULT);
2047         }
2048
2049         if (oqctl->qc_id != id)
2050                 swap(oqctl->qc_id, id);
2051
2052         *repoqc = *oqctl;
2053         RETURN(rc);
2054 }
2055
2056 /*
2057  * OBD PING and other handlers.
2058  */
2059 int mdt_obd_ping(struct mdt_thread_info *info)
2060 {
2061         int rc;
2062         ENTRY;
2063
2064         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2065
2066         rc = target_handle_ping(mdt_info_req(info));
2067         if (rc < 0)
2068                 rc = err_serious(rc);
2069         RETURN(rc);
2070 }
2071
2072 /*
2073  * OBD_IDX_READ handler
2074  */
2075 int mdt_obd_idx_read(struct mdt_thread_info *info)
2076 {
2077         struct mdt_device       *mdt = info->mti_mdt;
2078         struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
2079         struct idx_info         *req_ii, *rep_ii;
2080         int                      rc, i;
2081         ENTRY;
2082
2083         memset(rdpg, 0, sizeof(*rdpg));
2084         req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);
2085
2086         /* extract idx_info buffer from request & reply */
2087         req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
2088         if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
2089                 RETURN(err_serious(-EPROTO));
2090
2091         rc = req_capsule_server_pack(info->mti_pill);
2092         if (rc)
2093                 RETURN(err_serious(rc));
2094
2095         rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
2096         if (rep_ii == NULL)
2097                 RETURN(err_serious(-EFAULT));
2098         rep_ii->ii_magic = IDX_INFO_MAGIC;
2099
2100         /* extract hash to start with */
2101         rdpg->rp_hash = req_ii->ii_hash_start;
2102
2103         /* extract requested attributes */
2104         rdpg->rp_attrs = req_ii->ii_attrs;
2105
2106         /* check that fid packed in request is valid and supported */
2107         if (!fid_is_sane(&req_ii->ii_fid))
2108                 RETURN(-EINVAL);
2109         rep_ii->ii_fid = req_ii->ii_fid;
2110
2111         /* copy flags */
2112         rep_ii->ii_flags = req_ii->ii_flags;
2113
2114         /* compute number of pages to allocate, ii_count is the number of 4KB
2115          * containers */
2116         if (req_ii->ii_count <= 0)
2117                 GOTO(out, rc = -EFAULT);
2118         rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
2119                                exp_max_brw_size(info->mti_exp));
2120         rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
2121                                 PAGE_CACHE_SHIFT;
2122
2123         /* allocate pages to store the containers */
2124         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2125         if (rdpg->rp_pages == NULL)
2126                 GOTO(out, rc = -ENOMEM);
2127         for (i = 0; i < rdpg->rp_npages; i++) {
2128                 rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
2129                 if (rdpg->rp_pages[i] == NULL)
2130                         GOTO(out, rc = -ENOMEM);
2131         }
2132
2133         /* populate pages with key/record pairs */
2134         rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
2135         if (rc < 0)
2136                 GOTO(out, rc);
2137
2138         LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
2139                  "asked %d > %d\n", rc, rdpg->rp_count);
2140
2141         /* send pages to client */
2142         rc = mdt_sendpage(info, rdpg, rc);
2143
2144         GOTO(out, rc);
2145 out:
2146         if (rdpg->rp_pages) {
2147                 for (i = 0; i < rdpg->rp_npages; i++)
2148                         if (rdpg->rp_pages[i])
2149                                 __free_page(rdpg->rp_pages[i]);
2150                 OBD_FREE(rdpg->rp_pages,
2151                          rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2152         }
2153         return rc;
2154 }
2155
2156 int mdt_obd_log_cancel(struct mdt_thread_info *info)
2157 {
2158         return err_serious(-EOPNOTSUPP);
2159 }
2160
2161 int mdt_obd_qc_callback(struct mdt_thread_info *info)
2162 {
2163         return err_serious(-EOPNOTSUPP);
2164 }
2165
2166 /*
2167  * LLOG handlers.
2168  */
2169
2170 /** Clone llog ctxt from child (mdd).
2171  * This allows remote llog (replicator) access.
2172  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child
2173  * where the context was originally set up, or we can handle them directly.
2174  * We choose the latter, but that means any llog contexts set up by the
2175  * child must be accessible by the mdt.  So we clone the context into our
2176  * context list here.
2177  */
2178 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2179                                int idx)
2180 {
2181         struct md_device  *next = mdt->mdt_child;
2182         struct llog_ctxt *ctxt;
2183         int rc;
2184
2185         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2186                 return 0;
2187
2188         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2189         if (rc || ctxt == NULL) {
2190                 return 0;
2191         }
2192
2193         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2194         if (rc)
2195                 CERROR("Can't set mdt ctxt %d\n", rc);
2196
2197         return rc;
2198 }
2199
2200 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2201                                  struct mdt_device *mdt, int idx)
2202 {
2203         struct llog_ctxt *ctxt;
2204
2205         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2206         if (ctxt == NULL)
2207                 return 0;
2208         /* Put once for the get we just did, and once for the clone */
2209         llog_ctxt_put(ctxt);
2210         llog_ctxt_put(ctxt);
2211         return 0;
2212 }
2213
2214 int mdt_llog_create(struct mdt_thread_info *info)
2215 {
2216         int rc;
2217
2218         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2219         rc = llog_origin_handle_open(mdt_info_req(info));
2220         return (rc < 0 ? err_serious(rc) : rc);
2221 }
2222
2223 int mdt_llog_destroy(struct mdt_thread_info *info)
2224 {
2225         int rc;
2226
2227         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2228         rc = llog_origin_handle_destroy(mdt_info_req(info));
2229         return (rc < 0 ? err_serious(rc) : rc);
2230 }
2231
2232 int mdt_llog_read_header(struct mdt_thread_info *info)
2233 {
2234         int rc;
2235
2236         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2237         rc = llog_origin_handle_read_header(mdt_info_req(info));
2238         return (rc < 0 ? err_serious(rc) : rc);
2239 }
2240
2241 int mdt_llog_next_block(struct mdt_thread_info *info)
2242 {
2243         int rc;
2244
2245         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2246         rc = llog_origin_handle_next_block(mdt_info_req(info));
2247         return (rc < 0 ? err_serious(rc) : rc);
2248 }
2249
2250 int mdt_llog_prev_block(struct mdt_thread_info *info)
2251 {
2252         int rc;
2253
2254         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2255         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2256         return (rc < 0 ? err_serious(rc) : rc);
2257 }
2258
2259
2260 /*
2261  * DLM handlers.
2262  */
2263
2264 static struct ldlm_callback_suite cbs = {
2265         .lcs_completion = ldlm_server_completion_ast,
2266         .lcs_blocking   = ldlm_server_blocking_ast,
2267         .lcs_glimpse    = ldlm_server_glimpse_ast
2268 };
2269
2270 int mdt_enqueue(struct mdt_thread_info *info)
2271 {
2272         struct ptlrpc_request *req;
2273         int rc;
2274
2275         /*
2276          * info->mti_dlm_req already contains swapped and (if necessary)
2277          * converted dlm request.
2278          */
2279         LASSERT(info->mti_dlm_req != NULL);
2280
2281         req = mdt_info_req(info);
2282         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2283                                   req, info->mti_dlm_req, &cbs);
2284         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2285         return rc ? err_serious(rc) : req->rq_status;
2286 }
2287
2288 int mdt_convert(struct mdt_thread_info *info)
2289 {
2290         int rc;
2291         struct ptlrpc_request *req;
2292
2293         LASSERT(info->mti_dlm_req);
2294         req = mdt_info_req(info);
2295         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2296         return rc ? err_serious(rc) : req->rq_status;
2297 }
2298
2299 int mdt_bl_callback(struct mdt_thread_info *info)
2300 {
2301         CERROR("bl callbacks should not happen on MDS\n");
2302         LBUG();
2303         return err_serious(-EOPNOTSUPP);
2304 }
2305
2306 int mdt_cp_callback(struct mdt_thread_info *info)
2307 {
2308         CERROR("cp callbacks should not happen on MDS\n");
2309         LBUG();
2310         return err_serious(-EOPNOTSUPP);
2311 }
2312
2313 /*
2314  * sec context handlers
2315  */
2316 int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2317 {
2318         int rc;
2319
2320         rc = mdt_handle_idmap(info);
2321
2322         if (unlikely(rc)) {
2323                 struct ptlrpc_request *req = mdt_info_req(info);
2324                 __u32                  opc;
2325
2326                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2327                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2328                         sptlrpc_svc_ctx_invalidate(req);
2329         }
2330
2331         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2332
2333         return rc;
2334 }
2335
2336 /*
2337  * quota request handlers
2338  */
2339 int mdt_quota_dqacq(struct mdt_thread_info *info)
2340 {
2341         struct lu_device        *qmt = info->mti_mdt->mdt_qmt_dev;
2342         int                      rc;
2343         ENTRY;
2344
2345         if (qmt == NULL)
2346                 RETURN(err_serious(-EOPNOTSUPP));
2347
2348         rc = qmt_hdls.qmth_dqacq(info->mti_env, qmt, mdt_info_req(info));
2349         RETURN(rc);
2350 }
2351
2352 static struct mdt_object *mdt_obj(struct lu_object *o)
2353 {
2354         LASSERT(lu_device_is_mdt(o->lo_dev));
2355         return container_of0(o, struct mdt_object, mot_obj);
2356 }
2357
2358 struct mdt_object *mdt_object_new(const struct lu_env *env,
2359                                   struct mdt_device *d,
2360                                   const struct lu_fid *f)
2361 {
2362         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2363         struct lu_object *o;
2364         struct mdt_object *m;
2365         ENTRY;
2366
2367         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2368         o = lu_object_find(env, &d->mdt_lu_dev, f, &conf);
2369         if (unlikely(IS_ERR(o)))
2370                 m = (struct mdt_object *)o;
2371         else
2372                 m = mdt_obj(o);
2373         RETURN(m);
2374 }
2375
2376 struct mdt_object *mdt_object_find(const struct lu_env *env,
2377                                    struct mdt_device *d,
2378                                    const struct lu_fid *f)
2379 {
2380         struct lu_object *o;
2381         struct mdt_object *m;
2382         ENTRY;
2383
2384         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2385         o = lu_object_find(env, &d->mdt_lu_dev, f, NULL);
2386         if (unlikely(IS_ERR(o)))
2387                 m = (struct mdt_object *)o;
2388         else
2389                 m = mdt_obj(o);
2390
2391         RETURN(m);
2392 }
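
/*
 * Illustrative sketch, not part of the original source: the usual caller
 * pattern for mdt_object_find() as seen elsewhere in this file -- look the
 * object up by fid, check the embedded error pointer, and drop the
 * reference when done. The function name below is hypothetical.
 */
#if 0 /* example only */
static int mdt_example_find_put(struct mdt_thread_info *info,
                                const struct lu_fid *fid)
{
        struct mdt_object *o;

        o = mdt_object_find(info->mti_env, info->mti_mdt, fid);
        if (IS_ERR(o))
                return PTR_ERR(o);

        CDEBUG(D_INFO, "Found object "DFID"\n", PFID(mdt_object_fid(o)));

        /* drop the reference taken by lu_object_find() */
        mdt_object_put(info->mti_env, o);
        return 0;
}
#endif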
2393
2394 /**
2395  * Asynchronous commit for mdt device.
2396  *
2397  * Pass the asynchronous commit call down the MDS stack.
2398  *
2399  * \param env environment
2400  * \param mdt the mdt device
2401  */
2402 static void mdt_device_commit_async(const struct lu_env *env,
2403                                     struct mdt_device *mdt)
2404 {
2405         struct dt_device *dt = mdt->mdt_bottom;
2406         int rc;
2407
2408         rc = dt->dd_ops->dt_commit_async(env, dt);
2409         if (unlikely(rc != 0))
2410                 CWARN("async commit start failed with rc = %d\n", rc);
2411 }
2412
2413 /**
2414  * Mark the lock as "synchronous".
2415  *
2416  * Mark the lock to defer transaction commit to the unlock time.
2417  *
2418  * \param lock the lock to mark as "synchronous"
2419  *
2420  * \see mdt_is_lock_sync
2421  * \see mdt_save_lock
2422  */
2423 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2424 {
2425         lock->l_ast_data = (void*)1;
2426 }
2427
2428 /**
2429  * Check whether the lock is "synchronous" or not.
2430  *
2431  * \param lock the lock to check
2432  * \retval 1 the lock is "synchronous"
2433  * \retval 0 the lock isn't "synchronous"
2434  *
2435  * \see mdt_set_lock_sync
2436  * \see mdt_save_lock
2437  */
2438 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2439 {
2440         return lock->l_ast_data != NULL;
2441 }
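
/*
 * Illustrative sketch, not part of the original source: the sync-lock flag
 * above is simply l_ast_data used as a boolean, so marking a lock and
 * testing it later looks like the following.
 */
#if 0 /* example only */
static void mdt_example_sync_flag(struct ldlm_lock *lock)
{
        /* assumes the lock has not been marked yet */
        LASSERT(!mdt_is_lock_sync(lock));
        mdt_set_lock_sync(lock);        /* defer commit to unlock time */
        LASSERT(mdt_is_lock_sync(lock));
}
#endif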
2442
2443 /**
2444  * Blocking AST for mdt locks.
2445  *
2446  * Starts transaction commit in case of a COS lock conflict, or
2447  * defers such a commit to mdt_save_lock().
2448  *
2449  * \param lock the lock which blocks a request or cancelling lock
2450  * \param desc unused
2451  * \param data unused
2452  * \param flag indicates whether this is a cancelling or blocking callback
2453  * \retval 0
2454  * \see ldlm_blocking_ast_nocheck
2455  */
2456 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2457                      void *data, int flag)
2458 {
2459         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2460         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2461         int rc;
2462         ENTRY;
2463
2464         if (flag == LDLM_CB_CANCELING)
2465                 RETURN(0);
2466         lock_res_and_lock(lock);
2467         if (lock->l_blocking_ast != mdt_blocking_ast) {
2468                 unlock_res_and_lock(lock);
2469                 RETURN(0);
2470         }
2471         if (mdt_cos_is_enabled(mdt) &&
2472             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2473             lock->l_blocking_lock != NULL &&
2474             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2475                 mdt_set_lock_sync(lock);
2476         }
2477         rc = ldlm_blocking_ast_nocheck(lock);
2478
2479         /* There is no lock conflict if l_blocking_lock == NULL,
2480          * it indicates a blocking ast sent from ldlm_lock_decref_internal
2481          * when the last reference to a local lock was released */
2482         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2483                 struct lu_env env;
2484
2485                 rc = lu_env_init(&env, LCT_LOCAL);
2486                 if (unlikely(rc != 0))
2487                         CWARN("lu_env initialization failed with rc = %d, "
2488                               "cannot start asynchronous commit\n", rc);
2489                 else
2490                         mdt_device_commit_async(&env, mdt);
2491                 lu_env_fini(&env);
2492         }
2493         RETURN(rc);
2494 }
2495
2496 int mdt_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2497                         void *data, int flag)
2498 {
2499         struct lustre_handle lockh;
2500         int               rc;
2501
2502         switch (flag) {
2503         case LDLM_CB_BLOCKING:
2504                 ldlm_lock2handle(lock, &lockh);
2505                 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
2506                 if (rc < 0) {
2507                         CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2508                         RETURN(rc);
2509                 }
2510                 break;
2511         case LDLM_CB_CANCELING:
2512                 LDLM_DEBUG(lock, "Revoke remote lock\n");
2513                 break;
2514         default:
2515                 LBUG();
2516         }
2517         RETURN(0);
2518 }
2519
2520 int mdt_remote_object_lock(struct mdt_thread_info *mti,
2521                            struct mdt_object *o, struct lustre_handle *lh,
2522                            ldlm_mode_t mode, __u64 ibits)
2523 {
2524         struct ldlm_enqueue_info *einfo = &mti->mti_einfo;
2525         ldlm_policy_data_t *policy = &mti->mti_policy;
2526         int rc = 0;
2527         ENTRY;
2528
2529         LASSERT(mdt_object_remote(o));
2530
2531         LASSERT((ibits & MDS_INODELOCK_UPDATE));
2532
2533         memset(einfo, 0, sizeof(*einfo));
2534         einfo->ei_type = LDLM_IBITS;
2535         einfo->ei_mode = mode;
2536         einfo->ei_cb_bl = mdt_md_blocking_ast;
2537         einfo->ei_cb_cp = ldlm_completion_ast;
2538
2539         memset(policy, 0, sizeof(*policy));
2540         policy->l_inodebits.bits = ibits;
2541
2542         rc = mo_object_lock(mti->mti_env, mdt_object_child(o), lh, einfo,
2543                             policy);
2544         RETURN(rc);
2545 }
2546
2547 static int mdt_object_lock0(struct mdt_thread_info *info, struct mdt_object *o,
2548                             struct mdt_lock_handle *lh, __u64 ibits,
2549                             bool nonblock, int locality)
2550 {
2551         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2552         ldlm_policy_data_t *policy = &info->mti_policy;
2553         struct ldlm_res_id *res_id = &info->mti_res_id;
2554         __u64 dlmflags;
2555         int rc;
2556         ENTRY;
2557
2558         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2559         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2560         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2561         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2562
2563         if (mdt_object_remote(o)) {
2564                 if (locality == MDT_CROSS_LOCK) {
2565                         ibits &= ~(MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM);
2566                         ibits |= MDS_INODELOCK_LOOKUP;
2567                 } else {
2568                         LASSERTF(!(ibits &
2569                                   (MDS_INODELOCK_UPDATE | MDS_INODELOCK_PERM)),
2570                                 "%s: wrong bit "LPX64" for remote obj "DFID"\n",
2571                                 mdt_obd_name(info->mti_mdt), ibits,
2572                                 PFID(mdt_object_fid(o)));
2573                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2574                 }
2575                 /* No PDO lock on remote object */
2576                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2577         }
2578
2579         if (lh->mlh_type == MDT_PDO_LOCK) {
2580                 /* check for existence after the object is locked */
2581                 if (mdt_object_exists(o) == 0) {
2582                         /* Non-existent object shouldn't have PDO lock */
2583                         RETURN(-ESTALE);
2584                 } else {
2585                         /* Non-dir object shouldn't have PDO lock */
2586                         if (!S_ISDIR(lu_object_attr(&o->mot_obj)))
2587                                 RETURN(-ENOTDIR);
2588                 }
2589         }
2590
2591         memset(policy, 0, sizeof(*policy));
2592         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2593
2594         dlmflags = LDLM_FL_ATOMIC_CB;
2595         if (nonblock)
2596                 dlmflags |= LDLM_FL_BLOCK_NOWAIT;
2597
2598         /*
2599          * Take PDO lock on whole directory and build correct @res_id for lock
2600          * on part of directory.
2601          */
2602         if (lh->mlh_pdo_hash != 0) {
2603                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2604                 mdt_lock_pdo_mode(info, o, lh);
2605                 if (lh->mlh_pdo_mode != LCK_NL) {
2606                         /*
2607                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2608                          * is never going to be sent to client and we do not
2609                          * want it slowed down due to possible cancels.
2610                          */
2611                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2612                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2613                                           policy, res_id, dlmflags,
2614                                           &info->mti_exp->exp_handle.h_cookie);
2615                         if (unlikely(rc))
2616                                 RETURN(rc);
2617                 }
2618
2619                 /*
2620                  * Finish res_id initialization with the name hash, marking
2621                  * the part of the directory which is being modified.
2622                  */
2623                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2624         }
2625
2626         policy->l_inodebits.bits = ibits;
2627
2628         /*
2629          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2630          * going to be sent to the client. If it is, the mdt_intent_policy()
2631          * path will fix it up and turn the FL_LOCAL flag off.
2632          */
2633         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2634                           res_id, LDLM_FL_LOCAL_ONLY | dlmflags,
2635                           &info->mti_exp->exp_handle.h_cookie);
2636         if (rc)
2637                 mdt_object_unlock(info, o, lh, 1);
2638         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2639                  lh->mlh_pdo_hash != 0 &&
2640                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2641                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2642         }
2643
2644         RETURN(rc);
2645 }
2646
2647 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2648                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2649 {
2650         return mdt_object_lock0(info, o, lh, ibits, false, locality);
2651 }
2652
2653 int mdt_object_lock_try(struct mdt_thread_info *info, struct mdt_object *o,
2654                         struct mdt_lock_handle *lh, __u64 ibits, int locality)
2655 {
2656         struct mdt_lock_handle tmp = *lh;
2657         int rc;
2658
2659         rc = mdt_object_lock0(info, o, &tmp, ibits, true, locality);
2660         if (rc == 0)
2661                 *lh = tmp;
2662
2663         return rc == 0;
2664 }
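
/*
 * Illustrative sketch, not part of the original source: the try-then-fallback
 * pattern used with mdt_object_lock_try(), mirroring the layout lock handling
 * in mdt_getattr_name_lock() above. The function name and the split between
 * mandatory and optional bits are assumptions made for the example.
 */
#if 0 /* example only */
static int mdt_example_lock_optional_bits(struct mdt_thread_info *info,
                                          struct mdt_object *o,
                                          struct mdt_lock_handle *lh,
                                          __u64 bits, __u64 optional)
{
        /* non-blocking attempt with the enlarged bit set */
        if (mdt_object_lock_try(info, o, lh, bits | optional,
                                MDT_CROSS_LOCK))
                return 0;

        /* contention on the optional bits: take only the mandatory ones */
        return mdt_object_lock(info, o, lh, bits, MDT_CROSS_LOCK);
}
#endif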
2665
2666 /**
2667  * Save a lock within request object.
2668  *
2669  * Keep the lock referenced until either client ACK or transaction
2670  * commit happens, or release the lock immediately, depending on input
2671  * parameters. If COS is ON, a write lock is converted to a COS lock
2672  * before saving.
2673  *
2674  * \param info thread info object
2675  * \param h lock handle
2676  * \param mode lock mode
2677  * \param decref force immediate lock releasing
2678  */
2679 static
2680 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2681                    ldlm_mode_t mode, int decref)
2682 {
2683         ENTRY;
2684
2685         if (lustre_handle_is_used(h)) {
2686                 if (decref || !info->mti_has_trans ||
2687                     !(mode & (LCK_PW | LCK_EX))){
2688                         mdt_fid_unlock(h, mode);
2689                 } else {
2690                         struct mdt_device *mdt = info->mti_mdt;
2691                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2692                         struct ptlrpc_request *req = mdt_info_req(info);
2693                         int no_ack = 0;
2694
2695                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2696                                  h->cookie);
2697                         CDEBUG(D_HA, "request = %p reply state = %p"
2698                                " transno = "LPD64"\n",
2699                                req, req->rq_reply_state, req->rq_transno);
2700                         if (mdt_cos_is_enabled(mdt)) {
2701                                 no_ack = 1;
2702                                 ldlm_lock_downgrade(lock, LCK_COS);
2703                                 mode = LCK_COS;
2704                         }
2705                         ptlrpc_save_lock(req, h, mode, no_ack);
2706                         if (mdt_is_lock_sync(lock)) {
2707                                 CDEBUG(D_HA, "found sync-lock,"
2708                                        " async commit started\n");
2709                                 mdt_device_commit_async(info->mti_env,
2710                                                         mdt);
2711                         }
2712                         LDLM_LOCK_PUT(lock);
2713                 }
2714                 h->cookie = 0ull;
2715         }
2716
2717         EXIT;
2718 }
2719
2720 /**
2721  * Unlock mdt object.
2722  *
2723  * Immediately release the regular lock and the PDO lock, or save the
2724  * locks in the request and keep them referenced until client ACK or
2725  * transaction commit.
2726  *
2727  * \param info thread info object
2728  * \param o mdt object
2729  * \param lh mdt lock handle referencing regular and PDO locks
2730  * \param decref force immediate lock releasing
2731  */
2732 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2733                        struct mdt_lock_handle *lh, int decref)
2734 {
2735         ENTRY;
2736
2737         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2738         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2739
2740         if (lustre_handle_is_used(&lh->mlh_rreg_lh))
2741                 ldlm_lock_decref(&lh->mlh_rreg_lh, lh->mlh_rreg_mode);
2742
2743         EXIT;
2744 }
2745
2746 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2747                                         const struct lu_fid *f,
2748                                         struct mdt_lock_handle *lh,
2749                                         __u64 ibits)
2750 {
2751         struct mdt_object *o;
2752
2753         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2754         if (!IS_ERR(o)) {
2755                 int rc;
2756
2757                 rc = mdt_object_lock(info, o, lh, ibits,
2758                                      MDT_LOCAL_LOCK);
2759                 if (rc != 0) {
2760                         mdt_object_put(info->mti_env, o);
2761                         o = ERR_PTR(rc);
2762                 }
2763         }
2764         return o;
2765 }
2766
2767 void mdt_object_unlock_put(struct mdt_thread_info * info,
2768                            struct mdt_object * o,
2769                            struct mdt_lock_handle *lh,
2770                            int decref)
2771 {
2772         mdt_object_unlock(info, o, lh, decref);
2773         mdt_object_put(info->mti_env, o);
2774 }
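
/*
 * Illustrative sketch, not part of the original source: the intended pairing
 * of mdt_object_find_lock() with mdt_object_unlock_put() -- find the object
 * by fid, take a local lock on it, and release both the lock and the object
 * reference in one call. The function name and the use of MDT_LH_PARENT and
 * LCK_PW here are assumptions made for the example.
 */
#if 0 /* example only */
static int mdt_example_locked_access(struct mdt_thread_info *info,
                                     const struct lu_fid *fid)
{
        struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
        struct mdt_object *o;

        mdt_lock_reg_init(lh, LCK_PW);
        o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
        if (IS_ERR(o))
                return PTR_ERR(o);

        /* ... operate on the locked object here ... */

        /* decref == 1: release the lock and the reference immediately */
        mdt_object_unlock_put(info, o, lh, 1);
        return 0;
}
#endif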
2775
2776 struct mdt_handler *mdt_handler_find(__u32 opc, struct mdt_opc_slice *supported)
2777 {
2778         struct mdt_opc_slice *s;
2779         struct mdt_handler   *h;
2780
2781         h = NULL;
2782         for (s = supported; s->mos_hs != NULL; s++) {
2783                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2784                         h = s->mos_hs + (opc - s->mos_opc_start);
2785                         if (likely(h->mh_opc != 0))
2786                                 LASSERTF(h->mh_opc == opc,
2787                                          "opcode mismatch %d != %d\n",
2788                                          h->mh_opc, opc);
2789                         else
2790                                 h = NULL; /* unsupported opc */
2791                         break;
2792                 }
2793         }
2794         return h;
2795 }
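
/*
 * Illustrative sketch, not part of the original source: how a dispatcher
 * might use mdt_handler_find() -- look up the handler slice for an opcode and
 * treat a NULL result as an unsupported operation. The function name and the
 * -EOPNOTSUPP handling are assumptions, not the actual request path.
 */
#if 0 /* example only */
static int mdt_example_dispatch(__u32 opc, struct mdt_opc_slice *supported)
{
        struct mdt_handler *h;

        h = mdt_handler_find(opc, supported);
        if (h == NULL) {
                CERROR("unsupported opcode %u\n", opc);
                return -EOPNOTSUPP;
        }

        /* ... call into the handler found for this opcode ... */
        return 0;
}
#endif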
2796
2797 static int mdt_lock_resname_compat(struct mdt_device *m,
2798                                    struct ldlm_request *req)
2799 {
2800         /* XXX something... later. */
2801         return 0;
2802 }
2803
2804 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2805 {
2806         /* XXX something... later. */
2807         return 0;
2808 }
2809
2810 /*
2811  * Generic code handling requests that have a struct mdt_body passed in:
2812  *
2813  *  - extract the mdt_body from the request and save it in @info, if present;
2814  *
2815  *  - create the lu_object corresponding to the fid in the mdt_body, and
2816  *  save it in @info;
2817  *
2818  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2819  *  the object actually exists on storage (lu_object_exists()).
2820  *
2821  */
2822 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2823 {
2824         const struct mdt_body    *body;
2825         struct mdt_object        *obj;
2826         const struct lu_env      *env;
2827         struct req_capsule       *pill;
2828         int                       rc;
2829         ENTRY;
2830
2831         env = info->mti_env;
2832         pill = info->mti_pill;
2833
2834         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2835         if (body == NULL)
2836                 RETURN(-EFAULT);
2837
2838         if (!(body->valid & OBD_MD_FLID))
2839                 RETURN(0);
2840
2841         if (!fid_is_sane(&body->fid1)) {
2842                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2843                 RETURN(-EINVAL);
2844         }
2845
2846         /*
2847          * Do not get the size or any capa fields before we check that the
2848          * request actually contains a capability. Some requests, for
2849          * instance MDS_IS_SUBDIR, do not.
2850          */
2851         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2852             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2853                 mdt_set_capainfo(info, 0, &body->fid1,
2854                                  req_capsule_client_get(pill, &RMF_CAPA1));
2855
2856         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2857         if (!IS_ERR(obj)) {
2858                 if ((flags & HABEO_CORPUS) &&
2859                     !mdt_object_exists(obj)) {
2860                         mdt_object_put(env, obj);
2861                         /* for capability renewal, ENOENT will be handled
2862                          * in mdt_renew_capa */
2863                         if (body->valid & OBD_MD_FLOSSCAPA)
2864                                 rc = 0;
2865                         else
2866                                 rc = -ENOENT;
2867                 } else {
2868                         info->mti_object = obj;
2869                         rc = 0;
2870                 }
2871         } else
2872                 rc = PTR_ERR(obj);
2873
2874         RETURN(rc);
2875 }
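
/*
 * A rough sketch (the handler itself is hypothetical) of what a handler with
 * HABEO_CORPUS set may rely on once mdt_body_unpack() has succeeded and the
 * client supplied a valid fid:
 *
 *	static int example_getattr_like_handler(struct mdt_thread_info *info)
 *	{
 *		const struct mdt_body *body = info->mti_body;	(unpacked above)
 *		struct mdt_object *obj = info->mti_object;	(exists on storage)
 *
 *		... operate on obj and pack the reply via info->mti_pill ...
 *	}
 */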
2876
2877 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2878 {
2879         struct req_capsule *pill = info->mti_pill;
2880         int rc;
2881         ENTRY;
2882
2883         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2884                 rc = mdt_body_unpack(info, flags);
2885         else
2886                 rc = 0;
2887
2888         if (rc == 0 && (flags & HABEO_REFERO)) {
2889                 /* Pack reply. */
2890                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2891                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2892                                              info->mti_body->eadatasize);
2893                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2894                         req_capsule_set_size(pill, &RMF_LOGCOOKIES,
2895                                              RCL_SERVER, 0);
2896
2897                 rc = req_capsule_server_pack(pill);
2898         }
2899         RETURN(rc);
2900 }
2901
2902 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2903 {
2904         struct md_device *next = m->mdt_child;
2905
2906         return next->md_ops->mdo_init_capa_ctxt(env, next,
2907                                                 m->mdt_opts.mo_mds_capa,
2908                                                 m->mdt_capa_timeout,
2909                                                 m->mdt_capa_alg,
2910                                                 m->mdt_capa_keys);
2911 }
2912
2913 /*
2914  * Invoke the handler for this request opc. Also do the necessary
2915  * preprocessing (according to the handler's ->mh_flags) and post-processing
2916  * (setting of ->last_{xid,committed}).
2917  */
2918 static int mdt_req_handle(struct mdt_thread_info *info,
2919                           struct mdt_handler *h, struct ptlrpc_request *req)
2920 {
2921         int   rc, serious = 0;
2922         __u32 flags;
2923
2924         ENTRY;
2925
2926         LASSERT(h->mh_act != NULL);
2927         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2928         LASSERT(current->journal_info == NULL);
2929
2930         /*
2931          * Check for various OBD_FAIL_$PREF_$OPC_NET codes. Do _not_ try to
2932          * put the same checks into handlers like mdt_close(), mdt_reint(),
2933          * etc., without talking to the mdt authors first. Checking the same
2934          * thing there again is useless, and returning a 0 error without
2935          * packing a reply is buggy! Handlers either pack a reply or return
2936          * an error.
2937          *
2938          * We return 0 here and do not send any reply in order to emulate a
2939          * network failure if any NET-related fail_id has occurred.
2940          */
2941         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2942                 RETURN(0);
2943
2944         rc = 0;
2945         flags = h->mh_flags;
2946         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2947
2948         if (h->mh_fmt != NULL) {
2949                 req_capsule_set(info->mti_pill, h->mh_fmt);
2950                 rc = mdt_unpack_req_pack_rep(info, flags);
2951         }
2952
2953         if (rc == 0 && flags & MUTABOR &&
2954             exp_connect_flags(req->rq_export) & OBD_CONNECT_RDONLY)
2955                 /* should it be rq_status? */
2956                 rc = -EROFS;
2957
2958         if (rc == 0 && flags & HABEO_CLAVIS) {
2959                 struct ldlm_request *dlm_req;
2960
2961                 LASSERT(h->mh_fmt != NULL);
2962
2963                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2964                 if (dlm_req != NULL) {
2965                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2966                                         LDLM_IBITS &&
2967                                      dlm_req->lock_desc.l_policy_data.\
2968                                         l_inodebits.bits == 0)) {
2969                                 /*
2970                                  * A lock without inodebits makes no sense
2971                                  * and will oops later in ldlm. If a client
2972                                  * fails to set such bits, do not trigger an
2973                                  * assertion; for the liblustre flock case,
2974                                  * the bits may be zero.
2975                                  */
2976                                 rc = -EPROTO;
2977                         } else {
2978                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2979                                         rc = mdt_lock_resname_compat(
2980                                                                 info->mti_mdt,
2981                                                                 dlm_req);
2982                                 info->mti_dlm_req = dlm_req;
2983                         }
2984                 } else {
2985                         rc = -EFAULT;
2986                 }
2987         }
2988
2989         /* capability settings changed via /proc; reinitialize the capa ctxt */
2990         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2991                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2992                 info->mti_mdt->mdt_capa_conf = 0;
2993         }
2994
2995         if (likely(rc == 0)) {
2996                 /*
2997                  * Process the request; there can be two types of rc:
2998                  * 1) errors with msg unpack/pack, or other failures outside
2999                  * the operation itself; these count as serious errors;
3000                  * 2) errors during the fs operation, which should be placed
3001                  * in rq_status only.
3002                  */
3003                 rc = h->mh_act(info);
3004                 if (rc == 0 &&
3005                     !req->rq_no_reply && req->rq_reply_state == NULL) {
3006                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
3007                                   "pack reply and returned 0 error\n",
3008                                   h->mh_name);
3009                         LBUG();
3010                 }
3011                 serious = is_serious(rc);
3012                 rc = clear_serious(rc);
3013         } else
3014                 serious = 1;
3015
3016         req->rq_status = rc;
3017
3018         /*
3019          * ELDLM_* codes which are > 0 should go into rq_status only, as
3020          * should all non-serious errors.
3021          */
3022         if (rc > 0 || !serious)
3023                 rc = 0;
3024
3025         LASSERT(current->journal_info == NULL);
3026
3027         if (rc == 0 && (flags & HABEO_CLAVIS) &&
3028             info->mti_mdt->mdt_opts.mo_compat_resname) {
3029                 struct ldlm_reply *dlmrep;
3030
3031                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3032                 if (dlmrep != NULL)
3033                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3034         }
3035
3036         /* If we're DISCONNECTing, the mdt_export_data is already freed */
3037         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3038                 target_committed_to_req(req);
3039
3040         if (unlikely(req_is_replay(req) &&
3041                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3042                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3043                 LBUG();
3044         }
3045
3046         target_send_reply(req, rc, info->mti_fail_id);
3047         RETURN(0);
3048 }
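
/*
 * A condensed sketch (not a verbatim copy of the dispatch code) of how
 * mdt_handler_find() and mdt_req_handle() fit together on the common path:
 *
 *	h = mdt_handler_find(lustre_msg_get_opc(req->rq_reqmsg), supported);
 *	if (likely(h != NULL)) {
 *		rc = mdt_req_handle(info, h, req);
 *	} else {
 *		req->rq_status = -ENOTSUPP;
 *		rc = ptlrpc_error(req);
 *	}
 */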
3049
3050 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3051 {
3052         lh->mlh_type = MDT_NUL_LOCK;
3053         lh->mlh_reg_lh.cookie = 0ull;
3054         lh->mlh_reg_mode = LCK_MINMODE;
3055         lh->mlh_pdo_lh.cookie = 0ull;
3056         lh->mlh_pdo_mode = LCK_MINMODE;
3057         lh->mlh_rreg_lh.cookie = 0ull;
3058         lh->mlh_rreg_mode = LCK_MINMODE;
3059 }
3060
3061 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3062 {
3063         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3064         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3065 }
3066
3067 /*
3068  * Initialize fields of struct mdt_thread_info. Other fields are left
3069  * uninitialized, because it is too expensive to zero out the whole
3070  * mdt_thread_info (> 1K) on each request arrival.
3071  */
3072 static void mdt_thread_info_init(struct ptlrpc_request *req,
3073                                  struct mdt_thread_info *info)
3074 {
3075         int i;
3076
3077         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3078         info->mti_pill = &req->rq_pill;
3079
3080         /* lock handle */
3081         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3082                 mdt_lock_handle_init(&info->mti_lh[i]);
3083
3084         /* mdt device: it can be NULL during CONNECT */
3085         if (req->rq_export) {
3086                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3087                 info->mti_exp = req->rq_export;
3088         } else
3089                 info->mti_mdt = NULL;
3090         info->mti_env = req->rq_svc_thread->t_env;
3091         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3092         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3093         info->mti_mos = NULL;
3094
3095         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3096         info->mti_big_buf = LU_BUF_NULL;
3097         info->mti_body = NULL;
3098         info->mti_object = NULL;
3099         info->mti_dlm_req = NULL;
3100         info->mti_has_trans = 0;
3101         info->mti_cross_ref = 0;
3102         info->mti_opdata = 0;
3103         info->mti_big_lmm_used = 0;
3104
3105         /* Do not check for split by default. */
3106         info->mti_spec.no_create = 0;
3107         info->mti_spec.sp_rm_entry = 0;
3108 }
3109
3110 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3111 {
3112         int i;
3113
3114         req_capsule_fini(info->mti_pill);
3115         if (info->mti_object != NULL) {
3116                 mdt_object_put(info->mti_env, info->mti_object);
3117                 info->mti_object = NULL;
3118         }
3119
3120         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3121                 mdt_lock_handle_fini(&info->mti_lh[i]);
3122         info->mti_env = NULL;
3123
3124         if (unlikely(info->mti_big_buf.lb_buf != NULL))
3125                 lu_buf_free(&info->mti_big_buf);
3126 }
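
/*
 * The two functions above bracket every incoming request; a condensed sketch
 * of the per-request lifecycle (the dispatch step is only indicated):
 *
 *	info = ...;	(per-thread mdt_thread_info from the service environment)
 *	mdt_thread_info_init(req, info);
 *	rc = ...;	(dispatch the request, e.g. via mdt_req_handle())
 *	mdt_thread_info_fini(info);
 */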
3127
3128 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3129                                        struct obd_device *obd, int *process)
3130 {
3131         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3132         case MDS_CONNECT: /* Will never get here; listed for completeness. */
3133         case OST_CONNECT: /* Will never get here; listed for completeness. */
3134         case MDS_DISCONNECT:
3135         case OST_DISCONNECT:
3136         case OBD_IDX_READ:
3137                *process = 1;
3138                RETURN(0);
3139
3140         case MDS_CLOSE:
3141         case MDS_DONE_WRITING:
3142         case MDS_SYNC: /* used in unmounting */
3143         case OBD_PING:
3144         case MDS_REINT:
3145         case UPDATE_OBJ:
3146         case SEQ_QUERY:
3147         case FLD_QUERY:
3148         case LDLM_ENQUEUE:
3149                 *process = target_queue_recovery_request(req, obd);
3150                 RETURN(0);
3151
3152         default:
3153                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3154                 *process = -EAGAIN;
3155                 RETURN(0);
3156         }
3157 }
3158
3159 /*
3160  * Handle recovery. Return:
3161  *        +1: continue request processing;
3162  *       -ve: abort immediately with the given error code;
3163  *         0: send reply with error code in req->rq_status;
3164  */
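
/*
 * A condensed sketch of how a caller is expected to act on these return
 * values (not a verbatim copy of the dispatch code):
 *
 *	rc = mdt_recovery(info);
 *	if (rc == +1) {
 *		... continue processing the request ...
 *	} else if (rc < 0) {
 *		... abort; rc is returned to the ptlrpc layer ...
 *	} else {
 *		... rc == 0: a reply carrying req->rq_status is sent, stop here ...
 *	}
 */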
3165 static int mdt_recovery(struct mdt_thread_info *info)
3166 {
3167         struct ptlrpc_request *req = mdt_info_req(info);
3168         struct obd_device *obd;
3169
3170         ENTRY;
3171
3172         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3173         case MDS_CONNECT:
3174         case SEC_CTX_INIT:
3175         case SEC_CTX_INIT_CONT:
3176         case SEC_CTX_FINI:
3177                 {
3178 #if 0
3179                         int rc;
3180
3181                         rc = mdt_handle_idmap(info);
3182                         if (rc)
3183                                 RETURN(rc);
3184                         else
3185 #endif
3186                                 RETURN(+1);
3187                 }
3188         }
3189
3190         if (unlikely(!class_connected_export(req->rq_export))) {
3191                 CDEBUG(D_HA, "operation %d on unconnected MDS from %s\n",
3192                        lustre_msg_get_opc(req->rq_reqmsg),
3193                        libcfs_id2str(req->rq_peer));
3194                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
3195                  * mds_A will get -ENOTCONN (especially ping requests), which
3196                  * causes mds_A's deactivation timeout; then, when mds_A
3197                  * cleans up, the cleanup process is suspended because the
3198                  * deactivation timeout is not zero.
3199                  */
3200                 req->rq_status = -ENOTCONN;
3201                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3202                 RETURN(0);
3203         }
3204
3205         /* sanity check: if the xid matches, the request must be marked as
3206          * resent or replayed */
3207         if (req_xid_is_last(req)) {
3208                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3209                       (MSG_RESENT | MSG_REPLAY))) {
3210                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3211                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3212                                   lustre_msg_get_flags(req->rq_reqmsg));
3213                         LBUG();
3214                         req->rq_status = -ENOTCONN;
3215                         RETURN(-ENOTCONN);
3216                 }
3217         }
3218
3219         /* else: note the opposite is not always true; a