1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #define DEBUG_SUBSYSTEM S_MDS
50
51 #include <linux/module.h>
52 /*
53  * OBD_{ALLOC,FREE}*() macros
54  */
55 #include <obd_support.h>
56 /* struct ptlrpc_request */
57 #include <lustre_net.h>
58 /* struct obd_export */
59 #include <lustre_export.h>
60 /* struct obd_device */
61 #include <obd.h>
62 /* lu2dt_dev() */
63 #include <dt_object.h>
64 #include <lustre_mds.h>
65 #include <lustre_mdt.h>
66 #include <lustre_log.h>
67 #include "mdt_internal.h"
68 #ifdef HAVE_QUOTA_SUPPORT
69 # include <lustre_quota.h>
70 #endif
71 #include <lustre_acl.h>
72 #include <lustre_param.h>
73
74 mdl_mode_t mdt_mdl_lock_modes[] = {
75         [LCK_MINMODE] = MDL_MINMODE,
76         [LCK_EX]      = MDL_EX,
77         [LCK_PW]      = MDL_PW,
78         [LCK_PR]      = MDL_PR,
79         [LCK_CW]      = MDL_CW,
80         [LCK_CR]      = MDL_CR,
81         [LCK_NL]      = MDL_NL,
82         [LCK_GROUP]   = MDL_GROUP
83 };
84
85 ldlm_mode_t mdt_dlm_lock_modes[] = {
86         [MDL_MINMODE] = LCK_MINMODE,
87         [MDL_EX]      = LCK_EX,
88         [MDL_PW]      = LCK_PW,
89         [MDL_PR]      = LCK_PR,
90         [MDL_CW]      = LCK_CW,
91         [MDL_CR]      = LCK_CR,
92         [MDL_NL]      = LCK_NL,
93         [MDL_GROUP]   = LCK_GROUP
94 };
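/*
 * These two tables translate between the MD layer lock modes (MDL_*) and
 * the LDLM lock modes (LCK_*) in both directions.  The sketch below is
 * adapted from mdt_lock_pdo_mode() further down in this file and shows the
 * intended usage through the mdt_dlm_mode2mdl_mode() and
 * mdt_mdl_mode2dlm_mode() helpers:
 *
 *      mode = mdo_lock_mode(env, mdt_object_child(o),
 *                           mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
 *      if (mode != MDL_MINMODE)
 *              lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
 */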
95
96 /*
97  * Initialized in mdt_mod_init().
98  */
99 static unsigned long mdt_num_threads;
100 CFS_MODULE_PARM(mdt_num_threads, "ul", ulong, 0444,
101                 "number of MDS service threads to start "
102                 "(deprecated in favor of mds_num_threads)");
103
104 static unsigned long mds_num_threads;
105 CFS_MODULE_PARM(mds_num_threads, "ul", ulong, 0444,
106                 "number of MDS service threads to start");
107
108 static char *mds_num_cpts;
109 CFS_MODULE_PARM(mds_num_cpts, "c", charp, 0444,
110                 "CPU partitions MDS threads should run on");
111
112 static unsigned long mds_rdpg_num_threads;
113 CFS_MODULE_PARM(mds_rdpg_num_threads, "ul", ulong, 0444,
114                 "number of MDS readpage service threads to start");
115
116 static char *mds_rdpg_num_cpts;
117 CFS_MODULE_PARM(mds_rdpg_num_cpts, "c", charp, 0444,
118                 "CPU partitions MDS readpage threads should run on");
119
120 /* NB: these two should be removed along with setattr service in the future */
121 static unsigned long mds_attr_num_threads;
122 CFS_MODULE_PARM(mds_attr_num_threads, "ul", ulong, 0444,
123                 "number of MDS setattr service threads to start");
124
125 static char *mds_attr_num_cpts;
126 CFS_MODULE_PARM(mds_attr_num_cpts, "c", charp, 0444,
127                 "CPU partitions MDS setattr threads should run on");
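/*
 * Illustrative only (the value shown is not a default taken from this file,
 * and the exact module name depends on how the MDT module is packaged): the
 * parameters above are set at module load time, for example via a modprobe
 * configuration line such as
 *
 *      options mdt mds_num_threads=512
 *
 * The *_num_cpts strings follow the libcfs CPU-partition pattern syntax.
 */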
128
129 /* ptlrpc request handlers for the MDT. All handlers are
130  * grouped into several slices - struct mdt_opc_slice -
131  * and stored in the mdt_*_handlers[] arrays declared below.
132  */
133 struct mdt_handler {
134         /* The name of this handler. */
135         const char *mh_name;
136         /* Fail ID for this handler, checked at the beginning of the handler */
137         int         mh_fail_id;
138         /* Operation code for this handler */
139         __u32       mh_opc;
140         /* flags are listed in enum mdt_handler_flags below. */
141         __u32       mh_flags;
142         /* The actual handler function to execute. */
143         int (*mh_act)(struct mdt_thread_info *info);
144         /* Request format for this request. */
145         const struct req_format *mh_fmt;
146 };
147
148 enum mdt_handler_flags {
149         /*
150          * struct mdt_body is passed in the incoming message, and object
151          * identified by this fid exists on disk.
152          *
153          * "habeo corpus" == "I have a body"
154          */
155         HABEO_CORPUS = (1 << 0),
156         /*
157          * struct ldlm_request is passed in the incoming message.
158          *
159          * "habeo clavis" == "I have a key"
160          */
161         HABEO_CLAVIS = (1 << 1),
162         /*
163          * this request has fixed reply format, so that reply message can be
164          * packed by generic code.
165          *
166          * "habeo refero" == "I have a reply"
167          */
168         HABEO_REFERO = (1 << 2),
169         /*
170          * this request will modify something, so check whether the filesystem
171          * is readonly or not, then return -EROFS to client asap if necessary.
172          *
173          * "mutabor" == "I shall modify"
174          */
175         MUTABOR      = (1 << 3)
176 };
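/*
 * A hypothetical table entry, for illustration only, showing how the fields
 * of struct mdt_handler and the flags above combine (the real handler tables
 * appear later in this file and are built with helper macros):
 *
 *      {
 *              .mh_name    = "GETATTR",
 *              .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
 *              .mh_opc     = MDS_GETATTR,
 *              .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
 *              .mh_act     = mdt_getattr,
 *              .mh_fmt     = &RQF_MDS_GETATTR,
 *      },
 */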
177
178 struct mdt_opc_slice {
179         __u32               mos_opc_start;
180         int                 mos_opc_end;
181         struct mdt_handler *mos_hs;
182 };
183
184 static struct mdt_opc_slice mdt_regular_handlers[];
185 static struct mdt_opc_slice mdt_readpage_handlers[];
186 static struct mdt_opc_slice mdt_xmds_handlers[];
187 static struct mdt_opc_slice mdt_seq_handlers[];
188 static struct mdt_opc_slice mdt_fld_handlers[];
189
190 static struct mdt_device *mdt_dev(struct lu_device *d);
191 static int mdt_regular_handle(struct ptlrpc_request *req);
192 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
193 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
194                         struct getinfo_fid2path *fp);
195
196 static const struct lu_object_operations mdt_obj_ops;
197
198 /* Slab for MDT object allocation */
199 static cfs_mem_cache_t *mdt_object_kmem;
200
201 static struct lu_kmem_descr mdt_caches[] = {
202         {
203                 .ckd_cache = &mdt_object_kmem,
204                 .ckd_name  = "mdt_obj",
205                 .ckd_size  = sizeof(struct mdt_object)
206         },
207         {
208                 .ckd_cache = NULL
209         }
210 };
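/*
 * These descriptors are normally registered and released with the generic
 * lu_kmem_descr helpers (lu_kmem_init()/lu_kmem_fini()) from the module
 * init/exit path; see mdt_mod_init() mentioned above.
 */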
211
212 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
213 {
214         if (!rep)
215                 return 0;
216         return (rep->lock_policy_res1 & flag);
217 }
218
219 void mdt_clear_disposition(struct mdt_thread_info *info,
220                            struct ldlm_reply *rep, int flag)
221 {
222         if (info)
223                 info->mti_opdata &= ~flag;
224         if (rep)
225                 rep->lock_policy_res1 &= ~flag;
226 }
227
228 void mdt_set_disposition(struct mdt_thread_info *info,
229                          struct ldlm_reply *rep, int flag)
230 {
231         if (info)
232                 info->mti_opdata |= flag;
233         if (rep)
234                 rep->lock_policy_res1 |= flag;
235 }
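/*
 * The three helpers above mirror the same DISP_* flag into both the
 * per-request state (info->mti_opdata) and the ldlm reply returned to the
 * client (rep->lock_policy_res1), for example as done later in
 * mdt_getattr_name_lock():
 *
 *      mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
 */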
236
237 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
238 {
239         lh->mlh_pdo_hash = 0;
240         lh->mlh_reg_mode = lm;
241         lh->mlh_type = MDT_REG_LOCK;
242 }
243
244 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
245                        const char *name, int namelen)
246 {
247         lh->mlh_reg_mode = lm;
248         lh->mlh_type = MDT_PDO_LOCK;
249
250         if (name != NULL && (name[0] != '\0')) {
251                 LASSERT(namelen > 0);
252                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
253         } else {
254                 LASSERT(namelen == 0);
255                 lh->mlh_pdo_hash = 0ull;
256         }
257 }
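/*
 * For PDO (parallel directory operation) locks the entry name is hashed into
 * mlh_pdo_hash, so lock requests for different names under the same
 * directory need not conflict and can proceed in parallel.  A typical caller
 * (taken from mdt_getattr_name_lock() below):
 *
 *      lhp = &info->mti_lh[MDT_LH_PARENT];
 *      mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
 *      rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
 *                           MDT_LOCAL_LOCK);
 */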
258
259 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
260                               struct mdt_lock_handle *lh)
261 {
262         mdl_mode_t mode;
263         ENTRY;
264
265         /*
266          * Any dir access needs a couple of locks:
267          *
268          * 1) on the part of the dir we are going to look up in or modify;
269          *
270          * 2) on the whole dir to protect it from concurrent splitting and/or
271          * to flush the client's cache for readdir().
272          *
273          * So, for a given mode and object, this routine decides what lock mode
274          * to use for lock #2:
275          *
276          * 1) if the caller is going to look up in the dir, then we only need to
277          * protect the dir from being split - LCK_CR;
278          *
279          * 2) if the caller is going to modify the dir, then we need to protect
280          * the dir from being split and to flush the cache - LCK_CW;
281          *
282          * 3) if the caller is going to modify the dir and that dir seems ready
283          * for splitting, then we need to protect it from any type of access
284          * (lookup/modify/split) - LCK_EX. --bzzz
285          */
286
287         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
288         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
289
290         /*
291          * Ask the underlying layer its opinion about the preferable PDO lock
292          * mode, with the access type passed as the regular lock mode:
293          *
294          * - MDL_MINMODE means that the lower layer does not want to specify a
295          * lock mode;
296          *
297          * - MDL_NL means that no PDO lock should be taken. This is used in some
298          * cases; for example, non-splittable directories do not need PDO locks
299          * at all.
300          */
301         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
302                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
303
304         if (mode != MDL_MINMODE) {
305                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
306         } else {
307                 /*
308                  * The lower layer does not want to specify a locking mode.
309                  * We do it ourselves. No special protection is needed, just
310                  * flush the client's cache on modification and allow
311                  * concurrent modification.
312                  */
313                 switch (lh->mlh_reg_mode) {
314                 case LCK_EX:
315                         lh->mlh_pdo_mode = LCK_EX;
316                         break;
317                 case LCK_PR:
318                         lh->mlh_pdo_mode = LCK_CR;
319                         break;
320                 case LCK_PW:
321                         lh->mlh_pdo_mode = LCK_CW;
322                         break;
323                 default:
324                         CERROR("Unexpected lock type (0x%x)\n",
325                                (int)lh->mlh_reg_mode);
326                         LBUG();
327                 }
328         }
329
330         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
331         EXIT;
332 }
333
334 static int mdt_getstatus(struct mdt_thread_info *info)
335 {
336         struct mdt_device *mdt  = info->mti_mdt;
337         struct md_device  *next = mdt->mdt_child;
338         struct mdt_body   *repbody;
339         int                rc;
340
341         ENTRY;
342
343         rc = mdt_check_ucred(info);
344         if (rc)
345                 RETURN(err_serious(rc));
346
347         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
348                 RETURN(err_serious(-ENOMEM));
349
350         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
351         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
352         if (rc != 0)
353                 RETURN(rc);
354
355         repbody->valid |= OBD_MD_FLID;
356
357         if (mdt->mdt_opts.mo_mds_capa &&
358             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
359                 struct mdt_object  *root;
360                 struct lustre_capa *capa;
361
362                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
363                 if (IS_ERR(root))
364                         RETURN(PTR_ERR(root));
365
366                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
367                 LASSERT(capa);
368                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
369                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
370                                  0);
371                 mdt_object_put(info->mti_env, root);
372                 if (rc == 0)
373                         repbody->valid |= OBD_MD_FLMDSCAPA;
374         }
375
376         RETURN(rc);
377 }
378
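/*
 * MDS_STATFS handler.  Filesystem statistics are cached in the mdt_device
 * (mdt_osfs, protected by mdt_osfs_lock) and refreshed from the underlying
 * device only when the cached copy is older than OBD_STATFS_CACHE_SECONDS,
 * as the function body below shows.
 */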
379 static int mdt_statfs(struct mdt_thread_info *info)
380 {
381         struct ptlrpc_request           *req = mdt_info_req(info);
382         struct md_device                *next = info->mti_mdt->mdt_child;
383         struct ptlrpc_service_part      *svcpt;
384         struct obd_statfs               *osfs;
385         int                             rc;
386
387         ENTRY;
388
389         svcpt = info->mti_pill->rc_req->rq_rqbd->rqbd_svcpt;
390
391         /* This will trigger a watchdog timeout */
392         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
393                          (MDT_SERVICE_WATCHDOG_FACTOR *
394                           at_get(&svcpt->scp_at_estimate)) + 1);
395
396         rc = mdt_check_ucred(info);
397         if (rc)
398                 RETURN(err_serious(rc));
399
400         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
401                 RETURN(err_serious(-ENOMEM));
402
403         osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
404         if (!osfs)
405                 RETURN(-EPROTO);
406
407         /** statfs information is cached in the mdt_device */
408         if (cfs_time_before_64(info->mti_mdt->mdt_osfs_age,
409                                cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
410                 /** statfs data is too old, get up-to-date one */
411                 rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
412                 if (rc)
413                         RETURN(rc);
414                 cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
415                 info->mti_mdt->mdt_osfs = *osfs;
416                 info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
417                 cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
418         } else {
419                 /** use cached statfs data */
420                 cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
421                 *osfs = info->mti_mdt->mdt_osfs;
422                 cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
423         }
424
425         if (rc == 0)
426                 mdt_counter_incr(req, LPROC_MDT_STATFS);
427
428         RETURN(rc);
429 }
430
431 /**
432  * Pack SOM attributes into the reply.
433  * Call under a DLM UPDATE lock.
434  */
435 static void mdt_pack_size2body(struct mdt_thread_info *info,
436                                struct mdt_object *mo)
437 {
438         struct mdt_body *b;
439         struct md_attr *ma = &info->mti_attr;
440
441         LASSERT(ma->ma_attr.la_valid & LA_MODE);
442         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
443
444         /* Check if Size-on-MDS is supported, if this is a regular file,
445          * if SOM is enabled on the object, and if the SOM cache exists and
446          * is valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
447         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
448             !S_ISREG(ma->ma_attr.la_mode) ||
449             !mdt_object_is_som_enabled(mo) ||
450             !(ma->ma_valid & MA_SOM))
451                 return;
452
453         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
454         b->size = ma->ma_som->msd_size;
455         b->blocks = ma->ma_som->msd_blocks;
456 }
457
458 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
459                         const struct lu_attr *attr, const struct lu_fid *fid)
460 {
461         struct md_attr *ma = &info->mti_attr;
462
463         LASSERT(ma->ma_valid & MA_INODE);
464
465         b->atime      = attr->la_atime;
466         b->mtime      = attr->la_mtime;
467         b->ctime      = attr->la_ctime;
468         b->mode       = attr->la_mode;
469         b->size       = attr->la_size;
470         b->blocks     = attr->la_blocks;
471         b->uid        = attr->la_uid;
472         b->gid        = attr->la_gid;
473         b->flags      = attr->la_flags;
474         b->nlink      = attr->la_nlink;
475         b->rdev       = attr->la_rdev;
476
477         /* XXX: should pack the reply body according to la_valid */
478         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
479                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
480                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
481                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
482
483         if (!S_ISREG(attr->la_mode)) {
484                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
485         } else if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV)) {
486                 /* this means no objects are allocated on OSTs. */
487                 LASSERT(!(ma->ma_valid & MA_LOV));
488                 /* just ignore blocks occupied by extended attributes on the MDS */
489                 b->blocks = 0;
490                 /* if no object is allocated on OSTs, the size on the MDS is valid. b=22272 */
491                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
492         }
493
494         if (fid) {
495                 b->fid1 = *fid;
496                 b->valid |= OBD_MD_FLID;
497
498                 /* FIXME: these should be fixed when the new IGIF is ready. */
499                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
500                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
501                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
502
503                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
504                                 PFID(fid), b->nlink, b->mode, b->size);
505         }
506
507         if (info)
508                 mdt_body_reverse_idmap(info, b);
509
510         if (b->valid & OBD_MD_FLSIZE)
511                 CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
512                        PFID(fid), (unsigned long long)b->size);
513 }
514
515 static inline int mdt_body_has_lov(const struct lu_attr *la,
516                                    const struct mdt_body *body)
517 {
518         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
519                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
520 }
521
522 void mdt_client_compatibility(struct mdt_thread_info *info)
523 {
524         struct mdt_body       *body;
525         struct ptlrpc_request *req = mdt_info_req(info);
526         struct obd_export     *exp = req->rq_export;
527         struct md_attr        *ma = &info->mti_attr;
528         struct lu_attr        *la = &ma->ma_attr;
529         ENTRY;
530
531         if (exp->exp_connect_flags & OBD_CONNECT_LAYOUTLOCK)
532                 /* the client can deal with 16-bit lmm_stripe_count */
533                 RETURN_EXIT;
534
535         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
536
537         if (!mdt_body_has_lov(la, body))
538                 RETURN_EXIT;
539
540         /* now we have a reply with a LOV EA for a client that does not support
541          * the layout lock, so we have to clear the layout generation number */
542         if (S_ISREG(la->la_mode))
543                 ma->ma_lmm->lmm_layout_gen = 0;
544         EXIT;
545 }
546
547 static int mdt_big_lmm_get(const struct lu_env *env, struct mdt_object *o,
548                            struct md_attr *ma)
549 {
550         struct mdt_thread_info *info;
551         int rc;
552         ENTRY;
553
554         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
555         LASSERT(info != NULL);
556         LASSERT(ma->ma_lmm_size > 0);
557         LASSERT(info->mti_big_lmm_used == 0);
558         rc = mo_xattr_get(env, mdt_object_child(o), &LU_BUF_NULL,
559                           XATTR_NAME_LOV);
560         if (rc < 0)
561                 RETURN(rc);
562
563         /* big_lmm may need to be grown */
564         if (info->mti_big_lmmsize < rc) {
565                 int size = size_roundup_power2(rc);
566
567                 if (info->mti_big_lmmsize > 0) {
568                         /* free old buffer */
569                         LASSERT(info->mti_big_lmm);
570                         OBD_FREE_LARGE(info->mti_big_lmm,
571                                        info->mti_big_lmmsize);
572                         info->mti_big_lmm = NULL;
573                         info->mti_big_lmmsize = 0;
574                 }
575
576                 OBD_ALLOC_LARGE(info->mti_big_lmm, size);
577                 if (info->mti_big_lmm == NULL)
578                         RETURN(-ENOMEM);
579                 info->mti_big_lmmsize = size;
580         }
581         LASSERT(info->mti_big_lmmsize >= rc);
582
583         info->mti_buf.lb_buf = info->mti_big_lmm;
584         info->mti_buf.lb_len = info->mti_big_lmmsize;
585         rc = mo_xattr_get(env, mdt_object_child(o), &info->mti_buf,
586                           XATTR_NAME_LOV);
587         if (rc < 0)
588                 RETURN(rc);
589
590         info->mti_big_lmm_used = 1;
591         ma->ma_valid |= MA_LOV;
592         ma->ma_lmm = info->mti_big_lmm;
593         ma->ma_lmm_size = rc;
594
595         /* update mdt_max_mdsize so all clients will be aware of it */
596         if (info->mti_mdt->mdt_max_mdsize < rc)
597                 info->mti_mdt->mdt_max_mdsize = rc;
598
599         RETURN(0);
600 }
601
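/*
 * Fetch the LOV EA (XATTR_NAME_LOV) of an object into ma->ma_lmm.  The
 * preallocated reply buffer is tried first; if it is too small the xattr
 * read fails with -ERANGE and mdt_big_lmm_get() above is used instead,
 * which reads the EA into a larger, power-of-two sized per-thread buffer.
 */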
602 int mdt_attr_get_lov(struct mdt_thread_info *info,
603                      struct mdt_object *o, struct md_attr *ma)
604 {
605         struct md_object *next = mdt_object_child(o);
606         struct lu_buf    *buf = &info->mti_buf;
607         int rc;
608
609         buf->lb_buf = ma->ma_lmm;
610         buf->lb_len = ma->ma_lmm_size;
611         rc = mo_xattr_get(info->mti_env, next, buf, XATTR_NAME_LOV);
612         if (rc > 0) {
613                 ma->ma_lmm_size = rc;
614                 ma->ma_valid |= MA_LOV;
615                 rc = 0;
616         } else if (rc == -ENODATA) {
617                 /* no LOV EA */
618                 rc = 0;
619         } else if (rc == -ERANGE) {
620                 rc = mdt_big_lmm_get(info->mti_env, o, ma);
621         }
622
623         return rc;
624 }
625
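/*
 * Gather the attribute pieces requested in ma->ma_need (MA_INODE, MA_LOV,
 * MA_LMV, MA_HSM, MA_SOM, MA_ACL_DEF) and set the corresponding bit in
 * ma->ma_valid for each piece actually found.  Typical usage, as in
 * mdt_getattr_name_lock() below:
 *
 *      ma->ma_valid = 0;
 *      ma->ma_need  = MA_INODE;
 *      rc = mdt_attr_get_complex(info, child, ma);
 */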
626 int mdt_attr_get_complex(struct mdt_thread_info *info,
627                          struct mdt_object *o, struct md_attr *ma)
628 {
629         const struct lu_env *env = info->mti_env;
630         struct md_object    *next = mdt_object_child(o);
631         struct lu_buf       *buf = &info->mti_buf;
632         u32                  mode = lu_object_attr(&next->mo_lu);
633         int                  need = ma->ma_need;
634         int                  rc = 0, rc2;
635         ENTRY;
636
637         /* do we really need PFID */
638         LASSERT((ma->ma_need & MA_PFID) == 0);
639
640         ma->ma_valid = 0;
641
642         if (need & MA_INODE) {
643                 ma->ma_need = MA_INODE;
644                 rc = mo_attr_get(env, next, ma);
645                 if (rc)
646                         GOTO(out, rc);
647                 ma->ma_valid |= MA_INODE;
648         }
649
650         if (need & MA_LOV && (S_ISREG(mode) || S_ISDIR(mode))) {
651                 rc = mdt_attr_get_lov(info, o, ma);
652                 if (rc)
653                         GOTO(out, rc);
654         }
655
656         if (need & MA_LMV && S_ISDIR(mode)) {
657                 buf->lb_buf = ma->ma_lmv;
658                 buf->lb_len = ma->ma_lmv_size;
659                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
660                 if (rc2 > 0) {
661                         ma->ma_lmv_size = rc2;
662                         ma->ma_valid |= MA_LMV;
663                 } else if (rc2 == -ENODATA) {
664                         /* no LMV EA */
665                         ma->ma_lmv_size = 0;
666                 } else
667                         GOTO(out, rc = rc2);
668         }
669
670
671         if (rc == 0 && S_ISREG(mode) && (need & (MA_HSM | MA_SOM))) {
672                 struct lustre_mdt_attrs *lma;
673
674                 lma = (struct lustre_mdt_attrs *)info->mti_xattr_buf;
675                 CLASSERT(sizeof(*lma) <= sizeof(info->mti_xattr_buf));
676
677                 buf->lb_buf = lma;
678                 buf->lb_len = sizeof(info->mti_xattr_buf);
679                 rc = mo_xattr_get(env, next, buf, XATTR_NAME_LMA);
680                 if (rc > 0) {
681                         lustre_lma_swab(lma);
682                         /* Swab and copy LMA */
683                         if (need & MA_HSM) {
684                                 if (lma->lma_compat & LMAC_HSM)
685                                         ma->ma_hsm.mh_flags =
686                                                 lma->lma_flags & HSM_FLAGS_MASK;
687                                 else
688                                         ma->ma_hsm.mh_flags = 0;
689                                 ma->ma_valid |= MA_HSM;
690                         }
691                         /* Copy SOM */
692                         if (need & MA_SOM && lma->lma_compat & LMAC_SOM) {
693                                 LASSERT(ma->ma_som != NULL);
694                                 ma->ma_som->msd_ioepoch = lma->lma_ioepoch;
695                                 ma->ma_som->msd_size    = lma->lma_som_size;
696                                 ma->ma_som->msd_blocks  = lma->lma_som_blocks;
697                                 ma->ma_som->msd_mountid = lma->lma_som_mountid;
698                                 ma->ma_valid |= MA_SOM;
699                         }
700                         rc = 0;
701                 } else if (rc == -ENODATA) {
702                         rc = 0;
703                 }
704         }
705
706 #ifdef CONFIG_FS_POSIX_ACL
707         if (need & MA_ACL_DEF && S_ISDIR(mode)) {
708                 buf->lb_buf = ma->ma_acl;
709                 buf->lb_len = ma->ma_acl_size;
710                 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
711                 if (rc2 > 0) {
712                         ma->ma_acl_size = rc2;
713                         ma->ma_valid |= MA_ACL_DEF;
714                 } else if (rc2 == -ENODATA) {
715                         /* no ACLs */
716                         ma->ma_acl_size = 0;
717                 } else
718                         GOTO(out, rc = rc2);
719         }
720 #endif
721 out:
722         ma->ma_need = need;
723         CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
724                rc, ma->ma_valid, ma->ma_lmm);
725         RETURN(rc);
726 }
727
728 static int mdt_getattr_internal(struct mdt_thread_info *info,
729                                 struct mdt_object *o, int ma_need)
730 {
731         struct md_object        *next = mdt_object_child(o);
732         const struct mdt_body   *reqbody = info->mti_body;
733         struct ptlrpc_request   *req = mdt_info_req(info);
734         struct md_attr          *ma = &info->mti_attr;
735         struct lu_attr          *la = &ma->ma_attr;
736         struct req_capsule      *pill = info->mti_pill;
737         const struct lu_env     *env = info->mti_env;
738         struct mdt_body         *repbody;
739         struct lu_buf           *buffer = &info->mti_buf;
740         int                     rc;
741         int                     is_root;
742         ENTRY;
743
744         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
745                 RETURN(err_serious(-ENOMEM));
746
747         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
748
749         ma->ma_valid = 0;
750
751         rc = mdt_object_exists(o);
752         if (rc < 0) {
753                 /* This object is located on a remote node. */
754                 repbody->fid1 = *mdt_object_fid(o);
755                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
756                 GOTO(out, rc = 0);
757         }
758
759         buffer->lb_len = reqbody->eadatasize;
760         if (buffer->lb_len > 0)
761                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
762         else
763                 buffer->lb_buf = NULL;
764
765         /* If it is a dir object and the client requested the MEA, then return the MEA */
766         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
767             reqbody->valid & OBD_MD_MEA) {
768                 /* Assumption: MDT_MD size is enough for lmv size. */
769                 ma->ma_lmv = buffer->lb_buf;
770                 ma->ma_lmv_size = buffer->lb_len;
771                 ma->ma_need = MA_LMV | MA_INODE;
772         } else {
773                 ma->ma_lmm = buffer->lb_buf;
774                 ma->ma_lmm_size = buffer->lb_len;
775                 ma->ma_need = MA_LOV | MA_INODE;
776         }
777
778         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
779             reqbody->valid & OBD_MD_FLDIREA  &&
780             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
781                 /* get default stripe info for this dir. */
782                 ma->ma_need |= MA_LOV_DEF;
783         }
784         ma->ma_need |= ma_need;
785         if (ma->ma_need & MA_SOM)
786                 ma->ma_som = &info->mti_u.som.data;
787
788         rc = mdt_attr_get_complex(info, o, ma);
789         if (unlikely(rc)) {
790                 CERROR("getattr error for "DFID": %d\n",
791                         PFID(mdt_object_fid(o)), rc);
792                 RETURN(rc);
793         }
794
795         is_root = lu_fid_eq(mdt_object_fid(o), &info->mti_mdt->mdt_md_root_fid);
796
797         /* the Lustre protocol is supposed to return the default striping
798          * on the user-visible root if explicitly requested */
799         if ((ma->ma_valid & MA_LOV) == 0 && S_ISDIR(la->la_mode) &&
800             (ma->ma_need & MA_LOV_DEF && is_root) && (ma->ma_need & MA_LOV)) {
801                 struct lu_fid      rootfid;
802                 struct mdt_object *root;
803                 struct mdt_device *mdt = info->mti_mdt;
804
805                 rc = dt_root_get(env, mdt->mdt_bottom, &rootfid);
806                 if (rc)
807                         RETURN(rc);
808                 root = mdt_object_find(env, mdt, &rootfid);
809                 if (IS_ERR(root))
810                         RETURN(PTR_ERR(root));
811                 rc = mdt_attr_get_lov(info, root, ma);
812                 mdt_object_put(info->mti_env, root);
813                 if (unlikely(rc)) {
814                         CERROR("getattr error for "DFID": %d\n",
815                                         PFID(mdt_object_fid(o)), rc);
816                         RETURN(rc);
817                 }
818         }
819
820         if (likely(ma->ma_valid & MA_INODE))
821                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
822         else
823                 RETURN(-EFAULT);
824
825         if (mdt_body_has_lov(la, reqbody)) {
826                 if (ma->ma_valid & MA_LOV) {
827                         LASSERT(ma->ma_lmm_size);
828                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
829                         repbody->eadatasize = ma->ma_lmm_size;
830                         if (S_ISDIR(la->la_mode))
831                                 repbody->valid |= OBD_MD_FLDIREA;
832                         else
833                                 repbody->valid |= OBD_MD_FLEASIZE;
834                 }
835                 if (ma->ma_valid & MA_LMV) {
836                         LASSERT(S_ISDIR(la->la_mode));
837                         repbody->eadatasize = ma->ma_lmv_size;
838                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
839                 }
840         } else if (S_ISLNK(la->la_mode) &&
841                    reqbody->valid & OBD_MD_LINKNAME) {
842                 buffer->lb_buf = ma->ma_lmm;
843                 /* eadatasize from client includes NULL-terminator, so
844                  * there is no need to read it */
845                 buffer->lb_len = reqbody->eadatasize - 1;
846                 rc = mo_readlink(env, next, buffer);
847                 if (unlikely(rc <= 0)) {
848                         CERROR("readlink failed: %d\n", rc);
849                         rc = -EFAULT;
850                 } else {
851                         int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
852
853                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
854                                 rc -= 2;
855                         repbody->valid |= OBD_MD_LINKNAME;
856                         /* we need to report back size with NULL-terminator
857                          * because client expects that */
858                         repbody->eadatasize = rc + 1;
859                         if (repbody->eadatasize != reqbody->eadatasize)
860                                 CERROR("Read shorter symlink %d, expected %d\n",
861                                        rc, reqbody->eadatasize - 1);
862                         /* NULL terminate */
863                         ((char *)ma->ma_lmm)[rc] = 0;
864
865                         /* If the total CDEBUG() size is larger than a page, it
866                          * will print a warning to the console, avoid this by
867                          * printing just the last part of the symlink. */
868                         CDEBUG(D_INODE, "symlink dest %s%.*s, len = %d\n",
869                                print_limit < rc ? "..." : "", print_limit,
870                                (char *)ma->ma_lmm + rc - print_limit, rc);
871                         rc = 0;
872                 }
873         }
874
875         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
876                 repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
877                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
878                 repbody->valid |= OBD_MD_FLMODEASIZE;
879                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
880                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
881                        repbody->max_cookiesize);
882         }
883
884         if (exp_connect_rmtclient(info->mti_exp) &&
885             reqbody->valid & OBD_MD_FLRMTPERM) {
886                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
887
888                 /* mdt_getattr_lock only */
889                 rc = mdt_pack_remote_perm(info, o, buf);
890                 if (rc) {
891                         repbody->valid &= ~OBD_MD_FLRMTPERM;
892                         repbody->aclsize = 0;
893                         RETURN(rc);
894                 } else {
895                         repbody->valid |= OBD_MD_FLRMTPERM;
896                         repbody->aclsize = sizeof(struct mdt_remote_perm);
897                 }
898         }
899 #ifdef CONFIG_FS_POSIX_ACL
900         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
901                  (reqbody->valid & OBD_MD_FLACL)) {
902                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
903                 buffer->lb_len = req_capsule_get_size(pill,
904                                                       &RMF_ACL, RCL_SERVER);
905                 if (buffer->lb_len > 0) {
906                         rc = mo_xattr_get(env, next, buffer,
907                                           XATTR_NAME_ACL_ACCESS);
908                         if (rc < 0) {
909                                 if (rc == -ENODATA) {
910                                         repbody->aclsize = 0;
911                                         repbody->valid |= OBD_MD_FLACL;
912                                         rc = 0;
913                                 } else if (rc == -EOPNOTSUPP) {
914                                         rc = 0;
915                                 } else {
916                                         CERROR("got acl size: %d\n", rc);
917                                 }
918                         } else {
919                                 repbody->aclsize = rc;
920                                 repbody->valid |= OBD_MD_FLACL;
921                                 rc = 0;
922                         }
923                 }
924         }
925 #endif
926
927         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
928             info->mti_mdt->mdt_opts.mo_mds_capa &&
929             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
930                 struct lustre_capa *capa;
931
932                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
933                 LASSERT(capa);
934                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
935                 rc = mo_capa_get(env, next, capa, 0);
936                 if (rc)
937                         RETURN(rc);
938                 repbody->valid |= OBD_MD_FLMDSCAPA;
939         }
940
941 out:
942         if (rc == 0)
943                 mdt_counter_incr(req, LPROC_MDT_GETATTR);
944
945         RETURN(rc);
946 }
947
948 static int mdt_renew_capa(struct mdt_thread_info *info)
949 {
950         struct mdt_object  *obj = info->mti_object;
951         struct mdt_body    *body;
952         struct lustre_capa *capa, *c;
953         int rc;
954         ENTRY;
955
956         /* if the object doesn't exist, or the server has disabled
957          * capabilities, return directly; the client will find the
958          * OBD_MD_FLOSSCAPA flag not set in body->valid.
959          */
960         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
961             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
962                 RETURN(0);
963
964         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
965         LASSERT(body != NULL);
966
967         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
968         LASSERT(c);
969
970         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
971         LASSERT(capa);
972
973         *capa = *c;
974         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
975         if (rc == 0)
976                 body->valid |= OBD_MD_FLOSSCAPA;
977         RETURN(rc);
978 }
979
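/*
 * MDS_GETATTR handler.  The reply EA buffer (RMF_MDT_MD) is sized from the
 * client-supplied eadatasize (falling back to mdt_max_mdsize for old
 * clients), the reply is packed, user credentials are set up, and the real
 * work is delegated to mdt_getattr_internal(); mdt_fix_reply() then shrinks
 * the reply buffers to the sizes actually used.
 */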
980 static int mdt_getattr(struct mdt_thread_info *info)
981 {
982         struct mdt_object       *obj = info->mti_object;
983         struct req_capsule      *pill = info->mti_pill;
984         struct mdt_body         *reqbody;
985         struct mdt_body         *repbody;
986         mode_t                   mode;
987         int rc, rc2;
988         ENTRY;
989
990         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
991         LASSERT(reqbody);
992
993         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
994                 rc = req_capsule_server_pack(pill);
995                 if (unlikely(rc))
996                         RETURN(err_serious(rc));
997                 rc = mdt_renew_capa(info);
998                 GOTO(out_shrink, rc);
999         }
1000
1001         LASSERT(obj != NULL);
1002         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
1003
1004         mode = lu_object_attr(&obj->mot_obj.mo_lu);
1005
1006         /* old clients may not report the needed easize; use the max value then */
1007         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1008                              reqbody->eadatasize == 0 ?
1009                              info->mti_mdt->mdt_max_mdsize :
1010                              reqbody->eadatasize);
1011
1012         rc = req_capsule_server_pack(pill);
1013         if (unlikely(rc != 0))
1014                 RETURN(err_serious(rc));
1015
1016         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1017         LASSERT(repbody != NULL);
1018         repbody->eadatasize = 0;
1019         repbody->aclsize = 0;
1020
1021         if (reqbody->valid & OBD_MD_FLRMTPERM)
1022                 rc = mdt_init_ucred(info, reqbody);
1023         else
1024                 rc = mdt_check_ucred(info);
1025         if (unlikely(rc))
1026                 GOTO(out_shrink, rc);
1027
1028         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1029         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1030
1031         /*
1032          * Don't check capability at all, because rename might getattr for
1033          * remote obj, and at that time no capability is available.
1034          */
1035         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
1036         rc = mdt_getattr_internal(info, obj, 0);
1037         if (reqbody->valid & OBD_MD_FLRMTPERM)
1038                 mdt_exit_ucred(info);
1039         EXIT;
1040 out_shrink:
1041         mdt_client_compatibility(info);
1042         rc2 = mdt_fix_reply(info);
1043         if (rc == 0)
1044                 rc = rc2;
1045         return rc;
1046 }
1047
1048 static int mdt_is_subdir(struct mdt_thread_info *info)
1049 {
1050         struct mdt_object     *o = info->mti_object;
1051         struct req_capsule    *pill = info->mti_pill;
1052         const struct mdt_body *body = info->mti_body;
1053         struct mdt_body       *repbody;
1054         int                    rc;
1055         ENTRY;
1056
1057         LASSERT(o != NULL);
1058
1059         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1060
1061         /*
1062          * We save the last checked parent fid in @repbody->fid1 for the
1063          * remote directory case.
1064          */
1065         LASSERT(fid_is_sane(&body->fid2));
1066         LASSERT(mdt_object_exists(o) > 0);
1067         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
1068                            &body->fid2, &repbody->fid1);
1069         if (rc == 0 || rc == -EREMOTE)
1070                 repbody->valid |= OBD_MD_FLID;
1071
1072         RETURN(rc);
1073 }
1074
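/*
 * Fast path for a pure name-to-fid lookup: when the client asked for nothing
 * but the fid (reqbody->valid == OBD_MD_FLID), resolve the name and pack the
 * resulting fid directly into the reply body.  Returns 0 when this path does
 * not apply (normal getattr processing continues in the caller) and a
 * positive value once the request has been handled here.
 */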
1075 static int mdt_raw_lookup(struct mdt_thread_info *info,
1076                           struct mdt_object *parent,
1077                           const struct lu_name *lname,
1078                           struct ldlm_reply *ldlm_rep)
1079 {
1080         struct md_object *next = mdt_object_child(info->mti_object);
1081         const struct mdt_body *reqbody = info->mti_body;
1082         struct lu_fid *child_fid = &info->mti_tmp_fid1;
1083         struct mdt_body *repbody;
1084         int rc;
1085         ENTRY;
1086
1087         if (reqbody->valid != OBD_MD_FLID)
1088                 RETURN(0);
1089
1090         LASSERT(!info->mti_cross_ref);
1091
1092         /* Only get the fid of this object by name */
1093         fid_zero(child_fid);
1094         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1095                         &info->mti_spec);
1096 #if 0
1097         /* XXX is raw_lookup possible as intent operation? */
1098         if (rc != 0) {
1099                 if (rc == -ENOENT)
1100                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
1101                 RETURN(rc);
1102         } else
1103                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1104
1105         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1106 #endif
1107         if (rc == 0) {
1108                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1109                 repbody->fid1 = *child_fid;
1110                 repbody->valid = OBD_MD_FLID;
1111         }
1112         RETURN(1);
1113 }
1114
1115 /*
1116  * The UPDATE lock should be taken against the parent and released before
1117  * exit; the child_bits lock should be taken against the child and handed back:
1118  *            (1) a normal request should release the child lock;
1119  *            (2) an intent request will grant the lock to the client.
1120  */
1121 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
1122                                  struct mdt_lock_handle *lhc,
1123                                  __u64 child_bits,
1124                                  struct ldlm_reply *ldlm_rep)
1125 {
1126         struct ptlrpc_request  *req       = mdt_info_req(info);
1127         struct mdt_body        *reqbody   = NULL;
1128         struct mdt_object      *parent    = info->mti_object;
1129         struct mdt_object      *child;
1130         struct md_object       *next      = mdt_object_child(parent);
1131         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
1132         struct lu_name         *lname     = NULL;
1133         const char             *name      = NULL;
1134         int                     namelen   = 0;
1135         struct mdt_lock_handle *lhp       = NULL;
1136         struct ldlm_lock       *lock;
1137         struct ldlm_res_id     *res_id;
1138         int                     is_resent;
1139         int                     ma_need = 0;
1140         int                     rc;
1141
1142         ENTRY;
1143
1144         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
1145         LASSERT(ergo(is_resent,
1146                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
1147
1148         LASSERT(parent != NULL);
1149         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
1150         if (name == NULL)
1151                 RETURN(err_serious(-EFAULT));
1152
1153         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
1154                                        RCL_CLIENT) - 1;
1155         if (!info->mti_cross_ref) {
1156                 /*
1157                  * XXX: the check for "namelen == 0" handles getattr by fid
1158                  * (OBD_CONNECT_ATTRFID); otherwise an empty name is not
1159                  * allowed, that is, the name must contain at least one
1160                  * character and the terminating '\0'.
1161                  */
1162                 if (namelen == 0) {
1163                         reqbody = req_capsule_client_get(info->mti_pill,
1164                                                          &RMF_MDT_BODY);
1165                         if (unlikely(reqbody == NULL))
1166                                 RETURN(err_serious(-EFAULT));
1167
1168                         if (unlikely(!fid_is_sane(&reqbody->fid2)))
1169                                 RETURN(err_serious(-EINVAL));
1170
1171                         name = NULL;
1172                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
1173                                "ldlm_rep = %p\n",
1174                                PFID(mdt_object_fid(parent)),
1175                                PFID(&reqbody->fid2), ldlm_rep);
1176                 } else {
1177                         lname = mdt_name(info->mti_env, (char *)name, namelen);
1178                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
1179                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
1180                                name, ldlm_rep);
1181                 }
1182         }
1183         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
1184
1185         rc = mdt_object_exists(parent);
1186         if (unlikely(rc == 0)) {
1187                 LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1188                                 &parent->mot_obj.mo_lu,
1189                                 "Parent doesn't exist!\n");
1190                 RETURN(-ESTALE);
1191         } else if (!info->mti_cross_ref) {
1192                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
1193                          PFID(mdt_object_fid(parent)));
1194         }
1195         if (lname) {
1196                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
1197                 if (rc != 0) {
1198                         if (rc > 0)
1199                                 rc = 0;
1200                         RETURN(rc);
1201                 }
1202         }
1203
1204         if (info->mti_cross_ref) {
1205                 /* Only getattr on the child. Parent is on another node. */
1206                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1207                 child = parent;
1208                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
1209                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
1210
1211                 if (is_resent) {
1212                         /* Do not take lock for resent case. */
1213                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1214                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1215                                  lhc->mlh_reg_lh.cookie);
1216                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
1217                                                 &lock->l_resource->lr_name));
1218                         LDLM_LOCK_PUT(lock);
1219                         rc = 0;
1220                 } else {
1221                         mdt_lock_handle_init(lhc);
1222                         mdt_lock_reg_init(lhc, LCK_PR);
1223
1224                         /*
1225                          * Object's name is on another MDS, no lookup lock is
1226                          * needed here but an update lock is.
1227                          */
1228                         child_bits &= ~MDS_INODELOCK_LOOKUP;
1229                         child_bits |= MDS_INODELOCK_UPDATE;
1230
1231                         rc = mdt_object_lock(info, child, lhc, child_bits,
1232                                              MDT_LOCAL_LOCK);
1233                 }
1234                 if (rc == 0) {
1235                         /* Finally, we can get attr for child. */
1236                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
1237                                          BYPASS_CAPA);
1238                         rc = mdt_getattr_internal(info, child, 0);
1239                         if (unlikely(rc != 0))
1240                                 mdt_object_unlock(info, child, lhc, 1);
1241                 }
1242                 RETURN(rc);
1243         }
1244
1245         if (lname) {
1246                 /* step 1: lock parent only if parent is a directory */
1247                 if (S_ISDIR(lu_object_attr(&parent->mot_obj.mo_lu))) {
1248                         lhp = &info->mti_lh[MDT_LH_PARENT];
1249                         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
1250                         rc = mdt_object_lock(info, parent, lhp,
1251                                              MDS_INODELOCK_UPDATE,
1252                                              MDT_LOCAL_LOCK);
1253                         if (unlikely(rc != 0))
1254                                 RETURN(rc);
1255                 }
1256
1257                 /* step 2: lookup child's fid by name */
1258                 fid_zero(child_fid);
1259                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
1260                                 &info->mti_spec);
1261
1262                 if (rc != 0) {
1263                         if (rc == -ENOENT)
1264                                 mdt_set_disposition(info, ldlm_rep,
1265                                                     DISP_LOOKUP_NEG);
1266                         GOTO(out_parent, rc);
1267                 } else
1268                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1269         } else {
1270                 *child_fid = reqbody->fid2;
1271                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
1272         }
1273
1274         /*
1275          * step 3: find the child object by fid and lock it,
1276          *         regardless of whether it is local or remote.
1277          */
1278         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
1279
1280         if (unlikely(IS_ERR(child)))
1281                 GOTO(out_parent, rc = PTR_ERR(child));
1282         if (is_resent) {
1283                 /* Do not take lock for resent case. */
1284                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1285                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
1286                          lhc->mlh_reg_lh.cookie);
1287
1288                 res_id = &lock->l_resource->lr_name;
1289                 if (!fid_res_name_eq(mdt_object_fid(child),
1290                                     &lock->l_resource->lr_name)) {
1291                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
1292                                                  &lock->l_resource->lr_name),
1293                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1294                                  (unsigned long)res_id->name[0],
1295                                  (unsigned long)res_id->name[1],
1296                                  (unsigned long)res_id->name[2],
1297                                  PFID(mdt_object_fid(parent)));
1298                           CWARN("Although resent, still did not get the child "
1299                                 "lock, parent: "DFID" child: "DFID"\n",
1300                                 PFID(mdt_object_fid(parent)),
1301                                 PFID(mdt_object_fid(child)));
1302                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1303                           LDLM_LOCK_PUT(lock);
1304                           GOTO(relock, 0);
1305                 }
1306                 LDLM_LOCK_PUT(lock);
1307                 rc = 0;
1308         } else {
1309 relock:
1310                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
1311                 mdt_lock_handle_init(lhc);
1312                 if (child_bits == MDS_INODELOCK_LAYOUT)
1313                         mdt_lock_reg_init(lhc, LCK_CR);
1314                 else
1315                         mdt_lock_reg_init(lhc, LCK_PR);
1316
1317                 if (mdt_object_exists(child) == 0) {
1318                         LU_OBJECT_DEBUG(D_INODE, info->mti_env,
1319                                         &child->mot_obj.mo_lu,
1320                                         "Object doesn't exist!\n");
1321                         GOTO(out_child, rc = -ENOENT);
1322                 }
1323
1324                 if (!(child_bits & MDS_INODELOCK_UPDATE)) {
1325                         struct md_attr *ma = &info->mti_attr;
1326
1327                         ma->ma_valid = 0;
1328                         ma->ma_need = MA_INODE;
1329                         rc = mdt_attr_get_complex(info, child, ma);
1330                         if (unlikely(rc != 0))
1331                                 GOTO(out_child, rc);
1332
1333                         /* layout lock is used only on regular files */
1334                         if ((ma->ma_valid & MA_INODE) &&
1335                             (ma->ma_attr.la_valid & LA_MODE) &&
1336                             !S_ISREG(ma->ma_attr.la_mode))
1337                                 child_bits &= ~MDS_INODELOCK_LAYOUT;
1338
1339                         /* If the file has not been changed for some time, we
1340                          * return not only a LOOKUP lock, but also an UPDATE
1341                          * lock, and this might save us an RPC on a later STAT.
1342                          * For directories, it also lets negative dentries
1343                          * start working for this dir. */
1344                         if (ma->ma_valid & MA_INODE &&
1345                             ma->ma_attr.la_valid & LA_CTIME &&
1346                             info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1347                                 ma->ma_attr.la_ctime < cfs_time_current_sec())
1348                                 child_bits |= MDS_INODELOCK_UPDATE;
1349                 }
1350
1351                 rc = mdt_object_lock(info, child, lhc, child_bits,
1352                                      MDT_CROSS_LOCK);
1353
1354                 if (unlikely(rc != 0))
1355                         GOTO(out_child, rc);
1356         }
1357
1358         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1359         /* Get MA_SOM attributes if update lock is given. */
1360         if (lock &&
1361             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1362             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1363                 ma_need = MA_SOM;
1364
1365         /* finally, we can get attr for child. */
1366         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1367         rc = mdt_getattr_internal(info, child, ma_need);
1368         if (unlikely(rc != 0)) {
1369                 mdt_object_unlock(info, child, lhc, 1);
1370         } else if (lock) {
1371                 /* Debugging code. */
1372                 res_id = &lock->l_resource->lr_name;
1373                 LDLM_DEBUG(lock, "Returning lock to client");
1374                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1375                                          &lock->l_resource->lr_name),
1376                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1377                          (unsigned long)res_id->name[0],
1378                          (unsigned long)res_id->name[1],
1379                          (unsigned long)res_id->name[2],
1380                          PFID(mdt_object_fid(child)));
1381                 mdt_pack_size2body(info, child);
1382         }
1383         if (lock)
1384                 LDLM_LOCK_PUT(lock);
1385
1386         EXIT;
1387 out_child:
1388         mdt_object_put(info->mti_env, child);
1389 out_parent:
1390         if (lhp)
1391                 mdt_object_unlock(info, parent, lhp, 1);
1392         return rc;
1393 }
1394
1395 /* normal handler: should release the child lock */
1396 static int mdt_getattr_name(struct mdt_thread_info *info)
1397 {
1398         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1399         struct mdt_body        *reqbody;
1400         struct mdt_body        *repbody;
1401         int rc, rc2;
1402         ENTRY;
1403
1404         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1405         LASSERT(reqbody != NULL);
1406         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1407         LASSERT(repbody != NULL);
1408
1409         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1410         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1411         repbody->eadatasize = 0;
1412         repbody->aclsize = 0;
1413
1414         rc = mdt_init_ucred(info, reqbody);
1415         if (unlikely(rc))
1416                 GOTO(out_shrink, rc);
1417
1418         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1419         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1420                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1421                 lhc->mlh_reg_lh.cookie = 0;
1422         }
1423         mdt_exit_ucred(info);
1424         EXIT;
1425 out_shrink:
1426         mdt_client_compatibility(info);
1427         rc2 = mdt_fix_reply(info);
1428         if (rc == 0)
1429                 rc = rc2;
1430         return rc;
1431 }
1432
1433 static const struct lu_device_operations mdt_lu_ops;
1434
1435 static int lu_device_is_mdt(struct lu_device *d)
1436 {
1437         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1438 }
1439
1440 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1441                          void *karg, void *uarg);
1442
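/*
 * MDS_SET_INFO handler.  Only two keys are accepted here: KEY_READ_ONLY,
 * which toggles OBD_CONNECT_RDONLY on the export, and KEY_CHANGELOG_CLEAR,
 * which is forwarded to mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR).  Any other
 * key is rejected with -EINVAL.
 */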
1443 static int mdt_set_info(struct mdt_thread_info *info)
1444 {
1445         struct ptlrpc_request *req = mdt_info_req(info);
1446         char *key;
1447         void *val;
1448         int keylen, vallen, rc = 0;
1449         ENTRY;
1450
1451         rc = req_capsule_server_pack(info->mti_pill);
1452         if (rc)
1453                 RETURN(rc);
1454
1455         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1456         if (key == NULL) {
1457                 DEBUG_REQ(D_HA, req, "no set_info key");
1458                 RETURN(-EFAULT);
1459         }
1460
1461         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1462                                       RCL_CLIENT);
1463
1464         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1465         if (val == NULL) {
1466                 DEBUG_REQ(D_HA, req, "no set_info val");
1467                 RETURN(-EFAULT);
1468         }
1469
1470         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1471                                       RCL_CLIENT);
1472
1473         /* Swab any parts of val that need it here */
1474         if (KEY_IS(KEY_READ_ONLY)) {
1475                 req->rq_status = 0;
1476                 lustre_msg_set_status(req->rq_repmsg, 0);
1477
1478                 cfs_spin_lock(&req->rq_export->exp_lock);
1479                 if (*(__u32 *)val)
1480                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1481                 else
1482                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1483                 cfs_spin_unlock(&req->rq_export->exp_lock);
1484
1485         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1486                 struct changelog_setinfo *cs =
1487                         (struct changelog_setinfo *)val;
1488                 if (vallen != sizeof(*cs)) {
1489                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1490                         RETURN(-EINVAL);
1491                 }
1492                 if (ptlrpc_req_need_swab(req)) {
1493                         __swab64s(&cs->cs_recno);
1494                         __swab32s(&cs->cs_id);
1495                 }
1496
1497                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1498                                    vallen, val, NULL);
1499                 lustre_msg_set_status(req->rq_repmsg, rc);
1500
1501         } else {
1502                 RETURN(-EINVAL);
1503         }
1504         RETURN(0);
1505 }
1506
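/*
 * MDS_CONNECT handler.  After a successful target_handle_connect() the
 * security level and identity mapping for the new export are initialized;
 * if either step fails, the freshly created export is disconnected again.
 */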
1507 static int mdt_connect(struct mdt_thread_info *info)
1508 {
1509         int rc;
1510         struct ptlrpc_request *req;
1511
1512         req = mdt_info_req(info);
1513         rc = target_handle_connect(req);
1514         if (rc == 0) {
1515                 LASSERT(req->rq_export != NULL);
1516                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1517                 rc = mdt_init_sec_level(info);
1518                 if (rc == 0)
1519                         rc = mdt_init_idmap(info);
1520                 if (rc != 0)
1521                         obd_disconnect(class_export_get(req->rq_export));
1522         } else {
1523                 rc = err_serious(rc);
1524         }
1525         return rc;
1526 }
1527
1528 static int mdt_disconnect(struct mdt_thread_info *info)
1529 {
1530         int rc;
1531         ENTRY;
1532
1533         rc = target_handle_disconnect(mdt_info_req(info));
1534         if (rc)
1535                 rc = err_serious(rc);
1536         RETURN(rc);
1537 }
1538
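/*
 * Transfer up to @nob bytes from the pages in @rdpg to the client with a
 * bulk PUT on MDS_BULK_PORTAL.  Clients without OBD_CONNECT_BRW_SIZE are
 * limited to rdpg->rp_count bytes (the reply size based on their PAGE_SIZE).
 */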
1539 static int mdt_sendpage(struct mdt_thread_info *info,
1540                         struct lu_rdpg *rdpg, int nob)
1541 {
1542         struct ptlrpc_request   *req = mdt_info_req(info);
1543         struct obd_export       *exp = req->rq_export;
1544         struct ptlrpc_bulk_desc *desc;
1545         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1546         int                      tmpcount;
1547         int                      tmpsize;
1548         int                      i;
1549         int                      rc;
1550         ENTRY;
1551
1552         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1553                                     MDS_BULK_PORTAL);
1554         if (desc == NULL)
1555                 RETURN(-ENOMEM);
1556
1557         if (!(exp->exp_connect_flags & OBD_CONNECT_BRW_SIZE))
1558                 /* an old client requires the reply size in its PAGE_SIZE,
1559                  * which is rdpg->rp_count */
1560                 nob = rdpg->rp_count;
1561
1562         for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
1563              i++, tmpcount -= tmpsize) {
1564                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1565                 ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
1566         }
1567
1568         LASSERT(desc->bd_nob == nob);
1569         rc = target_bulk_io(exp, desc, lwi);
1570         ptlrpc_free_bulk(desc);
1571         RETURN(rc);
1572 }
1573
1574 #ifdef HAVE_SPLIT_SUPPORT
1575 /*
1576  * Retrieve directory entries from the page and insert them into the slave
1577  * object. Strictly speaking this belongs in the osd layer, but since it will
1578  * not be in the final product, just do it here rather than adding more md API.
1579  */
1580 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1581                               int size)
1582 {
1583         struct mdt_object *object = info->mti_object;
1584         struct lu_fid *lf = &info->mti_tmp_fid2;
1585         struct md_attr *ma = &info->mti_attr;
1586         struct lu_dirpage *dp;
1587         struct lu_dirent *ent;
1588         int rc = 0, offset = 0;
1589         ENTRY;
1590
1591         /* Make sure we have at least one entry. */
1592         if (size == 0)
1593                 RETURN(-EINVAL);
1594
1595         /*
1596          * Disable transactions for these name inserts, since processing the
1597          * whole page would otherwise involve many transactions.
1598          */
1599         info->mti_no_need_trans = 1;
1600         /*
1601          * When writing a dir page, there is no need to update the parent's
1602          * ctime, and no permission check is done for name_insert.
1603          */
1604         ma->ma_attr.la_ctime = 0;
1605         ma->ma_attr.la_valid = LA_MODE;
1606         ma->ma_valid = MA_INODE;
1607
1608         cfs_kmap(page);
1609         dp = page_address(page);
1610         offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
1611
1612         for (ent = lu_dirent_start(dp); ent != NULL;
1613              ent = lu_dirent_next(ent)) {
1614                 struct lu_name *lname;
1615                 char *name;
1616
1617                 if (le16_to_cpu(ent->lde_namelen) == 0)
1618                         continue;
1619
1620                 fid_le_to_cpu(lf, &ent->lde_fid);
1621                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1622                         ma->ma_attr.la_mode = S_IFDIR;
1623                 else
1624                         ma->ma_attr.la_mode = 0;
1625                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1626                 if (name == NULL)
1627                         GOTO(out, rc = -ENOMEM);
1628
1629                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1630                 lname = mdt_name(info->mti_env, name,
1631                                  le16_to_cpu(ent->lde_namelen));
1632                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1633                 rc = mdo_name_insert(info->mti_env,
1634                                      md_object_next(&object->mot_obj),
1635                                      lname, lf, ma);
1636                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1637                 if (rc) {
1638                         CERROR("Can't insert %*.*s, rc %d\n",
1639                                le16_to_cpu(ent->lde_namelen),
1640                                le16_to_cpu(ent->lde_namelen),
1641                                ent->lde_name, rc);
1642                         GOTO(out, rc);
1643                 }
1644
1645                 offset += lu_dirent_size(ent);
1646                 if (offset >= size)
1647                         break;
1648         }
1649         EXIT;
1650 out:
1651         cfs_kunmap(page);
1652         return rc;
1653 }
1654
1655 static int mdt_bulk_timeout(void *data)
1656 {
1657         ENTRY;
1658
1659         CERROR("mdt bulk transfer timeout\n");
1660
1661         RETURN(1);
1662 }
1663
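/*
 * Receive one directory page from the client over a bulk GET and replay the
 * entries it contains into the local object via mdt_write_dir_page().  Here
 * reqbody->size carries the offset within the bulk page and reqbody->nlink
 * the number of bytes transferred (see the CDEBUG below).
 */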
1664 static int mdt_writepage(struct mdt_thread_info *info)
1665 {
1666         struct ptlrpc_request   *req = mdt_info_req(info);
1667         struct mdt_body         *reqbody;
1668         struct l_wait_info      *lwi;
1669         struct ptlrpc_bulk_desc *desc;
1670         struct page             *page;
1671         int                rc;
1672         ENTRY;
1673
1674
1675         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1676         if (reqbody == NULL)
1677                 RETURN(err_serious(-EFAULT));
1678
1679         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1680         if (desc == NULL)
1681                 RETURN(err_serious(-ENOMEM));
1682
1683         /* allocate the page for the desc */
1684         page = cfs_alloc_page(CFS_ALLOC_STD);
1685         if (page == NULL)
1686                 GOTO(desc_cleanup, rc = -ENOMEM);
1687
1688         CDEBUG(D_INFO, "Received page offset %d size %d\n",
1689                (int)reqbody->size, (int)reqbody->nlink);
1690
1691         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1692                               (int)reqbody->nlink);
1693
1694         rc = sptlrpc_svc_prep_bulk(req, desc);
1695         if (rc != 0)
1696                 GOTO(cleanup_page, rc);
1697         /*
1698          * Check if the client was evicted while we were doing I/O, before
1699          * touching the network.
1700          */
1701         OBD_ALLOC_PTR(lwi);
1702         if (!lwi)
1703                 GOTO(cleanup_page, rc = -ENOMEM);
1704
1705         if (desc->bd_export->exp_failed)
1706                 rc = -ENOTCONN;
1707         else
1708                 rc = ptlrpc_start_bulk_transfer (desc);
1709         if (rc == 0) {
1710                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
1711                                             mdt_bulk_timeout, desc);
1712                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1713                                   desc->bd_export->exp_failed, lwi);
1714                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1715                 if (rc == -ETIMEDOUT) {
1716                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1717                         ptlrpc_abort_bulk(desc);
1718                 } else if (desc->bd_export->exp_failed) {
1719                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1720                         rc = -ENOTCONN;
1721                         ptlrpc_abort_bulk(desc);
1722                 } else if (!desc->bd_success ||
1723                            desc->bd_nob_transferred != desc->bd_nob) {
1724                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1725                                   desc->bd_success ?
1726                                   "truncated" : "network error on",
1727                                   desc->bd_nob_transferred, desc->bd_nob);
1728                         /* XXX should this be a different errno? */
1729                         rc = -ETIMEDOUT;
1730                 }
1731         } else {
1732                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1733         }
1734         if (rc)
1735                 GOTO(cleanup_lwi, rc);
1736         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1737
1738 cleanup_lwi:
1739         OBD_FREE_PTR(lwi);
1740 cleanup_page:
1741         cfs_free_page(page);
1742 desc_cleanup:
1743         ptlrpc_free_bulk(desc);
1744         RETURN(rc);
1745 }
1746 #endif
1747
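/*
 * MDS_READPAGE handler.  reqbody->size carries the directory hash to start
 * reading from and reqbody->nlink the number of bytes requested.  Pages are
 * allocated, filled by mo_readpage() on the lower layers, and then sent to
 * the client with mdt_sendpage().
 */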
1748 static int mdt_readpage(struct mdt_thread_info *info)
1749 {
1750         struct mdt_object *object = info->mti_object;
1751         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1752         struct mdt_body   *reqbody;
1753         struct mdt_body   *repbody;
1754         int                rc;
1755         int                i;
1756         ENTRY;
1757
1758         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1759                 RETURN(err_serious(-ENOMEM));
1760
1761         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1762         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1763         if (reqbody == NULL || repbody == NULL)
1764                 RETURN(err_serious(-EFAULT));
1765
1766         /*
1767          * Prepare @rdpg before calling the lower layers and the transfer
1768          * itself. Here reqbody->size contains the offset to start reading from
1769          * and reqbody->nlink contains the number of bytes to read.
1770          */
1771         rdpg->rp_hash = reqbody->size;
1772         if (rdpg->rp_hash != reqbody->size) {
1773                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1774                        rdpg->rp_hash, reqbody->size);
1775                 RETURN(-EFAULT);
1776         }
1777
1778         rdpg->rp_attrs = reqbody->mode;
1779         if (info->mti_exp->exp_connect_flags & OBD_CONNECT_64BITHASH)
1780                 rdpg->rp_attrs |= LUDA_64BITHASH;
1781         rdpg->rp_count  = min_t(unsigned int, reqbody->nlink,
1782                                 PTLRPC_MAX_BRW_SIZE);
1783         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
1784                           CFS_PAGE_SHIFT;
1785         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1786         if (rdpg->rp_pages == NULL)
1787                 RETURN(-ENOMEM);
1788
1789         for (i = 0; i < rdpg->rp_npages; ++i) {
1790                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1791                 if (rdpg->rp_pages[i] == NULL)
1792                         GOTO(free_rdpg, rc = -ENOMEM);
1793         }
1794
1795         /* call lower layers to fill allocated pages with directory data */
1796         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1797         if (rc < 0)
1798                 GOTO(free_rdpg, rc);
1799
1800         /* send pages to client */
1801         rc = mdt_sendpage(info, rdpg, rc);
1802
1803         EXIT;
1804 free_rdpg:
1805
1806         for (i = 0; i < rdpg->rp_npages; i++)
1807                 if (rdpg->rp_pages[i] != NULL)
1808                         cfs_free_page(rdpg->rp_pages[i]);
1809         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1810
1811         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1812                 RETURN(0);
1813
1814         return rc;
1815 }
1816
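/*
 * Common path for all reintegration (metadata modification) requests:
 * unpack the reint record, pack the reply, switch to the client's
 * credentials, handle resent requests via mdt_check_resent(), and finally
 * dispatch to mdt_reint_rec().
 */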
1817 static int mdt_reint_internal(struct mdt_thread_info *info,
1818                               struct mdt_lock_handle *lhc,
1819                               __u32 op)
1820 {
1821         struct req_capsule      *pill = info->mti_pill;
1822         struct md_quota         *mq = md_quota(info->mti_env);
1823         struct mdt_body         *repbody;
1824         int                      rc = 0, rc2;
1825         ENTRY;
1826
1827
1828         rc = mdt_reint_unpack(info, op);
1829         if (rc != 0) {
1830                 CERROR("Can't unpack reint, rc %d\n", rc);
1831                 RETURN(err_serious(rc));
1832         }
1833
1834         /* for replay (no_create) lmm is not needed, client has it already */
1835         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1836                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1837                                      info->mti_rr.rr_eadatalen);
1838
1839         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1840                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1841                                      info->mti_mdt->mdt_max_cookiesize);
1842
1843         rc = req_capsule_server_pack(pill);
1844         if (rc != 0) {
1845                 CERROR("Can't pack response, rc %d\n", rc);
1846                 RETURN(err_serious(rc));
1847         }
1848
1849         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1850                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1851                 LASSERT(repbody);
1852                 repbody->eadatasize = 0;
1853                 repbody->aclsize = 0;
1854         }
1855
1856         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1857
1858         /* for replay, no cookie / lmm is needed, because the client already has them */
1859         if (info->mti_spec.no_create)
1860                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1861                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1862
1863         rc = mdt_init_ucred_reint(info);
1864         if (rc)
1865                 GOTO(out_shrink, rc);
1866
1867         rc = mdt_fix_attr_ucred(info, op);
1868         if (rc != 0)
1869                 GOTO(out_ucred, rc = err_serious(rc));
1870
1871         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1872                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1873                 GOTO(out_ucred, rc);
1874         }
1875         mq->mq_exp = info->mti_exp;
1876         rc = mdt_reint_rec(info, lhc);
1877         EXIT;
1878 out_ucred:
1879         mdt_exit_ucred(info);
1880 out_shrink:
1881         mdt_client_compatibility(info);
1882         rc2 = mdt_fix_reply(info);
1883         if (rc == 0)
1884                 rc = rc2;
1885         return rc;
1886 }
1887
1888 static long mdt_reint_opcode(struct mdt_thread_info *info,
1889                              const struct req_format **fmt)
1890 {
1891         struct mdt_rec_reint *rec;
1892         long opc;
1893
1894         opc = err_serious(-EFAULT);
1895         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1896         if (rec != NULL) {
1897                 opc = rec->rr_opcode;
1898                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opc = %ld", opc);
1899                 if (opc < REINT_MAX && fmt[opc] != NULL)
1900                         req_capsule_extend(info->mti_pill, fmt[opc]);
1901                 else {
1902                         CERROR("Unsupported opc: %ld\n", opc);
1903                         opc = err_serious(opc);
1904                 }
1905         }
1906         return opc;
1907 }
1908
1909 static int mdt_reint(struct mdt_thread_info *info)
1910 {
1911         long opc;
1912         int  rc;
1913
1914         static const struct req_format *reint_fmts[REINT_MAX] = {
1915                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1916                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1917                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1918                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1919                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1920                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1921                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1922         };
1923
1924         ENTRY;
1925
1926         opc = mdt_reint_opcode(info, reint_fmts);
1927         if (opc >= 0) {
1928                 /*
1929                  * No lock from the client is possible here to pass down to
1930                  * the reint code path.
1931                  */
1932                 rc = mdt_reint_internal(info, NULL, opc);
1933         } else {
1934                 rc = opc;
1935         }
1936
1937         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1938         RETURN(rc);
1939 }
1940
1941 /* this should sync the whole device */
1942 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1943 {
1944         struct dt_device *dt = mdt->mdt_bottom;
1945         int rc;
1946         ENTRY;
1947
1948         rc = dt->dd_ops->dt_sync(env, dt);
1949         RETURN(rc);
1950 }
1951
1952 /* this should sync this object */
1953 static int mdt_object_sync(struct mdt_thread_info *info)
1954 {
1955         struct md_object *next;
1956         int rc;
1957         ENTRY;
1958
1959         if (!mdt_object_exists(info->mti_object)) {
1960                 CWARN("Non-existent object "DFID"!\n",
1961                       PFID(mdt_object_fid(info->mti_object)));
1962                 RETURN(-ESTALE);
1963         }
1964         next = mdt_object_child(info->mti_object);
1965         rc = mo_object_sync(info->mti_env, next);
1966
1967         RETURN(rc);
1968 }
1969
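/*
 * MDS_SYNC handler.  A zero fid sequence in the request body means "sync the
 * whole device"; otherwise the single object is synced and its current
 * attributes are packed into the reply.
 */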
1970 static int mdt_sync(struct mdt_thread_info *info)
1971 {
1972         struct ptlrpc_request *req = mdt_info_req(info);
1973         struct req_capsule *pill = info->mti_pill;
1974         struct mdt_body *body;
1975         int rc;
1976         ENTRY;
1977
1978         /* The fid may be zero, so we call req_capsule_set() manually */
1979         req_capsule_set(pill, &RQF_MDS_SYNC);
1980
1981         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1982         if (body == NULL)
1983                 RETURN(err_serious(-EINVAL));
1984
1985         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1986                 RETURN(err_serious(-ENOMEM));
1987
1988         if (fid_seq(&body->fid1) == 0) {
1989                 /* sync the whole device */
1990                 rc = req_capsule_server_pack(pill);
1991                 if (rc == 0)
1992                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1993                 else
1994                         rc = err_serious(rc);
1995         } else {
1996                 /* sync an object */
1997                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1998                 if (rc == 0) {
1999                         rc = mdt_object_sync(info);
2000                         if (rc == 0) {
2001                                 const struct lu_fid *fid;
2002                                 struct lu_attr *la = &info->mti_attr.ma_attr;
2003
2004                                 info->mti_attr.ma_need = MA_INODE;
2005                                 info->mti_attr.ma_valid = 0;
2006                                 rc = mdt_attr_get_complex(info, info->mti_object,
2007                                                           &info->mti_attr);
2008                                 if (rc == 0) {
2009                                         body = req_capsule_server_get(pill,
2010                                                                 &RMF_MDT_BODY);
2011                                         fid = mdt_object_fid(info->mti_object);
2012                                         mdt_pack_attr2body(info, body, la, fid);
2013                                 }
2014                         }
2015                 } else
2016                         rc = err_serious(rc);
2017         }
2018         if (rc == 0)
2019                 mdt_counter_incr(req, LPROC_MDT_SYNC);
2020
2021         RETURN(rc);
2022 }
2023
2024 #ifdef HAVE_QUOTA_SUPPORT
2025 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
2026 {
2027         struct obd_quotactl *oqctl;
2028         struct req_capsule *pill = info->mti_pill;
2029         struct obd_export *exp = info->mti_exp;
2030         struct md_quota *mq = md_quota(info->mti_env);
2031         struct md_device *next = info->mti_mdt->mdt_child;
2032         int rc;
2033         ENTRY;
2034
2035         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
2036         if (oqctl == NULL)
2037                 RETURN(-EPROTO);
2038
2039         /* remote client has no permission for quotacheck */
2040         if (unlikely(exp_connect_rmtclient(exp)))
2041                 RETURN(-EPERM);
2042
2043         rc = req_capsule_server_pack(pill);
2044         if (rc)
2045                 RETURN(rc);
2046
2047         mq->mq_exp = exp;
2048         rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next,
2049                                                oqctl->qc_type);
2050         RETURN(rc);
2051 }
2052
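/*
 * Dispatch a quotactl command to the quota operations of the underlying md
 * device.  Remote (identity-mapped) clients may only issue Q_GETQUOTA and
 * Q_GETINFO, and their uid/gid is translated through the export's idmap
 * table first.
 */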
2053 static int mdt_quotactl_handle(struct mdt_thread_info *info)
2054 {
2055         struct obd_quotactl *oqctl, *repoqc;
2056         struct req_capsule *pill = info->mti_pill;
2057         struct obd_export *exp = info->mti_exp;
2058         struct md_quota *mq = md_quota(info->mti_env);
2059         struct md_device *next = info->mti_mdt->mdt_child;
2060         const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
2061         int id, rc;
2062         ENTRY;
2063
2064         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
2065         if (oqctl == NULL)
2066                 RETURN(-EPROTO);
2067
2068         id = oqctl->qc_id;
2069         if (exp_connect_rmtclient(exp)) {
2070                 struct ptlrpc_request *req = mdt_info_req(info);
2071                 struct mdt_export_data *med = mdt_req2med(req);
2072                 struct lustre_idmap_table *idmap = med->med_idmap;
2073
2074                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
2075                              oqctl->qc_cmd != Q_GETINFO))
2076                         RETURN(-EPERM);
2077
2078
2079                 if (oqctl->qc_type == USRQUOTA)
2080                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
2081                                                      oqctl->qc_id);
2082                 else if (oqctl->qc_type == GRPQUOTA)
2083                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
2084                                                      oqctl->qc_id);
2085                 else
2086                         RETURN(-EINVAL);
2087
2088                 if (id == CFS_IDMAP_NOTFOUND) {
2089                         CDEBUG(D_QUOTA, "no mapping for id %u\n",
2090                                oqctl->qc_id);
2091                         RETURN(-EACCES);
2092                 }
2093         }
2094
2095         rc = req_capsule_server_pack(pill);
2096         if (rc)
2097                 RETURN(rc);
2098
2099         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
2100         LASSERT(repoqc != NULL);
2101
2102         mq->mq_exp = exp;
2103         switch (oqctl->qc_cmd) {
2104         case Q_QUOTAON:
2105                 rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
2106                 break;
2107         case Q_QUOTAOFF:
2108                 rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type);
2109                 break;
2110         case Q_SETINFO:
2111                 rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
2112                                       &oqctl->qc_dqinfo);
2113                 break;
2114         case Q_GETINFO:
2115                 rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
2116                                       &oqctl->qc_dqinfo);
2117                 break;
2118         case Q_SETQUOTA:
2119                 rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
2120                                        &oqctl->qc_dqblk);
2121                 break;
2122         case Q_GETQUOTA:
2123                 rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
2124                                        &oqctl->qc_dqblk);
2125                 break;
2126         case Q_GETOINFO:
2127                 rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
2128                                        &oqctl->qc_dqinfo);
2129                 break;
2130         case Q_GETOQUOTA:
2131                 rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
2132                                         &oqctl->qc_dqblk);
2133                 break;
2134         case LUSTRE_Q_INVALIDATE:
2135                 rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
2136                 break;
2137         case LUSTRE_Q_FINVALIDATE:
2138                 rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
2139                 break;
2140         default:
2141                 CERROR("unsupported mdt_quotactl command: %d\n",
2142                        oqctl->qc_cmd);
2143                 RETURN(-EFAULT);
2144         }
2145
2146         *repoqc = *oqctl;
2147         RETURN(rc);
2148 }
2149 #endif
2150
2151
2152 /*
2153  * OBD PING and other handlers.
2154  */
2155 static int mdt_obd_ping(struct mdt_thread_info *info)
2156 {
2157         int rc;
2158         ENTRY;
2159
2160         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
2161
2162         rc = target_handle_ping(mdt_info_req(info));
2163         if (rc < 0)
2164                 rc = err_serious(rc);
2165         RETURN(rc);
2166 }
2167
2168 /*
2169  * OBD_IDX_READ handler
2170  */
2171 static int mdt_obd_idx_read(struct mdt_thread_info *info)
2172 {
2173         struct mdt_device       *mdt = info->mti_mdt;
2174         struct lu_rdpg          *rdpg = &info->mti_u.rdpg.mti_rdpg;
2175         struct idx_info         *req_ii, *rep_ii;
2176         int                      rc, i;
2177         ENTRY;
2178
2179         memset(rdpg, 0, sizeof(*rdpg));
2180         req_capsule_set(info->mti_pill, &RQF_OBD_IDX_READ);
2181
2182         /* extract idx_info buffer from request & reply */
2183         req_ii = req_capsule_client_get(info->mti_pill, &RMF_IDX_INFO);
2184         if (req_ii == NULL || req_ii->ii_magic != IDX_INFO_MAGIC)
2185                 RETURN(err_serious(-EPROTO));
2186
2187         rc = req_capsule_server_pack(info->mti_pill);
2188         if (rc)
2189                 RETURN(err_serious(rc));
2190
2191         rep_ii = req_capsule_server_get(info->mti_pill, &RMF_IDX_INFO);
2192         if (rep_ii == NULL)
2193                 RETURN(err_serious(-EFAULT));
2194         rep_ii->ii_magic = IDX_INFO_MAGIC;
2195
2196         /* extract hash to start with */
2197         rdpg->rp_hash = req_ii->ii_hash_start;
2198
2199         /* extract requested attributes */
2200         rdpg->rp_attrs = req_ii->ii_attrs;
2201
2202         /* check that fid packed in request is valid and supported */
2203         if (!fid_is_sane(&req_ii->ii_fid))
2204                 RETURN(-EINVAL);
2205         rep_ii->ii_fid = req_ii->ii_fid;
2206
2207         /* copy flags */
2208         rep_ii->ii_flags = req_ii->ii_flags;
2209
2210         /* compute number of pages to allocate, ii_count is the number of 4KB
2211          * containers */
2212         if (req_ii->ii_count <= 0)
2213                 GOTO(out, rc = -EFAULT);
2214         rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
2215                                PTLRPC_MAX_BRW_SIZE);
2216         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT;
2217
2218         /* allocate pages to store the containers */
2219         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2220         if (rdpg->rp_pages == NULL)
2221                 GOTO(out, rc = -ENOMEM);
2222         for (i = 0; i < rdpg->rp_npages; i++) {
2223                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
2224                 if (rdpg->rp_pages[i] == NULL)
2225                         GOTO(out, rc = -ENOMEM);
2226         }
2227
2228         /* populate pages with key/record pairs */
2229         rc = dt_index_read(info->mti_env, mdt->mdt_bottom, rep_ii, rdpg);
2230         if (rc < 0)
2231                 GOTO(out, rc);
2232
2233         LASSERTF(rc <= rdpg->rp_count, "dt_index_read() returned more than "
2234                  "asked %d > %d\n", rc, rdpg->rp_count);
2235
2236         /* send pages to client */
2237         rc = mdt_sendpage(info, rdpg, rc);
2238
2239         GOTO(out, rc);
2240 out:
2241         if (rdpg->rp_pages) {
2242                 for (i = 0; i < rdpg->rp_npages; i++)
2243                         if (rdpg->rp_pages[i])
2244                                 cfs_free_page(rdpg->rp_pages[i]);
2245                 OBD_FREE(rdpg->rp_pages,
2246                          rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
2247         }
2248         return rc;
2249 }
2250
2251 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
2252 {
2253         return err_serious(-EOPNOTSUPP);
2254 }
2255
2256 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
2257 {
2258         return err_serious(-EOPNOTSUPP);
2259 }
2260
2261
2262 /*
2263  * LLOG handlers.
2264  */
2265
2266 /** clone llog ctxt from child (mdd)
2267  * This allows remote llog (replicator) access.
2268  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child
2269  * where the context was originally set up, or we can handle them directly.
2270  * I choose the latter, but that means any llog
2271  * contexts set up by the child must be accessible by the mdt.  So we clone the
2272  * context into our context list here.
2273  */
2274 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
2275                                int idx)
2276 {
2277         struct md_device  *next = mdt->mdt_child;
2278         struct llog_ctxt *ctxt;
2279         int rc;
2280
2281         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
2282                 return 0;
2283
2284         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
2285         if (rc || ctxt == NULL) {
2286                 CERROR("Can't get mdd ctxt %d\n", rc);
2287                 return rc;
2288         }
2289
2290         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
2291         if (rc)
2292                 CERROR("Can't set mdt ctxt %d\n", rc);
2293
2294         return rc;
2295 }
2296
2297 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
2298                                  struct mdt_device *mdt, int idx)
2299 {
2300         struct llog_ctxt *ctxt;
2301
2302         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
2303         if (ctxt == NULL)
2304                 return 0;
2305         /* Put once for the get we just did, and once for the clone */
2306         llog_ctxt_put(ctxt);
2307         llog_ctxt_put(ctxt);
2308         return 0;
2309 }
2310
2311 static int mdt_llog_create(struct mdt_thread_info *info)
2312 {
2313         int rc;
2314
2315         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
2316         rc = llog_origin_handle_open(mdt_info_req(info));
2317         return (rc < 0 ? err_serious(rc) : rc);
2318 }
2319
2320 static int mdt_llog_destroy(struct mdt_thread_info *info)
2321 {
2322         int rc;
2323
2324         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
2325         rc = llog_origin_handle_destroy(mdt_info_req(info));
2326         return (rc < 0 ? err_serious(rc) : rc);
2327 }
2328
2329 static int mdt_llog_read_header(struct mdt_thread_info *info)
2330 {
2331         int rc;
2332
2333         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
2334         rc = llog_origin_handle_read_header(mdt_info_req(info));
2335         return (rc < 0 ? err_serious(rc) : rc);
2336 }
2337
2338 static int mdt_llog_next_block(struct mdt_thread_info *info)
2339 {
2340         int rc;
2341
2342         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
2343         rc = llog_origin_handle_next_block(mdt_info_req(info));
2344         return (rc < 0 ? err_serious(rc) : rc);
2345 }
2346
2347 static int mdt_llog_prev_block(struct mdt_thread_info *info)
2348 {
2349         int rc;
2350
2351         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
2352         rc = llog_origin_handle_prev_block(mdt_info_req(info));
2353         return (rc < 0 ? err_serious(rc) : rc);
2354 }
2355
2356
2357 /*
2358  * DLM handlers.
2359  */
2360 static struct ldlm_callback_suite cbs = {
2361         .lcs_completion = ldlm_server_completion_ast,
2362         .lcs_blocking   = ldlm_server_blocking_ast,
2363         .lcs_glimpse    = NULL
2364 };
2365
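/*
 * LDLM_ENQUEUE handler: hand the already swabbed dlm request over to
 * ldlm_handle_enqueue0() together with the MDT callback suite defined above.
 */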
2366 static int mdt_enqueue(struct mdt_thread_info *info)
2367 {
2368         struct ptlrpc_request *req;
2369         int rc;
2370
2371         /*
2372          * info->mti_dlm_req already contains swapped and (if necessary)
2373          * converted dlm request.
2374          */
2375         LASSERT(info->mti_dlm_req != NULL);
2376
2377         req = mdt_info_req(info);
2378         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
2379                                   req, info->mti_dlm_req, &cbs);
2380         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
2381         return rc ? err_serious(rc) : req->rq_status;
2382 }
2383
2384 static int mdt_convert(struct mdt_thread_info *info)
2385 {
2386         int rc;
2387         struct ptlrpc_request *req;
2388
2389         LASSERT(info->mti_dlm_req);
2390         req = mdt_info_req(info);
2391         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2392         return rc ? err_serious(rc) : req->rq_status;
2393 }
2394
2395 static int mdt_bl_callback(struct mdt_thread_info *info)
2396 {
2397         CERROR("bl callbacks should not happen on MDS\n");
2398         LBUG();
2399         return err_serious(-EOPNOTSUPP);
2400 }
2401
2402 static int mdt_cp_callback(struct mdt_thread_info *info)
2403 {
2404         CERROR("cp callbacks should not happen on MDS\n");
2405         LBUG();
2406         return err_serious(-EOPNOTSUPP);
2407 }
2408
2409 /*
2410  * sec context handlers
2411  */
2412 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2413 {
2414         int rc;
2415
2416         rc = mdt_handle_idmap(info);
2417
2418         if (unlikely(rc)) {
2419                 struct ptlrpc_request *req = mdt_info_req(info);
2420                 __u32                  opc;
2421
2422                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2423                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2424                         sptlrpc_svc_ctx_invalidate(req);
2425         }
2426
2427         CFS_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, cfs_fail_val);
2428
2429         return rc;
2430 }
2431
2432 static struct mdt_object *mdt_obj(struct lu_object *o)
2433 {
2434         LASSERT(lu_device_is_mdt(o->lo_dev));
2435         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2436 }
2437
2438 struct mdt_object *mdt_object_new(const struct lu_env *env,
2439                                   struct mdt_device *d,
2440                                   const struct lu_fid *f)
2441 {
2442         struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
2443         struct lu_object *o;
2444         struct mdt_object *m;
2445         ENTRY;
2446
2447         CDEBUG(D_INFO, "Allocate object for "DFID"\n", PFID(f));
2448         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, &conf);
2449         if (unlikely(IS_ERR(o)))
2450                 m = (struct mdt_object *)o;
2451         else
2452                 m = mdt_obj(o);
2453         RETURN(m);
2454 }
2455
2456 struct mdt_object *mdt_object_find(const struct lu_env *env,
2457                                    struct mdt_device *d,
2458                                    const struct lu_fid *f)
2459 {
2460         struct lu_object *o;
2461         struct mdt_object *m;
2462         ENTRY;
2463
2464         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2465         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2466         if (unlikely(IS_ERR(o)))
2467                 m = (struct mdt_object *)o;
2468         else
2469                 m = mdt_obj(o);
2470         RETURN(m);
2471 }
2472
2473 /**
2474  * Asynchronous commit for mdt device.
2475  *
2476  * Pass the asynchronous commit call down the MDS stack.
2477  *
2478  * \param env environment
2479  * \param mdt the mdt device
2480  */
2481 static void mdt_device_commit_async(const struct lu_env *env,
2482                                     struct mdt_device *mdt)
2483 {
2484         struct dt_device *dt = mdt->mdt_bottom;
2485         int rc;
2486
2487         rc = dt->dd_ops->dt_commit_async(env, dt);
2488         if (unlikely(rc != 0))
2489                 CWARN("async commit start failed with rc = %d\n", rc);
2490 }
2491
2492 /**
2493  * Mark the lock as "synchronous".
2494  *
2495  * Mark the lock to defer transaction commit to the unlock time.
2496  *
2497  * \param lock the lock to mark as "synchronous"
2498  *
2499  * \see mdt_is_lock_sync
2500  * \see mdt_save_lock
2501  */
2502 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2503 {
2504         lock->l_ast_data = (void*)1;
2505 }
2506
2507 /**
2508  * Check whether the lock is "synchronous" or not.
2509  *
2510  * \param lock the lock to check
2511  * \retval 1 the lock is "synchronous"
2512  * \retval 0 the lock isn't "synchronous"
2513  *
2514  * \see mdt_set_lock_sync
2515  * \see mdt_save_lock
2516  */
2517 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2518 {
2519         return lock->l_ast_data != NULL;
2520 }
2521
2522 /**
2523  * Blocking AST for mdt locks.
2524  *
2525  * Starts a transaction commit in case of a COS lock conflict, or
2526  * defers such a commit to mdt_save_lock.
2527  *
2528  * \param lock the lock which blocks a request or cancelling lock
2529  * \param desc unused
2530  * \param data unused
2531  * \param flag indicates whether this is a cancelling or blocking callback
2532  * \retval 0
2533  * \see ldlm_blocking_ast_nocheck
2534  */
2535 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2536                      void *data, int flag)
2537 {
2538         struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
2539         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2540         int rc;
2541         ENTRY;
2542
2543         if (flag == LDLM_CB_CANCELING)
2544                 RETURN(0);
2545         lock_res_and_lock(lock);
2546         if (lock->l_blocking_ast != mdt_blocking_ast) {
2547                 unlock_res_and_lock(lock);
2548                 RETURN(0);
2549         }
2550         if (mdt_cos_is_enabled(mdt) &&
2551             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2552             lock->l_blocking_lock != NULL &&
2553             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2554                 mdt_set_lock_sync(lock);
2555         }
2556         rc = ldlm_blocking_ast_nocheck(lock);
2557
2558         /* There is no lock conflict if l_blocking_lock == NULL; it
2559          * indicates a blocking AST sent from ldlm_lock_decref_internal
2560          * when the last reference to a local lock was released */
2561         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2562                 struct lu_env env;
2563
2564                 rc = lu_env_init(&env, LCT_LOCAL);
2565                 if (unlikely(rc != 0))
2566                         CWARN("lu_env initialization failed with rc = %d, "
2567                               "cannot start asynchronous commit\n", rc);
2568                 else
2569                         mdt_device_commit_async(&env, mdt);
2570                 lu_env_fini(&env);
2571         }
2572         RETURN(rc);
2573 }
2574
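/*
 * Take an inodebit lock (and, for MDT_PDO_LOCK handles, a PDO lock covering
 * the whole directory) on object @o with the bits given in @ibits.
 * @locality selects between purely local objects (MDT_LOCAL_LOCK) and
 * cross-ref objects (MDT_CROSS_LOCK), for which the UPDATE bit is replaced
 * by LOOKUP.  On failure any lock acquired so far is released again.
 */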
2575 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2576                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2577 {
2578         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2579         ldlm_policy_data_t *policy = &info->mti_policy;
2580         struct ldlm_res_id *res_id = &info->mti_res_id;
2581         int rc;
2582         ENTRY;
2583
2584         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2585         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2586         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2587         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2588
2589         if (mdt_object_exists(o) < 0) {
2590                 if (locality == MDT_CROSS_LOCK) {
2591                         /* cross-ref object fix */
2592                         ibits &= ~MDS_INODELOCK_UPDATE;
2593                         ibits |= MDS_INODELOCK_LOOKUP;
2594                 } else {
2595                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2596                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2597                 }
2598                 /* No PDO lock on remote object */
2599                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2600         }
2601
2602         if (lh->mlh_type == MDT_PDO_LOCK) {
2603                 /* check for existence after the object is locked */
2604                 if (mdt_object_exists(o) == 0) {
2605                         /* Non-existent object shouldn't have PDO lock */
2606                         RETURN(-ESTALE);
2607                 } else {
2608                         /* Non-dir object shouldn't have PDO lock */
2609                         if (!S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)))
2610                                 RETURN(-ENOTDIR);
2611                 }
2612         }
2613
2614         memset(policy, 0, sizeof(*policy));
2615         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2616
2617         /*
2618          * Take a PDO lock on the whole directory and build the correct
2619          * @res_id for a lock on part of the directory.
2620          */
2621         if (lh->mlh_pdo_hash != 0) {
2622                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2623                 mdt_lock_pdo_mode(info, o, lh);
2624                 if (lh->mlh_pdo_mode != LCK_NL) {
2625                         /*
2626                          * Do not use LDLM_FL_LOCAL_ONLY for the parallel lock; it
2627                          * is never going to be sent to the client and we do not
2628                          * want it slowed down due to possible cancels.
2629                          */
2630                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2631                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2632                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2633                                           &info->mti_exp->exp_handle.h_cookie);
2634                         if (unlikely(rc))
2635                                 RETURN(rc);
2636                 }
2637
2638                 /*
2639                  * Finish initializing res_id with the name hash, marking the
2640                  * part of the directory which is being modified.
2641                  */
2642                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2643         }
2644
2645         policy->l_inodebits.bits = ibits;
2646
2647         /*
2648          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2649          * going to be sent to the client. If it is, the mdt_intent_policy()
2650          * path will fix it up and turn the FL_LOCAL flag off.
2651          */
2652         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2653                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2654                           &info->mti_exp->exp_handle.h_cookie);
2655         if (rc)
2656                 mdt_object_unlock(info, o, lh, 1);
2657         else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
2658                  lh->mlh_pdo_hash != 0 &&
2659                  (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
2660                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 15);
2661         }
2662
2663         RETURN(rc);
2664 }
2665
2666 /**
2667  * Save a lock within request object.
2668  *
2669  * Keep the lock referenced until client ACK or transaction commit
2670  * happens, or release the lock immediately, depending on the input
2671  * parameters. If COS is ON, a write lock is converted to a COS lock
2672  * before saving.
2673  *
2674  * \param info thread info object
2675  * \param h lock handle
2676  * \param mode lock mode
2677  * \param decref force immediate lock releasing
2678  */
2679 static
2680 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2681                    ldlm_mode_t mode, int decref)
2682 {
2683         ENTRY;
2684
2685         if (lustre_handle_is_used(h)) {
2686                 if (decref || !info->mti_has_trans ||
2687                     !(mode & (LCK_PW | LCK_EX))){
2688                         mdt_fid_unlock(h, mode);
2689                 } else {
2690                         struct mdt_device *mdt = info->mti_mdt;
2691                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2692                         struct ptlrpc_request *req = mdt_info_req(info);
2693                         int no_ack = 0;
2694
2695                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2696                                  h->cookie);
2697                         CDEBUG(D_HA, "request = %p reply state = %p"
2698                                " transno = "LPD64"\n",
2699                                req, req->rq_reply_state, req->rq_transno);
2700                         if (mdt_cos_is_enabled(mdt)) {
2701                                 no_ack = 1;
2702                                 ldlm_lock_downgrade(lock, LCK_COS);
2703                                 mode = LCK_COS;
2704                         }
2705                         ptlrpc_save_lock(req, h, mode, no_ack);
2706                         if (mdt_is_lock_sync(lock)) {
2707                                 CDEBUG(D_HA, "found sync-lock,"
2708                                        " async commit started\n");
2709                                 mdt_device_commit_async(info->mti_env,
2710                                                         mdt);
2711                         }
2712                         LDLM_LOCK_PUT(lock);
2713                 }
2714                 h->cookie = 0ull;
2715         }
2716
2717         EXIT;
2718 }
2719
2720 /**
2721  * Unlock mdt object.
2722  *
2723  * Immediately release the regular lock and the PDO lock, or save the
2724  * locks in the request and keep them referenced until client ACK or
2725  * transaction commit.
2726  *
2727  * \param info thread info object
2728  * \param o mdt object
2729  * \param lh mdt lock handle referencing regular and PDO locks
2730  * \param decref force immediate lock releasing
2731  */
2732 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2733                        struct mdt_lock_handle *lh, int decref)
2734 {
2735         ENTRY;
2736
2737         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2738         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2739
2740         EXIT;
2741 }
2742
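/*
 * Combine mdt_object_find() and mdt_object_lock(): look the object up by
 * fid and take a local lock with the given inodebits, returning an ERR_PTR
 * on failure.  An illustrative sketch of a caller (hypothetical, not taken
 * from this file) would pair it with mdt_object_unlock_put():
 *
 *      o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      ... use the locked object ...
 *      mdt_object_unlock_put(info, o, lh, rc != 0);
 */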
2743 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2744                                         const struct lu_fid *f,
2745                                         struct mdt_lock_handle *lh,
2746                                         __u64 ibits)
2747 {
2748         struct mdt_object *o;
2749
2750         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2751         if (!IS_ERR(o)) {
2752                 int rc;
2753
2754                 rc = mdt_object_lock(info, o, lh, ibits,
2755                                      MDT_LOCAL_LOCK);
2756                 if (rc != 0) {
2757                         mdt_object_put(info->mti_env, o);
2758                         o = ERR_PTR(rc);
2759                 }
2760         }
2761         return o;
2762 }
2763
2764 void mdt_object_unlock_put(struct mdt_thread_info * info,
2765                            struct mdt_object * o,
2766                            struct mdt_lock_handle *lh,
2767                            int decref)
2768 {
2769         mdt_object_unlock(info, o, lh, decref);
2770         mdt_object_put(info->mti_env, o);
2771 }
2772
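/*
 * Look up the handler for opcode @opc in the table of supported opcode
 * slices; returns NULL when the opcode falls outside all slices or the slot
 * for it is empty.
 */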
2773 static struct mdt_handler *mdt_handler_find(__u32 opc,
2774                                             struct mdt_opc_slice *supported)
2775 {
2776         struct mdt_opc_slice *s;
2777         struct mdt_handler   *h;
2778
2779         h = NULL;
2780         for (s = supported; s->mos_hs != NULL; s++) {
2781                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2782                         h = s->mos_hs + (opc - s->mos_opc_start);
2783                         if (likely(h->mh_opc != 0))
2784                                 LASSERTF(h->mh_opc == opc,
2785                                          "opcode mismatch %d != %d\n",
2786                                          h->mh_opc, opc);
2787                         else
2788                                 h = NULL; /* unsupported opc */
2789                         break;
2790                 }
2791         }
2792         return h;
2793 }
2794
2795 static int mdt_lock_resname_compat(struct mdt_device *m,
2796                                    struct ldlm_request *req)
2797 {
2798         /* XXX something... later. */
2799         return 0;
2800 }
2801
2802 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2803 {
2804         /* XXX something... later. */
2805         return 0;
2806 }
2807
2808 /*
2809  * Generic code handling requests that have struct mdt_body passed in:
2810  *
2811  *  - extract mdt_body from request and save it in @info, if present;
2812  *
2813  *  - create the lu_object corresponding to the fid in mdt_body, and save
2814  *  it in @info;
2815  *
2816  *  - if the HABEO_CORPUS flag is set for this request type, check whether
2817  *  the object actually exists on storage (lu_object_exists()).
2818  *
2819  */
2820 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2821 {
2822         const struct mdt_body    *body;
2823         struct mdt_object        *obj;
2824         const struct lu_env      *env;
2825         struct req_capsule       *pill;
2826         int                       rc;
2827         ENTRY;
2828
2829         env = info->mti_env;
2830         pill = info->mti_pill;
2831
2832         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2833         if (body == NULL)
2834                 RETURN(-EFAULT);
2835
2836         if (!(body->valid & OBD_MD_FLID))
2837                 RETURN(0);
2838
2839         if (!fid_is_sane(&body->fid1)) {
2840                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2841                 RETURN(-EINVAL);
2842         }
2843
2844         /*
2845          * Do not get size or any capa fields before we check that the request
2846          * actually contains a capa. There are some requests which do not, for
2847          * instance MDS_IS_SUBDIR.
2848          */
2849         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2850             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2851                 mdt_set_capainfo(info, 0, &body->fid1,
2852                                  req_capsule_client_get(pill, &RMF_CAPA1));
2853
2854         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2855         if (!IS_ERR(obj)) {
2856                 if ((flags & HABEO_CORPUS) &&
2857                     !mdt_object_exists(obj)) {
2858                         mdt_object_put(env, obj);
2859                         /* for capability renewal, ENOENT will be handled
2860                          * in mdt_renew_capa */
2861                         if (body->valid & OBD_MD_FLOSSCAPA)
2862                                 rc = 0;
2863                         else
2864                                 rc = -ENOENT;
2865                 } else {
2866                         info->mti_object = obj;
2867                         rc = 0;
2868                 }
2869         } else
2870                 rc = PTR_ERR(obj);
2871
2872         RETURN(rc);
2873 }
2874
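/*
 * Unpack the request body (if the format includes one) and, for
 * HABEO_REFERO handlers, set the reply buffer sizes and pack the reply.
 */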
2875 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2876 {
2877         struct req_capsule *pill = info->mti_pill;
2878         int rc;
2879         ENTRY;
2880
2881         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2882                 rc = mdt_body_unpack(info, flags);
2883         else
2884                 rc = 0;
2885
2886         if (rc == 0 && (flags & HABEO_REFERO)) {
2887                 /* Pack reply. */
2888                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2889                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2890                                              info->mti_body->eadatasize);
2891                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2892                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
2893                                              info->mti_mdt->mdt_max_cookiesize);
2894
2895                 rc = req_capsule_server_pack(pill);
2896         }
2897         RETURN(rc);
2898 }
2899
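/* Push the current capability settings down to the underlying MD device. */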
2900 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2901 {
2902         struct md_device *next = m->mdt_child;
2903
2904         return next->md_ops->mdo_init_capa_ctxt(env, next,
2905                                                 m->mdt_opts.mo_mds_capa,
2906                                                 m->mdt_capa_timeout,
2907                                                 m->mdt_capa_alg,
2908                                                 m->mdt_capa_keys);
2909 }
2910
2911 /*
2912  * Invoke handler for this request opc. Also do necessary preprocessing
2913  * (according to handler ->mh_flags), and post-processing (setting of
2914  * ->last_{xid,committed}).
2915  */
2916 static int mdt_req_handle(struct mdt_thread_info *info,
2917                           struct mdt_handler *h, struct ptlrpc_request *req)
2918 {
2919         int   rc, serious = 0;
2920         __u32 flags;
2921
2922         ENTRY;
2923
2924         LASSERT(h->mh_act != NULL);
2925         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2926         LASSERT(current->journal_info == NULL);
2927
2928         /*
2929          * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2930          * to put the same checks into handlers like mdt_close(), mdt_reint(),
2931          * etc., without talking to the mdt authors first. Checking the same
2932          * thing there again is useless, and returning a 0 error without packing
2933          * a reply is buggy! Handlers must either pack a reply or return an error.
2934          *
2935          * We return 0 here and do not send any reply in order to emulate a
2936          * network failure. Do not send any reply if any of the NET-related
2937          * fail_ids has occurred.
2938          */
2939         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2940                 RETURN(0);
2941
2942         rc = 0;
2943         flags = h->mh_flags;
2944         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2945
2946         if (h->mh_fmt != NULL) {
2947                 req_capsule_set(info->mti_pill, h->mh_fmt);
2948                 rc = mdt_unpack_req_pack_rep(info, flags);
2949         }
2950
2951         if (rc == 0 && flags & MUTABOR &&
2952             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2953                 /* should it be rq_status? */
2954                 rc = -EROFS;
2955
2956         if (rc == 0 && flags & HABEO_CLAVIS) {
2957                 struct ldlm_request *dlm_req;
2958
2959                 LASSERT(h->mh_fmt != NULL);
2960
2961                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2962                 if (dlm_req != NULL) {
2963                         if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
2964                                         LDLM_IBITS &&
2965                                      dlm_req->lock_desc.l_policy_data.\
2966                                         l_inodebits.bits == 0)) {
2967                                 /*
2968                                  * A lock without inodebits makes no sense and
2969                                  * will oops later in ldlm. If the client fails
2970                                  * to set such bits, do not trigger an ASSERTION.
2971                                  *
2972                                  * For the liblustre flock case, it may be zero.
2973                                  */
2974                                 rc = -EPROTO;
2975                         } else {
2976                                 if (info->mti_mdt->mdt_opts.mo_compat_resname)
2977                                         rc = mdt_lock_resname_compat(
2978                                                                 info->mti_mdt,
2979                                                                 dlm_req);
2980                                 info->mti_dlm_req = dlm_req;
2981                         }
2982                 } else {
2983                         rc = -EFAULT;
2984                 }
2985         }
2986
2987         /* capability settings changed via /proc; reinitialize the ctxt */
2988         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2989                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2990                 info->mti_mdt->mdt_capa_conf = 0;
2991         }
2992
2993         if (likely(rc == 0)) {
2994                 /*
2995                  * Process the request; there can be two types of rc:
2996                  * 1) errors with msg unpack/pack, or other failures outside the
2997                  * operation itself; these are counted as serious errors;
2998                  * 2) errors during the fs operation, which should be placed in
2999                  * rq_status only.
3000                  */
3001                 rc = h->mh_act(info);
3002                 if (rc == 0 &&
3003                     !req->rq_no_reply && req->rq_reply_state == NULL) {
3004                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
3005                                   "pack reply and returned 0 error\n",
3006                                   h->mh_name);
3007                         LBUG();
3008                 }
3009                 serious = is_serious(rc);
3010                 rc = clear_serious(rc);
3011         } else
3012                 serious = 1;
3013
3014         req->rq_status = rc;
3015
3016         /*
3017          * ELDLM_* codes which are > 0 should be placed in rq_status only, as
3018          * should all non-serious errors.
3019          */
3020         if (rc > 0 || !serious)
3021                 rc = 0;
3022
3023         LASSERT(current->journal_info == NULL);
3024
3025         if (rc == 0 && (flags & HABEO_CLAVIS) &&
3026             info->mti_mdt->mdt_opts.mo_compat_resname) {
3027                 struct ldlm_reply *dlmrep;
3028
3029                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
3030                 if (dlmrep != NULL)
3031                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
3032         }
3033
3034         /* If we're DISCONNECTing, the mdt_export_data is already freed */
3035         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
3036                 target_committed_to_req(req);
3037
3038         if (unlikely(req_is_replay(req) &&
3039                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
3040                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
3041                 LBUG();
3042         }
3043
3044         target_send_reply(req, rc, info->mti_fail_id);
3045         RETURN(0);
3046 }
3047
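/* Reset a lock handle to the "no lock held" state. */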
3048 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
3049 {
3050         lh->mlh_type = MDT_NUL_LOCK;
3051         lh->mlh_reg_lh.cookie = 0ull;
3052         lh->mlh_reg_mode = LCK_MINMODE;
3053         lh->mlh_pdo_lh.cookie = 0ull;
3054         lh->mlh_pdo_mode = LCK_MINMODE;
3055 }
3056
3057 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
3058 {
3059         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
3060         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
3061 }
3062
3063 /*
3064  * Initialize fields of struct mdt_thread_info. Other fields are left in
3065  * an uninitialized state, because it is too expensive to zero out the whole
3066  * mdt_thread_info (> 1K) on each request arrival.
3067  */
3068 static void mdt_thread_info_init(struct ptlrpc_request *req,
3069                                  struct mdt_thread_info *info)
3070 {
3071         int i;
3072         struct md_capainfo *ci;
3073
3074         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
3075         info->mti_pill = &req->rq_pill;
3076
3077         /* lock handle */
3078         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3079                 mdt_lock_handle_init(&info->mti_lh[i]);
3080
3081         /* mdt device: it can be NULL during CONNECT */
3082         if (req->rq_export) {
3083                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
3084                 info->mti_exp = req->rq_export;
3085         } else
3086                 info->mti_mdt = NULL;
3087         info->mti_env = req->rq_svc_thread->t_env;
3088         ci = md_capainfo(info->mti_env);
3089         memset(ci, 0, sizeof *ci);
3090         if (req->rq_export) {
3091                 if (exp_connect_rmtclient(req->rq_export))
3092                         ci->mc_auth = LC_ID_CONVERT;
3093                 else if (req->rq_export->exp_connect_flags &
3094                          OBD_CONNECT_MDS_CAPA)
3095                         ci->mc_auth = LC_ID_PLAIN;
3096                 else
3097                         ci->mc_auth = LC_ID_NONE;
3098         }
3099
3100         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
3101         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
3102         info->mti_mos = NULL;
3103
3104         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
3105         info->mti_body = NULL;
3106         info->mti_object = NULL;
3107         info->mti_dlm_req = NULL;
3108         info->mti_has_trans = 0;
3109         info->mti_no_need_trans = 0;
3110         info->mti_cross_ref = 0;
3111         info->mti_opdata = 0;
3112         info->mti_big_lmm_used = 0;
3113
3114         /* Do not check for split by default. */
3115         info->mti_spec.sp_ck_split = 0;
3116         info->mti_spec.no_create = 0;
3117 }
3118
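/*
 * Release per-request state: detach from the request capsule, drop the
 * object reference and verify that all lock handles have been released.
 */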
3119 static void mdt_thread_info_fini(struct mdt_thread_info *info)
3120 {
3121         int i;
3122
3123         req_capsule_fini(info->mti_pill);
3124         if (info->mti_object != NULL) {
3125                 /*
3126                  * freeing an object may lead to an OSD-level transaction; do not
3127                  * let it mess with the MDT. bz19385.
3128                  */
3129                 info->mti_no_need_trans = 1;
3130                 mdt_object_put(info->mti_env, info->mti_object);
3131                 info->mti_object = NULL;
3132         }
3133         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
3134                 mdt_lock_handle_fini(&info->mti_lh[i]);
3135         info->mti_env = NULL;
3136 }
3137
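/*
 * Decide whether \a req may be processed while \a obd is still in recovery:
 * connect/disconnect requests are always processed, a small set of
 * recovery-safe opcodes is queued via target_queue_recovery_request(),
 * and everything else is rejected with -EAGAIN.
 */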
3138 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
3139                                        struct obd_device *obd, int *process)
3140 {
3141         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3142         case MDS_CONNECT: /* This will never get here, but for completeness. */
3143         case OST_CONNECT: /* This will never get here, but for completeness. */
3144         case MDS_DISCONNECT:
3145         case OST_DISCONNECT:
3146                *process = 1;
3147                RETURN(0);
3148
3149         case MDS_CLOSE:
3150         case MDS_DONE_WRITING:
3151         case MDS_SYNC: /* used in unmounting */
3152         case OBD_PING:
3153         case MDS_REINT:
3154         case SEQ_QUERY:
3155         case FLD_QUERY:
3156         case LDLM_ENQUEUE:
3157                 *process = target_queue_recovery_request(req, obd);
3158                 RETURN(0);
3159
3160         default:
3161                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
3162                 *process = -EAGAIN;
3163                 RETURN(0);
3164         }
3165 }
3166
3167 /*
3168  * Handle recovery. Return:
3169  *        +1: continue request processing;
3170  *       -ve: abort immediately with the given error code;
3171  *         0: send reply with error code in req->rq_status;
3172  */
3173 static int mdt_recovery(struct mdt_thread_info *info)
3174 {
3175         struct ptlrpc_request *req = mdt_info_req(info);
3176         struct obd_device *obd;
3177
3178         ENTRY;
3179
3180         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3181         case MDS_CONNECT:
3182         case SEC_CTX_INIT:
3183         case SEC_CTX_INIT_CONT:
3184         case SEC_CTX_FINI:
3185                 {
3186 #if 0
3187                         int rc;
3188
3189                         rc = mdt_handle_idmap(info);
3190                         if (rc)
3191                                 RETURN(rc);
3192                         else
3193 #endif
3194                                 RETURN(+1);
3195                 }
3196         }
3197
3198         if (unlikely(!class_connected_export(req->rq_export))) {
3199                 CERROR("operation %d on unconnected MDS from %s\n",
3200                        lustre_msg_get_opc(req->rq_reqmsg),
3201                        libcfs_id2str(req->rq_peer));
3202                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
3203                  * mds_A will get -ENOTCONN (especially ping requests),
3204                  * which causes mds_A's deactivate timeout to trigger; then,
3205                  * when mds_A cleans up, the cleanup process will be suspended
3206                  * since the deactivate timeout is not zero.
3207                  */
3208                 req->rq_status = -ENOTCONN;
3209                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
3210                 RETURN(0);
3211         }
3212
3213         /* sanity check: if the xid matches, the request must be marked as
3214          * resent or replayed */
3215         if (req_xid_is_last(req)) {
3216                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
3217                       (MSG_RESENT | MSG_REPLAY))) {
3218                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
3219                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
3220                                   lustre_msg_get_flags(req->rq_reqmsg));
3221                         LBUG();
3222                         req->rq_status = -ENOTCONN;
3223                         RETURN(-ENOTCONN);
3224                 }
3225         }
3226
3227         /* else: note the opposite is not always true; a RESENT req after a
3228          * failover will usually not match the last_xid, since it was likely
3229          * never committed. A REPLAYed request will almost never match the
3230          * last xid; however, it could for a committed, but still retained,
3231          * open. */
3232
3233         obd = req->rq_export->exp_obd;
3234
3235         /* Check for aborted recovery... */
3236         if (unlikely(obd->obd_recovering)) {
3237                 int rc;
3238                 int should_process;
3239                 DEBUG_REQ(D_INFO, req, "Got new replay");
3240                 rc = mdt_filter_recovery_request(req, obd, &should_process);
3241                 if (rc != 0 || !should_process)
3242                         RETURN(rc);
3243                 else if (should_process < 0) {
3244                         req->rq_status = should_process;
3245                         rc = ptlrpc_error(req);
3246                         RETURN(rc);
3247                 }
3248         }
3249         RETURN(+1);
3250 }
3251
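/*
 * Check that the message carries the RPC version expected for its opcode
 * (OBD, MDS, DLM or LOG version); unknown opcodes return -ENOTSUPP.
 */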
3252 static int mdt_msg_check_version(struct lustre_msg *msg)
3253 {
3254         int rc;
3255
3256         switch (lustre_msg_get_opc(msg)) {
3257         case MDS_CONNECT:
3258         case MDS_DISCONNECT:
3259         case OBD_PING:
3260         case SEC_CTX_INIT:
3261         case SEC_CTX_INIT_CONT:
3262         case SEC_CTX_FINI:
3263         case OBD_IDX_READ:
3264                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
3265                 if (rc)
3266                         CERROR("bad opc %u version %08x, expecting %08x\n",
3267                                lustre_msg_get_opc(msg),
3268                                lustre_msg_get_version(msg),
3269                                LUSTRE_OBD_VERSION);
3270                 break;
3271         case MDS_GETSTATUS:
3272         case MDS_GETATTR:
3273         case MDS_GETATTR_NAME:
3274         case MDS_STATFS:
3275         case MDS_READPAGE:
3276         case MDS_WRITEPAGE:
3277         case MDS_IS_SUBDIR:
3278         case MDS_REINT:
3279         case MDS_CLOSE:
3280         case MDS_DONE_WRITING:
3281         case MDS_PIN:
3282         case MDS_SYNC:
3283         case MDS_GETXATTR:
3284         case MDS_SETXATTR:
3285         case MDS_SET_INFO:
3286         case MDS_GET_INFO:
3287         case MDS_QUOTACHECK:
3288         case MDS_QUOTACTL:
3289         case QUOTA_DQACQ:
3290         case QUOTA_DQREL:
3291         case SEQ_QUERY:
3292         case FLD_QUERY:
3293                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
3294                 if (rc)
3295                         CERROR("bad opc %u version %08x, expecting %08x\n",
3296                                lustre_msg_get_opc(msg),
3297                                lustre_msg_get_version(msg),
3298                                LUSTRE_MDS_VERSION);
3299                 break;
3300         case LDLM_ENQUEUE:
3301         case LDLM_CONVERT:
3302         case LDLM_BL_CALLBACK:
3303         case LDLM_CP_CALLBACK:
3304                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
3305                 if (rc)
3306                         CERROR("bad opc %u version %08x, expecting %08x\n",
3307                                lustre_msg_get_opc(msg),
3308                                lustre_msg_get_version(msg),
3309                                LUSTRE_DLM_VERSION);
3310                 break;
3311         case OBD_LOG_CANCEL:
3312         case LLOG_ORIGIN_HANDLE_CREATE:
3313         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
3314         case LLOG_ORIGIN_HANDLE_READ_HEADER:
3315         case LLOG_ORIGIN_HANDLE_CLOSE:
3316         case LLOG_ORIGIN_HANDLE_DESTROY:
3317         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
3318         case LLOG_CATINFO:
3319                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
3320                 if (rc)
3321                         CERROR("bad opc %u version %08x, expecting %08x\n",
3322                                lustre_msg_get_opc(msg),
3323                                lustre_msg_get_version(msg),
3324                                LUSTRE_LOG_VERSION);
3325                 break;
3326         default:
3327                 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
3328                 rc = -ENOTSUPP;
3329         }
3330         return rc;
3331 }
3332
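/*
 * Common request dispatch: check the message version, run recovery
 * filtering, then find and invoke the handler for the opcode.
 */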
3333 static int mdt_handle0(struct ptlrpc_request *req,
3334                        struct mdt_thread_info *info,
3335                        struct mdt_opc_slice *supported)
3336 {
3337         struct mdt_handler *h;
3338         struct lustre_msg  *msg;
3339         int                 rc;
3340
3341         ENTRY;
3342
3343         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
3344                 RETURN(0);
3345
3346         LASSERT(current->journal_info == NULL);
3347
3348         msg = req->rq_reqmsg;
3349         rc = mdt_msg_check_version(msg);
3350         if (likely(rc == 0)) {
3351                 rc = mdt_recovery(info);
3352                 if (likely(rc == +1)) {
3353                         h = mdt_handler_find(lustre_msg_get_opc(msg),
3354                                              supported);
3355                         if (likely(h != NULL)) {
3356                                 rc = mdt_req_handle(info, h, req);
3357                         } else {
3358                                 CERROR("Unsupported opc: 0x%x\n",
3359                                        lustre_msg_get_opc(msg));
3360                                 req->rq_status = -ENOTSUPP;
3361                                 rc = ptlrpc_error(req);
3362                                 RETURN(rc);
3363                         }
3364                 }
3365         } else
3366                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
3367         RETURN(rc);
3368 }
3369
3370 /*
3371  * MDT handler function called by ptlrpc service thread when request comes.
3372  *
3373  * XXX common "target" functionality should be factored into separate module
3374  * shared by mdt, ost and stand-alone services like fld.
3375  */
3376 static int mdt_handle_common(struct ptlrpc_request *req,
3377                              struct mdt_opc_slice *supported)
3378 {
3379         struct lu_env          *env;
3380         struct mdt_thread_info *info;
3381         int                     rc;
3382         ENTRY;
3383
3384         env = req->rq_svc_thread->t_env;
3385         LASSERT(env != NULL);
3386         LASSERT(env->le_ses != NULL);
3387         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
3388         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
3389         LASSERT(info != NULL);
3390
3391         mdt_thread_info_init(req, info);
3392
3393         rc = mdt_handle0(req, info, supported);
3394
3395         mdt_thread_info_fini(info);
3396         RETURN(rc);
3397 }
3398
3399 /*
3400  * This is called from recovery code as handler of _all_ RPC types, FLD and SEQ
3401  * as well.
3402  */
3403 int mdt_recovery_handle(struct ptlrpc_request *req)
3404 {
3405         int rc;
3406         ENTRY;
3407
3408         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
3409         case FLD_QUERY:
3410                 rc = mdt_handle_common(req, mdt_fld_handlers);
3411                 break;
3412         case SEQ_QUERY:
3413                 rc = mdt_handle_common(req, mdt_seq_handlers);
3414                 break;
3415         default:
3416                 rc = mdt_handle_common(req, mdt_regular_handlers);
3417                 break;
3418         }
3419
3420         RETURN(rc);
3421 }
3422
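/*
 * Thin per-service entry points: each ptlrpc service below dispatches
 * through mdt_handle_common() with its own handler slice.
 */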
3423 static int mdt_regular_handle(struct ptlrpc_request *req)
3424 {
3425         return mdt_handle_common(req, mdt_regular_handlers);
3426 }
3427
3428 static int mdt_readpage_handle(struct ptlrpc_request *req)
3429 {
3430         return mdt_handle_common(req, mdt_readpage_handlers);
3431 }
3432
3433 static int mdt_xmds_handle(struct ptlrpc_request *req)
3434 {
3435         return mdt_handle_common(req, mdt_xmds_handlers);
3436 }
3437
3438 static int mdt_mdsc_handle(struct ptlrpc_request *req)
3439 {
3440         return mdt_handle_common(req, mdt_seq_handlers);
3441 }
3442
3443 static int mdt_mdss_handle(struct ptlrpc_request *req)
3444 {
3445         return mdt_handle_common(req, mdt_seq_handlers);
3446 }
3447
3448 static int mdt_dtss_handle(struct ptlrpc_request *req)
3449 {
3450         return mdt_handle_common(req, mdt_seq_handlers);
3451 }
3452
3453 static int mdt_fld_handle(struct ptlrpc_request *req)
3454 {
3455         return mdt_handle_common(req, mdt_fld_handlers);
3456 }
3457
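/* Intent operations recognized by the MDT; each code indexes an entry in
 * mdt_it_flavor[] below. */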
3458 enum mdt_it_code {
3459         MDT_IT_OPEN,
3460         MDT_IT_OCREAT,
3461         MDT_IT_CREATE,
3462         MDT_IT_GETATTR,
3463         MDT_IT_READDIR,
3464         MDT_IT_LOOKUP,
3465         MDT_IT_UNLINK,
3466         MDT_IT_TRUNC,
3467         MDT_IT_GETXATTR,
3468         MDT_IT_LAYOUT,
3469         MDT_IT_NR
3470 };
3471
3472 static int mdt_intent_getattr(enum mdt_it_code opcode,
3473                               struct mdt_thread_info *info,
3474                               struct ldlm_lock **,
3475                               int);
3476 static int mdt_intent_reint(enum mdt_it_code opcode,
3477                             struct mdt_thread_info *info,
3478                             struct ldlm_lock **,
3479                             int);
3480
3481 static struct mdt_it_flavor {
3482         const struct req_format *it_fmt;
3483         __u32                    it_flags;
3484         int                    (*it_act)(enum mdt_it_code ,
3485                                          struct mdt_thread_info *,
3486                                          struct ldlm_lock **,
3487                                          int);
3488         long                     it_reint;
3489 } mdt_it_flavor[] = {
3490         [MDT_IT_OPEN]     = {
3491                 .it_fmt   = &RQF_LDLM_INTENT,
3492                 /*.it_flags = HABEO_REFERO,*/
3493                 .it_flags = 0,
3494                 .it_act   = mdt_intent_reint,
3495                 .it_reint = REINT_OPEN
3496         },
3497         [MDT_IT_OCREAT]   = {
3498                 .it_fmt   = &RQF_LDLM_INTENT,
3499                 .it_flags = MUTABOR,
3500                 .it_act   = mdt_intent_reint,
3501                 .it_reint = REINT_OPEN
3502         },
3503         [MDT_IT_CREATE]   = {
3504                 .it_fmt   = &RQF_LDLM_INTENT,
3505                 .it_flags = MUTABOR,
3506                 .it_act   = mdt_intent_reint,
3507                 .it_reint = REINT_CREATE
3508         },
3509         [MDT_IT_GETATTR]  = {
3510                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3511                 .it_flags = HABEO_REFERO,
3512                 .it_act   = mdt_intent_getattr
3513         },
3514         [MDT_IT_READDIR]  = {
3515                 .it_fmt   = NULL,
3516                 .it_flags = 0,
3517                 .it_act   = NULL
3518         },
3519         [MDT_IT_LOOKUP]   = {
3520                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3521                 .it_flags = HABEO_REFERO,
3522                 .it_act   = mdt_intent_getattr
3523         },
3524         [MDT_IT_UNLINK]   = {
3525                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3526                 .it_flags = MUTABOR,
3527                 .it_act   = NULL,
3528                 .it_reint = REINT_UNLINK
3529         },
3530         [MDT_IT_TRUNC]    = {
3531                 .it_fmt   = NULL,
3532                 .it_flags = MUTABOR,
3533                 .it_act   = NULL
3534         },
3535         [MDT_IT_GETXATTR] = {
3536                 .it_fmt   = NULL,
3537                 .it_flags = 0,
3538                 .it_act   = NULL
3539         },
3540         [MDT_IT_LAYOUT] = {
3541                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3542                 .it_flags = HABEO_REFERO,
3543                 .it_act   = mdt_intent_getattr
3544         }
3545 };
3546
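/*
 * Replace *lockp with the lock referenced by \a lh, the one actually taken
 * while handling the intent.  Fix it up (export, ASTs, remote handle) so it
 * can be returned to the client, or return 0 when there is no lock to
 * return (LDLM_FL_INTENT_ONLY).
 */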
3547 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3548                             struct ldlm_lock **lockp,
3549                             struct ldlm_lock *new_lock,
3550                             struct mdt_lock_handle *lh,
3551                             int flags)
3552 {
3553         struct ptlrpc_request  *req = mdt_info_req(info);
3554         struct ldlm_lock       *lock = *lockp;
3555
3556         /*
3557          * Get a new lock only for the case when a possible resend did not find
3558          * any lock.
3559          */
3560         if (new_lock == NULL)
3561                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3562
3563         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3564                 lh->mlh_reg_lh.cookie = 0;
3565                 RETURN(0);
3566         }
3567
3568         LASSERTF(new_lock != NULL,
3569                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3570
3571         /*
3572          * If we've already given this lock to a client once, then we should
3573          * have no readers or writers.  Otherwise, we should have one reader
3574          * _or_ writer ref (which will be zeroed below) before returning the
3575          * lock to a client.
3576          */
3577         if (new_lock->l_export == req->rq_export) {
3578                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3579         } else {
3580                 LASSERT(new_lock->l_export == NULL);
3581                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3582         }
3583
3584         *lockp = new_lock;
3585
3586         if (new_lock->l_export == req->rq_export) {
3587                 /*
3588                  * Already gave this to the client, which means that we
3589                  * reconstructed a reply.
3590                  */
3591                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3592                         MSG_RESENT);
3593                 lh->mlh_reg_lh.cookie = 0;
3594                 RETURN(ELDLM_LOCK_REPLACED);
3595         }
3596
3597         /*
3598          * Fixup the lock to be given to the client.
3599          */
3600         lock_res_and_lock(new_lock);
3601         /* Zero new_lock->l_readers and new_lock->l_writers without triggering
3602          * possible blocking AST. */
3603         while (new_lock->l_readers > 0) {
3604                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3605                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3606                 new_lock->l_readers--;
3607         }
3608         while (new_lock->l_writers > 0) {
3609                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3610                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3611                 new_lock->l_writers--;
3612         }
3613
3614         new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3615         new_lock->l_blocking_ast = lock->l_blocking_ast;
3616         new_lock->l_completion_ast = lock->l_completion_ast;
3617         new_lock->l_remote_handle = lock->l_remote_handle;
3618         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3619
3620         unlock_res_and_lock(new_lock);
3621
3622         cfs_hash_add(new_lock->l_export->exp_lock_hash,
3623                      &new_lock->l_remote_handle,
3624                      &new_lock->l_exp_hash);
3625
3626         LDLM_LOCK_RELEASE(new_lock);
3627         lh->mlh_reg_lh.cookie = 0;
3628
3629         RETURN(ELDLM_LOCK_REPLACED);
3630 }
3631
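/*
 * For a RESENT request, look up an already-granted lock matching the
 * client's remote handle and restore its cookie into \a lh.  If no such
 * lock exists and the xid is not the last one seen, clear MSG_RESENT so
 * the request is handled as a regular one.
 */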
3632 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3633                                     struct ldlm_lock *new_lock,
3634                                     struct ldlm_lock **old_lock,
3635                                     struct mdt_lock_handle *lh)
3636 {
3637         struct ptlrpc_request  *req = mdt_info_req(info);
3638         struct obd_export      *exp = req->rq_export;
3639         struct lustre_handle    remote_hdl;
3640         struct ldlm_request    *dlmreq;
3641         struct ldlm_lock       *lock;
3642
3643         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3644                 return;
3645
3646         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3647         remote_hdl = dlmreq->lock_handle[0];
3648
3649         /* In the function below, .hs_keycmp resolves to
3650          * ldlm_export_lock_keycmp() */
3651         /* coverity[overrun-buffer-val] */
3652         lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3653         if (lock) {
3654                 if (lock != new_lock) {
3655                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3656                         lh->mlh_reg_mode = lock->l_granted_mode;
3657
3658                         LDLM_DEBUG(lock, "Restoring lock cookie");
3659                         DEBUG_REQ(D_DLMTRACE, req,
3660                                   "restoring lock cookie "LPX64,
3661                                   lh->mlh_reg_lh.cookie);
3662                         if (old_lock)
3663                                 *old_lock = LDLM_LOCK_GET(lock);
3664                         cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3665                         return;
3666                 }
3667
3668                 cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3669         }
3670
3671         /*
3672          * If the xid matches, then we know this is a resent request, and allow
3673          * it. (It's probably an OPEN, for which we don't send a lock.)
3674          */
3675         if (req_xid_is_last(req))
3676                 return;
3677
3678         /*
3679          * This remote handle isn't enqueued, so we never received or processed
3680          * this request.  Clear MSG_RESENT, because it can be handled like any
3681          * normal request now.
3682          */
3683         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3684
3685         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
3686                   remote_hdl.cookie);
3687 }
3688
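/*
 * Intent handler for MDT_IT_LOOKUP, MDT_IT_GETATTR and MDT_IT_LAYOUT
 * (the last is rejected as unsupported in this version).
 */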
3689 static int mdt_intent_getattr(enum mdt_it_code opcode,
3690                               struct mdt_thread_info *info,
3691                               struct ldlm_lock **lockp,
3692                               int flags)
3693 {
3694         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_RMT];
3695         struct ldlm_lock       *new_lock = NULL;
3696         __u64                   child_bits;
3697         struct ldlm_reply      *ldlm_rep;
3698         struct ptlrpc_request  *req;
3699         struct mdt_body        *reqbody;
3700         struct mdt_body        *repbody;
3701         int                     rc, rc2;
3702         ENTRY;
3703
3704         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
3705         LASSERT(reqbody);
3706
3707         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
3708         LASSERT(repbody);
3709
3710         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
3711         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
3712         repbody->eadatasize = 0;
3713         repbody->aclsize = 0;
3714
3715         switch (opcode) {
3716         case MDT_IT_LOOKUP:
3717                 child_bits = MDS_INODELOCK_LOOKUP;
3718                 break;
3719         case MDT_IT_GETATTR:
3720                 child_bits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
3721                 break;
3722         case MDT_IT_LAYOUT: {
3723                 static int printed = 0;
3724
3725                 if (!printed) {
3726                         CERROR("layout lock not supported by this version\n");
3727                         printed = 1;
3728                 }
3729                 GOTO(out_shrink, rc = -EINVAL);
3730                 break;
3731         }
3732         default:
3733                 CERROR("Unsupported intent (%d)\n", opcode);
3734                 GOTO(out_shrink, rc = -EINVAL);
3735         }