1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/mdt/mdt_handler.c
37  *
38  * Lustre Metadata Target (mdt) request handler
39  *
40  * Author: Peter Braam <braam@clusterfs.com>
41  * Author: Andreas Dilger <adilger@clusterfs.com>
42  * Author: Phil Schwan <phil@clusterfs.com>
43  * Author: Mike Shaver <shaver@clusterfs.com>
44  * Author: Nikita Danilov <nikita@clusterfs.com>
45  * Author: Huang Hua <huanghua@clusterfs.com>
46  * Author: Yury Umanets <umka@clusterfs.com>
47  */
48
49 #ifndef EXPORT_SYMTAB
50 # define EXPORT_SYMTAB
51 #endif
52 #define DEBUG_SUBSYSTEM S_MDS
53
54 #include <linux/module.h>
55 /*
56  * struct OBD_{ALLOC,FREE}*()
57  */
58 #include <obd_support.h>
59 /* struct ptlrpc_request */
60 #include <lustre_net.h>
61 /* struct obd_export */
62 #include <lustre_export.h>
63 /* struct obd_device */
64 #include <obd.h>
65 /* lu2dt_dev() */
66 #include <dt_object.h>
67 #include <lustre_mds.h>
68 #include <lustre_mdt.h>
69 #include "mdt_internal.h"
70 #ifdef HAVE_QUOTA_SUPPORT
71 # include <lustre_quota.h>
72 #endif
73 #include <lustre_acl.h>
74 #include <lustre_param.h>
75 #include <lustre_fsfilt.h>
76
77 mdl_mode_t mdt_mdl_lock_modes[] = {
78         [LCK_MINMODE] = MDL_MINMODE,
79         [LCK_EX]      = MDL_EX,
80         [LCK_PW]      = MDL_PW,
81         [LCK_PR]      = MDL_PR,
82         [LCK_CW]      = MDL_CW,
83         [LCK_CR]      = MDL_CR,
84         [LCK_NL]      = MDL_NL,
85         [LCK_GROUP]   = MDL_GROUP
86 };
87
88 ldlm_mode_t mdt_dlm_lock_modes[] = {
89         [MDL_MINMODE] = LCK_MINMODE,
90         [MDL_EX]      = LCK_EX,
91         [MDL_PW]      = LCK_PW,
92         [MDL_PR]      = LCK_PR,
93         [MDL_CW]      = LCK_CW,
94         [MDL_CR]      = LCK_CR,
95         [MDL_NL]      = LCK_NL,
96         [MDL_GROUP]   = LCK_GROUP
97 };
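
/*
 * Illustrative sketch only: the conversion helpers used further down in
 * this file, mdt_dlm_mode2mdl_mode() and mdt_mdl_mode2dlm_mode(), can be
 * thought of as simple lookups into the two tables above, roughly:
 *
 *      static inline mdl_mode_t mdt_dlm_mode2mdl_mode(ldlm_mode_t mode)
 *      {
 *              return mdt_mdl_lock_modes[mode];
 *      }
 *
 *      static inline ldlm_mode_t mdt_mdl_mode2dlm_mode(mdl_mode_t mode)
 *      {
 *              return mdt_dlm_lock_modes[mode];
 *      }
 *
 * The real helpers live elsewhere (presumably mdt_internal.h) and may add
 * sanity checks; the bodies above are an approximation, not the
 * authoritative implementation.
 */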
98
99 /*
100  * Initialized in mdt_mod_init().
101  */
102 unsigned long mdt_num_threads;
103
104 /* ptlrpc request handler for MDT. All handlers are
105  * grouped into several slices - struct mdt_opc_slice,
106  * and stored in an array - mdt_handlers[].
107  */
108 struct mdt_handler {
109         /* The name of this handler. */
110         const char *mh_name;
111         /* Fail id for this handler, checked at the beginning of the handler. */
112         int         mh_fail_id;
113         /* Operation code for this handler */
114         __u32       mh_opc;
115         /* flags are listed in enum mdt_handler_flags below. */
116         __u32       mh_flags;
117         /* The actual handler function to execute. */
118         int (*mh_act)(struct mdt_thread_info *info);
119         /* Request format for this request. */
120         const struct req_format *mh_fmt;
121 };
122
123 enum mdt_handler_flags {
124         /*
125          * struct mdt_body is passed in the incoming message, and object
126          * identified by this fid exists on disk.
127          *
128          * "habeo corpus" == "I have a body"
129          */
130         HABEO_CORPUS = (1 << 0),
131         /*
132          * struct ldlm_request is passed in the incoming message.
133          *
134          * "habeo clavis" == "I have a key"
135          */
136         HABEO_CLAVIS = (1 << 1),
137         /*
138          * this request has fixed reply format, so that reply message can be
139          * packed by generic code.
140          *
141          * "habeo refero" == "I have a reply"
142          */
143         HABEO_REFERO = (1 << 2),
144         /*
145          * this request will modify something, so check whether the filesystem
146          * is readonly or not, then return -EROFS to client asap if necessary.
147          *
148          * "mutabor" == "I shall modify"
149          */
150         MUTABOR      = (1 << 3)
151 };
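
/*
 * Illustrative sketch only (not one of the real handler tables defined
 * later in this file): a getattr-style entry would typically combine
 * HABEO_CORPUS (a body is passed and the object must exist) with
 * HABEO_REFERO (the reply has a fixed format).  The fail id and request
 * format chosen below are examples, not necessarily what mdt uses.
 */
#if 0
static struct mdt_handler mdt_example_handler = {
        .mh_name    = "mdt_getattr",
        .mh_fail_id = OBD_FAIL_MDS_GETATTR_NET,
        .mh_opc     = MDS_GETATTR,
        .mh_flags   = HABEO_CORPUS | HABEO_REFERO,
        .mh_act     = mdt_getattr,
        .mh_fmt     = &RQF_MDS_GETATTR
};
#endif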
152
153 struct mdt_opc_slice {
154         __u32               mos_opc_start;
155         int                 mos_opc_end;
156         struct mdt_handler *mos_hs;
157 };
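
/*
 * Illustrative sketch only: a dispatcher can resolve an opcode to its
 * handler by scanning a slice table.  The function name and the
 * NULL-terminated table convention below are assumptions for the sake of
 * the example, not the code mdt actually uses.
 */
#if 0
static struct mdt_handler *mdt_handler_find_sketch(__u32 opc,
                                                   struct mdt_opc_slice *sl)
{
        for (; sl->mos_hs != NULL; sl++) {
                if (sl->mos_opc_start <= opc && opc < sl->mos_opc_end)
                        return &sl->mos_hs[opc - sl->mos_opc_start];
        }
        return NULL;
}
#endif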
158
159 static struct mdt_opc_slice mdt_regular_handlers[];
160 static struct mdt_opc_slice mdt_readpage_handlers[];
161 static struct mdt_opc_slice mdt_xmds_handlers[];
162 static struct mdt_opc_slice mdt_seq_handlers[];
163 static struct mdt_opc_slice mdt_fld_handlers[];
164
165 static struct mdt_device *mdt_dev(struct lu_device *d);
166 static int mdt_regular_handle(struct ptlrpc_request *req);
167 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags);
168 static int mdt_fid2path(const struct lu_env *env, struct mdt_device *mdt,
169                         struct getinfo_fid2path *fp);
170
171 static const struct lu_object_operations mdt_obj_ops;
172
173 int mdt_get_disposition(struct ldlm_reply *rep, int flag)
174 {
175         if (!rep)
176                 return 0;
177         return (rep->lock_policy_res1 & flag);
178 }
179
180 void mdt_clear_disposition(struct mdt_thread_info *info,
181                            struct ldlm_reply *rep, int flag)
182 {
183         if (info)
184                 info->mti_opdata &= ~flag;
185         if (rep)
186                 rep->lock_policy_res1 &= ~flag;
187 }
188
189 void mdt_set_disposition(struct mdt_thread_info *info,
190                          struct ldlm_reply *rep, int flag)
191 {
192         if (info)
193                 info->mti_opdata |= flag;
194         if (rep)
195                 rep->lock_policy_res1 |= flag;
196 }
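
/*
 * Usage note (illustrative only): intent handlers record which phases of
 * an operation were executed and with what outcome in the ldlm reply,
 * for example during a lookup:
 *
 *      mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
 *      if (rc == -ENOENT)
 *              mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
 *      else if (rc == 0)
 *              mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
 *
 * as done in mdt_getattr_name_lock() below.
 */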
197
198 void mdt_lock_reg_init(struct mdt_lock_handle *lh, ldlm_mode_t lm)
199 {
200         lh->mlh_pdo_hash = 0;
201         lh->mlh_reg_mode = lm;
202         lh->mlh_type = MDT_REG_LOCK;
203 }
204
205 void mdt_lock_pdo_init(struct mdt_lock_handle *lh, ldlm_mode_t lm,
206                        const char *name, int namelen)
207 {
208         lh->mlh_reg_mode = lm;
209         lh->mlh_type = MDT_PDO_LOCK;
210
211         if (name != NULL) {
212                 LASSERT(namelen > 0);
213                 lh->mlh_pdo_hash = full_name_hash(name, namelen);
214         } else {
215                 LASSERT(namelen == 0);
216                 lh->mlh_pdo_hash = 0ull;
217         }
218 }
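
/*
 * Usage note (illustrative only): a parent directory is typically locked
 * with a PDO lock hashed on the name being looked up, so that operations
 * on different names in the same directory can proceed in parallel, e.g.
 *
 *      lhp = &info->mti_lh[MDT_LH_PARENT];
 *      mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
 *      rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
 *                           MDT_LOCAL_LOCK);
 *
 * as done in mdt_getattr_name_lock() below.
 */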
219
220 static void mdt_lock_pdo_mode(struct mdt_thread_info *info, struct mdt_object *o,
221                               struct mdt_lock_handle *lh)
222 {
223         mdl_mode_t mode;
224         ENTRY;
225
226         /*
227          * Any dir access needs a couple of locks:
228          *
229          * 1) on the part of the dir we are going to look up or modify;
230          *
231          * 2) on the whole dir to protect it from concurrent splitting and/or
232          * to flush the client's cache for readdir().
233          *
234          * So, for a given mode and object this routine decides what lock mode
235          * to use for lock #2:
236          *
237          * 1) if the caller is going to look up in the dir, we only need to
238          * protect the dir from being split - LCK_CR;
239          *
240          * 2) if the caller is going to modify the dir, we need to protect it
241          * from being split and to flush the cache - LCK_CW;
242          *
243          * 3) if the caller is going to modify the dir and the dir seems ready
244          * for splitting, we need to protect it from any type of access
245          * (lookup/modify/split) - LCK_EX --bzzz
246          */
247
248         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
249         LASSERT(lh->mlh_pdo_mode == LCK_MINMODE);
250
251         /*
252          * Ask the underlying layer its opinion about the preferable PDO lock
253          * mode, with the access type passed as a regular lock mode:
254          *
255          * - MDL_MINMODE means that the lower layer does not want to specify a
256          * lock mode;
257          *
258          * - MDL_NL means that no PDO lock should be taken. This is used in some
259          * cases, e.g. for non-splittable directories there is no need to use
260          * PDO locks at all.
261          */
262         mode = mdo_lock_mode(info->mti_env, mdt_object_child(o),
263                              mdt_dlm_mode2mdl_mode(lh->mlh_reg_mode));
264
265         if (mode != MDL_MINMODE) {
266                 lh->mlh_pdo_mode = mdt_mdl_mode2dlm_mode(mode);
267         } else {
268                 /*
269                  * Lower layer does not want to specify the locking mode. We do
270                  * it ourselves. No special protection is needed, just flush
271                  * the client's cache on modification and allow concurrent
272                  * modification.
273                  */
274                 switch (lh->mlh_reg_mode) {
275                 case LCK_EX:
276                         lh->mlh_pdo_mode = LCK_EX;
277                         break;
278                 case LCK_PR:
279                         lh->mlh_pdo_mode = LCK_CR;
280                         break;
281                 case LCK_PW:
282                         lh->mlh_pdo_mode = LCK_CW;
283                         break;
284                 default:
285                         CERROR("Not expected lock type (0x%x)\n",
286                                (int)lh->mlh_reg_mode);
287                         LBUG();
288                 }
289         }
290
291         LASSERT(lh->mlh_pdo_mode != LCK_MINMODE);
292         EXIT;
293 }
294
295 static int mdt_getstatus(struct mdt_thread_info *info)
296 {
297         struct mdt_device *mdt  = info->mti_mdt;
298         struct md_device  *next = mdt->mdt_child;
299         struct mdt_body   *repbody;
300         int                rc;
301
302         ENTRY;
303
304         rc = mdt_check_ucred(info);
305         if (rc)
306                 RETURN(err_serious(rc));
307
308         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK))
309                 RETURN(err_serious(-ENOMEM));
310
311         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
312         rc = next->md_ops->mdo_root_get(info->mti_env, next, &repbody->fid1);
313         if (rc != 0)
314                 RETURN(rc);
315
316         repbody->valid |= OBD_MD_FLID;
317
318         if (mdt->mdt_opts.mo_mds_capa &&
319             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
320                 struct mdt_object  *root;
321                 struct lustre_capa *capa;
322
323                 root = mdt_object_find(info->mti_env, mdt, &repbody->fid1);
324                 if (IS_ERR(root))
325                         RETURN(PTR_ERR(root));
326
327                 capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA1);
328                 LASSERT(capa);
329                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
330                 rc = mo_capa_get(info->mti_env, mdt_object_child(root), capa,
331                                  0);
332                 mdt_object_put(info->mti_env, root);
333                 if (rc == 0)
334                         repbody->valid |= OBD_MD_FLMDSCAPA;
335         }
336
337         RETURN(rc);
338 }
339
340 static int mdt_statfs(struct mdt_thread_info *info)
341 {
342         struct md_device      *next  = info->mti_mdt->mdt_child;
343         struct ptlrpc_service *svc;
344         struct obd_statfs     *osfs;
345         int                    rc;
346
347         ENTRY;
348
349         svc = info->mti_pill->rc_req->rq_rqbd->rqbd_service;
350
351         /* This will trigger a watchdog timeout */
352         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
353                          (MDT_SERVICE_WATCHDOG_FACTOR *
354                           at_get(&svc->srv_at_estimate)) + 1);
355
356         rc = mdt_check_ucred(info);
357         if (rc)
358                 RETURN(err_serious(rc));
359
360         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
361                 rc = err_serious(-ENOMEM);
362         } else {
363                 osfs = req_capsule_server_get(info->mti_pill, &RMF_OBD_STATFS);
364                 rc = next->md_ops->mdo_statfs(info->mti_env, next,
365                                               &info->mti_u.ksfs);
366                 statfs_pack(osfs, &info->mti_u.ksfs);
367         }
368         RETURN(rc);
369 }
370
371 /**
372  * Pack SOM attributes into the reply.
373  * Call under a DLM UPDATE lock.
374  */
375 static void mdt_pack_size2body(struct mdt_thread_info *info,
376                                struct mdt_object *mo)
377 {
378         struct mdt_body *b;
379         struct md_attr *ma = &info->mti_attr;
380
381         LASSERT(ma->ma_attr.la_valid & LA_MODE);
382         b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
383
384         /* Check if Size-on-MDS is supported, if this is a regular file,
385          * if SOM is enabled on the object and if the SOM cache exists and is
386          * valid. Otherwise do not pack Size-on-MDS attributes into the reply. */
387         if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
388             !S_ISREG(ma->ma_attr.la_mode) ||
389             !mdt_object_is_som_enabled(mo) ||
390             !(ma->ma_valid & MA_SOM))
391                 return;
392
393         b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
394         b->size = ma->ma_som->msd_size;
395         b->blocks = ma->ma_som->msd_blocks;
396 }
397
398 void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
399                         const struct lu_attr *attr, const struct lu_fid *fid)
400 {
401         /*XXX should pack the reply body according to lu_valid*/
402         b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID   |
403                     OBD_MD_FLGID   | OBD_MD_FLTYPE  |
404                     OBD_MD_FLMODE  | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
405                     OBD_MD_FLATIME | OBD_MD_FLMTIME ;
406
407         if (!S_ISREG(attr->la_mode))
408                 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
409
410         b->atime      = attr->la_atime;
411         b->mtime      = attr->la_mtime;
412         b->ctime      = attr->la_ctime;
413         b->mode       = attr->la_mode;
414         b->size       = attr->la_size;
415         b->blocks     = attr->la_blocks;
416         b->uid        = attr->la_uid;
417         b->gid        = attr->la_gid;
418         b->flags      = attr->la_flags;
419         b->nlink      = attr->la_nlink;
420         b->rdev       = attr->la_rdev;
421
422         if (fid) {
423                 b->fid1 = *fid;
424                 b->valid |= OBD_MD_FLID;
425
426                 /* FIXME: these should be fixed when new igif ready.*/
427                 b->ino  =  fid_oid(fid);       /* 1.6 compatibility */
428                 b->generation = fid_ver(fid);  /* 1.6 compatibility */
429                 b->valid |= OBD_MD_FLGENER;    /* 1.6 compatibility */
430
431                 CDEBUG(D_INODE, DFID": nlink=%d, mode=%o, size="LPU64"\n",
432                                 PFID(fid), b->nlink, b->mode, b->size);
433         }
434
435         if (info)
436                 mdt_body_reverse_idmap(info, b);
437 }
438
439 static inline int mdt_body_has_lov(const struct lu_attr *la,
440                                    const struct mdt_body *body)
441 {
442         return ((S_ISREG(la->la_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
443                 (S_ISDIR(la->la_mode) && (body->valid & OBD_MD_FLDIREA )) );
444 }
445
446 static int mdt_getattr_internal(struct mdt_thread_info *info,
447                                 struct mdt_object *o, int ma_need)
448 {
449         struct md_object        *next = mdt_object_child(o);
450         const struct mdt_body   *reqbody = info->mti_body;
451         struct ptlrpc_request   *req = mdt_info_req(info);
452         struct md_attr          *ma = &info->mti_attr;
453         struct lu_attr          *la = &ma->ma_attr;
454         struct req_capsule      *pill = info->mti_pill;
455         const struct lu_env     *env = info->mti_env;
456         struct mdt_body         *repbody;
457         struct lu_buf           *buffer = &info->mti_buf;
458         int                     rc;
459         ENTRY;
460
461         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK))
462                 RETURN(err_serious(-ENOMEM));
463
464         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
465
466         ma->ma_valid = 0;
467
468         rc = mdt_object_exists(o);
469         if (rc < 0) {
470                 /* This object is located on remote node.*/
471                 repbody->fid1 = *mdt_object_fid(o);
472                 repbody->valid = OBD_MD_FLID | OBD_MD_MDS;
473                 RETURN(0);
474         }
475
476         buffer->lb_buf = req_capsule_server_get(pill, &RMF_MDT_MD);
477         buffer->lb_len = req_capsule_get_size(pill, &RMF_MDT_MD, RCL_SERVER);
478
479         /* If it is a dir object and the client requests the MEA, then fetch the MEA. */
480         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
481             reqbody->valid & OBD_MD_MEA) {
482                 /* Assumption: MDT_MD size is enough for lmv size. */
483                 ma->ma_lmv = buffer->lb_buf;
484                 ma->ma_lmv_size = buffer->lb_len;
485                 ma->ma_need = MA_LMV | MA_INODE;
486         } else {
487                 ma->ma_lmm = buffer->lb_buf;
488                 ma->ma_lmm_size = buffer->lb_len;
489                 ma->ma_need = MA_LOV | MA_INODE;
490         }
491
492         if (S_ISDIR(lu_object_attr(&next->mo_lu)) &&
493             reqbody->valid & OBD_MD_FLDIREA  &&
494             lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR) {
495                 /* get default stripe info for this dir. */
496                 ma->ma_need |= MA_LOV_DEF;
497         }
498         ma->ma_need |= ma_need;
499         if (ma->ma_need & MA_SOM)
500                 ma->ma_som = &info->mti_u.som.data;
501
502         rc = mo_attr_get(env, next, ma);
503         if (unlikely(rc)) {
504                 CERROR("getattr error for "DFID": %d\n",
505                         PFID(mdt_object_fid(o)), rc);
506                 RETURN(rc);
507         }
508
509         if (likely(ma->ma_valid & MA_INODE))
510                 mdt_pack_attr2body(info, repbody, la, mdt_object_fid(o));
511         else
512                 RETURN(-EFAULT);
513
514         if (mdt_body_has_lov(la, reqbody)) {
515                 if (ma->ma_valid & MA_LOV) {
516                         LASSERT(ma->ma_lmm_size);
517                         mdt_dump_lmm(D_INFO, ma->ma_lmm);
518                         repbody->eadatasize = ma->ma_lmm_size;
519                         if (S_ISDIR(la->la_mode))
520                                 repbody->valid |= OBD_MD_FLDIREA;
521                         else
522                                 repbody->valid |= OBD_MD_FLEASIZE;
523                 }
524                 if (ma->ma_valid & MA_LMV) {
525                         LASSERT(S_ISDIR(la->la_mode));
526                         repbody->eadatasize = ma->ma_lmv_size;
527                         repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
528                 }
529                 if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
530                         repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
531                 }
532         } else if (S_ISLNK(la->la_mode) &&
533                    reqbody->valid & OBD_MD_LINKNAME) {
534                 buffer->lb_buf = ma->ma_lmm;
535                 buffer->lb_len = reqbody->eadatasize;
536                 rc = mo_readlink(env, next, buffer);
537                 if (unlikely(rc <= 0)) {
538                         CERROR("readlink failed: %d\n", rc);
539                         rc = -EFAULT;
540                 } else {
541                         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
542                                  rc -= 2;
543                         repbody->valid |= OBD_MD_LINKNAME;
544                         repbody->eadatasize = rc;
545                         /* NULL terminate */
546                         ((char*)ma->ma_lmm)[rc - 1] = 0;
547                         CDEBUG(D_INODE, "symlink dest %s, len = %d\n",
548                                (char*)ma->ma_lmm, rc);
549                         rc = 0;
550                 }
551         }
552
553         if (reqbody->valid & OBD_MD_FLMODEASIZE) {
554                 repbody->max_cookiesize = info->mti_mdt->mdt_max_cookiesize;
555                 repbody->max_mdsize = info->mti_mdt->mdt_max_mdsize;
556                 repbody->valid |= OBD_MD_FLMODEASIZE;
557                 CDEBUG(D_INODE, "I am going to change the MAX_MD_SIZE & "
558                        "MAX_COOKIE to : %d:%d\n", repbody->max_mdsize,
559                        repbody->max_cookiesize);
560         }
561
562         if (exp_connect_rmtclient(info->mti_exp) &&
563             reqbody->valid & OBD_MD_FLRMTPERM) {
564                 void *buf = req_capsule_server_get(pill, &RMF_ACL);
565
566                 /* mdt_getattr_lock only */
567                 rc = mdt_pack_remote_perm(info, o, buf);
568                 if (rc) {
569                         repbody->valid &= ~OBD_MD_FLRMTPERM;
570                         repbody->aclsize = 0;
571                         RETURN(rc);
572                 } else {
573                         repbody->valid |= OBD_MD_FLRMTPERM;
574                         repbody->aclsize = sizeof(struct mdt_remote_perm);
575                 }
576         }
577 #ifdef CONFIG_FS_POSIX_ACL
578         else if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
579                  (reqbody->valid & OBD_MD_FLACL)) {
580                 buffer->lb_buf = req_capsule_server_get(pill, &RMF_ACL);
581                 buffer->lb_len = req_capsule_get_size(pill,
582                                                       &RMF_ACL, RCL_SERVER);
583                 if (buffer->lb_len > 0) {
584                         rc = mo_xattr_get(env, next, buffer,
585                                           XATTR_NAME_ACL_ACCESS);
586                         if (rc < 0) {
587                                 if (rc == -ENODATA) {
588                                         repbody->aclsize = 0;
589                                         repbody->valid |= OBD_MD_FLACL;
590                                         rc = 0;
591                                 } else if (rc == -EOPNOTSUPP) {
592                                         rc = 0;
593                                 } else {
594                                         CERROR("got acl size: %d\n", rc);
595                                 }
596                         } else {
597                                 repbody->aclsize = rc;
598                                 repbody->valid |= OBD_MD_FLACL;
599                                 rc = 0;
600                         }
601                 }
602         }
603 #endif
604
605         if (reqbody->valid & OBD_MD_FLMDSCAPA &&
606             info->mti_mdt->mdt_opts.mo_mds_capa &&
607             info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA) {
608                 struct lustre_capa *capa;
609
610                 capa = req_capsule_server_get(pill, &RMF_CAPA1);
611                 LASSERT(capa);
612                 capa->lc_opc = CAPA_OPC_MDS_DEFAULT;
613                 rc = mo_capa_get(env, next, capa, 0);
614                 if (rc)
615                         RETURN(rc);
616                 repbody->valid |= OBD_MD_FLMDSCAPA;
617         }
618         RETURN(rc);
619 }
620
621 static int mdt_renew_capa(struct mdt_thread_info *info)
622 {
623         struct mdt_object  *obj = info->mti_object;
624         struct mdt_body    *body;
625         struct lustre_capa *capa, *c;
626         int rc;
627         ENTRY;
628
629         /* If the object doesn't exist, or the server has disabled capability
630          * support, return directly; the client will find the OBD_MD_FLOSSCAPA
631          * flag not set in body->valid.
632          */
633         if (!obj || !info->mti_mdt->mdt_opts.mo_oss_capa ||
634             !(info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA))
635                 RETURN(0);
636
637         body = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
638         LASSERT(body != NULL);
639
640         c = req_capsule_client_get(info->mti_pill, &RMF_CAPA1);
641         LASSERT(c);
642
643         capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
644         LASSERT(capa);
645
646         *capa = *c;
647         rc = mo_capa_get(info->mti_env, mdt_object_child(obj), capa, 1);
648         if (rc == 0)
649                 body->valid |= OBD_MD_FLOSSCAPA;
650         RETURN(rc);
651 }
652
653 static int mdt_getattr(struct mdt_thread_info *info)
654 {
655         struct mdt_object       *obj = info->mti_object;
656         struct req_capsule      *pill = info->mti_pill;
657         struct mdt_body         *reqbody;
658         struct mdt_body         *repbody;
659         mode_t                   mode;
660         int                      md_size;
661         int rc;
662         ENTRY;
663
664         reqbody = req_capsule_client_get(pill, &RMF_MDT_BODY);
665         LASSERT(reqbody);
666
667         if (reqbody->valid & OBD_MD_FLOSSCAPA) {
668                 rc = req_capsule_server_pack(pill);
669                 if (unlikely(rc))
670                         RETURN(err_serious(rc));
671                 rc = mdt_renew_capa(info);
672                 GOTO(out_shrink, rc);
673         }
674
675         LASSERT(obj != NULL);
676         LASSERT(lu_object_assert_exists(&obj->mot_obj.mo_lu));
677
678         mode = lu_object_attr(&obj->mot_obj.mo_lu);
679         if (S_ISLNK(mode) && (reqbody->valid & OBD_MD_LINKNAME) &&
680             (reqbody->eadatasize > info->mti_mdt->mdt_max_mdsize))
681                 md_size = reqbody->eadatasize;
682         else
683                 md_size = info->mti_mdt->mdt_max_mdsize;
684
685         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, md_size);
686
687         rc = req_capsule_server_pack(pill);
688         if (unlikely(rc != 0))
689                 RETURN(err_serious(rc));
690
691         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
692         LASSERT(repbody != NULL);
693         repbody->eadatasize = 0;
694         repbody->aclsize = 0;
695
696         if (reqbody->valid & OBD_MD_FLRMTPERM)
697                 rc = mdt_init_ucred(info, reqbody);
698         else
699                 rc = mdt_check_ucred(info);
700         if (unlikely(rc))
701                 GOTO(out_shrink, rc);
702
703         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
704         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
705
706         /*
707          * Don't check capability at all, because rename might getattr for
708          * remote obj, and at that time no capability is available.
709          */
710         mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
711         rc = mdt_getattr_internal(info, obj, 0);
712         if (reqbody->valid & OBD_MD_FLRMTPERM)
713                 mdt_exit_ucred(info);
714         EXIT;
715 out_shrink:
716         mdt_shrink_reply(info);
717         return rc;
718 }
719
720 static int mdt_is_subdir(struct mdt_thread_info *info)
721 {
722         struct mdt_object     *o = info->mti_object;
723         struct req_capsule    *pill = info->mti_pill;
724         const struct mdt_body *body = info->mti_body;
725         struct mdt_body       *repbody;
726         int                    rc;
727         ENTRY;
728
729         LASSERT(o != NULL);
730
731         repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
732
733         /*
734          * We save last checked parent fid to @repbody->fid1 for remote
735          * directory case.
736          */
737         LASSERT(fid_is_sane(&body->fid2));
738         LASSERT(mdt_object_exists(o) > 0);
739         rc = mdo_is_subdir(info->mti_env, mdt_object_child(o),
740                            &body->fid2, &repbody->fid1);
741         if (rc == 0 || rc == -EREMOTE)
742                 repbody->valid |= OBD_MD_FLID;
743
744         RETURN(rc);
745 }
746
747 static int mdt_raw_lookup(struct mdt_thread_info *info,
748                           struct mdt_object *parent,
749                           const struct lu_name *lname,
750                           struct ldlm_reply *ldlm_rep)
751 {
752         struct md_object *next = mdt_object_child(info->mti_object);
753         const struct mdt_body *reqbody = info->mti_body;
754         struct lu_fid *child_fid = &info->mti_tmp_fid1;
755         struct mdt_body *repbody;
756         int rc;
757         ENTRY;
758
759         if (reqbody->valid != OBD_MD_FLID)
760                 RETURN(0);
761
762         LASSERT(!info->mti_cross_ref);
763
764         /* Only got the fid of this obj by name */
765         rc = mdo_lookup(info->mti_env, next, lname, child_fid,
766                         &info->mti_spec);
767 #if 0
768         /* XXX is raw_lookup possible as intent operation? */
769         if (rc != 0) {
770                 if (rc == -ENOENT)
771                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
772                 RETURN(rc);
773         } else
774                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
775
776         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
777 #endif
778         if (rc == 0) {
779                 repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
780                 repbody->fid1 = *child_fid;
781                 repbody->valid = OBD_MD_FLID;
782         }
783         RETURN(1);
784 }
785
786 /*
787  * The UPDATE lock should be taken against the parent and released before
788  * exit; the child_bits lock is taken against the child and returned back:
789  *            (1) a normal request should release the child lock;
790  *            (2) an intent request will grant the lock to the client.
791  */
792 static int mdt_getattr_name_lock(struct mdt_thread_info *info,
793                                  struct mdt_lock_handle *lhc,
794                                  __u64 child_bits,
795                                  struct ldlm_reply *ldlm_rep)
796 {
797         struct ptlrpc_request  *req       = mdt_info_req(info);
798         struct mdt_body        *reqbody   = NULL;
799         struct mdt_object      *parent    = info->mti_object;
800         struct mdt_object      *child;
801         struct md_object       *next      = mdt_object_child(parent);
802         struct lu_fid          *child_fid = &info->mti_tmp_fid1;
803         struct lu_name         *lname     = NULL;
804         const char             *name      = NULL;
805         int                     namelen   = 0;
806         struct mdt_lock_handle *lhp;
807         struct ldlm_lock       *lock;
808         struct ldlm_res_id     *res_id;
809         int                     is_resent;
810         int                     ma_need = 0;
811         int                     rc;
812
813         ENTRY;
814
815         is_resent = lustre_handle_is_used(&lhc->mlh_reg_lh);
816         LASSERT(ergo(is_resent,
817                      lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT));
818
819         LASSERT(parent != NULL);
820         name = req_capsule_client_get(info->mti_pill, &RMF_NAME);
821         if (name == NULL)
822                 RETURN(err_serious(-EFAULT));
823
824         namelen = req_capsule_get_size(info->mti_pill, &RMF_NAME,
825                                        RCL_CLIENT) - 1;
826         if (!info->mti_cross_ref) {
827                 /*
828                  * XXX: Check for "namelen == 0" is for getattr by fid
829                  * (OBD_CONNECT_ATTRFID), otherwise do not allow empty name,
830                  * that is the name must contain at least one character and
831                  * the terminating '\0'
832                  */
833                 if (namelen == 0) {
834                         reqbody = req_capsule_client_get(info->mti_pill,
835                                                          &RMF_MDT_BODY);
836                         LASSERT(fid_is_sane(&reqbody->fid2));
837                         name = NULL;
838
839                         CDEBUG(D_INODE, "getattr with lock for "DFID"/"DFID", "
840                                "ldlm_rep = %p\n",
841                                PFID(mdt_object_fid(parent)), PFID(&reqbody->fid2),
842                                ldlm_rep);
843                 } else {
844                         lname = mdt_name(info->mti_env, (char *)name, namelen);
845                         CDEBUG(D_INODE, "getattr with lock for "DFID"/%s, "
846                                "ldlm_rep = %p\n", PFID(mdt_object_fid(parent)),
847                                name, ldlm_rep);
848                 }
849         }
850         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_EXECD);
851
852         rc = mdt_object_exists(parent);
853         if (unlikely(rc == 0)) {
854                 LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
855                                 &parent->mot_obj.mo_lu,
856                                 "Parent doesn't exist!\n");
857                 RETURN(-ESTALE);
858         } else if (!info->mti_cross_ref) {
859                 LASSERTF(rc > 0, "Parent "DFID" is on remote server\n",
860                          PFID(mdt_object_fid(parent)));
861         }
862         if (lname) {
863                 rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
864                 if (rc != 0) {
865                         if (rc > 0)
866                                 rc = 0;
867                         RETURN(rc);
868                 }
869         }
870
871         if (info->mti_cross_ref) {
872                 /* Only getattr on the child. Parent is on another node. */
873                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
874                 child = parent;
875                 CDEBUG(D_INODE, "partial getattr_name child_fid = "DFID", "
876                        "ldlm_rep=%p\n", PFID(mdt_object_fid(child)), ldlm_rep);
877
878                 if (is_resent) {
879                         /* Do not take lock for resent case. */
880                         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
881                         LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
882                                  lhc->mlh_reg_lh.cookie);
883                         LASSERT(fid_res_name_eq(mdt_object_fid(child),
884                                                 &lock->l_resource->lr_name));
885                         LDLM_LOCK_PUT(lock);
886                         rc = 0;
887                 } else {
888                         mdt_lock_handle_init(lhc);
889                         mdt_lock_reg_init(lhc, LCK_PR);
890
891                         /*
892                          * Object's name is on another MDS, no lookup lock is
893                          * needed here but update is.
894                          */
895                         child_bits &= ~MDS_INODELOCK_LOOKUP;
896                         child_bits |= MDS_INODELOCK_UPDATE;
897
898                         rc = mdt_object_lock(info, child, lhc, child_bits,
899                                              MDT_LOCAL_LOCK);
900                 }
901                 if (rc == 0) {
902                         /* Finally, we can get attr for child. */
903                         mdt_set_capainfo(info, 0, mdt_object_fid(child),
904                                          BYPASS_CAPA);
905                         rc = mdt_getattr_internal(info, child, 0);
906                         if (unlikely(rc != 0))
907                                 mdt_object_unlock(info, child, lhc, 1);
908                 }
909                 RETURN(rc);
910         }
911
912         /* step 1: lock parent */
913         lhp = &info->mti_lh[MDT_LH_PARENT];
914         mdt_lock_pdo_init(lhp, LCK_PR, name, namelen);
915         rc = mdt_object_lock(info, parent, lhp, MDS_INODELOCK_UPDATE,
916                              MDT_LOCAL_LOCK);
917
918         if (unlikely(rc != 0))
919                 RETURN(rc);
920
921         if (lname) {
922                 /* step 2: lookup child's fid by name */
923                 rc = mdo_lookup(info->mti_env, next, lname, child_fid,
924                                 &info->mti_spec);
925
926                 if (rc != 0) {
927                         if (rc == -ENOENT)
928                                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
929                         GOTO(out_parent, rc);
930                 } else
931                         mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
932         } else {
933                 *child_fid = reqbody->fid2;
934                 mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
935         }
936
937         /*
938          * step 3: find the child object by fid & lock it,
939          *         regardless of whether it is local or remote.
940          */
941         child = mdt_object_find(info->mti_env, info->mti_mdt, child_fid);
942
943         if (unlikely(IS_ERR(child)))
944                 GOTO(out_parent, rc = PTR_ERR(child));
945         if (is_resent) {
946                 /* Do not take lock for resent case. */
947                 lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
948                 LASSERTF(lock != NULL, "Invalid lock handle "LPX64"\n",
949                          lhc->mlh_reg_lh.cookie);
950
951                 res_id = &lock->l_resource->lr_name;
952                 if (!fid_res_name_eq(mdt_object_fid(child),
953                                     &lock->l_resource->lr_name)) {
954                          LASSERTF(fid_res_name_eq(mdt_object_fid(parent),
955                                                  &lock->l_resource->lr_name),
956                                  "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
957                                  (unsigned long)res_id->name[0],
958                                  (unsigned long)res_id->name[1],
959                                  (unsigned long)res_id->name[2],
960                                  PFID(mdt_object_fid(parent)));
961                           CWARN("Although resent, still did not get the child "
962                                 "lock, parent: "DFID" child: "DFID"\n",
963                                 PFID(mdt_object_fid(parent)),
964                                 PFID(mdt_object_fid(child)));
965                           lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
966                           LDLM_LOCK_PUT(lock);
967                           GOTO(relock, 0);
968                 }
969                 LDLM_LOCK_PUT(lock);
970                 rc = 0;
971         } else {
972                 struct md_attr *ma;
973 relock:
974                 ma = &info->mti_attr;
975
976                 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
977                 mdt_lock_handle_init(lhc);
978                 mdt_lock_reg_init(lhc, LCK_PR);
979
980                 if (mdt_object_exists(child) == 0) {
981                         LU_OBJECT_DEBUG(D_WARNING, info->mti_env,
982                                         &child->mot_obj.mo_lu,
983                                         "Object doesn't exist!\n");
984                         GOTO(out_child, rc = -ESTALE);
985                 }
986
987                 ma->ma_valid = 0;
988                 ma->ma_need = MA_INODE;
989                 rc = mo_attr_get(info->mti_env, next, ma);
990                 if (unlikely(rc != 0))
991                         GOTO(out_child, rc);
992
993                 /* If the file has not been changed for some time, we return
994                  * not only a LOOKUP lock, but also an UPDATE lock, and this
995                  * might save us an RPC on a later STAT. For directories, it
996                  * also lets negative dentries start working for this dir. */
997                 if (ma->ma_valid & MA_INODE &&
998                     ma->ma_attr.la_valid & LA_CTIME &&
999                     info->mti_mdt->mdt_namespace->ns_ctime_age_limit +
1000                     ma->ma_attr.la_ctime < cfs_time_current_sec())
1001                         child_bits |= MDS_INODELOCK_UPDATE;
1002
1003                 rc = mdt_object_lock(info, child, lhc, child_bits,
1004                                      MDT_CROSS_LOCK);
1005
1006                 if (unlikely(rc != 0))
1007                         GOTO(out_child, rc);
1008         }
1009
1010         lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
1011         /* Get MA_SOM attributes if update lock is given. */
1012         if (lock &&
1013             lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
1014             S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
1015                 ma_need = MA_SOM;
1016
1017         /* finally, we can get attr for child. */
1018         mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
1019         rc = mdt_getattr_internal(info, child, ma_need);
1020         if (unlikely(rc != 0)) {
1021                 mdt_object_unlock(info, child, lhc, 1);
1022         } else if (lock) {
1023                 /* Debugging code. */
1024                 res_id = &lock->l_resource->lr_name;
1025                 LDLM_DEBUG(lock, "Returning lock to client");
1026                 LASSERTF(fid_res_name_eq(mdt_object_fid(child),
1027                                          &lock->l_resource->lr_name),
1028                          "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
1029                          (unsigned long)res_id->name[0],
1030                          (unsigned long)res_id->name[1],
1031                          (unsigned long)res_id->name[2],
1032                          PFID(mdt_object_fid(child)));
1033                 mdt_pack_size2body(info, child);
1034         }
1035         if (lock)
1036                 LDLM_LOCK_PUT(lock);
1037
1038         EXIT;
1039 out_child:
1040         mdt_object_put(info->mti_env, child);
1041 out_parent:
1042         mdt_object_unlock(info, parent, lhp, 1);
1043         return rc;
1044 }
1045
1046 /* normal handler: should release the child lock */
1047 static int mdt_getattr_name(struct mdt_thread_info *info)
1048 {
1049         struct mdt_lock_handle *lhc = &info->mti_lh[MDT_LH_CHILD];
1050         struct mdt_body        *reqbody;
1051         struct mdt_body        *repbody;
1052         int rc;
1053         ENTRY;
1054
1055         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1056         LASSERT(reqbody != NULL);
1057         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1058         LASSERT(repbody != NULL);
1059
1060         info->mti_spec.sp_ck_split = !!(reqbody->valid & OBD_MD_FLCKSPLIT);
1061         info->mti_cross_ref = !!(reqbody->valid & OBD_MD_FLCROSSREF);
1062         repbody->eadatasize = 0;
1063         repbody->aclsize = 0;
1064
1065         rc = mdt_init_ucred(info, reqbody);
1066         if (unlikely(rc))
1067                 GOTO(out_shrink, rc);
1068
1069         rc = mdt_getattr_name_lock(info, lhc, MDS_INODELOCK_UPDATE, NULL);
1070         if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
1071                 ldlm_lock_decref(&lhc->mlh_reg_lh, lhc->mlh_reg_mode);
1072                 lhc->mlh_reg_lh.cookie = 0;
1073         }
1074         mdt_exit_ucred(info);
1075         EXIT;
1076 out_shrink:
1077         mdt_shrink_reply(info);
1078         return rc;
1079 }
1080
1081 static const struct lu_device_operations mdt_lu_ops;
1082
1083 static int lu_device_is_mdt(struct lu_device *d)
1084 {
1085         return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &mdt_lu_ops);
1086 }
1087
1088 static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
1089                          void *karg, void *uarg);
1090
1091 static int mdt_set_info(struct mdt_thread_info *info)
1092 {
1093         struct ptlrpc_request *req = mdt_info_req(info);
1094         char *key;
1095         void *val;
1096         int keylen, vallen, rc = 0;
1097         ENTRY;
1098
1099         rc = req_capsule_server_pack(info->mti_pill);
1100         if (rc)
1101                 RETURN(rc);
1102
1103         key = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_KEY);
1104         if (key == NULL) {
1105                 DEBUG_REQ(D_HA, req, "no set_info key");
1106                 RETURN(-EFAULT);
1107         }
1108
1109         keylen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_KEY,
1110                                       RCL_CLIENT);
1111
1112         val = req_capsule_client_get(info->mti_pill, &RMF_SETINFO_VAL);
1113         if (val == NULL) {
1114                 DEBUG_REQ(D_HA, req, "no set_info val");
1115                 RETURN(-EFAULT);
1116         }
1117
1118         vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
1119                                       RCL_CLIENT);
1120
1121         /* Swab any part of val you need to here */
1122         if (KEY_IS(KEY_READ_ONLY)) {
1123                 req->rq_status = 0;
1124                 lustre_msg_set_status(req->rq_repmsg, 0);
1125
1126                 cfs_spin_lock(&req->rq_export->exp_lock);
1127                 if (*(__u32 *)val)
1128                         req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
1129                 else
1130                         req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
1131                 cfs_spin_unlock(&req->rq_export->exp_lock);
1132
1133         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
1134                 struct changelog_setinfo *cs =
1135                         (struct changelog_setinfo *)val;
1136                 if (vallen != sizeof(*cs)) {
1137                         CERROR("Bad changelog_clear setinfo size %d\n", vallen);
1138                         RETURN(-EINVAL);
1139                 }
1140                 if (ptlrpc_req_need_swab(req)) {
1141                         __swab64s(&cs->cs_recno);
1142                         __swab32s(&cs->cs_id);
1143                 }
1144
1145                 rc = mdt_iocontrol(OBD_IOC_CHANGELOG_CLEAR, info->mti_exp,
1146                                    vallen, val, NULL);
1147                 lustre_msg_set_status(req->rq_repmsg, rc);
1148
1149         } else {
1150                 RETURN(-EINVAL);
1151         }
1152         RETURN(0);
1153 }
1154
1155 static int mdt_connect(struct mdt_thread_info *info)
1156 {
1157         int rc;
1158         struct ptlrpc_request *req;
1159
1160         req = mdt_info_req(info);
1161         rc = target_handle_connect(req);
1162         if (rc == 0) {
1163                 LASSERT(req->rq_export != NULL);
1164                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
1165                 rc = mdt_init_sec_level(info);
1166                 if (rc == 0)
1167                         rc = mdt_init_idmap(info);
1168                 if (rc != 0)
1169                         obd_disconnect(class_export_get(req->rq_export));
1170         } else {
1171                 rc = err_serious(rc);
1172         }
1173         return rc;
1174 }
1175
1176 static int mdt_disconnect(struct mdt_thread_info *info)
1177 {
1178         int rc;
1179         ENTRY;
1180
1181         rc = target_handle_disconnect(mdt_info_req(info));
1182         if (rc)
1183                 rc = err_serious(rc);
1184         RETURN(rc);
1185 }
1186
1187 static int mdt_sendpage(struct mdt_thread_info *info,
1188                         struct lu_rdpg *rdpg)
1189 {
1190         struct ptlrpc_request   *req = mdt_info_req(info);
1191         struct obd_export       *exp = req->rq_export;
1192         struct ptlrpc_bulk_desc *desc;
1193         struct l_wait_info      *lwi = &info->mti_u.rdpg.mti_wait_info;
1194         int                      tmpcount;
1195         int                      tmpsize;
1196         int                      timeout;
1197         int                      i;
1198         int                      rc;
1199         ENTRY;
1200
1201         desc = ptlrpc_prep_bulk_exp(req, rdpg->rp_npages, BULK_PUT_SOURCE,
1202                                     MDS_BULK_PORTAL);
1203         if (desc == NULL)
1204                 RETURN(-ENOMEM);
1205
1206         for (i = 0, tmpcount = rdpg->rp_count;
1207                 i < rdpg->rp_npages; i++, tmpcount -= tmpsize) {
1208                 tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
1209                 ptlrpc_prep_bulk_page(desc, rdpg->rp_pages[i], 0, tmpsize);
1210         }
1211
1212         LASSERT(desc->bd_nob == rdpg->rp_count);
1213         rc = sptlrpc_svc_wrap_bulk(req, desc);
1214         if (rc)
1215                 GOTO(free_desc, rc);
1216
1217         rc = ptlrpc_start_bulk_transfer(desc);
1218         if (rc)
1219                 GOTO(free_desc, rc);
1220
1221         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1222                 GOTO(abort_bulk, rc = 0);
1223
1224         timeout = (int) req->rq_deadline - cfs_time_current_sec();
1225         if (timeout < 0)
1226                 CERROR("Req deadline already passed %lu (now: %lu)\n",
1227                        req->rq_deadline, cfs_time_current_sec());
1228         *lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(max(timeout, 1)),
1229                                     cfs_time_seconds(1), NULL, NULL);
1230         rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc) ||
1231                           exp->exp_failed || exp->exp_abort_active_req, lwi);
1232         LASSERT (rc == 0 || rc == -ETIMEDOUT);
1233
1234         if (rc == 0) {
1235                 if (desc->bd_success &&
1236                     desc->bd_nob_transferred == rdpg->rp_count)
1237                         GOTO(free_desc, rc);
1238
1239                 rc = -ETIMEDOUT;
1240                 if (exp->exp_abort_active_req || exp->exp_failed)
1241                         GOTO(abort_bulk, rc);
1242         }
1243
1244         DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
1245                   (rc == -ETIMEDOUT) ? "timeout" : "network error",
1246                   desc->bd_nob_transferred, rdpg->rp_count,
1247                   exp->exp_client_uuid.uuid,
1248                   exp->exp_connection->c_remote_uuid.uuid);
1249
1250         class_fail_export(exp);
1251
1252         EXIT;
1253 abort_bulk:
1254         ptlrpc_abort_bulk(desc);
1255 free_desc:
1256         ptlrpc_free_bulk(desc);
1257         return rc;
1258 }
1259
1260 #ifdef HAVE_SPLIT_SUPPORT
1261 /*
1262  * Retrieve dir entries from the page and insert them into the slave object.
1263  * Strictly this belongs in the osd layer, but since it will not be in the
1264  * final product, just do it here and do not define more moo APIs for it.
1265  */
1266 static int mdt_write_dir_page(struct mdt_thread_info *info, struct page *page,
1267                               int size)
1268 {
1269         struct mdt_object *object = info->mti_object;
1270         struct lu_fid *lf = &info->mti_tmp_fid2;
1271         struct md_attr *ma = &info->mti_attr;
1272         struct lu_dirpage *dp;
1273         struct lu_dirent *ent;
1274         int rc = 0, offset = 0;
1275         ENTRY;
1276
1277         /* Make sure we have at least one entry. */
1278         if (size == 0)
1279                 RETURN(-EINVAL);
1280
1281         /*
1282          * Disable trans for this name insert, since it will include many trans
1283          * for this.
1284          */
1285         info->mti_no_need_trans = 1;
1286         /*
1287          * When writing a dir page, there is no need to update the parent's
1288          * ctime, and no permission check is done for name_insert.
1289          */
1290         ma->ma_attr.la_ctime = 0;
1291         ma->ma_attr.la_valid = LA_MODE;
1292         ma->ma_valid = MA_INODE;
1293
1294         cfs_kmap(page);
1295         dp = page_address(page);
1296         offset = (int)((__u32)lu_dirent_start(dp) - (__u32)dp);
1297
1298         for (ent = lu_dirent_start(dp); ent != NULL;
1299              ent = lu_dirent_next(ent)) {
1300                 struct lu_name *lname;
1301                 char *name;
1302
1303                 if (le16_to_cpu(ent->lde_namelen) == 0)
1304                         continue;
1305
1306                 fid_le_to_cpu(lf, &ent->lde_fid);
1307                 if (le64_to_cpu(ent->lde_hash) & MAX_HASH_HIGHEST_BIT)
1308                         ma->ma_attr.la_mode = S_IFDIR;
1309                 else
1310                         ma->ma_attr.la_mode = 0;
1311                 OBD_ALLOC(name, le16_to_cpu(ent->lde_namelen) + 1);
1312                 if (name == NULL)
1313                         GOTO(out, rc = -ENOMEM);
1314
1315                 memcpy(name, ent->lde_name, le16_to_cpu(ent->lde_namelen));
1316                 lname = mdt_name(info->mti_env, name,
1317                                  le16_to_cpu(ent->lde_namelen));
1318                 ma->ma_attr_flags |= (MDS_PERM_BYPASS | MDS_QUOTA_IGNORE);
1319                 rc = mdo_name_insert(info->mti_env,
1320                                      md_object_next(&object->mot_obj),
1321                                      lname, lf, ma);
1322                 OBD_FREE(name, le16_to_cpu(ent->lde_namelen) + 1);
1323                 if (rc) {
1324                         CERROR("Can't insert %*.*s, rc %d\n",
1325                                le16_to_cpu(ent->lde_namelen),
1326                                le16_to_cpu(ent->lde_namelen),
1327                                ent->lde_name, rc);
1328                         GOTO(out, rc);
1329                 }
1330
1331                 offset += lu_dirent_size(ent);
1332                 if (offset >= size)
1333                         break;
1334         }
1335         EXIT;
1336 out:
1337         cfs_kunmap(page);
1338         return rc;
1339 }
1340
1341 static int mdt_bulk_timeout(void *data)
1342 {
1343         ENTRY;
1344
1345         CERROR("mdt bulk transfer timeout \n");
1346
1347         RETURN(1);
1348 }
1349
1350 static int mdt_writepage(struct mdt_thread_info *info)
1351 {
1352         struct ptlrpc_request   *req = mdt_info_req(info);
1353         struct mdt_body         *reqbody;
1354         struct l_wait_info      *lwi;
1355         struct ptlrpc_bulk_desc *desc;
1356         struct page             *page;
1357         int                rc;
1358         ENTRY;
1359
1360
1361         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1362         if (reqbody == NULL)
1363                 RETURN(err_serious(-EFAULT));
1364
1365         desc = ptlrpc_prep_bulk_exp(req, 1, BULK_GET_SINK, MDS_BULK_PORTAL);
1366         if (desc == NULL)
1367                 RETURN(err_serious(-ENOMEM));
1368
1369         /* allocate the page for the desc */
1370         page = cfs_alloc_page(CFS_ALLOC_STD);
1371         if (page == NULL)
1372                 GOTO(desc_cleanup, rc = -ENOMEM);
1373
1374         CDEBUG(D_INFO, "Received page offset %d size %d \n",
1375                (int)reqbody->size, (int)reqbody->nlink);
1376
1377         ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
1378                               (int)reqbody->nlink);
1379
1380         rc = sptlrpc_svc_prep_bulk(req, desc);
1381         if (rc != 0)
1382                 GOTO(cleanup_page, rc);
1383         /*
1384          * Check if client was evicted while we were doing i/o before touching
1385          * network.
1386          */
1387         OBD_ALLOC_PTR(lwi);
1388         if (!lwi)
1389                 GOTO(cleanup_page, rc = -ENOMEM);
1390
1391         if (desc->bd_export->exp_failed)
1392                 rc = -ENOTCONN;
1393         else
1394                 rc = ptlrpc_start_bulk_transfer (desc);
1395         if (rc == 0) {
1396                 *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
1397                                             mdt_bulk_timeout, desc);
1398                 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
1399                                   desc->bd_export->exp_failed, lwi);
1400                 LASSERT(rc == 0 || rc == -ETIMEDOUT);
1401                 if (rc == -ETIMEDOUT) {
1402                         DEBUG_REQ(D_ERROR, req, "timeout on bulk GET");
1403                         ptlrpc_abort_bulk(desc);
1404                 } else if (desc->bd_export->exp_failed) {
1405                         DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
1406                         rc = -ENOTCONN;
1407                         ptlrpc_abort_bulk(desc);
1408                 } else if (!desc->bd_success ||
1409                            desc->bd_nob_transferred != desc->bd_nob) {
1410                         DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
1411                                   desc->bd_success ?
1412                                   "truncated" : "network error on",
1413                                   desc->bd_nob_transferred, desc->bd_nob);
1414                         /* XXX should this be a different errno? */
1415                         rc = -ETIMEDOUT;
1416                 }
1417         } else {
1418                 DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
1419         }
1420         if (rc)
1421                 GOTO(cleanup_lwi, rc);
1422         rc = mdt_write_dir_page(info, page, reqbody->nlink);
1423
1424 cleanup_lwi:
1425         OBD_FREE_PTR(lwi);
1426 cleanup_page:
1427         cfs_free_page(page);
1428 desc_cleanup:
1429         ptlrpc_free_bulk(desc);
1430         RETURN(rc);
1431 }
1432 #endif
1433
1434 static int mdt_readpage(struct mdt_thread_info *info)
1435 {
1436         struct mdt_object *object = info->mti_object;
1437         struct lu_rdpg    *rdpg = &info->mti_u.rdpg.mti_rdpg;
1438         struct mdt_body   *reqbody;
1439         struct mdt_body   *repbody;
1440         int                rc;
1441         int                i;
1442         ENTRY;
1443
1444         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1445                 RETURN(err_serious(-ENOMEM));
1446
1447         reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
1448         repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
1449         if (reqbody == NULL || repbody == NULL)
1450                 RETURN(err_serious(-EFAULT));
1451
1452         /*
1453          * Prepare @rdpg before calling lower layers and the transfer itself.
1454          * Here reqbody->size contains the offset at which to start reading
1455          * and reqbody->nlink contains the number of bytes to read.
1456          */
1457         rdpg->rp_hash = reqbody->size;
1458         if (rdpg->rp_hash != reqbody->size) {
1459                 CERROR("Invalid hash: "LPX64" != "LPX64"\n",
1460                        rdpg->rp_hash, reqbody->size);
1461                 RETURN(-EFAULT);
1462         }
1463
1464         rdpg->rp_attrs = reqbody->mode;
1465         rdpg->rp_count  = reqbody->nlink;
1466         rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1)>>CFS_PAGE_SHIFT;
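        /* Round the byte count up to whole pages; e.g. with 4 KiB pages
         * (a common CFS_PAGE_SIZE) a 6144-byte request needs rp_npages = 2. */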
1467         OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1468         if (rdpg->rp_pages == NULL)
1469                 RETURN(-ENOMEM);
1470
1471         for (i = 0; i < rdpg->rp_npages; ++i) {
1472                 rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
1473                 if (rdpg->rp_pages[i] == NULL)
1474                         GOTO(free_rdpg, rc = -ENOMEM);
1475         }
1476
1477         /* call lower layers to fill allocated pages with directory data */
1478         rc = mo_readpage(info->mti_env, mdt_object_child(object), rdpg);
1479         if (rc)
1480                 GOTO(free_rdpg, rc);
1481
1482         /* send pages to client */
1483         rc = mdt_sendpage(info, rdpg);
1484
1485         EXIT;
1486 free_rdpg:
1487
1488         for (i = 0; i < rdpg->rp_npages; i++)
1489                 if (rdpg->rp_pages[i] != NULL)
1490                         cfs_free_page(rdpg->rp_pages[i]);
1491         OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
1492
1493         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1494                 RETURN(0);
1495
1496         return rc;
1497 }
1498
1499 static int mdt_reint_internal(struct mdt_thread_info *info,
1500                               struct mdt_lock_handle *lhc,
1501                               __u32 op)
1502 {
1503         struct req_capsule      *pill = info->mti_pill;
1504         struct mdt_device       *mdt = info->mti_mdt;
1505         struct md_quota         *mq = md_quota(info->mti_env);
1506         struct mdt_body         *repbody;
1507         int                      rc = 0;
1508         ENTRY;
1509
1510         /* pack reply */
1511         if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1512                 req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
1513                                      mdt->mdt_max_mdsize);
1514         if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1515                 req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1516                                      mdt->mdt_max_cookiesize);
1517
1518         rc = req_capsule_server_pack(pill);
1519         if (rc != 0) {
1520                 CERROR("Can't pack response, rc %d\n", rc);
1521                 RETURN(err_serious(rc));
1522         }
1523
1524         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
1525                 repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
1526                 LASSERT(repbody);
1527                 repbody->eadatasize = 0;
1528                 repbody->aclsize = 0;
1529         }
1530
1531         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK))
1532                 GOTO(out_shrink, rc = err_serious(-EFAULT));
1533
1534         rc = mdt_reint_unpack(info, op);
1535         if (rc != 0) {
1536                 CERROR("Can't unpack reint, rc %d\n", rc);
1537                 GOTO(out_shrink, rc = err_serious(rc));
1538         }
1539
1540         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_REINT_DELAY, 10);
1541
1542         /* for replay no cookie / lmm is needed, because the client already has this */
1543         if (info->mti_spec.no_create == 1)  {
1544                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
1545                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER, 0);
1546
1547                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
1548                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
1549                                              0);
1550         }
1551
1552         rc = mdt_init_ucred_reint(info);
1553         if (rc)
1554                 GOTO(out_shrink, rc);
1555
1556         rc = mdt_fix_attr_ucred(info, op);
1557         if (rc != 0)
1558                 GOTO(out_ucred, rc = err_serious(rc));
1559
1560         if (mdt_check_resent(info, mdt_reconstruct, lhc)) {
1561                 rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
1562                 GOTO(out_ucred, rc);
1563         }
1564         mq->mq_exp = info->mti_exp;
1565         rc = mdt_reint_rec(info, lhc);
1566         EXIT;
1567 out_ucred:
1568         mdt_exit_ucred(info);
1569 out_shrink:
1570         mdt_shrink_reply(info);
1571         return rc;
1572 }
1573
1574 static long mdt_reint_opcode(struct mdt_thread_info *info,
1575                              const struct req_format **fmt)
1576 {
1577         struct mdt_rec_reint *rec;
1578         long opc;
1579
1580         opc = err_serious(-EFAULT);
1581         rec = req_capsule_client_get(info->mti_pill, &RMF_REC_REINT);
1582         if (rec != NULL) {
1583                 opc = rec->rr_opcode;
1584                 DEBUG_REQ(D_INODE, mdt_info_req(info), "reint opt = %ld", opc);
1585                 if (opc < REINT_MAX && fmt[opc] != NULL)
1586                         req_capsule_extend(info->mti_pill, fmt[opc]);
1587                 else {
1588                         CERROR("Unsupported opc: %ld\n", opc);
1589                         opc = err_serious(opc);
1590                 }
1591         }
1592         return opc;
1593 }
1594
1595 static int mdt_reint(struct mdt_thread_info *info)
1596 {
1597         long opc;
1598         int  rc;
1599
1600         static const struct req_format *reint_fmts[REINT_MAX] = {
1601                 [REINT_SETATTR]  = &RQF_MDS_REINT_SETATTR,
1602                 [REINT_CREATE]   = &RQF_MDS_REINT_CREATE,
1603                 [REINT_LINK]     = &RQF_MDS_REINT_LINK,
1604                 [REINT_UNLINK]   = &RQF_MDS_REINT_UNLINK,
1605                 [REINT_RENAME]   = &RQF_MDS_REINT_RENAME,
1606                 [REINT_OPEN]     = &RQF_MDS_REINT_OPEN,
1607                 [REINT_SETXATTR] = &RQF_MDS_REINT_SETXATTR
1608         };
1609
1610         ENTRY;
1611
1612         opc = mdt_reint_opcode(info, reint_fmts);
1613         if (opc >= 0) {
1614                 /*
1615                  * No lock possible here from client to pass it to reint code
1616                  * path.
1617                  */
1618                 rc = mdt_reint_internal(info, NULL, opc);
1619         } else {
1620                 rc = opc;
1621         }
1622
1623         info->mti_fail_id = OBD_FAIL_MDS_REINT_NET_REP;
1624         RETURN(rc);
1625 }
1626
1627 /* this should sync the whole device */
1628 static int mdt_device_sync(const struct lu_env *env, struct mdt_device *mdt)
1629 {
1630         struct dt_device *dt = mdt->mdt_bottom;
1631         int rc;
1632         ENTRY;
1633
1634         rc = dt->dd_ops->dt_sync(env, dt);
1635         RETURN(rc);
1636 }
1637
1638 /* this should sync this object */
1639 static int mdt_object_sync(struct mdt_thread_info *info)
1640 {
1641         struct md_object *next;
1642         int rc;
1643         ENTRY;
1644
1645         if (!mdt_object_exists(info->mti_object)) {
1646                 CWARN("Non existing object  "DFID"!\n",
1647                       PFID(mdt_object_fid(info->mti_object)));
1648                 RETURN(-ESTALE);
1649         }
1650         next = mdt_object_child(info->mti_object);
1651         rc = mo_object_sync(info->mti_env, next);
1652
1653         RETURN(rc);
1654 }
1655
1656 static int mdt_sync(struct mdt_thread_info *info)
1657 {
1658         struct req_capsule *pill = info->mti_pill;
1659         struct mdt_body *body;
1660         int rc;
1661         ENTRY;
1662
1663         /* The fid may be zero, so we req_capsule_set manually */
1664         req_capsule_set(pill, &RQF_MDS_SYNC);
1665
1666         body = req_capsule_client_get(pill, &RMF_MDT_BODY);
1667         if (body == NULL)
1668                 RETURN(err_serious(-EINVAL));
1669
1670         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1671                 RETURN(err_serious(-ENOMEM));
1672
1673         if (fid_seq(&body->fid1) == 0) {
1674                 /* sync the whole device */
1675                 rc = req_capsule_server_pack(pill);
1676                 if (rc == 0)
1677                         rc = mdt_device_sync(info->mti_env, info->mti_mdt);
1678                 else
1679                         rc = err_serious(rc);
1680         } else {
1681                 /* sync an object */
1682                 rc = mdt_unpack_req_pack_rep(info, HABEO_CORPUS|HABEO_REFERO);
1683                 if (rc == 0) {
1684                         rc = mdt_object_sync(info);
1685                         if (rc == 0) {
1686                                 struct md_object *next;
1687                                 const struct lu_fid *fid;
1688                                 struct lu_attr *la = &info->mti_attr.ma_attr;
1689
1690                                 next = mdt_object_child(info->mti_object);
1691                                 info->mti_attr.ma_need = MA_INODE;
1692                                 info->mti_attr.ma_valid = 0;
1693                                 rc = mo_attr_get(info->mti_env, next,
1694                                                  &info->mti_attr);
1695                                 if (rc == 0) {
1696                                         body = req_capsule_server_get(pill,
1697                                                                 &RMF_MDT_BODY);
1698                                         fid = mdt_object_fid(info->mti_object);
1699                                         mdt_pack_attr2body(info, body, la, fid);
1700                                 }
1701                         }
1702                 } else
1703                         rc = err_serious(rc);
1704         }
1705         RETURN(rc);
1706 }
1707
1708 #ifdef HAVE_QUOTA_SUPPORT
1709 static int mdt_quotacheck_handle(struct mdt_thread_info *info)
1710 {
1711         struct obd_quotactl *oqctl;
1712         struct req_capsule *pill = info->mti_pill;
1713         struct obd_export *exp = info->mti_exp;
1714         struct md_quota *mq = md_quota(info->mti_env);
1715         struct md_device *next = info->mti_mdt->mdt_child;
1716         int rc;
1717         ENTRY;
1718
1719         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1720         if (oqctl == NULL)
1721                 RETURN(-EPROTO);
1722
1723         /* remote client has no permission for quotacheck */
1724         if (unlikely(exp_connect_rmtclient(exp)))
1725                 RETURN(-EPERM);
1726
1727         rc = req_capsule_server_pack(pill);
1728         if (rc)
1729                 RETURN(rc);
1730
1731         mq->mq_exp = exp;
1732         rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next,
1733                                                oqctl->qc_type);
1734         RETURN(rc);
1735 }
1736
1737 static int mdt_quotactl_handle(struct mdt_thread_info *info)
1738 {
1739         struct obd_quotactl *oqctl, *repoqc;
1740         struct req_capsule *pill = info->mti_pill;
1741         struct obd_export *exp = info->mti_exp;
1742         struct md_quota *mq = md_quota(info->mti_env);
1743         struct md_device *next = info->mti_mdt->mdt_child;
1744         const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
1745         int id, rc;
1746         ENTRY;
1747
1748         oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
1749         if (oqctl == NULL)
1750                 RETURN(-EPROTO);
1751
1752         id = oqctl->qc_id;
1753         if (exp_connect_rmtclient(exp)) {
1754                 struct ptlrpc_request *req = mdt_info_req(info);
1755                 struct mdt_export_data *med = mdt_req2med(req);
1756                 struct lustre_idmap_table *idmap = med->med_idmap;
1757
1758                 if (unlikely(oqctl->qc_cmd != Q_GETQUOTA &&
1759                              oqctl->qc_cmd != Q_GETINFO))
1760                         RETURN(-EPERM);
1761
1762
1763                 if (oqctl->qc_type == USRQUOTA)
1764                         id = lustre_idmap_lookup_uid(NULL, idmap, 0,
1765                                                      oqctl->qc_id);
1766                 else if (oqctl->qc_type == GRPQUOTA)
1767                         id = lustre_idmap_lookup_gid(NULL, idmap, 0,
1768                                                      oqctl->qc_id);
1769                 else
1770                         RETURN(-EINVAL);
1771
1772                 if (id == CFS_IDMAP_NOTFOUND) {
1773                         CDEBUG(D_QUOTA, "no mapping for id %u\n",
1774                                oqctl->qc_id);
1775                         RETURN(-EACCES);
1776                 }
1777         }
1778
1779         rc = req_capsule_server_pack(pill);
1780         if (rc)
1781                 RETURN(rc);
1782
1783         repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
1784         LASSERT(repoqc != NULL);
1785
1786         mq->mq_exp = exp;
1787         switch (oqctl->qc_cmd) {
1788         case Q_QUOTAON:
1789                 rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
1790                 break;
1791         case Q_QUOTAOFF:
1792                 rc = mqo->mqo_off(info->mti_env, next, oqctl->qc_type);
1793                 break;
1794         case Q_SETINFO:
1795                 rc = mqo->mqo_setinfo(info->mti_env, next, oqctl->qc_type, id,
1796                                       &oqctl->qc_dqinfo);
1797                 break;
1798         case Q_GETINFO:
1799                 rc = mqo->mqo_getinfo(info->mti_env, next, oqctl->qc_type, id,
1800                                       &oqctl->qc_dqinfo);
1801                 break;
1802         case Q_SETQUOTA:
1803                 rc = mqo->mqo_setquota(info->mti_env, next, oqctl->qc_type, id,
1804                                        &oqctl->qc_dqblk);
1805                 break;
1806         case Q_GETQUOTA:
1807                 rc = mqo->mqo_getquota(info->mti_env, next, oqctl->qc_type, id,
1808                                        &oqctl->qc_dqblk);
1809                 break;
1810         case Q_GETOINFO:
1811                 rc = mqo->mqo_getoinfo(info->mti_env, next, oqctl->qc_type, id,
1812                                        &oqctl->qc_dqinfo);
1813                 break;
1814         case Q_GETOQUOTA:
1815                 rc = mqo->mqo_getoquota(info->mti_env, next, oqctl->qc_type, id,
1816                                         &oqctl->qc_dqblk);
1817                 break;
1818         case LUSTRE_Q_INVALIDATE:
1819                 rc = mqo->mqo_invalidate(info->mti_env, next, oqctl->qc_type);
1820                 break;
1821         case LUSTRE_Q_FINVALIDATE:
1822                 rc = mqo->mqo_finvalidate(info->mti_env, next, oqctl->qc_type);
1823                 break;
1824         default:
1825                 CERROR("unsupported mdt_quotactl command: %d\n",
1826                        oqctl->qc_cmd);
1827                 RETURN(-EFAULT);
1828         }
1829
1830         *repoqc = *oqctl;
1831         RETURN(rc);
1832 }
1833 #endif
1834
1835
1836 /*
1837  * OBD PING and other handlers.
1838  */
1839 static int mdt_obd_ping(struct mdt_thread_info *info)
1840 {
1841         int rc;
1842         ENTRY;
1843
1844         req_capsule_set(info->mti_pill, &RQF_OBD_PING);
1845
1846         rc = target_handle_ping(mdt_info_req(info));
1847         if (rc < 0)
1848                 rc = err_serious(rc);
1849         RETURN(rc);
1850 }
1851
1852 static int mdt_obd_log_cancel(struct mdt_thread_info *info)
1853 {
1854         return err_serious(-EOPNOTSUPP);
1855 }
1856
1857 static int mdt_obd_qc_callback(struct mdt_thread_info *info)
1858 {
1859         return err_serious(-EOPNOTSUPP);
1860 }
1861
1862
1863 /*
1864  * LLOG handlers.
1865  */
1866
1867 /** clone llog ctxt from child (mdd)
1868  * This allows remote llog (replicator) access.
1869  * We can either pass all llog RPCs (e.g. mdt_llog_create) on to the child,
1870  * where the context was originally set up, or we can handle them directly.
1871  * We choose the latter, but that means any llog
1872  * contexts set up by the child must be accessible by the mdt.  So we clone the
1873  * context into our context list here.
1874  */
1875 static int mdt_llog_ctxt_clone(const struct lu_env *env, struct mdt_device *mdt,
1876                                int idx)
1877 {
1878         struct md_device  *next = mdt->mdt_child;
1879         struct llog_ctxt *ctxt;
1880         int rc;
1881
1882         if (!llog_ctxt_null(mdt2obd_dev(mdt), idx))
1883                 return 0;
1884
1885         rc = next->md_ops->mdo_llog_ctxt_get(env, next, idx, (void **)&ctxt);
1886         if (rc || ctxt == NULL) {
1887                 CERROR("Can't get mdd ctxt %d\n", rc);
1888                 return rc;
1889         }
1890
1891         rc = llog_group_set_ctxt(&mdt2obd_dev(mdt)->obd_olg, ctxt, idx);
1892         if (rc)
1893                 CERROR("Can't set mdt ctxt %d\n", rc);
1894
1895         return rc;
1896 }
1897
1898 static int mdt_llog_ctxt_unclone(const struct lu_env *env,
1899                                  struct mdt_device *mdt, int idx)
1900 {
1901         struct llog_ctxt *ctxt;
1902
1903         ctxt = llog_get_context(mdt2obd_dev(mdt), idx);
1904         if (ctxt == NULL)
1905                 return 0;
1906         /* Put once for the get we just did, and once for the clone */
1907         llog_ctxt_put(ctxt);
1908         llog_ctxt_put(ctxt);
1909         return 0;
1910 }
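/*
 * Clone and unclone are expected to be paired per llog index during MDT
 * setup and cleanup.  An illustrative sketch only; the index used below is
 * an assumption for the example, not a statement about the real call sites:
 *
 *      rc = mdt_llog_ctxt_clone(env, mdt, LLOG_CONFIG_ORIG_CTXT);
 *      if (rc == 0) {
 *              ... use the cloned context via llog_get_context() ...
 *              mdt_llog_ctxt_unclone(env, mdt, LLOG_CONFIG_ORIG_CTXT);
 *      }
 */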
1911
1912 static int mdt_llog_create(struct mdt_thread_info *info)
1913 {
1914         int rc;
1915
1916         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
1917         rc = llog_origin_handle_create(mdt_info_req(info));
1918         return (rc < 0 ? err_serious(rc) : rc);
1919 }
1920
1921 static int mdt_llog_destroy(struct mdt_thread_info *info)
1922 {
1923         int rc;
1924
1925         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_DESTROY);
1926         rc = llog_origin_handle_destroy(mdt_info_req(info));
1927         return (rc < 0 ? err_serious(rc) : rc);
1928 }
1929
1930 static int mdt_llog_read_header(struct mdt_thread_info *info)
1931 {
1932         int rc;
1933
1934         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
1935         rc = llog_origin_handle_read_header(mdt_info_req(info));
1936         return (rc < 0 ? err_serious(rc) : rc);
1937 }
1938
1939 static int mdt_llog_next_block(struct mdt_thread_info *info)
1940 {
1941         int rc;
1942
1943         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
1944         rc = llog_origin_handle_next_block(mdt_info_req(info));
1945         return (rc < 0 ? err_serious(rc) : rc);
1946 }
1947
1948 static int mdt_llog_prev_block(struct mdt_thread_info *info)
1949 {
1950         int rc;
1951
1952         req_capsule_set(info->mti_pill, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
1953         rc = llog_origin_handle_prev_block(mdt_info_req(info));
1954         return (rc < 0 ? err_serious(rc) : rc);
1955 }
1956
1957
1958 /*
1959  * DLM handlers.
1960  */
1961 static struct ldlm_callback_suite cbs = {
1962         .lcs_completion = ldlm_server_completion_ast,
1963         .lcs_blocking   = ldlm_server_blocking_ast,
1964         .lcs_glimpse    = NULL
1965 };
1966
1967 static int mdt_enqueue(struct mdt_thread_info *info)
1968 {
1969         struct ptlrpc_request *req;
1970         __u64 req_bits;
1971         int rc;
1972
1973         /*
1974          * info->mti_dlm_req already contains swapped and (if necessary)
1975          * converted dlm request.
1976          */
1977         LASSERT(info->mti_dlm_req != NULL);
1978
1979         req = mdt_info_req(info);
1980
1981         /*
1982          * Lock without inodebits makes no sense and will oops later in
1983          * ldlm. Let's check it now to see if we have wrong lock from client or
1984          * bits get corrupted somewhere in mdt_intent_policy().
1985          */
1986         req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
1987         /* This is disabled because we need to support liblustre flock.
1988          * LASSERT(req_bits != 0);
1989          */
1990
1991         rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
1992                                   req, info->mti_dlm_req, &cbs);
1993         info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
1994         return rc ? err_serious(rc) : req->rq_status;
1995 }
1996
1997 static int mdt_convert(struct mdt_thread_info *info)
1998 {
1999         int rc;
2000         struct ptlrpc_request *req;
2001
2002         LASSERT(info->mti_dlm_req);
2003         req = mdt_info_req(info);
2004         rc = ldlm_handle_convert0(req, info->mti_dlm_req);
2005         return rc ? err_serious(rc) : req->rq_status;
2006 }
2007
2008 static int mdt_bl_callback(struct mdt_thread_info *info)
2009 {
2010         CERROR("bl callbacks should not happen on MDS\n");
2011         LBUG();
2012         return err_serious(-EOPNOTSUPP);
2013 }
2014
2015 static int mdt_cp_callback(struct mdt_thread_info *info)
2016 {
2017         CERROR("cp callbacks should not happen on MDS\n");
2018         LBUG();
2019         return err_serious(-EOPNOTSUPP);
2020 }
2021
2022 /*
2023  * sec context handlers
2024  */
2025 static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
2026 {
2027         int rc;
2028
2029         rc = mdt_handle_idmap(info);
2030
2031         if (unlikely(rc)) {
2032                 struct ptlrpc_request *req = mdt_info_req(info);
2033                 __u32                  opc;
2034
2035                 opc = lustre_msg_get_opc(req->rq_reqmsg);
2036                 if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
2037                         sptlrpc_svc_ctx_invalidate(req);
2038         }
2039
2040         OBD_FAIL_TIMEOUT(OBD_FAIL_SEC_CTX_HDL_PAUSE, obd_fail_val);
2041
2042         return rc;
2043 }
2044
2045 static struct mdt_object *mdt_obj(struct lu_object *o)
2046 {
2047         LASSERT(lu_device_is_mdt(o->lo_dev));
2048         return container_of0(o, struct mdt_object, mot_obj.mo_lu);
2049 }
2050
2051 struct mdt_object *mdt_object_find(const struct lu_env *env,
2052                                    struct mdt_device *d,
2053                                    const struct lu_fid *f)
2054 {
2055         struct lu_object *o;
2056         struct mdt_object *m;
2057         ENTRY;
2058
2059         CDEBUG(D_INFO, "Find object for "DFID"\n", PFID(f));
2060         o = lu_object_find(env, &d->mdt_md_dev.md_lu_dev, f, NULL);
2061         if (unlikely(IS_ERR(o)))
2062                 m = (struct mdt_object *)o;
2063         else
2064                 m = mdt_obj(o);
2065         RETURN(m);
2066 }
2067
2068 /**
2069  * Asynchronous commit for mdt device.
2070  *
2071  * Pass the asynchronous commit call down the MDS stack.
2072  *
2073  * \param env environment
2074  * \param mdt the mdt device
2075  */
2076 static void mdt_device_commit_async(const struct lu_env *env,
2077                                     struct mdt_device *mdt)
2078 {
2079         struct dt_device *dt = mdt->mdt_bottom;
2080         int rc;
2081
2082         rc = dt->dd_ops->dt_commit_async(env, dt);
2083         if (unlikely(rc != 0))
2084                 CWARN("async commit start failed with rc = %d", rc);
2085 }
2086
2087 /**
2088  * Mark the lock as "synchronous".
2089  *
2090  * Mark the lock to defer transaction commit to the unlock time.
2091  *
2092  * \param lock the lock to mark as "synchronous"
2093  *
2094  * \see mdt_is_lock_sync
2095  * \see mdt_save_lock
2096  */
2097 static inline void mdt_set_lock_sync(struct ldlm_lock *lock)
2098 {
2099         lock->l_ast_data = (void*)1;
2100 }
2101
2102 /**
2103  * Check whether the lock is "synchronous" or not.
2104  *
2105  * \param lock the lock to check
2106  * \retval 1 the lock is "synchronous"
2107  * \retval 0 the lock isn't "synchronous"
2108  *
2109  * \see mdt_set_lock_sync
2110  * \see mdt_save_lock
2111  */
2112 static inline int mdt_is_lock_sync(struct ldlm_lock *lock)
2113 {
2114         return lock->l_ast_data != NULL;
2115 }
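/*
 * Note: l_ast_data is reused above as a plain boolean "commit on unlock"
 * marker; this assumes that MDT-side metadata locks never carry any other
 * ast_data payload.
 */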
2116
2117 /**
2118  * Blocking AST for mdt locks.
2119  *
2120  * Starts transaction commit if in case of COS lock conflict or
2121  * deffers such a commit to the mdt_save_lock.
2122  *
2123  * \param lock the lock which blocks a request or cancelling lock
2124  * \param desc unused
2125  * \param data unused
2126  * \param flag indicates whether this is a cancelling or a blocking callback
2127  * \retval 0
2128  * \see ldlm_blocking_ast_nocheck
2129  */
2130 int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2131                      void *data, int flag)
2132 {
2133         struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
2134         struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
2135         int rc;
2136         ENTRY;
2137
2138         if (flag == LDLM_CB_CANCELING)
2139                 RETURN(0);
2140         lock_res_and_lock(lock);
2141         if (lock->l_blocking_ast != mdt_blocking_ast) {
2142                 unlock_res_and_lock(lock);
2143                 RETURN(0);
2144         }
2145         if (mdt_cos_is_enabled(mdt) &&
2146             lock->l_req_mode & (LCK_PW | LCK_EX) &&
2147             lock->l_blocking_lock != NULL &&
2148             lock->l_client_cookie != lock->l_blocking_lock->l_client_cookie) {
2149                 mdt_set_lock_sync(lock);
2150         }
2151         rc = ldlm_blocking_ast_nocheck(lock);
2152
2153         /* There is no lock conflict if l_blocking_lock == NULL;
2154          * it indicates a blocking ast sent from ldlm_lock_decref_internal
2155          * when the last reference to a local lock was released. */
2156         if (lock->l_req_mode == LCK_COS && lock->l_blocking_lock != NULL) {
2157                 struct lu_env env;
2158
2159                 rc = lu_env_init(&env, LCT_MD_THREAD);
2160                 if (unlikely(rc != 0))
2161                         CWARN("lu_env initialization failed with rc = %d,"
2162                               "cannot start asynchronous commit\n", rc);
2163                 else
2164                         mdt_device_commit_async(&env, mdt);
2165                 lu_env_fini(&env);
2166         }
2167         RETURN(rc);
2168 }
2169
2170 int mdt_object_lock(struct mdt_thread_info *info, struct mdt_object *o,
2171                     struct mdt_lock_handle *lh, __u64 ibits, int locality)
2172 {
2173         struct ldlm_namespace *ns = info->mti_mdt->mdt_namespace;
2174         ldlm_policy_data_t *policy = &info->mti_policy;
2175         struct ldlm_res_id *res_id = &info->mti_res_id;
2176         int rc;
2177         ENTRY;
2178
2179         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2180         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2181         LASSERT(lh->mlh_reg_mode != LCK_MINMODE);
2182         LASSERT(lh->mlh_type != MDT_NUL_LOCK);
2183
2184         if (mdt_object_exists(o) < 0) {
2185                 if (locality == MDT_CROSS_LOCK) {
2186                         /* cross-ref object fix */
2187                         ibits &= ~MDS_INODELOCK_UPDATE;
2188                         ibits |= MDS_INODELOCK_LOOKUP;
2189                 } else {
2190                         LASSERT(!(ibits & MDS_INODELOCK_UPDATE));
2191                         LASSERT(ibits & MDS_INODELOCK_LOOKUP);
2192                 }
2193                 /* No PDO lock on remote object */
2194                 LASSERT(lh->mlh_type != MDT_PDO_LOCK);
2195         }
2196
2197         if (lh->mlh_type == MDT_PDO_LOCK) {
2198                 /* check for exists after object is locked */
2199                 if (mdt_object_exists(o) == 0) {
2200                         /* Non-existent object shouldn't have PDO lock */
2201                         RETURN(-ESTALE);
2202                 } else {
2203                         /* Non-dir object shouldn't have PDO lock */
2204                         LASSERT(S_ISDIR(lu_object_attr(&o->mot_obj.mo_lu)));
2205                 }
2206         }
2207
2208         memset(policy, 0, sizeof(*policy));
2209         fid_build_reg_res_name(mdt_object_fid(o), res_id);
2210
2211         /*
2212          * Take PDO lock on whole directory and build correct @res_id for lock
2213          * on part of directory.
2214          */
2215         if (lh->mlh_pdo_hash != 0) {
2216                 LASSERT(lh->mlh_type == MDT_PDO_LOCK);
2217                 mdt_lock_pdo_mode(info, o, lh);
2218                 if (lh->mlh_pdo_mode != LCK_NL) {
2219                         /*
2220                          * Do not use LDLM_FL_LOCAL_ONLY for parallel lock, it
2221                          * is never going to be sent to client and we do not
2222                          * want it slowed down due to possible cancels.
2223                          */
2224                         policy->l_inodebits.bits = MDS_INODELOCK_UPDATE;
2225                         rc = mdt_fid_lock(ns, &lh->mlh_pdo_lh, lh->mlh_pdo_mode,
2226                                           policy, res_id, LDLM_FL_ATOMIC_CB,
2227                                           &info->mti_exp->exp_handle.h_cookie);
2228                         if (unlikely(rc))
2229                                 RETURN(rc);
2230                 }
2231
2232                 /*
2233                  * Finish res_id initializing by name hash marking part of
2234                  * directory which is taking modification.
2235                  */
2236                 res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
2237         }
2238
2239         policy->l_inodebits.bits = ibits;
2240
2241         /*
2242          * Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
2243          * going to be sent to client. If it is - mdt_intent_policy() path will
2244          * fix it up and turn FL_LOCAL flag off.
2245          */
2246         rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
2247                           res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
2248                           &info->mti_exp->exp_handle.h_cookie);
2249         if (rc)
2250                 GOTO(out, rc);
2251
2252 out:
2253         if (rc)
2254                 mdt_object_unlock(info, o, lh, 1);
2255
2256
2257         RETURN(rc);
2258 }
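/*
 * A worked example of the two-level PDO locking done by mdt_object_lock()
 * above (the name and hash are purely illustrative): to modify the name
 * "foo" in directory D with lh->mlh_pdo_hash = hash("foo"), two resources
 * end up locked:
 *
 *      { fid(D) }              in lh->mlh_pdo_mode  (whole directory)
 *      { fid(D), hash("foo") } in lh->mlh_reg_mode  (one name/hash bucket)
 *
 * so operations on different names in the same directory can run in
 * parallel as long as their whole-directory PDO modes are compatible.
 */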
2259
2260 /**
2261  * Save a lock within request object.
2262  *
2263  * Keep the lock referenced until either client ACK or transaction
2264  * commit happens, or release the lock immediately, depending on the
2265  * input parameters. If COS is ON, a write lock is converted to a COS lock
2266  * before saving.
2267  *
2268  * \param info thread info object
2269  * \param h lock handle
2270  * \param mode lock mode
2271  * \param decref force immediate lock releasing
2272  */
2273 static
2274 void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
2275                    ldlm_mode_t mode, int decref)
2276 {
2277         ENTRY;
2278
2279         if (lustre_handle_is_used(h)) {
2280                 if (decref || !info->mti_has_trans ||
2281                     !(mode & (LCK_PW | LCK_EX))){
2282                         mdt_fid_unlock(h, mode);
2283                 } else {
2284                         struct mdt_device *mdt = info->mti_mdt;
2285                         struct ldlm_lock *lock = ldlm_handle2lock(h);
2286                         struct ptlrpc_request *req = mdt_info_req(info);
2287                         int no_ack = 0;
2288
2289                         LASSERTF(lock != NULL, "no lock for cookie "LPX64"\n",
2290                                  h->cookie);
2291                         CDEBUG(D_HA, "request = %p reply state = %p"
2292                                " transno = "LPD64"\n",
2293                                req, req->rq_reply_state, req->rq_transno);
2294                         if (mdt_cos_is_enabled(mdt)) {
2295                                 no_ack = 1;
2296                                 ldlm_lock_downgrade(lock, LCK_COS);
2297                                 mode = LCK_COS;
2298                         }
2299                         ptlrpc_save_lock(req, h, mode, no_ack);
2300                         if (mdt_is_lock_sync(lock)) {
2301                                 CDEBUG(D_HA, "found sync-lock,"
2302                                        " async commit started\n");
2303                                 mdt_device_commit_async(info->mti_env,
2304                                                         mdt);
2305                         }
2306                         LDLM_LOCK_PUT(lock);
2307                 }
2308                 h->cookie = 0ull;
2309         }
2310
2311         EXIT;
2312 }
2313
2314 /**
2315  * Unlock mdt object.
2316  *
2317  * Immediately release the regular lock and the PDO lock, or save the
2318  * locks in the request and keep them referenced until client ACK or
2319  * transaction commit.
2320  *
2321  * \param info thread info object
2322  * \param o mdt object
2323  * \param lh mdt lock handle referencing regular and PDO locks
2324  * \param decref force immediate lock releasing
2325  */
2326 void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
2327                        struct mdt_lock_handle *lh, int decref)
2328 {
2329         ENTRY;
2330
2331         mdt_save_lock(info, &lh->mlh_pdo_lh, lh->mlh_pdo_mode, decref);
2332         mdt_save_lock(info, &lh->mlh_reg_lh, lh->mlh_reg_mode, decref);
2333
2334         EXIT;
2335 }
2336
2337 struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *info,
2338                                         const struct lu_fid *f,
2339                                         struct mdt_lock_handle *lh,
2340                                         __u64 ibits)
2341 {
2342         struct mdt_object *o;
2343
2344         o = mdt_object_find(info->mti_env, info->mti_mdt, f);
2345         if (!IS_ERR(o)) {
2346                 int rc;
2347
2348                 rc = mdt_object_lock(info, o, lh, ibits,
2349                                      MDT_LOCAL_LOCK);
2350                 if (rc != 0) {
2351                         mdt_object_put(info->mti_env, o);
2352                         o = ERR_PTR(rc);
2353                 }
2354         }
2355         return o;
2356 }
2357
2358 void mdt_object_unlock_put(struct mdt_thread_info * info,
2359                            struct mdt_object * o,
2360                            struct mdt_lock_handle *lh,
2361                            int decref)
2362 {
2363         mdt_object_unlock(info, o, lh, decref);
2364         mdt_object_put(info->mti_env, o);
2365 }
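/*
 * Typical use of the find/lock helpers above, shown as a sketch only (the
 * lock handle slot and the inodebits chosen here are assumptions for the
 * example, not a prescription):
 *
 *      struct mdt_lock_handle *lh = &info->mti_lh[MDT_LH_PARENT];
 *      struct mdt_object *o;
 *
 *      mdt_lock_reg_init(lh, LCK_PR);
 *      o = mdt_object_find_lock(info, fid, lh, MDS_INODELOCK_UPDATE);
 *      if (IS_ERR(o))
 *              RETURN(PTR_ERR(o));
 *      ... read or modify the object ...
 *      mdt_object_unlock_put(info, o, lh, rc);
 */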
2366
2367 static struct mdt_handler *mdt_handler_find(__u32 opc,
2368                                             struct mdt_opc_slice *supported)
2369 {
2370         struct mdt_opc_slice *s;
2371         struct mdt_handler   *h;
2372
2373         h = NULL;
2374         for (s = supported; s->mos_hs != NULL; s++) {
2375                 if (s->mos_opc_start <= opc && opc < s->mos_opc_end) {
2376                         h = s->mos_hs + (opc - s->mos_opc_start);
2377                         if (likely(h->mh_opc != 0))
2378                                 LASSERTF(h->mh_opc == opc,
2379                                          "opcode mismatch %d != %d\n",
2380                                          h->mh_opc, opc);
2381                         else
2382                                 h = NULL; /* unsupported opc */
2383                         break;
2384                 }
2385         }
2386         return h;
2387 }
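/*
 * A slice maps a contiguous opcode range onto a handler array.  Sketch of
 * one entry (the values are illustrative assumptions; the real tables are
 * defined elsewhere in this file):
 *
 *      {
 *              .mos_opc_start = MDS_GETATTR,
 *              .mos_opc_end   = MDS_LAST_OPC,
 *              .mos_hs        = mdt_mds_ops
 *      },
 *
 * An opcode opc with MDS_GETATTR <= opc < MDS_LAST_OPC then resolves to
 * mdt_mds_ops[opc - MDS_GETATTR].
 */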
2388
2389 static int mdt_lock_resname_compat(struct mdt_device *m,
2390                                    struct ldlm_request *req)
2391 {
2392         /* XXX something... later. */
2393         return 0;
2394 }
2395
2396 static int mdt_lock_reply_compat(struct mdt_device *m, struct ldlm_reply *rep)
2397 {
2398         /* XXX something... later. */
2399         return 0;
2400 }
2401
2402 /*
2403  * Generic code handling requests that have struct mdt_body passed in:
2404  *
2405  *  - extract mdt_body from request and save it in @info, if present;
2406  *
2407  *  - create lu_object, corresponding to the fid in mdt_body, and save it in
2408  *  @info;
2409  *
2410  *  - if HABEO_CORPUS flag is set for this request type check whether object
2411  *  actually exists on storage (lu_object_exists()).
2412  *
2413  */
2414 static int mdt_body_unpack(struct mdt_thread_info *info, __u32 flags)
2415 {
2416         const struct mdt_body    *body;
2417         struct mdt_object        *obj;
2418         const struct lu_env      *env;
2419         struct req_capsule       *pill;
2420         int                       rc;
2421         ENTRY;
2422
2423         env = info->mti_env;
2424         pill = info->mti_pill;
2425
2426         body = info->mti_body = req_capsule_client_get(pill, &RMF_MDT_BODY);
2427         if (body == NULL)
2428                 RETURN(-EFAULT);
2429
2430         if (!(body->valid & OBD_MD_FLID))
2431                 RETURN(0);
2432
2433         if (!fid_is_sane(&body->fid1)) {
2434                 CERROR("Invalid fid: "DFID"\n", PFID(&body->fid1));
2435                 RETURN(-EINVAL);
2436         }
2437
2438         /*
2439          * Do not get size or any capa fields before we check that the request
2440          * actually contains capa. There are some requests which do not, for
2441          * instance MDS_IS_SUBDIR.
2442          */
2443         if (req_capsule_has_field(pill, &RMF_CAPA1, RCL_CLIENT) &&
2444             req_capsule_get_size(pill, &RMF_CAPA1, RCL_CLIENT))
2445                 mdt_set_capainfo(info, 0, &body->fid1,
2446                                  req_capsule_client_get(pill, &RMF_CAPA1));
2447
2448         obj = mdt_object_find(env, info->mti_mdt, &body->fid1);
2449         if (!IS_ERR(obj)) {
2450                 if ((flags & HABEO_CORPUS) &&
2451                     !mdt_object_exists(obj)) {
2452                         mdt_object_put(env, obj);
2453                         /* for capability renew ENOENT will be handled in
2454                          * mdt_renew_capa */
2455                         if (body->valid & OBD_MD_FLOSSCAPA)
2456                                 rc = 0;
2457                         else
2458                                 rc = -ENOENT;
2459                 } else {
2460                         info->mti_object = obj;
2461                         rc = 0;
2462                 }
2463         } else
2464                 rc = PTR_ERR(obj);
2465
2466         RETURN(rc);
2467 }
2468
2469 static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags)
2470 {
2471         struct req_capsule *pill = info->mti_pill;
2472         int rc;
2473         ENTRY;
2474
2475         if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_CLIENT))
2476                 rc = mdt_body_unpack(info, flags);
2477         else
2478                 rc = 0;
2479
2480         if (rc == 0 && (flags & HABEO_REFERO)) {
2481                 struct mdt_device *mdt = info->mti_mdt;
2482
2483                 /* Pack reply. */
2484
2485                 if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER))
2486                         req_capsule_set_size(pill, &RMF_MDT_MD, RCL_SERVER,
2487                                              mdt->mdt_max_mdsize);
2488                 if (req_capsule_has_field(pill, &RMF_LOGCOOKIES, RCL_SERVER))
2489                         req_capsule_set_size(pill, &RMF_LOGCOOKIES, RCL_SERVER,
2490                                              mdt->mdt_max_cookiesize);
2491
2492                 rc = req_capsule_server_pack(pill);
2493         }
2494         RETURN(rc);
2495 }
2496
2497 static int mdt_init_capa_ctxt(const struct lu_env *env, struct mdt_device *m)
2498 {
2499         struct md_device *next = m->mdt_child;
2500
2501         return next->md_ops->mdo_init_capa_ctxt(env, next,
2502                                                 m->mdt_opts.mo_mds_capa,
2503                                                 m->mdt_capa_timeout,
2504                                                 m->mdt_capa_alg,
2505                                                 m->mdt_capa_keys);
2506 }
2507
2508 /*
2509  * Invoke handler for this request opc. Also do necessary preprocessing
2510  * (according to handler ->mh_flags), and post-processing (setting of
2511  * ->last_{xid,committed}).
2512  */
2513 static int mdt_req_handle(struct mdt_thread_info *info,
2514                           struct mdt_handler *h, struct ptlrpc_request *req)
2515 {
2516         int   rc, serious = 0;
2517         __u32 flags;
2518
2519         ENTRY;
2520
2521         LASSERT(h->mh_act != NULL);
2522         LASSERT(h->mh_opc == lustre_msg_get_opc(req->rq_reqmsg));
2523         LASSERT(current->journal_info == NULL);
2524
2525         /*
2526          * Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
2527          * to put same checks into handlers like mdt_close(), mdt_reint(),
2528          * etc., without talking to mdt authors first. Checking same thing
2529          * there again is useless and returning 0 error without packing reply
2530          * is buggy! Handlers either pack reply or return error.
2531          *
2532          * We return 0 here and do not send any reply in order to emulate
2533          * network failure. Do not send any reply in case any NET-related
2534          * fail_id has occurred.
2535          */
2536         if (OBD_FAIL_CHECK_ORSET(h->mh_fail_id, OBD_FAIL_ONCE))
2537                 RETURN(0);
2538
2539         rc = 0;
2540         flags = h->mh_flags;
2541         LASSERT(ergo(flags & (HABEO_CORPUS|HABEO_REFERO), h->mh_fmt != NULL));
2542
2543         if (h->mh_fmt != NULL) {
2544                 req_capsule_set(info->mti_pill, h->mh_fmt);
2545                 rc = mdt_unpack_req_pack_rep(info, flags);
2546         }
2547
2548         if (rc == 0 && flags & MUTABOR &&
2549             req->rq_export->exp_connect_flags & OBD_CONNECT_RDONLY)
2550                 /* should it be rq_status? */
2551                 rc = -EROFS;
2552
2553         if (rc == 0 && flags & HABEO_CLAVIS) {
2554                 struct ldlm_request *dlm_req;
2555
2556                 LASSERT(h->mh_fmt != NULL);
2557
2558                 dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
2559                 if (dlm_req != NULL) {
2560                         if (info->mti_mdt->mdt_opts.mo_compat_resname)
2561                                 rc = mdt_lock_resname_compat(info->mti_mdt,
2562                                                              dlm_req);
2563                         info->mti_dlm_req = dlm_req;
2564                 } else {
2565                         rc = -EFAULT;
2566                 }
2567         }
2568
2569         /* capability setting changed via /proc, need to reinitialize the ctxt */
2570         if (info->mti_mdt && info->mti_mdt->mdt_capa_conf) {
2571                 mdt_init_capa_ctxt(info->mti_env, info->mti_mdt);
2572                 info->mti_mdt->mdt_capa_conf = 0;
2573         }
2574
2575         if (likely(rc == 0)) {
2576                 /*
2577                  * Process request, there can be two types of rc:
2578                  * 1) errors with msg unpack/pack, other failures outside the
2579                  * operation itself. This is counted as serious errors;
2580                  * 2) errors during fs operation, should be placed in rq_status
2581                  * only
2582                  */
2583                 rc = h->mh_act(info);
2584                 if (rc == 0 &&
2585                     !req->rq_no_reply && req->rq_reply_state == NULL) {
2586                         DEBUG_REQ(D_ERROR, req, "MDT \"handler\" %s did not "
2587                                   "pack reply and returned 0 error\n",
2588                                   h->mh_name);
2589                         LBUG();
2590                 }
2591                 serious = is_serious(rc);
2592                 rc = clear_serious(rc);
2593         } else
2594                 serious = 1;
2595
2596         req->rq_status = rc;
2597
2598         /*
2599          * ELDLM_* codes which > 0 should be in rq_status only as well as
2600          * all non-serious errors.
2601          */
2602         if (rc > 0 || !serious)
2603                 rc = 0;
2604
2605         LASSERT(current->journal_info == NULL);
2606
2607         if (rc == 0 && (flags & HABEO_CLAVIS) &&
2608             info->mti_mdt->mdt_opts.mo_compat_resname) {
2609                 struct ldlm_reply *dlmrep;
2610
2611                 dlmrep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
2612                 if (dlmrep != NULL)
2613                         rc = mdt_lock_reply_compat(info->mti_mdt, dlmrep);
2614         }
2615
2616         /* If we're DISCONNECTing, the mdt_export_data is already freed */
2617         if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
2618                 target_committed_to_req(req);
2619
2620         if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
2621                      lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
2622                 DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
2623                 LBUG();
2624         }
2625
2626         target_send_reply(req, rc, info->mti_fail_id);
2627         RETURN(0);
2628 }
2629
2630 void mdt_lock_handle_init(struct mdt_lock_handle *lh)
2631 {
2632         lh->mlh_type = MDT_NUL_LOCK;
2633         lh->mlh_reg_lh.cookie = 0ull;
2634         lh->mlh_reg_mode = LCK_MINMODE;
2635         lh->mlh_pdo_lh.cookie = 0ull;
2636         lh->mlh_pdo_mode = LCK_MINMODE;
2637 }
2638
2639 void mdt_lock_handle_fini(struct mdt_lock_handle *lh)
2640 {
2641         LASSERT(!lustre_handle_is_used(&lh->mlh_reg_lh));
2642         LASSERT(!lustre_handle_is_used(&lh->mlh_pdo_lh));
2643 }
2644
2645 /*
2646  * Initialize fields of struct mdt_thread_info. Other fields are left in
2647  * uninitialized state, because it's too expensive to zero out whole
2648  * mdt_thread_info (> 1K) on each request arrival.
2649  */
2650 static void mdt_thread_info_init(struct ptlrpc_request *req,
2651                                  struct mdt_thread_info *info)
2652 {
2653         int i;
2654         struct md_capainfo *ci;
2655
2656         req_capsule_init(&req->rq_pill, req, RCL_SERVER);
2657         info->mti_pill = &req->rq_pill;
2658
2659         /* lock handle */
2660         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2661                 mdt_lock_handle_init(&info->mti_lh[i]);
2662
2663         /* mdt device: it can be NULL while CONNECT */
2664         if (req->rq_export) {
2665                 info->mti_mdt = mdt_dev(req->rq_export->exp_obd->obd_lu_dev);
2666                 info->mti_exp = req->rq_export;
2667         } else
2668                 info->mti_mdt = NULL;
2669         info->mti_env = req->rq_svc_thread->t_env;
2670         ci = md_capainfo(info->mti_env);
2671         memset(ci, 0, sizeof *ci);
2672         if (req->rq_export) {
2673                 if (exp_connect_rmtclient(req->rq_export))
2674                         ci->mc_auth = LC_ID_CONVERT;
2675                 else if (req->rq_export->exp_connect_flags &
2676                          OBD_CONNECT_MDS_CAPA)
2677                         ci->mc_auth = LC_ID_PLAIN;
2678                 else
2679                         ci->mc_auth = LC_ID_NONE;
2680         }
2681
2682         info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
2683         info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
2684         info->mti_mos[0] = NULL;
2685         info->mti_mos[1] = NULL;
2686         info->mti_mos[2] = NULL;
2687         info->mti_mos[3] = NULL;
2688
2689         memset(&info->mti_attr, 0, sizeof(info->mti_attr));
2690         info->mti_body = NULL;
2691         info->mti_object = NULL;
2692         info->mti_dlm_req = NULL;
2693         info->mti_has_trans = 0;
2694         info->mti_no_need_trans = 0;
2695         info->mti_cross_ref = 0;
2696         info->mti_opdata = 0;
2697
2698         /* Do not check for split by default. */
2699         info->mti_spec.sp_ck_split = 0;
2700         info->mti_spec.no_create = 0;
2701 }
2702
2703 static void mdt_thread_info_fini(struct mdt_thread_info *info)
2704 {
2705         int i;
2706
2707         req_capsule_fini(info->mti_pill);
2708         if (info->mti_object != NULL) {
2709                 /*
2710                  * freeing an object may lead to OSD level transaction, do not
2711                  * let it mess with MDT. bz19385.
2712                  */
2713                 info->mti_no_need_trans = 1;
2714                 mdt_object_put(info->mti_env, info->mti_object);
2715                 info->mti_object = NULL;
2716         }
2717         for (i = 0; i < ARRAY_SIZE(info->mti_lh); i++)
2718                 mdt_lock_handle_fini(&info->mti_lh[i]);
2719         info->mti_env = NULL;
2720 }
2721
2722 static int mdt_filter_recovery_request(struct ptlrpc_request *req,
2723                                        struct obd_device *obd, int *process)
2724 {
2725         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2726         case MDS_CONNECT: /* This will never get here, but for completeness. */
2727         case OST_CONNECT: /* This will never get here, but for completeness. */
2728         case MDS_DISCONNECT:
2729         case OST_DISCONNECT:
2730                *process = 1;
2731                RETURN(0);
2732
2733         case MDS_CLOSE:
2734         case MDS_DONE_WRITING:
2735         case MDS_SYNC: /* used in unmounting */
2736         case OBD_PING:
2737         case MDS_REINT:
2738         case SEQ_QUERY:
2739         case FLD_QUERY:
2740         case LDLM_ENQUEUE:
2741                 *process = target_queue_recovery_request(req, obd);
2742                 RETURN(0);
2743
2744         default:
2745                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
2746                 *process = -EAGAIN;
2747                 RETURN(0);
2748         }
2749 }
2750
2751 /*
2752  * Handle recovery. Return:
2753  *        +1: continue request processing;
2754  *       -ve: abort immediately with the given error code;
2755  *         0: send reply with error code in req->rq_status;
2756  */
2757 static int mdt_recovery(struct mdt_thread_info *info)
2758 {
2759         struct ptlrpc_request *req = mdt_info_req(info);
2760         int recovering;
2761         struct obd_device *obd;
2762
2763         ENTRY;
2764
2765         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2766         case MDS_CONNECT:
2767         case SEC_CTX_INIT:
2768         case SEC_CTX_INIT_CONT:
2769         case SEC_CTX_FINI:
2770                 {
2771 #if 0
2772                         int rc;
2773
2774                         rc = mdt_handle_idmap(info);
2775                         if (rc)
2776                                 RETURN(rc);
2777                         else
2778 #endif
2779                                 RETURN(+1);
2780                 }
2781         }
2782
2783         if (unlikely(!class_connected_export(req->rq_export))) {
2784                 CERROR("operation %d on unconnected MDS from %s\n",
2785                        lustre_msg_get_opc(req->rq_reqmsg),
2786                        libcfs_id2str(req->rq_peer));
2787                 /* FIXME: For CMD cleanup, when mds_B stops, requests from
2788                  * mds_A will get -ENOTCONN (especially ping requests),
2789                  * which makes mds_A start its deactivate timeout; then, when
2790                  * mds_A cleans up, the cleanup process will be suspended since
2791                  * the deactivate timeout is not zero.
2792                  */
2793                 req->rq_status = -ENOTCONN;
2794                 target_send_reply(req, -ENOTCONN, info->mti_fail_id);
2795                 RETURN(0);
2796         }
2797
2798         /* sanity check: if the xid matches, the request must be marked as a
2799          * resent or replayed */
2800         if (req_xid_is_last(req)) {
2801                 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
2802                       (MSG_RESENT | MSG_REPLAY))) {
2803                         DEBUG_REQ(D_WARNING, req, "rq_xid "LPU64" matches last_xid, "
2804                                   "expected REPLAY or RESENT flag (%x)", req->rq_xid,
2805                                   lustre_msg_get_flags(req->rq_reqmsg));
2806                         LBUG();
2807                         req->rq_status = -ENOTCONN;
2808                         RETURN(-ENOTCONN);
2809                 }
2810         }
2811
2812         /* else: note the opposite is not always true; a RESENT req after a
2813          * failover will usually not match the last_xid, since it was likely
2814          * never committed. A REPLAYed request will almost never match the
2815          * last xid, however it could for a committed, but still retained,
2816          * open. */
2817
2818         obd = req->rq_export->exp_obd;
2819
2820         /* Check for aborted recovery... */
2821         cfs_spin_lock_bh(&obd->obd_processing_task_lock);
2822         recovering = obd->obd_recovering;
2823         cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
2824         if (unlikely(recovering)) {
2825                 int rc;
2826                 int should_process;
2827                 DEBUG_REQ(D_INFO, req, "Got new replay");
2828                 rc = mdt_filter_recovery_request(req, obd, &should_process);
2829                 if (rc != 0 || !should_process)
2830                         RETURN(rc);
2831                 else if (should_process < 0) {
2832                         req->rq_status = should_process;
2833                         rc = ptlrpc_error(req);
2834                         RETURN(rc);
2835                 }
2836         }
2837         RETURN(+1);
2838 }
2839
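/*
 * Check that the request message carries the protocol version expected for
 * its opcode family: LUSTRE_OBD_VERSION for connect/disconnect, set_info,
 * ping and security-context requests, LUSTRE_MDS_VERSION for metadata,
 * quota, SEQ and FLD operations, LUSTRE_DLM_VERSION for lock requests and
 * LUSTRE_LOG_VERSION for llog requests.  Returns 0 on success, the error
 * from lustre_msg_check_version() on a version mismatch, or -ENOTSUPP for
 * an unknown opcode.
 */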
2840 static int mdt_msg_check_version(struct lustre_msg *msg)
2841 {
2842         int rc;
2843
2844         switch (lustre_msg_get_opc(msg)) {
2845         case MDS_CONNECT:
2846         case MDS_DISCONNECT:
2847         case MDS_SET_INFO:
2848         case OBD_PING:
2849         case SEC_CTX_INIT:
2850         case SEC_CTX_INIT_CONT:
2851         case SEC_CTX_FINI:
2852                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2853                 if (rc)
2854                         CERROR("bad opc %u version %08x, expecting %08x\n",
2855                                lustre_msg_get_opc(msg),
2856                                lustre_msg_get_version(msg),
2857                                LUSTRE_OBD_VERSION);
2858                 break;
2859         case MDS_GETSTATUS:
2860         case MDS_GETATTR:
2861         case MDS_GETATTR_NAME:
2862         case MDS_STATFS:
2863         case MDS_READPAGE:
2864         case MDS_WRITEPAGE:
2865         case MDS_IS_SUBDIR:
2866         case MDS_REINT:
2867         case MDS_CLOSE:
2868         case MDS_DONE_WRITING:
2869         case MDS_PIN:
2870         case MDS_SYNC:
2871         case MDS_GETXATTR:
2872         case MDS_SETXATTR:
2873         case MDS_GET_INFO:
2874         case MDS_QUOTACHECK:
2875         case MDS_QUOTACTL:
2876         case QUOTA_DQACQ:
2877         case QUOTA_DQREL:
2878         case SEQ_QUERY:
2879         case FLD_QUERY:
2880                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2881                 if (rc)
2882                         CERROR("bad opc %u version %08x, expecting %08x\n",
2883                                lustre_msg_get_opc(msg),
2884                                lustre_msg_get_version(msg),
2885                                LUSTRE_MDS_VERSION);
2886                 break;
2887         case LDLM_ENQUEUE:
2888         case LDLM_CONVERT:
2889         case LDLM_BL_CALLBACK:
2890         case LDLM_CP_CALLBACK:
2891                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2892                 if (rc)
2893                         CERROR("bad opc %u version %08x, expecting %08x\n",
2894                                lustre_msg_get_opc(msg),
2895                                lustre_msg_get_version(msg),
2896                                LUSTRE_DLM_VERSION);
2897                 break;
2898         case OBD_LOG_CANCEL:
2899         case LLOG_ORIGIN_HANDLE_CREATE:
2900         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2901         case LLOG_ORIGIN_HANDLE_READ_HEADER:
2902         case LLOG_ORIGIN_HANDLE_CLOSE:
2903         case LLOG_ORIGIN_HANDLE_DESTROY:
2904         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2905         case LLOG_CATINFO:
2906                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2907                 if (rc)
2908                         CERROR("bad opc %u version %08x, expecting %08x\n",
2909                                lustre_msg_get_opc(msg),
2910                                lustre_msg_get_version(msg),
2911                                LUSTRE_LOG_VERSION);
2912                 break;
2913         default:
2914                 CERROR("MDS unknown opcode %u\n", lustre_msg_get_opc(msg));
2915                 rc = -ENOTSUPP;
2916         }
2917         return rc;
2918 }
2919
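/*
 * Top-level dispatch for a single request: verify the message version, let
 * mdt_recovery() filter the request while the target is recovering, then
 * look up the handler for this opcode in the supplied slice and invoke it
 * through mdt_req_handle().  A return value of +1 from mdt_recovery() means
 * "process the request normally".
 */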
2920 static int mdt_handle0(struct ptlrpc_request *req,
2921                        struct mdt_thread_info *info,
2922                        struct mdt_opc_slice *supported)
2923 {
2924         struct mdt_handler *h;
2925         struct lustre_msg  *msg;
2926         int                 rc;
2927
2928         ENTRY;
2929
2930         if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_MDS_ALL_REQUEST_NET, OBD_FAIL_ONCE))
2931                 RETURN(0);
2932
2933         LASSERT(current->journal_info == NULL);
2934
2935         msg = req->rq_reqmsg;
2936         rc = mdt_msg_check_version(msg);
2937         if (likely(rc == 0)) {
2938                 rc = mdt_recovery(info);
2939                 if (likely(rc == +1)) {
2940                         h = mdt_handler_find(lustre_msg_get_opc(msg),
2941                                              supported);
2942                         if (likely(h != NULL)) {
2943                                 rc = mdt_req_handle(info, h, req);
2944                         } else {
2945                                 CERROR("Unsupported opcode: 0x%x\n",
2946                                        lustre_msg_get_opc(msg));
2947                                 req->rq_status = -ENOTSUPP;
2948                                 rc = ptlrpc_error(req);
2949                                 RETURN(rc);
2950                         }
2951                 }
2952         } else
2953                 CERROR(LUSTRE_MDT_NAME" drops malformed request\n");
2954         RETURN(rc);
2955 }
2956
2957 /*
2958  * MDT handler function called by a ptlrpc service thread when a request comes in.
2959  *
2960  * XXX common "target" functionality should be factored into a separate module
2961  * shared by mdt, ost and stand-alone services like fld.
2962  */
2963 static int mdt_handle_common(struct ptlrpc_request *req,
2964                              struct mdt_opc_slice *supported)
2965 {
2966         struct lu_env          *env;
2967         struct mdt_thread_info *info;
2968         int                     rc;
2969         ENTRY;
2970
2971         env = req->rq_svc_thread->t_env;
2972         LASSERT(env != NULL);
2973         LASSERT(env->le_ses != NULL);
2974         LASSERT(env->le_ctx.lc_thread == req->rq_svc_thread);
2975         info = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
2976         LASSERT(info != NULL);
2977
2978         mdt_thread_info_init(req, info);
2979
2980         rc = mdt_handle0(req, info, supported);
2981
2982         mdt_thread_info_fini(info);
2983         RETURN(rc);
2984 }
2985
2986 /*
2987  * This is called from the recovery code as the handler for _all_ RPC types,
2988  * including FLD and SEQ requests.
2989  */
2990 int mdt_recovery_handle(struct ptlrpc_request *req)
2991 {
2992         int rc;
2993         ENTRY;
2994
2995         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
2996         case FLD_QUERY:
2997                 rc = mdt_handle_common(req, mdt_fld_handlers);
2998                 break;
2999         case SEQ_QUERY:
3000                 rc = mdt_handle_common(req, mdt_seq_handlers);
3001                 break;
3002         default:
3003                 rc = mdt_handle_common(req, mdt_regular_handlers);
3004                 break;
3005         }
3006
3007         RETURN(rc);
3008 }
3009
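/*
 * Thin wrappers binding each ptlrpc service to its handler slice: regular
 * MDT requests, readpage/writepage, cross-MDS (xmds) requests, the metadata
 * sequence controller/server and data sequence server, and FLD.  They are
 * presumably registered as the request-handler callbacks when the
 * corresponding ptlrpc services are created elsewhere in this file.
 */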
3010 static int mdt_regular_handle(struct ptlrpc_request *req)
3011 {
3012         return mdt_handle_common(req, mdt_regular_handlers);
3013 }
3014
3015 static int mdt_readpage_handle(struct ptlrpc_request *req)
3016 {
3017         return mdt_handle_common(req, mdt_readpage_handlers);
3018 }
3019
3020 static int mdt_xmds_handle(struct ptlrpc_request *req)
3021 {
3022         return mdt_handle_common(req, mdt_xmds_handlers);
3023 }
3024
3025 static int mdt_mdsc_handle(struct ptlrpc_request *req)
3026 {
3027         return mdt_handle_common(req, mdt_seq_handlers);
3028 }
3029
3030 static int mdt_mdss_handle(struct ptlrpc_request *req)
3031 {
3032         return mdt_handle_common(req, mdt_seq_handlers);
3033 }
3034
3035 static int mdt_dtss_handle(struct ptlrpc_request *req)
3036 {
3037         return mdt_handle_common(req, mdt_seq_handlers);
3038 }
3039
3040 static int mdt_fld_handle(struct ptlrpc_request *req)
3041 {
3042         return mdt_handle_common(req, mdt_fld_handlers);
3043 }
3044
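/*
 * Intent operation codes, used to index the mdt_it_flavor[] table below.
 */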
3045 enum mdt_it_code {
3046         MDT_IT_OPEN,
3047         MDT_IT_OCREAT,
3048         MDT_IT_CREATE,
3049         MDT_IT_GETATTR,
3050         MDT_IT_READDIR,
3051         MDT_IT_LOOKUP,
3052         MDT_IT_UNLINK,
3053         MDT_IT_TRUNC,
3054         MDT_IT_GETXATTR,
3055         MDT_IT_NR
3056 };
3057
3058 static int mdt_intent_getattr(enum mdt_it_code opcode,
3059                               struct mdt_thread_info *info,
3060                               struct ldlm_lock **,
3061                               int);
3062 static int mdt_intent_reint(enum mdt_it_code opcode,
3063                             struct mdt_thread_info *info,
3064                             struct ldlm_lock **,
3065                             int);
3066
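/*
 * Per-intent descriptor: the expected request format, handler flags
 * (e.g. HABEO_REFERO, MUTABOR), the callback implementing the intent and,
 * for intents backed by a reint operation, the REINT_* opcode.  See the
 * illustrative sketch after the table.
 */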
3067 static struct mdt_it_flavor {
3068         const struct req_format *it_fmt;
3069         __u32                    it_flags;
3070         int                    (*it_act)(enum mdt_it_code ,
3071                                          struct mdt_thread_info *,
3072                                          struct ldlm_lock **,
3073                                          int);
3074         long                     it_reint;
3075 } mdt_it_flavor[] = {
3076         [MDT_IT_OPEN]     = {
3077                 .it_fmt   = &RQF_LDLM_INTENT,
3078                 /*.it_flags = HABEO_REFERO,*/
3079                 .it_flags = 0,
3080                 .it_act   = mdt_intent_reint,
3081                 .it_reint = REINT_OPEN
3082         },
3083         [MDT_IT_OCREAT]   = {
3084                 .it_fmt   = &RQF_LDLM_INTENT,
3085                 .it_flags = MUTABOR,
3086                 .it_act   = mdt_intent_reint,
3087                 .it_reint = REINT_OPEN
3088         },
3089         [MDT_IT_CREATE]   = {
3090                 .it_fmt   = &RQF_LDLM_INTENT,
3091                 .it_flags = MUTABOR,
3092                 .it_act   = mdt_intent_reint,
3093                 .it_reint = REINT_CREATE
3094         },
3095         [MDT_IT_GETATTR]  = {
3096                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3097                 .it_flags = HABEO_REFERO,
3098                 .it_act   = mdt_intent_getattr
3099         },
3100         [MDT_IT_READDIR]  = {
3101                 .it_fmt   = NULL,
3102                 .it_flags = 0,
3103                 .it_act   = NULL
3104         },
3105         [MDT_IT_LOOKUP]   = {
3106                 .it_fmt   = &RQF_LDLM_INTENT_GETATTR,
3107                 .it_flags = HABEO_REFERO,
3108                 .it_act   = mdt_intent_getattr
3109         },
3110         [MDT_IT_UNLINK]   = {
3111                 .it_fmt   = &RQF_LDLM_INTENT_UNLINK,
3112                 .it_flags = MUTABOR,
3113                 .it_act   = NULL,
3114                 .it_reint = REINT_UNLINK
3115         },
3116         [MDT_IT_TRUNC]    = {
3117                 .it_fmt   = NULL,
3118                 .it_flags = MUTABOR,
3119                 .it_act   = NULL
3120         },
3121         [MDT_IT_GETXATTR] = {
3122                 .it_fmt   = NULL,
3123                 .it_flags = 0,
3124                 .it_act   = NULL
3125         }
3126 };
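/*
 * Illustrative sketch only (not the actual dispatch code, which lives
 * further down in this file): an intent handler consults this table roughly
 * as follows, assuming 'opc', 'info', 'lockp' and 'flags' are the usual
 * intent-handling arguments:
 *
 *      const struct mdt_it_flavor *flv = &mdt_it_flavor[opc];
 *      int rc = -EOPNOTSUPP;
 *
 *      if (flv->it_fmt != NULL)
 *              req_capsule_extend(info->mti_pill, flv->it_fmt);
 *      if (flv->it_act != NULL)
 *              rc = flv->it_act(opc, info, lockp, flags);
 */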
3127
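/*
 * Replace the lock the client requested (*lockp) with the lock actually
 * granted while servicing the intent (new_lock, or the lock recorded in lh).
 * For a reconstructed reply the client already owns the lock and
 * ELDLM_LOCK_REPLACED is returned directly; otherwise the granted lock's
 * reader/writer references are dropped, its export, ASTs and remote handle
 * are fixed up for the client, and it is added to the export's lock hash
 * before being handed back.
 */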
3128 int mdt_intent_lock_replace(struct mdt_thread_info *info,
3129                             struct ldlm_lock **lockp,
3130                             struct ldlm_lock *new_lock,
3131                             struct mdt_lock_handle *lh,
3132                             int flags)
3133 {
3134         struct ptlrpc_request  *req = mdt_info_req(info);
3135         struct ldlm_lock       *lock = *lockp;
3136
3137         /*
3138          * Get a new lock only when a possible resent request did not find
3139          * an existing lock.
3140          */
3141         if (new_lock == NULL)
3142                 new_lock = ldlm_handle2lock_long(&lh->mlh_reg_lh, 0);
3143
3144         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY)) {
3145                 lh->mlh_reg_lh.cookie = 0;
3146                 RETURN(0);
3147         }
3148
3149         LASSERTF(new_lock != NULL,
3150                  "lockh "LPX64"\n", lh->mlh_reg_lh.cookie);
3151
3152         /*
3153          * If we've already given this lock to a client once, then we should
3154          * have no readers or writers.  Otherwise, we should have one reader
3155          * _or_ writer ref (which will be zeroed below) before returning the
3156          * lock to a client.
3157          */
3158         if (new_lock->l_export == req->rq_export) {
3159                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3160         } else {
3161                 LASSERT(new_lock->l_export == NULL);
3162                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3163         }
3164
3165         *lockp = new_lock;
3166
3167         if (new_lock->l_export == req->rq_export) {
3168                 /*
3169                  * Already gave this to the client, which means that we
3170                  * reconstructed a reply.
3171                  */
3172                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3173                         MSG_RESENT);
3174                 lh->mlh_reg_lh.cookie = 0;
3175                 RETURN(ELDLM_LOCK_REPLACED);
3176         }
3177
3178         /*
3179          * Fixup the lock to be given to the client.
3180          */
3181         lock_res_and_lock(new_lock);
3182         /* Zero new_lock->l_readers and new_lock->l_writers without
3183          * triggering a possible blocking AST. */
3184         while (new_lock->l_readers > 0) {
3185                 lu_ref_del(&new_lock->l_reference, "reader", new_lock);
3186                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3187                 new_lock->l_readers--;
3188         }
3189         while (new_lock->l_writers > 0) {
3190                 lu_ref_del(&new_lock->l_reference, "writer", new_lock);
3191                 lu_ref_del(&new_lock->l_reference, "user", new_lock);
3192                 new_lock->l_writers--;
3193         }
3194
3195         new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
3196         new_lock->l_blocking_ast = lock->l_blocking_ast;
3197         new_lock->l_completion_ast = lock->l_completion_ast;
3198         new_lock->l_remote_handle = lock->l_remote_handle;
3199         new_lock->l_flags &= ~LDLM_FL_LOCAL;
3200
3201         unlock_res_and_lock(new_lock);
3202
3203         cfs_hash_add(new_lock->l_export->exp_lock_hash,
3204                      &new_lock->l_remote_handle,
3205                      &new_lock->l_exp_hash);
3206
3207         LDLM_LOCK_RELEASE(new_lock);
3208         lh->mlh_reg_lh.cookie = 0;
3209
3210         RETURN(ELDLM_LOCK_REPLACED);
3211 }
3212
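/*
 * For a RESENT request, look the client's original lock up by its remote
 * handle in the export's lock hash.  If it is found (and differs from
 * new_lock), restore its cookie and granted mode into the mdt_lock_handle so
 * the earlier reply can be reconstructed instead of granting a second lock.
 */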
3213 static void mdt_intent_fixup_resent(struct mdt_thread_info *info,
3214                                     struct ldlm_lock *new_lock,
3215                                     struct ldlm_lock **old_lock,
3216                                     struct mdt_lock_handle *lh)
3217 {
3218         struct ptlrpc_request  *req = mdt_info_req(info);
3219         struct obd_export      *exp = req->rq_export;
3220         struct lustre_handle    remote_hdl;
3221         struct ldlm_request    *dlmreq;
3222         struct ldlm_lock       *lock;
3223
3224         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
3225                 return;
3226
3227         dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
3228         remote_hdl = dlmreq->lock_handle[0];
3229
3230         lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
3231         if (lock) {
3232                 if (lock != new_lock) {
3233                         lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
3234                         lh->mlh_reg_mode = lock->l_granted_mode;
3235
3236                         LDLM_DEBUG(lock, "Restoring lock cookie");
3237                         DEBUG_REQ(D_DLMTRACE, req,
3238                                   "restoring lock cookie "LPX64,
3239                                   lh->mlh_reg_lh.cookie);
3240                         if (old_lock)
3241                                 *old_lock = LDLM_LOCK_GET(lock);
3242                         cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3243                         return;
3244                 }
3245
3246                 cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
3247         }
3248
3249         /*
3250          * If the xid matches, then we know this is a resent request, and
3251          * allow it.  (It is probably an OPEN, for which we do not send a lock.)
3252          */