1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  lustre/mds/handler.c
5  *  Lustre Metadata Server (mds) request handler
6  *
7  *  Copyright (c) 2001, 2002 Cluster File Systems, Inc.
8  *   Author: Peter Braam <braam@clusterfs.com>
9  *   Author: Andreas Dilger <adilger@clusterfs.com>
10  *   Author: Phil Schwan <phil@clusterfs.com>
11  *   Author: Mike Shaver <shaver@clusterfs.com>
12  *
13  *   This file is part of Lustre, http://www.lustre.org.
14  *
15  *   Lustre is free software; you can redistribute it and/or
16  *   modify it under the terms of version 2 of the GNU General Public
17  *   License as published by the Free Software Foundation.
18  *
19  *   Lustre is distributed in the hope that it will be useful,
20  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
21  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22  *   GNU General Public License for more details.
23  *
24  *   You should have received a copy of the GNU General Public License
25  *   along with Lustre; if not, write to the Free Software
26  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27  */
28
29 #define EXPORT_SYMTAB
30 #define DEBUG_SUBSYSTEM S_MDS
31
32 #include <linux/module.h>
33 #include <linux/lustre_mds.h>
34 #include <linux/lustre_dlm.h>
35 #include <linux/init.h>
36 #include <linux/obd_class.h>
37 #include <linux/random.h>
38 #include <linux/locks.h>
39 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
40 #include <linux/buffer_head.h>
41 #endif
42 #include <linux/obd_lov.h>
43 #include <linux/lustre_mds.h>
44 #include <linux/lustre_fsfilt.h>
45 #include <linux/lprocfs_status.h>
46
47 static kmem_cache_t *mds_file_cache;
48
49 extern int mds_get_lovtgts(struct mds_obd *obd, int tgt_count,
50                            obd_uuid_t *uuidarray);
51 extern int mds_get_lovdesc(struct mds_obd  *obd, struct lov_desc *desc);
52 extern void mds_start_transno(struct mds_obd *mds);
53 extern int mds_finish_transno(struct mds_obd *mds, void *handle,
54                               struct ptlrpc_request *req, int rc);
55 static int mds_cleanup(struct obd_device * obddev);
56
57 extern struct lprocfs_vars status_var_nm_1[];
58 extern struct lprocfs_vars status_class_var[];
59
60 inline struct mds_obd *mds_req2mds(struct ptlrpc_request *req)
61 {
62         return &req->rq_export->exp_obd->u.mds;
63 }
64
65 static int mds_bulk_timeout(void *data)
66 {
67         struct ptlrpc_bulk_desc *desc = data;
68
69         ENTRY;
70         recovd_conn_fail(desc->bd_connection);
71         RETURN(1);
72 }
73
74 /* Assumes caller has already pushed into the kernel filesystem context */
75 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
76                         __u64 offset)
77 {
78         struct ptlrpc_bulk_desc *desc;
79         struct ptlrpc_bulk_page *bulk;
80         struct l_wait_info lwi;
81         char *buf;
82         int rc = 0;
83         ENTRY;
84
85         desc = ptlrpc_prep_bulk(req->rq_connection);
86         if (desc == NULL)
87                 GOTO(out, rc = -ENOMEM);
88
89         bulk = ptlrpc_prep_bulk_page(desc);
90         if (bulk == NULL)
91                 GOTO(cleanup_bulk, rc = -ENOMEM);
92
93         OBD_ALLOC(buf, PAGE_SIZE);
94         if (buf == NULL)
95                 GOTO(cleanup_bulk, rc = -ENOMEM);
96
97         rc = fsfilt_readpage(req->rq_export->exp_obd, file, buf, PAGE_SIZE,
98                              (loff_t *)&offset);
99
100         if (rc != PAGE_SIZE)
101                 GOTO(cleanup_buf, rc = -EIO);
102
103         bulk->bp_xid = req->rq_xid;
104         bulk->bp_buf = buf;
105         bulk->bp_buflen = PAGE_SIZE;
106         desc->bd_ptl_ev_hdlr = NULL;
107         desc->bd_portal = MDS_BULK_PORTAL;
108
109         rc = ptlrpc_send_bulk(desc);
110         if (rc)
111                 GOTO(cleanup_buf, rc);
112
113         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
114                 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
115                        OBD_FAIL_MDS_SENDPAGE, rc);
116                 ptlrpc_abort_bulk(desc);
117                 GOTO(cleanup_buf, rc);
118         }
119
120         lwi = LWI_TIMEOUT(obd_timeout * HZ, mds_bulk_timeout, desc);
121         rc = l_wait_event(desc->bd_waitq, desc->bd_flags & PTL_BULK_FL_SENT,
122                           &lwi);
123         if (rc) {
124                 if (rc != -ETIMEDOUT)
125                         LBUG();
126                 GOTO(cleanup_buf, rc);
127         }
128
129         EXIT;
130  cleanup_buf:
131         OBD_FREE(buf, PAGE_SIZE);
132  cleanup_bulk:
133         ptlrpc_bulk_decref(desc);
134  out:
135         return rc;
136 }
137
138 /*
139  * Look up a named entry in a directory, and get an LDLM lock on it.
140  * 'dir' is an inode for which an LDLM lock has already been taken.
141  *
142  * If we do not need an exclusive or write lock on this entry (e.g.
143  * a read lock for attribute lookup only) then we do not hold the
144  * directory semaphore on return.  It is up to the caller to know what
145  * type of lock it is getting, and clean up appropriately.
146  */
147 struct dentry *mds_name2locked_dentry(struct obd_device *obd,
148                                       struct dentry *dir, struct vfsmount **mnt,
149                                       char *name, int namelen, int lock_mode,
150                                       struct lustre_handle *lockh,
151                                       int dir_lock_mode)
152 {
153         struct dentry *dchild;
154         int flags = 0, rc;
155         __u64 res_id[3] = {0};
156         ENTRY;
157
158         down(&dir->d_inode->i_sem);
159         dchild = lookup_one_len(name, dir, namelen);
160         if (IS_ERR(dchild)) {
161                 CERROR("child lookup error %ld\n", PTR_ERR(dchild));
162                 up(&dir->d_inode->i_sem);
163                 LBUG();
164                 RETURN(dchild);
165         }
166         if (dir_lock_mode != LCK_EX && dir_lock_mode != LCK_PW) {
167                 up(&dir->d_inode->i_sem);
168                 ldlm_lock_decref(lockh, dir_lock_mode);
169         }
170
171         if (lock_mode == 0 || !dchild->d_inode)
172                 RETURN(dchild);
173
174         res_id[0] = dchild->d_inode->i_ino;
175         res_id[1] = dchild->d_inode->i_generation;
176         rc = ldlm_match_or_enqueue(NULL, NULL, obd->obd_namespace, NULL,
177                                    res_id, LDLM_PLAIN, NULL, 0, lock_mode,
178                                    &flags, ldlm_completion_ast,
179                                    mds_blocking_ast, NULL, 0, lockh);
180         if (rc != ELDLM_OK) {
181                 l_dput(dchild);
182                 if (dir_lock_mode == LCK_EX || dir_lock_mode == LCK_PW)
                            up(&dir->d_inode->i_sem);
183                 RETURN(ERR_PTR(-ENOLCK)); /* XXX translate ldlm code */
184         }
185
186         RETURN(dchild);
187 }
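
/*
 * Illustrative sketch (hypothetical caller, names are not from the tree):
 * with a LCK_PR directory lock, both the parent i_sem and the directory
 * lock are released inside mds_name2locked_dentry(), so the caller is left
 * to drop only the child lock (if the child existed) and the child dentry.
 */
#if 0
static int example_lookup_child(struct obd_device *obd, struct dentry *dparent,
                                struct lustre_handle *lockh, /* PR dir lock */
                                char *name, int namelen)
{
        struct dentry *dchild;

        /* On return the PR lock on the directory has been dropped and, if
         * the child exists, *lockh now holds a PR lock on the child. */
        dchild = mds_name2locked_dentry(obd, dparent, NULL, name, namelen,
                                        LCK_PR, lockh, LCK_PR);
        if (IS_ERR(dchild))
                return PTR_ERR(dchild);

        /* ... examine dchild->d_inode under the child lock ... */

        if (dchild->d_inode)
                ldlm_lock_decref(lockh, LCK_PR);
        l_dput(dchild);
        return 0;
}
#endif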
188
189 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
190                                      struct vfsmount **mnt, int lock_mode,
191                                      struct lustre_handle *lockh)
192 {
193         struct mds_obd *mds = &obd->u.mds;
194         struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
195         int flags = 0, rc;
196         __u64 res_id[3] = {0};
197         ENTRY;
198
199         if (IS_ERR(de))
200                 RETURN(de);
201
202         res_id[0] = de->d_inode->i_ino;
203         res_id[1] = de->d_inode->i_generation;
204         rc = ldlm_match_or_enqueue(NULL, NULL, obd->obd_namespace, NULL,
205                                    res_id, LDLM_PLAIN, NULL, 0, lock_mode,
206                                    &flags, ldlm_completion_ast,
207                                    mds_blocking_ast, NULL, 0, lockh);
208         if (rc != ELDLM_OK) {
209                 l_dput(de);
210                 retval = ERR_PTR(-ENOLCK); /* XXX translate ldlm code */
211         }
212
213         RETURN(retval);
214 }
215
216 #ifndef DCACHE_DISCONNECTED
217 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
218 #endif
219
220 /* Look up an entry by inode number. */
221 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
222                               struct vfsmount **mnt)
223 {
224         /* stolen from NFS */
225         struct super_block *sb = mds->mds_sb;
226         unsigned long ino = fid->id;
227         __u32 generation = fid->generation;
228         struct inode *inode;
229         struct list_head *lp;
230         struct dentry *result;
231
232         if (ino == 0)
233                 RETURN(ERR_PTR(-ESTALE));
234
235         inode = iget(sb, ino);
236         if (inode == NULL)
237                 RETURN(ERR_PTR(-ENOMEM));
238
239         CDEBUG(D_DENTRY, "--> mds_fid2dentry: sb %p\n", inode->i_sb);
240
241         if (is_bad_inode(inode) ||
242             (generation && inode->i_generation != generation)) {
243                 /* we didn't find the right inode.. */
244                 CERROR("bad inode %lu, link: %d ct: %d or version  %u/%u\n",
245                        inode->i_ino, inode->i_nlink,
246                        atomic_read(&inode->i_count), inode->i_generation,
247                        generation);
248                 iput(inode);
249                 RETURN(ERR_PTR(-ENOENT));
250         }
251
252         /* now to find a dentry. If possible, get a well-connected one */
253         if (mnt)
254                 *mnt = mds->mds_vfsmnt;
255         spin_lock(&dcache_lock);
256         list_for_each(lp, &inode->i_dentry) {
257                 result = list_entry(lp, struct dentry, d_alias);
258                 if (!(result->d_flags & DCACHE_DISCONNECTED)) {
259                         dget_locked(result);
260                         result->d_vfs_flags |= DCACHE_REFERENCED;
261                         spin_unlock(&dcache_lock);
262                         iput(inode);
263                         if (mnt)
264                                 mntget(*mnt);
265                         return result;
266                 }
267         }
268         spin_unlock(&dcache_lock);
269         result = d_alloc_root(inode);
270         if (result == NULL) {
271                 iput(inode);
272                 return ERR_PTR(-ENOMEM);
273         }
274         if (mnt)
275                 mntget(*mnt);
276         result->d_flags |= DCACHE_DISCONNECTED;
277         return result;
278 }
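
/*
 * Illustrative sketch (hypothetical helper): mds_fid2dentry() returns a
 * dentry holding its own reference and, when a vfsmount pointer is passed,
 * an extra reference on the mount, so a caller that does not hand both off
 * to dentry_open() must drop them itself (compare the error path in
 * mds_open() below).
 */
#if 0
static int example_fid_peek(struct mds_obd *mds, struct ll_fid *fid)
{
        struct vfsmount *mnt;
        struct dentry *de = mds_fid2dentry(mds, fid, &mnt);

        if (IS_ERR(de))
                return PTR_ERR(de);

        /* ... inspect de->d_inode ... */

        l_dput(de);   /* reference taken by mds_fid2dentry */
        mntput(mnt);  /* reference taken because we passed &mnt */
        return 0;
}
#endif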
279
280 /* Establish a connection to the MDS.
281  *
282  * This will set up an export structure for the client to hold state data
283  * about that client, like open files, the last operation number it did
284  * on the server, etc.
285  */
286 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
287                        obd_uuid_t cluuid, struct recovd_obd *recovd,
288                        ptlrpc_recovery_cb_t recover)
289 {
290         struct obd_export *exp;
291         struct mds_export_data *med;
292         struct mds_client_data *mcd;
293         struct list_head *p;
294         int rc;
295         ENTRY;
296
297         if (!conn || !obd || !cluuid)
298                 RETURN(-EINVAL);
299
300         /* lctl gets a backstage, all-access pass. */
301         if (!strcmp(cluuid, "OBD_CLASS_UUID"))
302                 goto dont_check_exports;
303
304         spin_lock(&obd->obd_dev_lock);
305         list_for_each(p, &obd->obd_exports) {
306                 exp = list_entry(p, struct obd_export, exp_obd_chain);
307                 mcd = exp->exp_mds_data.med_mcd;
308                 if (!mcd) {
309                         CERROR("FYI: NULL mcd - simultaneous connects\n");
310                         continue;
311                 }
312                 if (!memcmp(cluuid, mcd->mcd_uuid, sizeof mcd->mcd_uuid)) {
313                         spin_unlock(&obd->obd_dev_lock);
314                         LASSERT(exp->exp_obd == obd);
315
316                         RETURN(target_handle_reconnect(conn, exp, cluuid));
317                 }
318         }
319         spin_unlock(&obd->obd_dev_lock);
320
321         if (obd->u.mds.mds_recoverable_clients != 0) {
322                 CERROR("denying connection for new client %s: in recovery\n",
323                        cluuid);
324                 RETURN(-EBUSY);
325         }
326
327  dont_check_exports:
328         /* XXX There is a small race between checking the list and adding a
329          * new connection for the same UUID, but the real threat (list
330          * corruption when multiple different clients connect) is solved.
331          *
332          * There is a second race between adding the export to the list,
333          * and filling in the client data below.  Hence skipping the case
334          * of NULL mcd above.  We should already be controlling multiple
335          * connects at the client, and we can't hold the spinlock over
336          * memory allocations without risk of deadlocking.
337          */
338         rc = class_connect(conn, obd, cluuid);
339         if (rc)
340                 RETURN(rc);
341         exp = class_conn2export(conn);
342         LASSERT(exp);
343         med = &exp->exp_mds_data;
344
345         OBD_ALLOC(mcd, sizeof(*mcd));
346         if (!mcd) {
347                 CERROR("mds: out of memory for client data\n");
348                 GOTO(out_export, rc = -ENOMEM);
349         }
350
351         memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
352         med->med_mcd = mcd;
353
354         INIT_LIST_HEAD(&med->med_open_head);
355         spin_lock_init(&med->med_open_lock);
356
357         rc = mds_client_add(&obd->u.mds, med, -1);
358         if (rc)
359                 GOTO(out_mcd, rc);
360
361         RETURN(0);
362
363 out_mcd:
364         OBD_FREE(mcd, sizeof(*mcd));
365 out_export:
366         class_disconnect(conn);
367
368         return rc;
369 }
370
371 /* Call with med->med_open_lock held, please. */
372 inline int mds_close_mfd(struct mds_file_data *mfd, struct mds_export_data *med)
373 {
374         struct file *file = mfd->mfd_file;
375         LASSERT(file->private_data == mfd);
376
377         list_del(&mfd->mfd_list);
378         mfd->mfd_servercookie = DEAD_HANDLE_MAGIC;
379         kmem_cache_free(mds_file_cache, mfd);
380
381         return filp_close(file, 0);
382 }
383
384 static int mds_disconnect(struct lustre_handle *conn)
385 {
386         struct obd_export *export = class_conn2export(conn);
387         struct list_head *tmp, *n;
388         struct mds_export_data *med = &export->exp_mds_data;
389         int rc;
390         ENTRY;
391
392         /*
393          * Close any open files.
394          */
395         spin_lock(&med->med_open_lock);
396         list_for_each_safe(tmp, n, &med->med_open_head) {
397                 struct mds_file_data *mfd =
398                         list_entry(tmp, struct mds_file_data, mfd_list);
399                 CERROR("force closing client file handle for %*s\n",
400                        mfd->mfd_file->f_dentry->d_name.len,
401                        mfd->mfd_file->f_dentry->d_name.name);
402                 rc = mds_close_mfd(mfd, med);
403                 if (rc)
404                         CDEBUG(D_INODE, "Error closing file: %d\n", rc);
405         }
406         spin_unlock(&med->med_open_lock);
407
408         ldlm_cancel_locks_for_export(export);
409         mds_client_free(export);
410
411         rc = class_disconnect(conn);
412
413         RETURN(rc);
414 }
415
416 /*
417  * XXX This is NOT guaranteed to flush all transactions to disk (even though
418  *     it is equivalent to calling sync()) because it only _starts_ the flush
419  *     and does not wait for completion.  It's better than nothing though.
420  *     What we really want is a mild form of fsync_dev_lockfs(), which is
421  *     non-standard, or to enable do_sync_supers in ext3 just for this call.
422  */
423 static void mds_fsync_super(struct super_block *sb)
424 {
425         lock_kernel();
426         lock_super(sb);
427         if (sb->s_dirt && sb->s_op && sb->s_op->write_super)
428                 sb->s_op->write_super(sb);
429         unlock_super(sb);
430         unlock_kernel();
431 }
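
/*
 * Illustrative sketch (hypothetical, 2.4-only): if waiting for the flush
 * mattered here, the write_super() call could be followed by fsync_dev(),
 * the same call mds_update_server_data() uses below to push last_rcvd to
 * disk, which also waits for the device's dirty buffers.
 */
#if 0
static void example_fsync_super_wait(struct super_block *sb)
{
        mds_fsync_super(sb);      /* start the superblock write-out */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
        fsync_dev(sb->s_dev);     /* flush and wait on the whole device */
#endif
}
#endif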
432
433 static int mds_getstatus(struct ptlrpc_request *req)
434 {
435         struct mds_obd *mds = mds_req2mds(req);
436         struct mds_body *body;
437         int rc, size = sizeof(*body);
438         ENTRY;
439
440         rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
441         if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
442                 CERROR("mds: out of memory for message: size=%d\n", size);
443                 req->rq_status = -ENOMEM;
444                 RETURN(-ENOMEM);
445         }
446
447         /* Flush any outstanding transactions to disk so the client will
448          * get the latest last_committed value and can drop their local
449          * requests if they have any.  This would be fsync_super() if it
450          * was exported.
451          */
452         mds_fsync_super(mds->mds_sb);
453
454         body = lustre_msg_buf(req->rq_repmsg, 0);
455         memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
456
457         /* the last_committed and last_xid fields are filled in for all
458          * replies already - no need to do so here also.
459          */
460         RETURN(0);
461 }
462
463 static int mds_getlovinfo(struct ptlrpc_request *req)
464 {
465         struct mds_obd *mds = mds_req2mds(req);
466         struct mds_status_req *streq;
467         struct lov_desc *desc;
468         int tgt_count;
469         int rc, size[2] = {sizeof(*desc)};
470         ENTRY;
471
472         streq = lustre_msg_buf(req->rq_reqmsg, 0);
473         streq->flags = NTOH__u32(streq->flags);
474         streq->repbuf = NTOH__u32(streq->repbuf);
475         size[1] = streq->repbuf;
476
477         rc = lustre_pack_msg(2, size, NULL, &req->rq_replen, &req->rq_repmsg);
478         if (rc) {
479                 CERROR("mds: out of memory for message: size=%d\n", size[1]);
480                 req->rq_status = -ENOMEM;
481                 RETURN(-ENOMEM);
482         }
483
484         if (!mds->mds_has_lov_desc) {
485                 req->rq_status = -ENOENT;
486                 RETURN(0);
487         }
488
489         desc = lustre_msg_buf(req->rq_repmsg, 0);
490         memcpy(desc, &mds->mds_lov_desc, sizeof *desc);
491         lov_packdesc(desc);
492         tgt_count = le32_to_cpu(desc->ld_tgt_count);
493         if (tgt_count * sizeof(obd_uuid_t) > streq->repbuf) {
494                 CERROR("too many targets, enlarge client buffers\n");
495                 req->rq_status = -ENOSPC;
496                 RETURN(0);
497         }
498
499         rc = mds_get_lovtgts(mds, tgt_count,
500                              lustre_msg_buf(req->rq_repmsg, 1));
501         if (rc) {
502                 CERROR("get_lovtgts error %d\n", rc);
503                 req->rq_status = rc;
504                 RETURN(0);
505         }
506         RETURN(0);
507 }
508
509 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
510                      void *data, __u32 data_len, int flag)
511 {
512         int do_ast;
513         ENTRY;
514
515         if (flag == LDLM_CB_CANCELING) {
516                 /* Don't need to do anything here. */
517                 RETURN(0);
518         }
519
520         /* XXX layering violation!  -phil */
521         l_lock(&lock->l_resource->lr_namespace->ns_lock);
522         lock->l_flags |= LDLM_FL_CBPENDING;
523         do_ast = (!lock->l_readers && !lock->l_writers);
524         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
525
526         if (do_ast) {
527                 struct lustre_handle lockh;
528                 int rc;
529
530                 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
531                 ldlm_lock2handle(lock, &lockh);
532                 rc = ldlm_cli_cancel(&lockh);
533                 if (rc < 0)
534                         CERROR("ldlm_cli_cancel: %d\n", rc);
535         } else
536                 LDLM_DEBUG(lock, "Lock still has references, will be "
537                            "cancelled later");
538         RETURN(0);
539 }
540
541 int mds_pack_md(struct mds_obd *mds, struct ptlrpc_request *req,
542                 int offset, struct mds_body *body, struct inode *inode)
543 {
544         struct lov_mds_md *lmm;
545         int lmm_size = req->rq_repmsg->buflens[offset];
546         int rc;
547
548         if (lmm_size == 0) {
549                 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n", inode->i_ino);
550                 RETURN(0);
551         }
552
553         lmm = lustre_msg_buf(req->rq_repmsg, offset);
554
555         /* I don't really like this, but it is a sanity check on the client
556          * MD request.  However, if the client doesn't know how much space
557          * to reserve for the MD, this shouldn't be fatal either...
558          */
559         if (lmm_size > mds->mds_max_mdsize) {
560                 CERROR("Reading MD for inode %lu of %d bytes > max %d\n",
561                        inode->i_ino, lmm_size, mds->mds_max_mdsize);
562                 // RETURN(-EINVAL);
563         }
564
565         /* We don't need to store the reply size, because this buffer is
566          * discarded right after unpacking, and the LOV can figure out the
567          * size itself from the ost count.
568          */
569         if ((rc = fsfilt_get_md(req->rq_export->exp_obd, inode,
570                                 lmm, lmm_size)) < 0) {
571                 CDEBUG(D_INFO, "No md for ino %lu: rc = %d\n", inode->i_ino, rc);
572         } else if (rc > 0) {
573                 body->valid |= OBD_MD_FLEASIZE;
574                 rc = 0;
575         }
576
577         return rc;
578 }
579
580 static int mds_getattr_internal(struct mds_obd *mds, struct dentry *dentry,
581                                 struct ptlrpc_request *req,
582                                 struct mds_body *reqbody, int reply_off)
583 {
584         struct mds_body *body;
585         struct inode *inode = dentry->d_inode;
586         int rc = 0;
587         ENTRY;
588
589         if (inode == NULL)
590                 RETURN(-ENOENT);
591
592         body = lustre_msg_buf(req->rq_repmsg, reply_off);
593
594         mds_pack_inode2fid(&body->fid1, inode);
595         mds_pack_inode2body(body, inode);
596
597         if (S_ISREG(inode->i_mode) && reqbody->valid & OBD_MD_FLEASIZE) {
598                 rc = mds_pack_md(mds, req, reply_off + 1, body, inode);
599         } else if (S_ISLNK(inode->i_mode) && reqbody->valid & OBD_MD_LINKNAME) {
600                 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1);
601                 int len = req->rq_repmsg->buflens[reply_off + 1];
602
603                 rc = inode->i_op->readlink(dentry, symname, len);
604                 if (rc < 0) {
605                         CERROR("readlink failed: %d\n", rc);
606                 } else {
607                         CDEBUG(D_INODE, "read symlink dest %s\n", symname);
608                         body->valid |= OBD_MD_LINKNAME;
609                         rc = 0;
610                 }
611         }
612         RETURN(rc);
613 }
614
615 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
616                                 int offset)
617 {
618         struct mds_obd *mds = mds_req2mds(req);
619         struct mds_body *body;
620         int rc = 0, size[2] = {sizeof(*body)}, bufcount = 1;
621         ENTRY;
622
623         body = lustre_msg_buf(req->rq_reqmsg, offset);
624
625         if (S_ISREG(inode->i_mode) && body->valid & OBD_MD_FLEASIZE) {
626                 int rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0);
627                 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
628                        rc, inode->i_ino);
629                 if (rc < 0) {
630                         if (rc != -ENODATA)
631                                 CERROR("error getting inode %lu MD: rc = %d\n",
632                                        inode->i_ino, rc);
633                         size[bufcount] = 0;
634                 } else if (rc > mds->mds_max_mdsize) {
635                         size[bufcount] = 0;
636                         CERROR("MD size %d larger than maximum possible %u\n",
637                                rc, mds->mds_max_mdsize);
638                 } else
639                         size[bufcount] = rc;
640                 bufcount++;
641         } else if (body->valid & OBD_MD_LINKNAME) {
642                 size[bufcount] = MIN(inode->i_size + 1, body->size);
643                 bufcount++;
644                 CDEBUG(D_INODE, "symlink size: %Lu, reply space: "LPU64"\n",
645                        inode->i_size + 1, body->size);
646         }
647
648         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
649                 CERROR("failed MDS_GETATTR_PACK test\n");
650                 req->rq_status = -ENOMEM;
651                 GOTO(out, rc = -ENOMEM);
652         }
653
654         rc = lustre_pack_msg(bufcount, size, NULL, &req->rq_replen,
655                              &req->rq_repmsg);
656         if (rc) {
657                 CERROR("out of memory\n");
658                 req->rq_status = rc;
659                 GOTO(out, rc);
660         }
661
662         EXIT;
663  out:
664         return(rc);
665 }
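
/*
 * Worked example of the sizing above: for a regular file with
 * OBD_MD_FLEASIZE requested, the reply carries two buffers, one of
 * sizeof(struct mds_body) and one sized by whatever length
 * fsfilt_get_md(obd, inode, NULL, 0) reports for the stored LOV EA
 * (clamped to 0 on error or if it exceeds mds_max_mdsize).  For a symlink
 * with OBD_MD_LINKNAME requested, the second buffer is
 * MIN(i_size + 1, body->size): the link target plus its NUL, but never
 * more than the client reserved for the reply.
 */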
666
667 static int mds_getattr_name(int offset, struct ptlrpc_request *req)
668 {
669         struct mds_obd *mds = mds_req2mds(req);
670         struct obd_device *obd = req->rq_export->exp_obd;
671         struct obd_run_ctxt saved;
672         struct mds_body *body;
673         struct dentry *de = NULL, *dchild = NULL;
674         struct inode *dir;
675         struct lustre_handle lockh;
676         char *name;
677         int namelen, flags = 0, lock_mode, rc = 0;
678         struct obd_ucred uc;
679         __u64 res_id[3] = {0, 0, 0};
680         ENTRY;
681
682         LASSERT(!strcmp(obd->obd_type->typ_name, "mds"));
683
684         if (req->rq_reqmsg->bufcount <= offset + 1) {
685                 LBUG();
686                 GOTO(out_pre_de, rc = -EINVAL);
687         }
688
689         body = lustre_msg_buf(req->rq_reqmsg, offset);
690         name = lustre_msg_buf(req->rq_reqmsg, offset + 1);
691         namelen = req->rq_reqmsg->buflens[offset + 1];
692         /* requests were at offset 2, replies go back at 1 */
693         if (offset)
694                 offset = 1;
695
696         uc.ouc_fsuid = body->fsuid;
697         uc.ouc_fsgid = body->fsgid;
698         uc.ouc_cap = body->capability;
699         push_ctxt(&saved, &mds->mds_ctxt, &uc);
700         de = mds_fid2dentry(mds, &body->fid1, NULL);
701         if (IS_ERR(de)) {
702                 GOTO(out_pre_de, rc = PTR_ERR(de));
703         }
704
705         dir = de->d_inode;
706         CDEBUG(D_INODE, "parent ino %lu, name %*s\n", dir->i_ino,namelen,name);
707
708         lock_mode = LCK_PR;
709         res_id[0] = dir->i_ino;
710         res_id[1] = dir->i_generation;
711
712         rc = ldlm_lock_match(obd->obd_namespace, res_id, LDLM_PLAIN,
713                              NULL, 0, lock_mode, &lockh);
714         if (rc == 0) {
715                 LDLM_DEBUG_NOLOCK("enqueue res "LPU64, res_id[0]);
716                 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, NULL,
717                                       res_id, LDLM_PLAIN, NULL, 0, lock_mode,
718                                       &flags, ldlm_completion_ast,
719                                       mds_blocking_ast, NULL, 0, &lockh);
720                 if (rc != ELDLM_OK) {
721                         CERROR("lock enqueue: err: %d\n", rc);
722                         GOTO(out_create_de, rc = -EIO);
723                 }
724         }
725         ldlm_lock_dump_handle(D_OTHER, &lockh);
726
727         down(&dir->i_sem);
728         dchild = lookup_one_len(name, de, namelen - 1);
729         up(&dir->i_sem);
730         if (IS_ERR(dchild)) {
731                 CDEBUG(D_INODE, "child lookup error %ld\n", PTR_ERR(dchild));
732                 GOTO(out_create_dchild, rc = PTR_ERR(dchild));
733         } else if (dchild->d_inode == NULL) {
734                 GOTO(out_create_dchild, rc = -ENOENT);
735         }
736
737         if (req->rq_repmsg == NULL)
738                 mds_getattr_pack_msg(req, dchild->d_inode, offset);
739
740         rc = mds_getattr_internal(mds, dchild, req, body, offset);
741
742         EXIT;
743 out_create_dchild:
744         l_dput(dchild);
745         ldlm_lock_decref(&lockh, lock_mode);
746 out_create_de:
747         l_dput(de);
748 out_pre_de:
749         req->rq_status = rc;
750         pop_ctxt(&saved, &mds->mds_ctxt, &uc);
751         return rc;
752 }
753
754 static int mds_getattr(int offset, struct ptlrpc_request *req)
755 {
756         struct mds_obd *mds = mds_req2mds(req);
757         struct obd_run_ctxt saved;
758         struct dentry *de;
759         struct mds_body *body;
760         struct obd_ucred uc;
761         int rc = 0;
762         ENTRY;
763
764         body = lustre_msg_buf(req->rq_reqmsg, offset);
765         uc.ouc_fsuid = body->fsuid;
766         uc.ouc_fsgid = body->fsgid;
767         uc.ouc_cap = body->capability;
768         push_ctxt(&saved, &mds->mds_ctxt, &uc);
769         de = mds_fid2dentry(mds, &body->fid1, NULL);
770         if (IS_ERR(de)) {
771                 rc = req->rq_status = -ENOENT;
772                 GOTO(out_pop, PTR_ERR(de));
773         }
774
775         rc = mds_getattr_pack_msg(req, de->d_inode, offset);
776
777         req->rq_status = mds_getattr_internal(mds, de, req, body, 0);
778
779         l_dput(de);
780         EXIT;
781 out_pop:
782         pop_ctxt(&saved, &mds->mds_ctxt, &uc);
783         return rc;
784 }
785
786 static int mds_statfs(struct ptlrpc_request *req)
787 {
788         struct obd_device *obd = req->rq_export->exp_obd;
789         struct obd_statfs *osfs;
790         int rc, size = sizeof(*osfs);
791         ENTRY;
792
793         rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
794         if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
795                 CERROR("mds: statfs lustre_pack_msg failed: rc = %d\n", rc);
796                 GOTO(out, rc);
797         }
798
799         osfs = lustre_msg_buf(req->rq_repmsg, 0);
800         rc = fsfilt_statfs(obd, obd->u.mds.mds_sb, osfs);
801         if (rc) {
802                 CERROR("mds: statfs failed: rc %d\n", rc);
803                 GOTO(out, rc);
804         }
805         obd_statfs_pack(osfs, osfs);
806
807         EXIT;
808 out:
809         req->rq_status = rc;
810         return 0;
811 }
812
813 static struct mds_file_data *mds_handle2mfd(struct lustre_handle *handle)
814 {
815         struct mds_file_data *mfd = NULL;
816         ENTRY;
817
818         if (!handle || !handle->addr)
819                 RETURN(NULL);
820
821         mfd = (struct mds_file_data *)(unsigned long)(handle->addr);
822         if (!kmem_cache_validate(mds_file_cache, mfd))
823                 RETURN(NULL);
824
825         if (mfd->mfd_servercookie != handle->cookie)
826                 RETURN(NULL);
827
828         RETURN(mfd);
829 }
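
/*
 * Handle validation scheme: handle->addr carries the kernel address of the
 * mds_file_data set up in mds_open() below, and handle->cookie carries the
 * random mfd_servercookie minted there.  The address is only trusted after
 * kmem_cache_validate() confirms it still lies in mds_file_cache and the
 * cookie matches (DEAD_HANDLE_MAGIC marks a closed one), so a stale or
 * forged handle turns into -ESTALE in mds_close() rather than a wild
 * pointer dereference.
 */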
830
831 static int mds_store_md(struct mds_obd *mds, struct ptlrpc_request *req,
832                         int offset, struct mds_body *body, struct inode *inode)
833 {
834         struct obd_device *obd = req->rq_export->exp_obd;
835         struct lov_mds_md *lmm = lustre_msg_buf(req->rq_reqmsg, offset);
836         int lmm_size = req->rq_reqmsg->buflens[offset];
837         struct obd_run_ctxt saved;
838         struct obd_ucred uc;
839         void *handle;
840         int rc, rc2;
841         ENTRY;
842
843         /* I don't really like this, but it is a sanity check on the client
844          * MD request.
845          */
846         if (lmm_size > mds->mds_max_mdsize) {
847                 CERROR("Saving MD for inode %lu of %d bytes > max %d\n",
848                        inode->i_ino, lmm_size, mds->mds_max_mdsize);
849                 //RETURN(-EINVAL);
850         }
851
852         CDEBUG(D_INODE, "storing %d bytes MD for inode %lu\n",
853                lmm_size, inode->i_ino);
854         uc.ouc_fsuid = body->fsuid;
855         uc.ouc_fsgid = body->fsgid;
856         uc.ouc_cap = body->capability;
857         push_ctxt(&saved, &mds->mds_ctxt, &uc);
858         mds_start_transno(mds);
859         handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR);
860         if (IS_ERR(handle)) {
861                 rc = PTR_ERR(handle);
862                 mds_finish_transno(mds, handle, req, rc);
863                 GOTO(out_ea, rc);
864         }
865
866         rc = fsfilt_set_md(obd, inode, handle, lmm, lmm_size);
867         rc = mds_finish_transno(mds, handle, req, rc);
868
869         rc2 = fsfilt_commit(obd, inode, handle);
870         if (rc2 && !rc)
871                 rc = rc2;
872 out_ea:
873         pop_ctxt(&saved, &mds->mds_ctxt, &uc);
874
875         RETURN(rc);
876 }
877
878 static int mds_open(struct ptlrpc_request *req)
879 {
880         struct mds_obd *mds = mds_req2mds(req);
881         struct mds_body *body;
882         struct mds_export_data *med;
883         struct mds_file_data *mfd;
884         struct dentry *de;
885         struct file *file;
886         struct vfsmount *mnt;
887         __u32 flags;
888         struct list_head *tmp;
889         int rc, size = sizeof(*body);
890         ENTRY;
891
892         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OPEN_PACK)) {
893                 CERROR("test case OBD_FAIL_MDS_OPEN_PACK\n");
894                 req->rq_status = -ENOMEM;
895                 RETURN(-ENOMEM);
896         }
897
898         rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
899         if (rc) {
900                 CERROR("mds: pack error: rc = %d\n", rc);
901                 req->rq_status = rc;
902                 RETURN(rc);
903         }
904
905         body = lustre_msg_buf(req->rq_reqmsg, 0);
906
907         /* was this animal open already and the client lost the reply? */
908         /* XXX need some way to detect a reopen, to avoid locked list walks */
909         med = &req->rq_export->exp_mds_data;
910         spin_lock(&med->med_open_lock);
911         list_for_each(tmp, &med->med_open_head) {
912                 mfd = list_entry(tmp, typeof(*mfd), mfd_list);
913                 if (!memcmp(&mfd->mfd_clienthandle, &body->handle,
914                             sizeof(mfd->mfd_clienthandle)) &&
915                     body->fid1.id == mfd->mfd_file->f_dentry->d_inode->i_ino) {
916                         de = mfd->mfd_file->f_dentry;
917                         spin_unlock(&med->med_open_lock);
918                         CERROR("Reopening "LPD64"\n", body->fid1.id);
919                         GOTO(out_pack, rc = 0);
920                 }
921         }
922         spin_unlock(&med->med_open_lock);
923
924         mfd = kmem_cache_alloc(mds_file_cache, GFP_KERNEL);
925         if (!mfd) {
926                 CERROR("mds: out of memory\n");
927                 req->rq_status = -ENOMEM;
928                 RETURN(0);
929         }
930
931         de = mds_fid2dentry(mds, &body->fid1, &mnt);
932         if (IS_ERR(de))
933                 GOTO(out_free, rc = PTR_ERR(de));
934
935         /* check if this inode has seen a delayed object creation */
936         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MDS_OPEN_HAS_EA) {
937                 rc = mds_store_md(mds, req, 1, body, de->d_inode);
938                 if (rc) {
939                         l_dput(de);
940                         mntput(mnt);
941                         GOTO(out_free, rc);
942                 }
943         }
944
945         flags = body->flags;
946         /* dentry_open does a dput(de) and mntput(mnt) on error */
947         file = dentry_open(de, mnt, flags & ~O_DIRECT);
948         if (IS_ERR(file)) {
949                 rc = PTR_ERR(file);
950                 GOTO(out_free, 0);
951         }
952
953         file->private_data = mfd;
954         mfd->mfd_file = file;
955         memcpy(&mfd->mfd_clienthandle, &body->handle, sizeof(body->handle));
956         get_random_bytes(&mfd->mfd_servercookie, sizeof(mfd->mfd_servercookie));
957         spin_lock(&med->med_open_lock);
958         list_add(&mfd->mfd_list, &med->med_open_head);
959         spin_unlock(&med->med_open_lock);
960
961 out_pack:
962         body = lustre_msg_buf(req->rq_repmsg, 0);
963         mds_pack_inode2fid(&body->fid1, de->d_inode);
964         mds_pack_inode2body(body, de->d_inode);
965         body->handle.addr = (__u64)(unsigned long)mfd;
966         body->handle.cookie = mfd->mfd_servercookie;
967         CDEBUG(D_INODE, "llite file "LPX64": addr %p, cookie "LPX64"\n",
968                mfd->mfd_clienthandle.addr, mfd, mfd->mfd_servercookie);
969         RETURN(0);
970
971 out_free:
972         mfd->mfd_servercookie = DEAD_HANDLE_MAGIC;
973         kmem_cache_free(mds_file_cache, mfd);
974         req->rq_status = rc;
975         RETURN(0);
976 }
977
978 static int mds_close(struct ptlrpc_request *req)
979 {
980         struct mds_export_data *med = &req->rq_export->exp_mds_data;
981         struct mds_body *body;
982         struct mds_file_data *mfd;
983         int rc;
984         ENTRY;
985
986         body = lustre_msg_buf(req->rq_reqmsg, 0);
987
988         mfd = mds_handle2mfd(&body->handle);
989         if (!mfd) {
990                 DEBUG_REQ(D_ERROR, req, "no handle for file close "LPD64
991                           ": addr "LPX64", cookie "LPX64"\n",
992                           body->fid1.id, body->handle.addr,
993                           body->handle.cookie);
994                 RETURN(-ESTALE);
995         }
996
997         spin_lock(&med->med_open_lock);
998         req->rq_status = mds_close_mfd(mfd, med);
999         spin_unlock(&med->med_open_lock);
1000
1001         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_CLOSE_PACK)) {
1002                 CERROR("test case OBD_FAIL_MDS_CLOSE_PACK\n");
1003                 req->rq_status = -ENOMEM;
1004                 RETURN(-ENOMEM);
1005         }
1006
1007         rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
1008         if (rc) {
1009                 CERROR("mds: lustre_pack_msg: rc = %d\n", rc);
1010                 req->rq_status = rc;
1011         }
1012
1013         RETURN(0);
1014 }
1015
1016 static int mds_readpage(struct ptlrpc_request *req)
1017 {
1018         struct mds_obd *mds = mds_req2mds(req);
1019         struct vfsmount *mnt;
1020         struct dentry *de;
1021         struct file *file;
1022         struct mds_body *body, *repbody;
1023         struct obd_run_ctxt saved;
1024         int rc, size = sizeof(*body);
1025         struct obd_ucred uc;
1026         ENTRY;
1027
1028         rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
1029         if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) {
1030                 CERROR("mds: out of memory\n");
1031                 GOTO(out, rc = -ENOMEM);
1032         }
1033
1034         body = lustre_msg_buf(req->rq_reqmsg, 0);
1035         uc.ouc_fsuid = body->fsuid;
1036         uc.ouc_fsgid = body->fsgid;
1037         uc.ouc_cap = body->capability;
1038         push_ctxt(&saved, &mds->mds_ctxt, &uc);
1039         de = mds_fid2dentry(mds, &body->fid1, &mnt);
1040         if (IS_ERR(de))
1041                 GOTO(out_pop, rc = PTR_ERR(de));
1042
1043         CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1044
1045         file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1046         /* note: in case of an error, dentry_open puts dentry */
1047         if (IS_ERR(file))
1048                 GOTO(out_pop, rc = PTR_ERR(file));
1049
1050         repbody = lustre_msg_buf(req->rq_repmsg, 0);
1051         repbody->size = file->f_dentry->d_inode->i_size;
1052         repbody->valid = OBD_MD_FLSIZE;
1053
1054         /* To make this asynchronous, make sure that the handling function
1055            does not send a reply when this function completes; instead, a
1056            callback function would send the reply. */
1057         rc = mds_sendpage(req, file, body->size);
1058
1059         filp_close(file, 0);
1060 out_pop:
1061         pop_ctxt(&saved, &mds->mds_ctxt, &uc);
1062 out:
1063         req->rq_status = rc;
1064         RETURN(0);
1065 }
1066
1067 int mds_reint(struct ptlrpc_request *req, int offset)
1068 {
1069         int rc;
1070         struct mds_update_record rec;
1071
1072         rc = mds_update_unpack(req, offset, &rec);
1073         if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1074                 CERROR("invalid record\n");
1075                 req->rq_status = -EINVAL;
1076                 RETURN(0);
1077         }
1078         /* rc will be used to interrupt a for loop over multiple records */
1079         rc = mds_reint_rec(&rec, offset, req);
1080         return rc;
1081 }
1082
1083 /* forward declaration */
1084 int mds_handle(struct ptlrpc_request *req);
1085
1086 static int check_for_next_transno(struct mds_obd *mds)
1087 {
1088         struct ptlrpc_request *req;
1089         req = list_entry(mds->mds_recovery_queue.next,
1090                          struct ptlrpc_request, rq_list);
1091         LASSERT(req->rq_reqmsg->transno >= mds->mds_next_recovery_transno);
1092         return req->rq_reqmsg->transno == mds->mds_next_recovery_transno;
1093 }
1094
1095 static void process_recovery_queue(struct mds_obd *mds)
1096 {
1097         struct ptlrpc_request *req;
1098         ENTRY;
1099
1100         for (;;) {
1101                 spin_lock(&mds->mds_processing_task_lock);
1102                 LASSERT(mds->mds_processing_task == current->pid);
1103                 req = list_entry(mds->mds_recovery_queue.next,
1104                                  struct ptlrpc_request, rq_list);
1105
1106                 if (req->rq_reqmsg->transno != mds->mds_next_recovery_transno) {
1107                         spin_unlock(&mds->mds_processing_task_lock);
1108                         CDEBUG(D_HA, "Waiting for transno "LPD64" (1st is "
1109                                LPD64")\n",
1110                                mds->mds_next_recovery_transno,
1111                                req->rq_reqmsg->transno);
1112                         wait_event(mds->mds_next_transno_waitq,
1113                                    check_for_next_transno(mds));
1114                         continue;
1115                 }
1116                 list_del_init(&req->rq_list);
1117                 spin_unlock(&mds->mds_processing_task_lock);
1118
1119                 DEBUG_REQ(D_ERROR, req, "processing: ");
1120                 (void)mds_handle(req);
1121                 mds_fsync_super(mds->mds_sb);
1122                 OBD_FREE(req, sizeof *req);
1123                 spin_lock(&mds->mds_processing_task_lock);
1124                 mds->mds_next_recovery_transno++;
1125                 if (list_empty(&mds->mds_recovery_queue)) {
1126                         mds->mds_processing_task = 0;
1127                         spin_unlock(&mds->mds_processing_task_lock);
1128                         break;
1129                 }
1130                 spin_unlock(&mds->mds_processing_task_lock);
1131         }
1132         EXIT;
1133 }
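
/*
 * Example of the ordering above: if mds_next_recovery_transno is 7 and
 * replayed requests arrive with transnos 9, 7, 8, the thread holding
 * mds_processing_task handles 7 immediately, finds 9 at the head of the
 * sorted queue, and sleeps in wait_event() until queue_recovery_request()
 * inserts 8 and wakes mds_next_transno_waitq; it then processes 8 and
 * finally 9, advancing mds_next_recovery_transno after each request.
 */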
1134
1135 static int queue_recovery_request(struct ptlrpc_request *req,
1136                                   struct mds_obd *mds)
1137 {
1138         struct list_head *tmp;
1139         int inserted = 0;
1140         __u64 transno = req->rq_reqmsg->transno;
1141         struct ptlrpc_request *saved_req;
1142
1143         if (!transno) {
1144                 INIT_LIST_HEAD(&req->rq_list);
1145                 DEBUG_REQ(D_ERROR, req, "not queueing");
1146                 return 1;
1147         }
1148
1149         spin_lock(&mds->mds_processing_task_lock);
1150
1151         if (mds->mds_processing_task == current->pid) {
1152                 /* Processing the queue right now, don't re-add. */
1153                 LASSERT(list_empty(&req->rq_list));
1154                 spin_unlock(&mds->mds_processing_task_lock);
1155                 return 1;
1156         }
1157
1158         OBD_ALLOC(saved_req, sizeof *saved_req);
1159         if (!saved_req)
1160                 LBUG();
1161         memcpy(saved_req, req, sizeof *req);
1162         req = saved_req;
1163         INIT_LIST_HEAD(&req->rq_list);
1164
1165         /* XXX O(n^2) */
1166         list_for_each(tmp, &mds->mds_recovery_queue) {
1167                 struct ptlrpc_request *reqiter =
1168                         list_entry(tmp, struct ptlrpc_request, rq_list);
1169
1170                 if (reqiter->rq_reqmsg->transno > transno) {
1171                         list_add_tail(&req->rq_list, &reqiter->rq_list);
1172                         inserted = 1;
1173                         break;
1174                 }
1175         }
1176
1177         if (!inserted) {
1178                 list_add_tail(&req->rq_list, &mds->mds_recovery_queue);
1179         }
1180
1181         if (mds->mds_processing_task != 0) {
1182                 /* Someone else is processing this queue, we'll leave it to
1183                  * them.
1184                  */
1185                 if (transno == mds->mds_next_recovery_transno)
1186                         wake_up(&mds->mds_next_transno_waitq);
1187                 spin_unlock(&mds->mds_processing_task_lock);
1188                 return 0;
1189         }
1190
1191         /* Nobody is processing, and we know there's (at least) one to process
1192          * now, so we'll do the honours.
1193          */
1194         mds->mds_processing_task = current->pid;
1195         spin_unlock(&mds->mds_processing_task_lock);
1196
1197         process_recovery_queue(mds);
1198         return 0;
1199 }
1200
1201 static int filter_recovery_request(struct ptlrpc_request *req,
1202                                    struct mds_obd *mds, int *process)
1203 {
1204         switch (req->rq_reqmsg->opc) {
1205         case MDS_CONNECT:
1206         case MDS_DISCONNECT:
1207                *process = 1;
1208                RETURN(0);
1209
1210         case MDS_OPEN:
1211         case MDS_GETSTATUS: /* used in unmounting */
1212         case MDS_REINT:
1213         case LDLM_ENQUEUE:
1214                 *process = queue_recovery_request(req, mds);
1215                 RETURN(0);
1216
1217         default:
1218                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1219                 *process = 0;
1220                 /* XXX what should we set rq_status to here? */
1221                 RETURN(ptlrpc_error(req->rq_svc, req));
1222         }
1223 }
1224
1225 static int mds_queue_final_reply(struct ptlrpc_request *req, int rc)
1226 {
1227         struct mds_obd *mds = mds_req2mds(req);
1228         struct ptlrpc_request *saved_req;
1229
1230         spin_lock(&mds->mds_processing_task_lock);
1231         if (rc) {
1232                 /* Just like ptlrpc_error, but without the sending. */
1233                 lustre_pack_msg(0, NULL, NULL, &req->rq_replen,
1234                                 &req->rq_repmsg);
1235                 req->rq_type = PTL_RPC_MSG_ERR;
1236         }
1237
1238         LASSERT(list_empty(&req->rq_list));
1239         OBD_ALLOC(saved_req, sizeof *saved_req);
1240         memcpy(saved_req, req, sizeof *saved_req);
1241         req = saved_req;
1242         list_add(&req->rq_list, &mds->mds_delayed_reply_queue);
1243         if (--mds->mds_recoverable_clients == 0) {
1244                 struct list_head *tmp, *n;
1245                 ldlm_reprocess_all_ns(req->rq_export->exp_obd->obd_namespace);
1246                 CDEBUG(D_ERROR,
1247                        "all clients recovered, sending delayed replies\n");
1248                 list_for_each_safe(tmp, n, &mds->mds_delayed_reply_queue) {
1249                         req = list_entry(tmp, struct ptlrpc_request, rq_list);
1250                         DEBUG_REQ(D_ERROR, req, "delayed:");
1251                         ptlrpc_reply(req->rq_svc, req);
1252                         list_del(&req->rq_list);
1253                         OBD_FREE(req, sizeof *req);
1254                 }
1255         } else {
1256                 CERROR("%d recoverable clients remain\n",
1257                        mds->mds_recoverable_clients);
1258         }
1259
1260         spin_unlock(&mds->mds_processing_task_lock);
1261         return 1;
1262 }
1263
1264 static char *reint_names[] = {
1265         [REINT_SETATTR] "setattr",
1266         [REINT_CREATE]  "create",
1267         [REINT_LINK]    "link",
1268         [REINT_UNLINK]  "unlink",
1269         [REINT_RENAME]  "rename"
1270 };
1271
1272 int mds_handle(struct ptlrpc_request *req)
1273 {
1274         int rc;
1275         int should_process;
1276         struct mds_obd *mds = NULL; /* quell gcc overwarning */
1277         ENTRY;
1278
1279         rc = lustre_unpack_msg(req->rq_reqmsg, req->rq_reqlen);
1280         if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_HANDLE_UNPACK)) {
1281                 DEBUG_REQ(D_ERROR, req, "invalid request (%d)", rc);
1282                 GOTO(out, rc);
1283         }
1284
1285         LASSERT(!strcmp(req->rq_obd->obd_type->typ_name, LUSTRE_MDT_NAME));
1286
1287         if (req->rq_reqmsg->opc != MDS_CONNECT) {
1288                 if (req->rq_export == NULL) {
1289                         req->rq_status = -ENOTCONN;
1290                         GOTO(out, rc = -ENOTCONN);
1291                 }
1292
1293                 mds = mds_req2mds(req);
1294                 if (mds->mds_recoverable_clients != 0) {
1295                         rc = filter_recovery_request(req, mds, &should_process);
1296                         if (rc || !should_process)
1297                                 RETURN(rc);
1298                 }
1299         }
1300
1301         switch (req->rq_reqmsg->opc) {
1302         case MDS_CONNECT:
1303                 DEBUG_REQ(D_INODE, req, "connect");
1304                 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1305                 rc = target_handle_connect(req);
1306                 /* Make sure that last_rcvd is correct. */
1307                 if (!rc) {
1308                         /* Now that we have an export, set mds. */
1309                         mds = mds_req2mds(req);
1310                         mds_fsync_super(mds->mds_sb);
1311                 }
1312
1313                 /* Let the client know if it can replay. */
1314                 if (!rc && mds->mds_recoverable_clients) {
1315                         lustre_msg_add_flags(req->rq_repmsg,
1316                                              MSG_REPLAY_IN_PROGRESS);
1317                 }
1318                 break;
1319
1320         case MDS_DISCONNECT:
1321                 DEBUG_REQ(D_INODE, req, "disconnect");
1322                 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1323                 rc = target_handle_disconnect(req);
1324                 /* Make sure that last_rcvd is correct. */
1325                 if (!rc)
1326                         mds_fsync_super(mds->mds_sb);
1327                 req->rq_status = rc;
1328                 break;
1329
1330         case MDS_GETSTATUS:
1331                 DEBUG_REQ(D_INODE, req, "getstatus");
1332                 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1333                 rc = mds_getstatus(req);
1334                 break;
1335
1336         case MDS_GETLOVINFO:
1337                 DEBUG_REQ(D_INODE, req, "getlovinfo");
1338                 rc = mds_getlovinfo(req);
1339                 break;
1340
1341         case MDS_GETATTR:
1342                 DEBUG_REQ(D_INODE, req, "getattr");
1343                 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1344                 rc = mds_getattr(0, req);
1345                 break;
1346
1347         case MDS_GETATTR_NAME:
1348                 DEBUG_REQ(D_INODE, req, "getattr_name");
1349                 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1350                 rc = mds_getattr_name(0, req);
1351                 break;
1352
1353         case MDS_STATFS:
1354                 DEBUG_REQ(D_INODE, req, "statfs");
1355                 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1356                 rc = mds_statfs(req);
1357                 break;
1358
1359         case MDS_READPAGE:
1360                 DEBUG_REQ(D_INODE, req, "readpage");
1361                 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1362                 rc = mds_readpage(req);
1363
1364                 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1365                         return 0;
1366                 break;
1367
1368         case MDS_REINT: {
1369                 int size = sizeof(struct mds_body);
1370                 int opc = *(u32 *)lustre_msg_buf(req->rq_reqmsg, 0),
1371                         realopc = opc & REINT_OPCODE_MASK;
1372
1373                 DEBUG_REQ(D_INODE, req, "reint (%s%s)",
1374                           reint_names[realopc],
1375                           opc & REINT_REPLAYING ? "|REPLAYING" : "");
1376
1377                 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
1378
1379                 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen,
1380                                      &req->rq_repmsg);
1381                 if (rc) {
1382                         req->rq_status = rc;
1383                         break;
1384                 }
1385                 rc = mds_reint(req, 0);
1386                 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET_REP, 0);
1387                 break;
1388                 }
1389
1390         case MDS_OPEN:
1391                 DEBUG_REQ(D_INODE, req, "open");
1392                 OBD_FAIL_RETURN(OBD_FAIL_MDS_OPEN_NET, 0);
1393                 rc = mds_open(req);
1394                 break;
1395
1396         case MDS_CLOSE:
1397                 DEBUG_REQ(D_INODE, req, "close");
1398                 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1399                 rc = mds_close(req);
1400                 break;
1401
1402         case LDLM_ENQUEUE:
1403                 DEBUG_REQ(D_INODE, req, "enqueue");
1404                 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1405                 rc = ldlm_handle_enqueue(req);
1406                 break;
1407         case LDLM_CONVERT:
1408                 DEBUG_REQ(D_INODE, req, "convert");
1409                 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1410                 rc = ldlm_handle_convert(req);
1411                 break;
1412         case LDLM_BL_CALLBACK:
1413         case LDLM_CP_CALLBACK:
1414                 DEBUG_REQ(D_INODE, req, "callback");
1415                 CERROR("callbacks should not happen on MDS\n");
1416                 LBUG();
1417                 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1418                 break;
1419         default:
1420                 rc = ptlrpc_error(req->rq_svc, req);
1421                 RETURN(rc);
1422         }
1423
1424         EXIT;
1425
1426         /* If we're DISCONNECTing, the mds_export_data is already freed */
1427         if (!rc && req->rq_reqmsg->opc != MDS_DISCONNECT) {
1428                 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1429
1430                 req->rq_repmsg->last_xid =
1431                         HTON__u64(le64_to_cpu(med->med_mcd->mcd_last_xid));
1432                 req->rq_repmsg->last_committed =
1433                         HTON__u64(mds->mds_last_committed);
1434                 CDEBUG(D_INFO, "last_transno %Lu, last_committed %Lu, xid %d\n",
1435                        (unsigned long long)mds->mds_last_rcvd,
1436                        (unsigned long long)mds->mds_last_committed,
1437                        cpu_to_le32(req->rq_xid));
1438         }
1439  out:
1440
1441         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
1442                 struct mds_obd *mds = mds_req2mds(req);
1443                 LASSERT(mds->mds_recoverable_clients);
1444                 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
1445                 return mds_queue_final_reply(req, rc);
1446         }
1447
1448         /* XXX bug 578 */
1449         /* MDS_CONNECT / EALREADY (note: not -EALREADY!) isn't an error */
1450         if (rc && (req->rq_reqmsg->opc != MDS_CONNECT ||
1451                    rc != EALREADY)) {
1452                 DEBUG_REQ(D_ERROR, req, "processing error (%d)", rc);
1453                 ptlrpc_error(req->rq_svc, req);
1454         } else {
1455                 DEBUG_REQ(D_NET, req, "sending reply");
1456                 ptlrpc_reply(req->rq_svc, req);
1457         }
1458         return 0;
1459 }
1460
1461 /* Update the server data on disk.  This stores the new mount_count and
1462  * also the last_rcvd value to disk.  If we don't have a clean shutdown,
1463  * then the server last_rcvd value may be less than that of the clients.
1464  * This will alert us that we may need to do client recovery.
1465  *
1466  * Also assumes that mds_last_rcvd is not being modified concurrently (no
 * locking is taken here); see the illustrative sketch after this function.
1467  */
1468 int mds_update_server_data(struct mds_obd *mds)
1469 {
1470         struct mds_server_data *msd = mds->mds_server_data;
1471         struct file *filp = mds->mds_rcvd_filp;
1472         struct obd_run_ctxt saved;
1473         loff_t off = 0;
1474         int rc;
1475
1476         push_ctxt(&saved, &mds->mds_ctxt, NULL);
1477         msd->msd_last_rcvd = cpu_to_le64(mds->mds_last_rcvd);
1478         msd->msd_mount_count = cpu_to_le64(mds->mds_mount_count);
1479
1480         CDEBUG(D_SUPER, "MDS mount_count is %Lu, last_rcvd is %Lu\n",
1481                (unsigned long long)mds->mds_mount_count,
1482                (unsigned long long)mds->mds_last_rcvd);
1483         rc = lustre_fwrite(filp, (char *)msd, sizeof(*msd), &off);
1484         if (rc != sizeof(*msd)) {
1485                 CERROR("error writing MDS server data: rc = %d\n", rc);
1486                 if (rc >= 0)
1487                         rc = -EIO;
1488                 GOTO(out, rc);
1489         }
1490 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1491         rc = fsync_dev(filp->f_dentry->d_inode->i_rdev);
1492 #else
1493         rc = file_fsync(filp, filp->f_dentry, 1);
1494 #endif
1495         if (rc)
1496                 CERROR("error flushing MDS server data: rc = %d\n", rc);
1497
1498 out:
1499         pop_ctxt(&saved, &mds->mds_ctxt, NULL);
1500         RETURN(rc);
1501 }
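
/*
 * Illustrative sketch (not compiled): the recovery check described above
 * amounts to comparing the on-disk server value with the per-client record
 * (med->med_mcd in the request handler above) when a client reconnects,
 * roughly:
 *
 *      if (le64_to_cpu(client_last_rcvd) > le64_to_cpu(msd->msd_last_rcvd))
 *              // the client holds requests the server lost; queue for replay
 *
 * "client_last_rcvd" is only a placeholder name for the client's copy of the
 * last_rcvd value; the actual comparison is done by the recovery code, not
 * in this function.
 */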
1502
1503 /* Mount the backing file system "secretly", i.e. without exposing it in the normal namespace, and initialize the MDS device state. */
1504 static int mds_setup(struct obd_device *obddev, obd_count len, void *buf)
1505 {
1506         struct obd_ioctl_data* data = buf;
1507         struct mds_obd *mds = &obddev->u.mds;
1508         struct vfsmount *mnt;
1509         int rc = 0;
1510         ENTRY;
1511
1512 #ifdef CONFIG_DEV_RDONLY
1513         dev_clear_rdonly(2);
1514 #endif
1515         if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2)
1516                 RETURN(rc = -EINVAL);
1517
1518         obddev->obd_fsops = fsfilt_get_ops(data->ioc_inlbuf2);
1519         if (IS_ERR(obddev->obd_fsops))
1520                 RETURN(rc = PTR_ERR(obddev->obd_fsops));
1521
1522         mnt = do_kern_mount(data->ioc_inlbuf2, 0, data->ioc_inlbuf1, NULL);
1523         if (IS_ERR(mnt)) {
1524                 rc = PTR_ERR(mnt);
1525                 CERROR("do_kern_mount failed: rc = %d\n", rc);
1526                 GOTO(err_ops, rc);
1527         }
1528
1529         CDEBUG(D_SUPER, "%s: mnt = %p\n", data->ioc_inlbuf1, mnt);
1530         mds->mds_sb = mnt->mnt_root->d_inode->i_sb;
1531         if (!mds->mds_sb)
1532                 GOTO(err_put, rc = -ENODEV);
1533
1534         init_MUTEX(&mds->mds_transno_sem);
1535         mds->mds_max_mdsize = sizeof(struct lov_mds_md);
1536         rc = mds_fs_setup(obddev, mnt);
1537         if (rc) {
1538                 CERROR("MDS filesystem method init failed: rc = %d\n", rc);
1539                 GOTO(err_put, rc);
1540         }
1541
1542         obddev->obd_namespace =
1543                 ldlm_namespace_new("mds_server", LDLM_NAMESPACE_SERVER);
1544         if (obddev->obd_namespace == NULL) {
1545                 /* err_fs/err_put below already undo the partial setup */
1546                 GOTO(err_fs, rc = -ENOMEM);
1547         }
1548
1549         ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1550                            "mds_ldlm_client", &obddev->obd_ldlm_client);
1551
1552         spin_lock_init(&mds->mds_processing_task_lock);
1553         mds->mds_processing_task = 0;
1554         mds->mds_has_lov_desc = 0;
1555         INIT_LIST_HEAD(&mds->mds_recovery_queue);
1556         INIT_LIST_HEAD(&mds->mds_delayed_reply_queue);
1557         init_waitqueue_head(&mds->mds_next_transno_waitq);
1558
1559         RETURN(0);
1560
1561 err_fs:
1562         mds_fs_cleanup(obddev);
1563 err_put:
1564         unlock_kernel();
1565         mntput(mnt); /* mds_vfsmnt may not be set yet on this error path */
1566         mds->mds_sb = NULL;
1567         lock_kernel();
1568 err_ops:
1569         fsfilt_put_ops(obddev->obd_fsops);
1570         RETURN(rc);
1571 }
1572
1573 static int mds_cleanup(struct obd_device *obddev)
1574 {
1575         struct super_block *sb;
1576         struct mds_obd *mds = &obddev->u.mds;
1577         ENTRY;
1578
1579         sb = mds->mds_sb;
1580         if (!mds->mds_sb)
1581                 RETURN(0);
1582
1583         mds_update_server_data(mds);
1584         mds_fs_cleanup(obddev);
1585
1586         unlock_kernel();
1587         mntput(mds->mds_vfsmnt);
1588         mds->mds_sb = NULL;
1589
1590         ldlm_namespace_free(obddev->obd_namespace);
1591
1592         lock_kernel();
1593 #ifdef CONFIG_DEV_RDONLY
1594         dev_clear_rdonly(2);
1595 #endif
1596         fsfilt_put_ops(obddev->obd_fsops);
1597
1598         RETURN(0);
1599 }
1600
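/* Intent handling for lock requests arriving at the MDS.  A request whose
 * message carries more than one buffer includes an intent record: the
 * intended metadata operation is executed here (mds_reint() for creates,
 * unlinks, renames, etc., or mds_getattr_name() for lookups), and the lock
 * request is then either aborted (ELDLM_LOCK_ABORTED), granted as asked, or
 * switched onto the child object's resource (ELDLM_LOCK_CHANGED).  Requests
 * without an intent just get a plain ldlm_reply packed for them. */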
1601 static int ldlm_intent_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
1602                               void *req_cookie, ldlm_mode_t mode, int flags,
1603                               void *data)
1604 {
1605         struct ptlrpc_request *req = req_cookie;
1606         int rc = 0;
1607         ENTRY;
1608
1609         if (!req_cookie)
1610                 RETURN(0);
1611
1612         if (req->rq_reqmsg->bufcount > 1) {
1613                 /* an intent needs to be considered */
1614                 struct ldlm_intent *it = lustre_msg_buf(req->rq_reqmsg, 1);
1615                 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
1616                 struct mds_body *mds_rep;
1617                 struct ldlm_reply *rep;
1618                 __u64 new_resid[3] = {0, 0, 0}, old_res;
1619                 int rc, size[3] = {sizeof(struct ldlm_reply),
1620                                    sizeof(struct mds_body),
1621                                    mds->mds_max_mdsize};
1622
1623                 it->opc = NTOH__u64(it->opc);
1624
1625                 LDLM_DEBUG(lock, "intent policy, opc: %s",
1626                            ldlm_it2str(it->opc));
1627
1628                 rc = lustre_pack_msg(3, size, NULL, &req->rq_replen,
1629                                      &req->rq_repmsg);
1630                 if (rc) {
1631                         rc = req->rq_status = -ENOMEM;
1632                         RETURN(rc);
1633                 }
1634
1635                 rep = lustre_msg_buf(req->rq_repmsg, 0);
1636                 rep->lock_policy_res1 = 1;
1637
1638                 /* execute policy */
1639                 switch ((long)it->opc) {
1640                 case IT_CREAT|IT_OPEN:
1641                         rc = mds_reint(req, 2);
1642                         if (rc || (req->rq_status != 0 &&
1643                                    req->rq_status != -EEXIST)) {
1644                                 rep->lock_policy_res2 = req->rq_status;
1645                                 RETURN(ELDLM_LOCK_ABORTED);
1646                         }
1647                         break;
1648                 case IT_CREAT:
1649                 case IT_MKDIR:
1650                 case IT_MKNOD:
1651                 case IT_RENAME2:
1652                 case IT_LINK2:
1653                 case IT_RMDIR:
1654                 case IT_SYMLINK:
1655                 case IT_UNLINK:
1656                         rc = mds_reint(req, 2);
1657                         if (rc || (req->rq_status != 0 &&
1658                                    req->rq_status != -EISDIR &&
1659                                    req->rq_status != -ENOTDIR)) {
1660                                 rep->lock_policy_res2 = req->rq_status;
1661                                 RETURN(ELDLM_LOCK_ABORTED);
1662                         }
1663                         break;
1664                 case IT_GETATTR:
1665                 case IT_LOOKUP:
1666                 case IT_OPEN:
1667                 case IT_READDIR:
1668                 case IT_READLINK:
1669                 case IT_RENAME:
1670                 case IT_LINK:
1671                 case IT_SETATTR:
1672                         rc = mds_getattr_name(2, req);
1673                         /* FIXME: we need to sit down and decide on who should
1674                          * set req->rq_status, who should return negative and
1675                          * positive return values, and what they all mean. */
1676                         if (rc || req->rq_status != 0) {
1677                                 rep->lock_policy_res2 = req->rq_status;
1678                                 RETURN(ELDLM_LOCK_ABORTED);
1679                         }
1680                         break;
1681                 case IT_READDIR|IT_OPEN:
1682                         LBUG();
1683                         break;
1684                 default:
1685                         CERROR("Unhandled intent "LPD64"\n", it->opc);
1686                         LBUG();
1687                 }
1688
1689                 /* We don't bother returning a lock to the client for a file
1690                  * or directory we are removing.
1691                  *
1692                  * As for link and rename, there is no reason for the client
1693                  * to get a lock on the target at this point.  If they are
1694                  * going to modify the file/directory later they will get a
1695                  * lock at that time.
1696                  */
1697                 if (it->opc & (IT_UNLINK | IT_RMDIR | IT_LINK | IT_LINK2 |
1698                                IT_RENAME | IT_RENAME2))
1699                         RETURN(ELDLM_LOCK_ABORTED);
1700
1701                 rep->lock_policy_res2 = req->rq_status;
1702                 mds_rep = lustre_msg_buf(req->rq_repmsg, 1);
1703
1704                 /* If the client is about to open a file that doesn't have an
1705                  * MD stripe record, it's going to need a write lock.
1706                  */
1707                 if (it->opc & IT_OPEN && !(mds_rep->valid & OBD_MD_FLEASIZE)) {
1708                         LDLM_DEBUG(lock, "open with no EA; returning PW lock");
1709                         lock->l_req_mode = LCK_PW;
1710                 }
1711
1712                 if (flags & LDLM_FL_INTENT_ONLY) {
1713                         LDLM_DEBUG(lock, "INTENT_ONLY, aborting lock");
1714                         RETURN(ELDLM_LOCK_ABORTED);
1715                 }
1716                 /* Give the client a lock on the child object, instead of the
1717                  * parent that it requested. */
1718                 new_resid[0] = NTOH__u32(mds_rep->ino);
1719                 new_resid[1] = NTOH__u32(mds_rep->generation);
1720                 if (new_resid[0] == 0)
1721                         LBUG();
1722                 old_res = lock->l_resource->lr_name[0];
1723
1724                 ldlm_lock_change_resource(ns, lock, new_resid);
1725                 if (lock->l_resource == NULL) {
1726                         LBUG();
1727                         RETURN(-ENOMEM);
1728                 }
1729                 LDLM_DEBUG(lock, "intent policy, old res "LPU64,
1730                            old_res);
1731                 RETURN(ELDLM_LOCK_CHANGED);
1732         } else {
1733                 int size = sizeof(struct ldlm_reply);
1734                 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen,
1735                                      &req->rq_repmsg);
1736                 if (rc) {
1737                         LBUG();
1738                         RETURN(-ENOMEM);
1739                 }
1740         }
1741         RETURN(rc);
1742 }
1743
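/* Register / unregister the lprocfs (/proc) status entries for an MDS
 * device; these are the only per-device attach/detach duties here. */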
1744 int mds_attach(struct obd_device *dev, obd_count len, void *data)
1745 {
1746         return lprocfs_reg_obd(dev, status_var_nm_1, dev);
1747 }
1748
1749 int mds_detach(struct obd_device *dev)
1750 {
1751         return lprocfs_dereg_obd(dev);
1752 }
1753
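/* MDT setup: bring up the ptlrpc service listening on the MDS request
 * portal and start MDT_NUM_THREADS "ll_mdt_NN" threads, each running
 * mds_handle() on incoming requests. */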
1754 static int mdt_setup(struct obd_device *obddev, obd_count len, void *buf)
1755 {
1756         int i;
1757         //        struct obd_ioctl_data* data = buf;
1758         struct mds_obd *mds = &obddev->u.mds;
1759         int rc = 0;
1760         ENTRY;
1761
1762         mds->mds_service = ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1763                                            MDS_BUFSIZE, MDS_MAXREQSIZE,
1764                                            MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
1765                                            "self", mds_handle, "mds");
1766         if (!mds->mds_service) {
1767                 CERROR("failed to start service\n");
1768                 RETURN(rc = -ENOMEM);
1769         }
1770
1771         for (i = 0; i < MDT_NUM_THREADS; i++) {
1772                 char name[32];
1773                 sprintf(name, "ll_mdt_%02d", i);
1774                 rc = ptlrpc_start_thread(obddev, mds->mds_service, name);
1775                 if (rc) {
1776                         CERROR("cannot start MDT thread #%d: rc %d\n", i, rc);
1777                         GOTO(err_thread, rc);
1778                 }
1779         }
1780
1781         RETURN(0);
1782
1783 err_thread:
1784         ptlrpc_stop_all_threads(mds->mds_service);
1785         ptlrpc_unregister_service(mds->mds_service);
1786         RETURN(rc);
1787 }
1788
1789
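/* MDT cleanup: stop the service threads and unregister the ptlrpc service
 * started in mdt_setup(). */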
1790 static int mdt_cleanup(struct obd_device *obddev)
1791 {
1792         struct mds_obd *mds = &obddev->u.mds;
1793         ENTRY;
1794
1795         ptlrpc_stop_all_threads(mds->mds_service);
1796         ptlrpc_unregister_service(mds->mds_service);
1797
1798         RETURN(0);
1799 }
1800
1801 extern int mds_iocontrol(unsigned int cmd, struct lustre_handle *conn,
1802                          int len, void *karg, void *uarg);
1803
1804 /* use obd ops to offer management infrastructure */
1805 static struct obd_ops mds_obd_ops = {
1806         o_owner:       THIS_MODULE,
1807         o_attach:      mds_attach,
1808         o_detach:      mds_detach,
1809         o_connect:     mds_connect,
1810         o_disconnect:  mds_disconnect,
1811         o_setup:       mds_setup,
1812         o_cleanup:     mds_cleanup,
1813         o_iocontrol:   mds_iocontrol
1814 };
1815
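/* The MDT type only provides request-service setup and teardown; storage,
 * connection, and protocol handling live in the MDS device type above. */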
1816 static struct obd_ops mdt_obd_ops = {
1817         o_owner:       THIS_MODULE,
1818         o_setup:       mdt_setup,
1819         o_cleanup:     mdt_cleanup,
1820 };
1821
1822
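/* Module initialization: create the slab cache for struct mds_file_data,
 * register the MDS and MDT device types with the class driver, and install
 * the intent policy with the lock server.  mds_exit() undoes all of this. */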
1823 static int __init mds_init(void)
1824 {
1825         mds_file_cache = kmem_cache_create("ll_mds_file_data",
1826                                            sizeof(struct mds_file_data),
1827                                            0, 0, NULL, NULL);
1828         if (mds_file_cache == NULL)
1829                 return -ENOMEM;
1830
1831         class_register_type(&mds_obd_ops, status_class_var, LUSTRE_MDS_NAME);
1832         class_register_type(&mdt_obd_ops, NULL, LUSTRE_MDT_NAME);
1833         ldlm_register_intent(ldlm_intent_policy);
1834
1835         return 0;
1836 }
1837
1838 static void __exit mds_exit(void)
1839 {
1840         ldlm_unregister_intent();
1841         class_unregister_type(LUSTRE_MDS_NAME);
1842         class_unregister_type(LUSTRE_MDT_NAME);
1843         if (kmem_cache_destroy(mds_file_cache))
1844                 CERROR("couldn't free MDS file cache\n");
1845 }
1846
1847 MODULE_AUTHOR("Cluster File Systems <info@clusterfs.com>");
1848 MODULE_DESCRIPTION("Lustre Metadata Server (MDS) v0.01");
1849 MODULE_LICENSE("GPL");
1850
1851 module_init(mds_init);
1852 module_exit(mds_exit);