/*
 *  linux/mds/handler.c
 *
 *  Lustre Metadata Server (mds) request handler
 *
 *  Copyright (C) 2001, 2002 Cluster File Systems, Inc.
 *
 *  This code is issued under the GNU General Public License.
 *  See the file COPYING in this distribution.
 *
 *  by Peter Braam <braam@clusterfs.com>
 *
 *  This server is single-threaded at present (but can easily be made
 *  multi-threaded).
 */

#define EXPORT_SYMTAB

#include <linux/version.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/locks.h>
#include <linux/ext2_fs.h>
#include <linux/quotaops.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <linux/obd_support.h>
#include <linux/obd.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_mds.h>
#include <linux/lustre_net.h>
#include <linux/obd_class.h>

// XXX for testing
static struct mds_obd *MDS;

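/*
 * mds_queue_req: hand a locally generated request to the MDS thread.
 * The request buffer is moved (not copied) onto the global MDS request
 * list and the service thread is woken; the original request is recorded
 * in rq_reply_handle so mds_reply() can hand the reply buffer back and
 * wake the waiting client.
 */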
// XXX make this networked!
static int mds_queue_req(struct ptlrpc_request *req)
{
        struct ptlrpc_request *srv_req;

        if (!MDS) {
                EXIT;
                return -1;
        }

        OBD_ALLOC(srv_req, sizeof(*srv_req));
        if (!srv_req) {
                EXIT;
                return -ENOMEM;
        }

        printk("---> MDS at %d %p, incoming req %p, srv_req %p\n",
               __LINE__, MDS, req, srv_req);

        memset(srv_req, 0, sizeof(*srv_req));

        /* move the request buffer */
        srv_req->rq_reqbuf = req->rq_reqbuf;
        srv_req->rq_reqlen = req->rq_reqlen;
        srv_req->rq_obd = MDS;

        /* remember where it came from */
        srv_req->rq_reply_handle = req;

        list_add(&srv_req->rq_list, &MDS->mds_reqs);
        wake_up(&MDS->mds_waitq);
        return 0;
}

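/*
 * mds_sendpage: read one page from 'file' at 'offset' and return it to the
 * client.  For a local client (peer_nid == 0) the page is read directly
 * into the client-supplied buffer; for a remote client it is read into a
 * bounce buffer and pushed over the bulk portal, waiting for the bulk send
 * to complete before the buffer is freed.
 */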
int mds_sendpage(struct ptlrpc_request *req, struct file *file,
                 __u64 offset, struct niobuf *dst)
{
        int rc;
        mm_segment_t oldfs = get_fs();

        if (req->rq_peer.peer_nid == 0) {
                /* dst->addr is a user address, but in a different task! */
                set_fs(KERNEL_DS);
                rc = generic_file_read(file, (char *)(long)dst->addr,
                                       PAGE_SIZE, &offset);
                set_fs(oldfs);

                if (rc != PAGE_SIZE)
                        return -EIO;
        } else {
                char *buf;

                OBD_ALLOC(buf, PAGE_SIZE);
                if (!buf)
                        return -ENOMEM;

                set_fs(KERNEL_DS);
                rc = generic_file_read(file, buf, PAGE_SIZE, &offset);
                set_fs(oldfs);

                if (rc != PAGE_SIZE) {
                        OBD_FREE(buf, PAGE_SIZE);
                        return -EIO;
                }

                req->rq_bulkbuf = buf;
                req->rq_bulklen = PAGE_SIZE;
                /* initialize the waitqueue before starting the send so the
                 * bulk completion cannot race with it */
                init_waitqueue_head(&req->rq_wait_for_bulk);
                rc = ptl_send_buf(req, &req->rq_peer, MDS_BULK_PORTAL, 0);
                sleep_on(&req->rq_wait_for_bulk);
                OBD_FREE(buf, PAGE_SIZE);
                req->rq_bulklen = 0; /* FIXME: eek. */
        }

        return 0;
}

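/*
 * mds_reply: return the reply to the caller.  Requests that arrived over
 * portals are answered on the reply portal; locally queued requests hand
 * the reply buffer back to the originating ptlrpc_request and wake the
 * client thread waiting on rq_wait_for_rep.
 */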
int mds_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_request *clnt_req = req->rq_reply_handle;

        ENTRY;

        if (req->rq_obd->mds_service != NULL) {
                /* This is a request that came from the network via portals. */

                /* FIXME: we need to increment the count of handled events */
                ptl_send_buf(req, &req->rq_peer, MDS_REPLY_PORTAL, 0);
        } else {
                /* This is a local request that came from another thread. */

                /* move the reply to the client */
                clnt_req->rq_replen = req->rq_replen;
                clnt_req->rq_repbuf = req->rq_repbuf;
                req->rq_repbuf = NULL;
                req->rq_replen = 0;

                /* free the request buffer */
                OBD_FREE(req->rq_reqbuf, req->rq_reqlen);
                req->rq_reqbuf = NULL;

                /* wake up the client */
                wake_up_interruptible(&clnt_req->rq_wait_for_rep);
        }

        EXIT;
        return 0;
}

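/*
 * mds_error: build a minimal MDS_TYPE_ERR reply carrying rq_status and
 * send it via mds_reply().
 */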
int mds_error(struct ptlrpc_request *req)
{
        struct ptlrep_hdr *hdr;

        ENTRY;

        OBD_ALLOC(hdr, sizeof(*hdr));
        if (!hdr) {
                EXIT;
                return -ENOMEM;
        }

        memset(hdr, 0, sizeof(*hdr));

        hdr->seqno = req->rq_reqhdr->seqno;
        hdr->status = req->rq_status;
        hdr->type = MDS_TYPE_ERR;

        req->rq_repbuf = (char *)hdr;
        req->rq_replen = sizeof(*hdr);

        EXIT;
        return mds_reply(req);
}

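/*
 * mds_fid2dentry: map an ll_fid to an (optionally well-connected) dentry,
 * NFS-style.  On success the returned dentry references the inode, and if
 * 'mnt' is non-NULL it is set to the MDS vfsmount with a reference taken
 * via mntget(); callers must balance with dput()/mntput().
 */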
struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
                              struct vfsmount **mnt)
{
        /* stolen from NFS */
        struct super_block *sb = mds->mds_sb;
        unsigned long ino = fid->id;
        //__u32 generation = fid->generation;
        __u32 generation = 0;
        struct inode *inode;
        struct list_head *lp;
        struct dentry *result;

        if (ino == 0)
                return ERR_PTR(-ESTALE);

        inode = iget(sb, ino);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);

        printk("--> mds_fid2dentry: sb %p\n", inode->i_sb);

        if (is_bad_inode(inode)
            || (generation && inode->i_generation != generation)) {
                /* we didn't find the right inode.. */
                printk(__FUNCTION__
                       ": bad inode %lu, link: %d ct: %d or version %u/%u\n",
                       inode->i_ino,
                       inode->i_nlink, atomic_read(&inode->i_count),
                       inode->i_generation,
                       generation);
                iput(inode);
                return ERR_PTR(-ESTALE);
        }

        /* now to find a dentry.
         * If possible, get a well-connected one
         */
        if (mnt)
                *mnt = mds->mds_vfsmnt;
        spin_lock(&dcache_lock);
        for (lp = inode->i_dentry.next; lp != &inode->i_dentry; lp = lp->next) {
                result = list_entry(lp, struct dentry, d_alias);
                if (!(result->d_flags & DCACHE_NFSD_DISCONNECTED)) {
                        dget_locked(result);
                        result->d_vfs_flags |= DCACHE_REFERENCED;
                        spin_unlock(&dcache_lock);
                        iput(inode);
                        if (mnt)
                                mntget(*mnt);
                        return result;
                }
        }
        spin_unlock(&dcache_lock);
        result = d_alloc_root(inode);
        if (result == NULL) {
                iput(inode);
                return ERR_PTR(-ENOMEM);
        }
        if (mnt)
                mntget(*mnt);
        result->d_flags |= DCACHE_NFSD_DISCONNECTED;
        return result;
}

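/* mds_get_objid: copy the object id stored in the ext2 inode's i_data */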
static inline void mds_get_objid(struct inode *inode, __u64 *id)
{
        memcpy(id, &inode->u.ext2_i.i_data, sizeof(*id));
}

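/*
 * mds_getattr: pack a reply and fill it with the attributes of the inode
 * named by fid1.  Errors are returned in the reply header; the handler
 * itself returns 0 so a reply is still sent.
 */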
int mds_getattr(struct ptlrpc_request *req)
{
        struct dentry *de;
        struct inode *inode;
        struct mds_rep *rep;
        int rc;

        rc = mds_pack_rep(NULL, 0, NULL, 0, &req->rq_rephdr, &req->rq_rep.mds,
                          &req->rq_replen, &req->rq_repbuf);
        if (rc) {
                EXIT;
                printk("mds: out of memory\n");
                req->rq_status = -ENOMEM;
                return 0;
        }

        req->rq_rephdr->seqno = req->rq_reqhdr->seqno;
        rep = req->rq_rep.mds;

        de = mds_fid2dentry(req->rq_obd, &req->rq_req.mds->fid1, NULL);
        if (IS_ERR(de)) {
                EXIT;
                /* pass the real error (-ESTALE or -ENOMEM) back to the client */
                req->rq_rephdr->status = PTR_ERR(de);
                return 0;
        }

        inode = de->d_inode;
        rep->ino = inode->i_ino;
        rep->atime = inode->i_atime;
        rep->ctime = inode->i_ctime;
        rep->mtime = inode->i_mtime;
        rep->uid = inode->i_uid;
        rep->gid = inode->i_gid;
        rep->size = inode->i_size;
        rep->mode = inode->i_mode;
        rep->nlink = inode->i_nlink;
        rep->valid = ~0;
        mds_get_objid(inode, &rep->objid);
        dput(de);
        return 0;
}

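/*
 * mds_readpage: open the file named by fid1 and send the page at the
 * offset given in the request back to the client via mds_sendpage().
 */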
int mds_readpage(struct ptlrpc_request *req)
{
        struct vfsmount *mnt;
        struct dentry *de;
        struct file *file;
        struct niobuf *niobuf;
        struct mds_rep *rep;
        int rc;

        rc = mds_pack_rep(NULL, 0, NULL, 0, &req->rq_rephdr, &req->rq_rep.mds,
                          &req->rq_replen, &req->rq_repbuf);
        if (rc) {
                EXIT;
                printk("mds: out of memory\n");
                req->rq_status = -ENOMEM;
                return 0;
        }

        req->rq_rephdr->seqno = req->rq_reqhdr->seqno;
        rep = req->rq_rep.mds;

        de = mds_fid2dentry(req->rq_obd, &req->rq_req.mds->fid1, &mnt);
        if (IS_ERR(de)) {
                EXIT;
                req->rq_rephdr->status = PTR_ERR(de);
                return 0;
        }

        printk("mds_readpage: ino %lu\n", de->d_inode->i_ino);

        file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
        /* note: in case of an error, dentry_open puts the dentry */
        if (IS_ERR(file)) {
                EXIT;
                req->rq_rephdr->status = PTR_ERR(file);
                return 0;
        }

        niobuf = mds_req_tgt(req->rq_req.mds);

        /* to make this asynchronous, make sure that the handling function
           doesn't send a reply when this function completes.  Instead a
           callback function would send the reply. */
        rc = mds_sendpage(req, file, req->rq_req.mds->size, niobuf);

        filp_close(file, 0);
        req->rq_rephdr->status = rc;
        EXIT;
        return 0;
}

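/*
 * mds_reint: unpack a reintegration record from the request body and
 * apply it through mds_reint_rec().
 */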
int mds_reint(struct ptlrpc_request *req)
{
        int rc;
        char *buf = mds_req_tgt(req->rq_req.mds);
        int len = req->rq_req.mds->tgtlen;
        struct mds_update_record rec;

        rc = mds_update_unpack(buf, len, &rec);
        if (rc) {
                printk(__FUNCTION__ ": invalid record\n");
                req->rq_status = -EINVAL;
                return 0;
        }
        /* rc will be used to interrupt a for loop over multiple records */
        rc = mds_reint_rec(&rec, req);
        return 0;
}

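/*
 * mds_handle: main request dispatcher.  Validates the packet type, unpacks
 * the request header and body, calls the per-opcode handler, and then
 * sends either a reply or an error packet depending on rq_status.
 */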
//int mds_handle(struct mds_conn *conn, int len, char *buf)
int mds_handle(struct ptlrpc_request *req)
{
        int rc;
        struct ptlreq_hdr *hdr;

        ENTRY;

        hdr = (struct ptlreq_hdr *)req->rq_reqbuf;

        if (NTOH__u32(hdr->type) != MDS_TYPE_REQ) {
                printk("lustre_mds: wrong packet type sent %d\n",
                       NTOH__u32(hdr->type));
                rc = -EINVAL;
                goto out;
        }

        rc = mds_unpack_req(req->rq_reqbuf, req->rq_reqlen,
                            &req->rq_reqhdr, &req->rq_req.mds);
        if (rc) {
                printk("lustre_mds: Invalid request\n");
                EXIT;
                goto out;
        }

        switch (req->rq_reqhdr->opc) {

        case MDS_GETATTR:
                CDEBUG(D_INODE, "getattr\n");
                rc = mds_getattr(req);
                break;

        case MDS_READPAGE:
                CDEBUG(D_INODE, "readpage\n");
                rc = mds_readpage(req);
                break;

        case MDS_REINT:
                CDEBUG(D_INODE, "reint\n");
                rc = mds_reint(req);
                break;

        default:
                return mds_error(req);
        }

out:
        if (rc) {
                printk(__FUNCTION__ ": no header\n");
                return 0;
        }

        if (req->rq_status) {
                mds_error(req);
        } else {
                CDEBUG(D_INODE, "sending reply\n");
                mds_reply(req);
        }

        return 0;
}

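/* timer callback: just kick the MDS service thread awake */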
static void mds_timer_run(unsigned long __data)
{
        struct task_struct *p = (struct task_struct *)__data;

        wake_up_process(p);
}

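/*
 * mds_main: the MDS service thread.  It sleeps on mds_waitq and, when
 * woken, drains either the portals event queue (networked service) or the
 * locally queued request list, dispatching each request to mds_handle().
 * It exits when MDS_UNMOUNT is set in mds_flags.
 */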
int mds_main(void *arg)
{
        struct mds_obd *mds = (struct mds_obd *)arg;
        struct timer_list timer;

        lock_kernel();
        daemonize();
        spin_lock_irq(&current->sigmask_lock);
        sigfillset(&current->blocked);
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);

        sprintf(current->comm, "lustre_mds");

        /* Set up an interval timer which can be used to trigger a
           wakeup after the interval expires */
        init_timer(&timer);
        timer.data = (unsigned long)current;
        timer.function = mds_timer_run;
        mds->mds_timer = &timer;

        /* Record that the thread is running */
        mds->mds_thread = current;
        wake_up(&mds->mds_done_waitq);

        printk(KERN_INFO "lustre_mds starting.  Commit interval %d seconds\n",
               mds->mds_interval / HZ);

        /* XXX maintain a list of all managed devices: insert here */

        /* And now, wait forever for commit wakeup events. */
        while (1) {
                int rc;

                if (mds->mds_flags & MDS_UNMOUNT)
                        break;

                wake_up(&mds->mds_done_waitq);
                interruptible_sleep_on(&mds->mds_waitq);

                CDEBUG(D_INODE, "lustre_mds wakes\n");
                CDEBUG(D_INODE, "pick up req here and continue\n");

                if (mds->mds_service != NULL) {
                        ptl_event_t ev;

                        while (1) {
                                struct ptlrpc_request request;
                                struct ptlrpc_service *service;

                                rc = PtlEQGet(mds->mds_service->srv_eq_h, &ev);
                                if (rc != PTL_OK && rc != PTL_EQ_DROPPED)
                                        break;

                                service = (struct ptlrpc_service *)ev.mem_desc.user_ptr;

                                /* FIXME: If we move to an event-driven model,
                                 * we should put the request on the stack of
                                 * mds_handle instead. */
                                memset(&request, 0, sizeof(request));
                                request.rq_reqbuf = ev.mem_desc.start +
                                        ev.offset;
                                request.rq_reqlen = ev.mem_desc.length;
                                request.rq_obd = MDS;
                                request.rq_xid = ev.match_bits;

                                request.rq_peer.peer_nid = ev.initiator.nid;
                                /* FIXME: this NI should be the incoming NI.
                                 * We don't know how to find that from here. */
                                request.rq_peer.peer_ni =
                                        mds->mds_service->srv_self.peer_ni;
                                rc = mds_handle(&request);

                                /* Inform the rpc layer the event has been handled */
                                ptl_received_rpc(service);
                        }
                } else {
                        struct ptlrpc_request *request;

                        if (list_empty(&mds->mds_reqs)) {
                                CDEBUG(D_INODE, "woke because of timer\n");
                        } else {
                                request = list_entry(mds->mds_reqs.next,
                                                     struct ptlrpc_request,
                                                     rq_list);
                                list_del(&request->rq_list);
                                rc = mds_handle(request);
                        }
                }
        }

        del_timer_sync(mds->mds_timer);

        /* XXX maintain a list of all managed devices: cleanup here */

        mds->mds_thread = NULL;
        wake_up(&mds->mds_done_waitq);
        printk("lustre_mds: exiting\n");
        return 0;
}

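/*
 * mds_stop_srv_thread / mds_start_srv_thread: stop and start the MDS
 * service thread.  Stopping sets MDS_UNMOUNT and waits until the thread
 * clears mds_thread; starting waits until the thread has recorded itself.
 */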
static void mds_stop_srv_thread(struct mds_obd *mds)
{
        mds->mds_flags |= MDS_UNMOUNT;

        while (mds->mds_thread) {
                wake_up(&mds->mds_waitq);
                sleep_on(&mds->mds_done_waitq);
        }
}

static void mds_start_srv_thread(struct mds_obd *mds)
{
        init_waitqueue_head(&mds->mds_waitq);
        init_waitqueue_head(&mds->mds_done_waitq);
        kernel_thread(mds_main, (void *)mds,
                      CLONE_VM | CLONE_FS | CLONE_FILES);
        while (!mds->mds_thread)
                sleep_on(&mds->mds_done_waitq);
}

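/*
 * mds_setup: obd setup method.  Mounts the backing file system, records
 * the superblock, vfsmount and fs type, initializes the request list and,
 * if the local "self" peer resolves, registers the request service, then
 * starts the MDS service thread.
 */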
/* mount the file system (secretly) */
static int mds_setup(struct obd_device *obddev, obd_count len,
                     void *buf)
{
        struct obd_ioctl_data *data = buf;
        struct mds_obd *mds = &obddev->u.mds;
        struct vfsmount *mnt;
        struct lustre_peer peer;
        int err;
        ENTRY;

        mnt = do_kern_mount(data->ioc_inlbuf2, 0, data->ioc_inlbuf1, NULL);
        err = PTR_ERR(mnt);
        if (IS_ERR(mnt)) {
                EXIT;
                return err;
        }

        mds->mds_sb = mnt->mnt_root->d_inode->i_sb;
        if (!obddev->u.mds.mds_sb) {
                EXIT;
                return -ENODEV;
        }

        mds->mds_vfsmnt = mnt;
        obddev->u.mds.mds_fstype = strdup(data->ioc_inlbuf2);

        mds->mds_ctxt.pwdmnt = mnt;
        mds->mds_ctxt.pwd = mnt->mnt_root;
        mds->mds_ctxt.fs = KERNEL_DS;
        mds->mds_remote_nid = 0;

        INIT_LIST_HEAD(&mds->mds_reqs);
        mds->mds_thread = NULL;
        mds->mds_flags = 0;
        mds->mds_interval = 3 * HZ;
        MDS = mds;

        spin_lock_init(&obddev->u.mds.mds_lock);

        err = kportal_uuid_to_peer("self", &peer);
        if (err == 0) {
                OBD_ALLOC(mds->mds_service, sizeof(*mds->mds_service));
                if (mds->mds_service == NULL)
                        return -ENOMEM;
                mds->mds_service->srv_buf_size = 64 * 1024;
                mds->mds_service->srv_portal = MDS_REQUEST_PORTAL;
                memcpy(&mds->mds_service->srv_self, &peer, sizeof(peer));
                mds->mds_service->srv_wait_queue = &mds->mds_waitq;

                rpc_register_service(mds->mds_service, "self");
        }

        mds_start_srv_thread(mds);

        MOD_INC_USE_COUNT;
        EXIT;
        return 0;
}

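/*
 * mds_cleanup: obd cleanup method.  Refuses to clean up while clients are
 * still connected, stops the service thread, unregisters the request
 * service and unmounts the backing file system.
 */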
static int mds_cleanup(struct obd_device *obddev)
{
        struct super_block *sb;
        struct mds_obd *mds = &obddev->u.mds;

        ENTRY;

        if (!(obddev->obd_flags & OBD_SET_UP)) {
                EXIT;
                return 0;
        }

        if (!list_empty(&obddev->obd_gen_clients)) {
                printk(KERN_WARNING __FUNCTION__ ": still has clients!\n");
                EXIT;
                return -EBUSY;
        }

        MDS = NULL;
        mds_stop_srv_thread(mds);
        /* the service is only allocated if a local peer was found in
         * mds_setup, so guard against a NULL pointer here */
        if (mds->mds_service != NULL) {
                rpc_unregister_service(mds->mds_service);
                OBD_FREE(mds->mds_service, sizeof(*mds->mds_service));
        }

        sb = mds->mds_sb;
        if (!mds->mds_sb) {
                EXIT;
                return 0;
        }

        if (!list_empty(&mds->mds_reqs)) {
                // XXX reply with errors and clean up
                CDEBUG(D_INODE, "Request list not empty!\n");
        }

        unlock_kernel();
        mntput(mds->mds_vfsmnt);
        mds->mds_sb = 0;
        kfree(mds->mds_fstype);
        lock_kernel();

        MOD_DEC_USE_COUNT;
        EXIT;
        return 0;
}

/* use obd ops to offer management infrastructure */
static struct obd_ops mds_obd_ops = {
        o_setup:       mds_setup,
        o_cleanup:     mds_cleanup,
};

static int __init mds_init(void)
{
        obd_register_type(&mds_obd_ops, LUSTRE_MDS_NAME);
        return 0;
}

static void __exit mds_exit(void)
{
        obd_unregister_type(LUSTRE_MDS_NAME);
}

MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Lustre Metadata Server (MDS) v0.01");
MODULE_LICENSE("GPL");

// for testing (maybe this stays)
EXPORT_SYMBOL(mds_queue_req);

module_init(mds_init);
module_exit(mds_exit);