lustre/llite/rw.c (fs/lustre-release.git @ 46eaee86fa0a47439ed925d93ee4d0a9b0cab3f6)
/*
 * OBDFS Super operations
 *
 * This code is issued under the GNU General Public License.
 * See the file COPYING in this distribution
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 * Copyright (C) 1999 Stelias Computing Inc,
 *                (author Peter J. Braam <braam@stelias.com>)
 * Copyright (C) 1999 Seagate Technology Inc.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/locks.h>
#include <linux/unistd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <asm/segment.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#include <linux/obd_support.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_mds.h>
#include <linux/lustre_light.h>

void ll_change_inode(struct inode *inode);

static int cache_writes = 0;


/* page cache support stuff */


/*
 * Add a page to the dirty page list.
 */
void set_page_dirty(struct page *page)
{
        if (!test_and_set_bit(PG_dirty, &page->flags)) {
                struct address_space *mapping = page->mapping;

                if (mapping) {
                        spin_lock(&pagecache_lock);
                        list_del(&page->list);
                        list_add(&page->list, &mapping->dirty_pages);
                        spin_unlock(&pagecache_lock);

                        if (mapping->host)
                                mark_inode_dirty_pages(mapping->host);
                }
        }
}

/*
 * Remove page from dirty list
 */
void __set_page_clean(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;

        if (!mapping)
                return;

        spin_lock(&pagecache_lock);
        list_del(&page->list);
        list_add(&page->list, &mapping->clean_pages);

        inode = mapping->host;
        if (list_empty(&mapping->dirty_pages)) {
                CDEBUG(D_INODE, "inode clean\n");
                inode->i_state &= ~I_DIRTY_PAGES;
        }
        spin_unlock(&pagecache_lock);
        EXIT;
}

inline void set_page_clean(struct page *page)
{
        if (PageDirty(page)) {
                ClearPageDirty(page);
                __set_page_clean(page);
        }
}

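/*
 * Note: set_page_dirty()/set_page_clean() above are the 2.4 page-cache
 * bookkeeping used by the write paths below: a page that ll_commit_page()
 * or ll_do_writepage() has successfully pushed to the OST is moved back to
 * mapping->clean_pages, and the inode drops I_DIRTY_PAGES once its dirty
 * list is empty.
 */
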
/* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated too */
static int ll_brw(int rw, struct inode *inode, struct page *page, int create)
{
        obd_count        num_obdo = 1;
        obd_count        bufs_per_obdo = 1;
        struct obdo     *oa;
        obd_size         count = PAGE_SIZE;
        obd_off          offset = ((obd_off)page->index) << PAGE_SHIFT;
        obd_flag         flags = create ? OBD_BRW_CREATE : 0;
        int              err;

        ENTRY;

        oa = obdo_alloc();
        if ( !oa ) {
                EXIT;
                return -ENOMEM;
        }
        oa->o_valid = OBD_MD_FLNOTOBD;
        ll_from_inode(oa, inode);

        err = obd_brw(rw, IID(inode), num_obdo, &oa, &bufs_per_obdo,
                      &page, &count, &offset, &flags);
        //if ( !err )
        //      ll_to_inode(inode, oa); /* copy o_blocks to i_blocks */

        obdo_free(oa);
        EXIT;
        return err;
} /* ll_brw */

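/*
 * Illustration only: a minimal synchronous single-page read by way of
 * ll_brw(), essentially what ll_readpage() below does.  Uses only names
 * already defined in this file.
 */
#if 0
static int example_read_page_sync(struct inode *inode, struct page *page)
{
        int err = ll_brw(READ, inode, page, 0);

        if (!err)
                SetPageUptodate(page);
        return err;
}
#endif
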
extern void set_page_clean(struct page *);

/* SYNCHRONOUS I/O to object storage for an inode -- object attr will be updated too */
static int ll_commit_page(struct page *page, int create, int from, int to)
{
        struct inode *inode = page->mapping->host;
        obd_count        num_obdo = 1;
        obd_count        bufs_per_obdo = 1;
        struct obdo     *oa;
        obd_size         count = to;
        obd_off          offset = (((obd_off)page->index) << PAGE_SHIFT);
        obd_flag         flags = create ? OBD_BRW_CREATE : 0;
        int              err;

        ENTRY;
        oa = obdo_alloc();
        if ( !oa ) {
                EXIT;
                return -ENOMEM;
        }
        oa->o_valid = OBD_MD_FLNOTOBD;
        ll_from_inode(oa, inode);

        CDEBUG(D_INODE, "commit_page writing (at %d) to %d, count %Ld\n",
               from, to, count);

        err = obd_brw(WRITE, IID(inode), num_obdo, &oa, &bufs_per_obdo,
                      &page, &count, &offset, &flags);
        if ( !err ) {
                SetPageUptodate(page);
                set_page_clean(page);
        }

        //if ( !err )
        //      ll_to_inode(inode, oa); /* copy o_blocks to i_blocks */

        obdo_free(oa);
        EXIT;
        return err;
} /* ll_commit_page */


/* returns the page unlocked, but with a reference */
int ll_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        int rc;

        ENTRY;

        if ( ((inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT)
             <= page->index) {
                memset(kmap(page), 0, PAGE_CACHE_SIZE);
                kunmap(page);
                goto readpage_out;
        }

        if (Page_Uptodate(page)) {
                EXIT;
                goto readpage_out;
        }

        rc = ll_brw(READ, inode, page, 0);
        if ( rc ) {
                EXIT;
                return rc;
        }
        /* PDEBUG(page, "READ"); */

 readpage_out:
        SetPageUptodate(page);
        obd_unlock_page(page);
        EXIT;
        return 0;
} /* ll_readpage */


/* returns the page unlocked, but with a reference */
int ll_dir_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        char *buf;
        __u64 offset;
        int rc = 0;
        struct mds_rep_hdr *hdr;

        ENTRY;

        if ( ((inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT)
             <= page->index) {
                memset(kmap(page), 0, PAGE_CACHE_SIZE);
                kunmap(page);
                goto readpage_out;
        }

        if (Page_Uptodate(page)) {
                EXIT;
                goto readpage_out;
        }

        offset = ((__u64)page->index) << PAGE_SHIFT;
        buf = kmap(page);
        rc = mdc_readpage(inode->i_ino, S_IFDIR, offset, buf, NULL, &hdr);
        kunmap(buf);
        if ( rc ) {
                EXIT;
                goto readpage_out;
        }

        if ((rc = hdr->status)) {
                EXIT;
                goto readpage_out;
        }

        /* PDEBUG(page, "READ"); */

        SetPageUptodate(page);
 readpage_out:
        unlock_page(page);
        EXIT;
        return rc;
} /* ll_dir_readpage */

int ll_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        obd_off offset = ((obd_off)page->index) << PAGE_SHIFT;
        int rc = 0;
        ENTRY;

        kmap(page);
        if (Page_Uptodate(page)) {
                EXIT;
                goto prepare_done;
        }

        if ( (from <= offset) && (to >= offset + PAGE_SIZE) ) {
                EXIT;
                return 0;
        }

        rc = ll_brw(READ, inode, page, 0);
        if ( !rc ) {
                SetPageUptodate(page);
        }

 prepare_done:
        set_page_dirty(page);
        //SetPageDirty(page);
        EXIT;
        return rc;
}
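
/*
 * For context (illustration only, not part of this file): in the 2.4 VFS,
 * generic_file_write() drives the two hooks above roughly as sketched
 * below.  ll_prepare_write() brings the page up to date when only part of
 * it will be overwritten, the VFS copies the user data into the kmapped
 * page, and ll_commit_write() pushes the bytes to the OST and updates
 * i_size.  Simplified; error handling and partial copies are omitted.
 */
#if 0
        page = grab_cache_page(mapping, index);
        err = mapping->a_ops->prepare_write(file, page, offset, offset + bytes);
        if (!err) {
                if (copy_from_user((char *)page_address(page) + offset, buf, bytes))
                        err = -EFAULT;
                else
                        err = mapping->a_ops->commit_write(file, page, offset, offset + bytes);
        }
        UnlockPage(page);
        page_cache_release(page);
#endif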


static kmem_cache_t *ll_pgrq_cachep = NULL;

int ll_init_pgrqcache(void)
{
        ENTRY;
        if (ll_pgrq_cachep == NULL) {
                CDEBUG(D_CACHE, "allocating ll_pgrq_cache\n");
                ll_pgrq_cachep = kmem_cache_create("ll_pgrq",
                                                   sizeof(struct ll_pgrq),
                                                   0, SLAB_HWCACHE_ALIGN,
                                                   NULL, NULL);
                if (ll_pgrq_cachep == NULL) {
                        EXIT;
                        return -ENOMEM;
                } else {
                        CDEBUG(D_CACHE, "allocated cache at %p\n",
                               ll_pgrq_cachep);
                }
        } else {
                CDEBUG(D_CACHE, "using existing cache at %p\n",
                       ll_pgrq_cachep);
        }
        EXIT;
        return 0;
} /* ll_init_pgrqcache */

inline void ll_pgrq_del(struct ll_pgrq *pgrq)
{
        --ll_cache_count;
        CDEBUG(D_INFO, "deleting page %p from list [count %ld]\n",
               pgrq->rq_page, ll_cache_count);
        list_del(&pgrq->rq_plist);
        OBDClearCachePage(pgrq->rq_page);
        kmem_cache_free(ll_pgrq_cachep, pgrq);
}

void ll_cleanup_pgrqcache(void)
{
        ENTRY;
        if (ll_pgrq_cachep != NULL) {
                CDEBUG(D_CACHE, "destroying ll_pgrqcache at %p, count %ld\n",
                       ll_pgrq_cachep, ll_cache_count);
                if (kmem_cache_destroy(ll_pgrq_cachep))
                        printk(KERN_INFO __FUNCTION__
                               ": unable to free all of cache\n");
                ll_pgrq_cachep = NULL;
        } else
                printk(KERN_INFO __FUNCTION__ ": called with NULL pointer\n");

        EXIT;
} /* ll_cleanup_pgrqcache */


/* called with the list lock held */
static struct page *ll_find_page_index(struct inode *inode,
                                       unsigned long index)
{
        struct list_head *page_list = ll_iplist(inode);
        struct list_head *tmp;
        struct page *page;

        ENTRY;

        CDEBUG(D_INFO, "looking for inode %ld pageindex %ld\n",
               inode->i_ino, index);
        OIDEBUG(inode);

        if (list_empty(page_list)) {
                EXIT;
                return NULL;
        }
        tmp = page_list;
        while ( (tmp = tmp->next) != page_list ) {
                struct ll_pgrq *pgrq;

                pgrq = list_entry(tmp, struct ll_pgrq, rq_plist);
                page = pgrq->rq_page;
                if (index == page->index) {
                        CDEBUG(D_INFO,
                               "INDEX SEARCH found page %p, index %ld\n",
                               page, index);
                        EXIT;
                        return page;
                }
        }

        EXIT;
        return NULL;
} /* ll_find_page_index */


/* write and free pages from Linux page cache: called with io lock on inodes */
int ll_do_vec_wr(struct inode **inodes, obd_count num_io,
                 obd_count num_obdos, struct obdo **obdos,
                 obd_count *oa_bufs, struct page **pages, char **bufs,
                 obd_size *counts, obd_off *offsets, obd_flag *flags)
{
        int err;

        ENTRY;

        CDEBUG(D_INFO, "writing %d page(s), %d obdo(s) in vector\n",
               num_io, num_obdos);
        if (obd_debug_level & D_INFO) { /* DEBUGGING */
                int i;
                printk("OBDOS: ");
                for (i = 0; i < num_obdos; i++)
                        printk("%ld:0x%p ", (long)obdos[i]->o_id, obdos[i]);

                printk("\nPAGES: ");
                for (i = 0; i < num_io; i++)
                        printk("0x%p ", pages[i]);
                printk("\n");
        }

        err = obd_brw(WRITE, IID(inodes[0]), num_obdos, obdos,
                      oa_bufs, pages, counts, offsets, flags);

        CDEBUG(D_INFO, "BRW done\n");
        /* release the pages from the page cache */
        while ( num_io > 0 ) {
                --num_io;
                CDEBUG(D_INFO, "calling put_page for %p, index %ld\n",
                       pages[num_io], pages[num_io]->index);
                /* PDEBUG(pages[num_io], "do_vec_wr"); */
                put_page(pages[num_io]);
                /* PDEBUG(pages[num_io], "do_vec_wr"); */
        }
        CDEBUG(D_INFO, "put_page done\n");

        while ( num_obdos > 0) {
                --num_obdos;
                CDEBUG(D_INFO, "free obdo %ld\n", (long)obdos[num_obdos]->o_id);
                /* copy o_blocks to i_blocks */
                ll_set_size(inodes[num_obdos], obdos[num_obdos]->o_size);
                //ll_to_inode(inodes[num_obdos], obdos[num_obdos]);
                obdo_free(obdos[num_obdos]);
        }
        CDEBUG(D_INFO, "obdo_free done\n");
        EXIT;
        return err;
}
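
/*
 * For reference, the shape of the vectors handed to obd_brw() above (as this
 * file uses them): obdos[] holds one obdo per object, and oa_bufs[i] gives
 * the number of pages/counts/offsets/flags entries that belong to obdos[i].
 * A two-page write against a single object would look roughly like:
 */
#if 0
        num_obdos = 1;
        num_io    = 2;
        oa_bufs[0] = 2;                 /* both pages belong to obdos[0] */
        counts[0] = counts[1] = PAGE_SIZE;
        offsets[0] = ((obd_off)pages[0]->index) << PAGE_SHIFT;
        offsets[1] = ((obd_off)pages[1]->index) << PAGE_SHIFT;
        flags[0] = flags[1] = OBD_BRW_CREATE;
#endif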

/*
 * Add a page to the write request cache list for later writing.
 * ASYNCHRONOUS write method.
 */
static int ll_add_page_to_cache(struct inode *inode, struct page *page)
{
        int err = 0;
        ENTRY;

        /* The PG_obdcache bit is cleared by ll_pgrq_del() BEFORE the page
         * is written, so at worst we will write the page out twice.
         *
         * If the page has the PG_obdcache bit set, then the inode MUST be
         * on the superblock dirty list so we don't need to check this.
         * Dirty inodes are removed from the superblock list ONLY when they
         * don't have any more cached pages.  It is possible to have an inode
         * with no dirty pages on the superblock list, but not possible to
         * have an inode with dirty pages NOT on the superblock dirty list.
         */
        if (!OBDAddCachePage(page)) {
                struct ll_pgrq *pgrq;
                pgrq = kmem_cache_alloc(ll_pgrq_cachep, SLAB_KERNEL);
                if (!pgrq) {
                        OBDClearCachePage(page);
                        EXIT;
                        return -ENOMEM;
                }
                /* not really necessary since we set all pgrq fields here
                memset(pgrq, 0, sizeof(*pgrq));
                */

                pgrq->rq_page = page;
                pgrq->rq_jiffies = jiffies;
                get_page(pgrq->rq_page);

                obd_down(&ll_i2sbi(inode)->ll_list_mutex);
                list_add(&pgrq->rq_plist, ll_iplist(inode));
                ll_cache_count++;
                //printk("-- count %d\n", ll_cache_count);

                /* If inode isn't already on superblock inodes list, add it.
                 *
                 * We increment the reference count on the inode to keep it
                 * from being freed from memory.  This _should_ be an iget()
                 * with an iput() in both flush_reqs() and put_inode(), but
                 * since put_inode() is called from iput() we can't call iput()
                 * again there.  Instead we just increment/decrement i_count,
                 * which is mostly what iget/iput do for an inode in memory.
                 */
                if ( list_empty(ll_islist(inode)) ) {
                        atomic_inc(&inode->i_count);
                        CDEBUG(D_INFO,
                               "adding inode %ld to superblock list %p\n",
                               inode->i_ino, ll_slist(inode));
                        list_add(ll_islist(inode), ll_slist(inode));
                }
                obd_up(&ll_i2sbi(inode)->ll_list_mutex);

        }

        /* XXX For testing purposes, we can write out the page here.
        err = ll_flush_reqs(ll_slist(inode), ~0UL);
         */

        EXIT;
        return err;
} /* ll_add_page_to_cache */
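
/*
 * The pgrqs queued above are meant to be drained later by the flush code
 * (ll_flush_reqs()/ll_flush_dirty_pages(), referenced only in comments
 * here), which walks the superblock inode list and hands each inode's
 * cached pages to ll_do_vec_wr().  A rough, hypothetical sketch of that
 * consumer -- not the actual implementation:
 */
#if 0
        obd_down(&sbi->ll_list_mutex);
        list_for_each(tmp, ll_iplist(inode)) {
                struct ll_pgrq *pgrq = list_entry(tmp, struct ll_pgrq, rq_plist);

                /* gather pgrq->rq_page into pages[], fill counts/offsets/flags,
                 * then call ll_do_vec_wr() and ll_pgrq_del() for each entry */
        }
        obd_up(&sbi->ll_list_mutex);
#endif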

void rebalance(void)
{
        if (ll_cache_count > 60000) {
                printk("-- count %ld\n", ll_cache_count);
                //ll_flush_dirty_pages(~0UL);
                printk("-- count %ld\n", ll_cache_count);
        }
}

/* select between SYNC and ASYNC I/O methods */
int ll_do_writepage(struct page *page, int sync)
{
        struct inode *inode = page->mapping->host;
        int err;

        ENTRY;
        /* PDEBUG(page, "WRITEPAGE"); */
        if ( sync )
                err = ll_brw(WRITE, inode, page, 1);
        else {
                err = ll_add_page_to_cache(inode, page);
                CDEBUG(D_INFO, "DO_WR ino: %ld, page %p, err %d, uptodate %d\n",
                       inode->i_ino, page, err, Page_Uptodate(page));
        }

        if ( !err ) {
                SetPageUptodate(page);
                set_page_clean(page);
        }
        /* PDEBUG(page,"WRITEPAGE"); */
        EXIT;
        return err;
} /* ll_do_writepage */


/* returns the page unlocked, but with a reference */
int ll_writepage(struct page *page)
{
        int rc;
        struct inode *inode = page->mapping->host;
        ENTRY;
        printk("---> writepage called ino %ld!\n", inode->i_ino);
        BUG();
        rc = ll_do_writepage(page, 1);
        if ( !rc ) {
                set_page_clean(page);
        } else {
                CDEBUG(D_INODE, "--> GRR %d\n", rc);
        }
        EXIT;
        return rc;
}

void write_inode_pages(struct inode *inode)
{
        struct list_head *tmp = &inode->i_mapping->dirty_pages;

        while ( (tmp = tmp->next) != &inode->i_mapping->dirty_pages) {
                struct page *page;
                page = list_entry(tmp, struct page, list);
                ll_writepage(page);
        }
}


int ll_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        int rc = 0;
        loff_t len = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        ENTRY;
        CDEBUG(D_INODE, "commit write ino %ld (end at %Ld) from %d to %d, ind %ld\n",
               inode->i_ino, len, from, to, page->index);

        if (cache_writes == 0) {
                rc = ll_commit_page(page, 1, from, to);
        }

        if (len > inode->i_size) {
                ll_set_size(inode, len);
        }

        kunmap(page);
        EXIT;
        return rc;
}


/*
 * This does the "real" work of the write. The generic routine has
 * allocated the page, locked it, done all the page alignment
 * calculations etc. Now we should just copy the data from user
 * space and write it back to the real medium.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until it is done with the page.
 *
 * Return value is the number of bytes written.
 */
int ll_write_one_page(struct file *file, struct page *page,
                      unsigned long offset, unsigned long bytes,
                      const char *buf)
{
        struct inode *inode = file->f_dentry->d_inode;
        int err;

        ENTRY;
        /* We check for complete page writes here, as we then don't have to
         * get the page before writing over everything anyways.
         */
        if ( !Page_Uptodate(page) && (offset != 0 || bytes != PAGE_SIZE) ) {
                err = ll_brw(READ, inode, page, 0);
                if ( err )
                        return err;
                SetPageUptodate(page);
        }

        if (copy_from_user((u8 *)page_address(page) + offset, buf, bytes))
                return -EFAULT;

        lock_kernel();
        err = ll_writepage(page);
        unlock_kernel();

        return (err < 0 ? err : bytes);
} /* ll_write_one_page */

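/*
 * Hypothetical caller, for illustration only (loosely modeled on the old
 * obdfs/NFS-style file write loop; not taken from this tree): walk the user
 * buffer a page at a time, grab each page with ll_getpage() below, and let
 * ll_write_one_page() copy and write it.
 */
#if 0
        while (count) {
                unsigned long page_off = pos & (PAGE_SIZE - 1);
                unsigned long bytes = PAGE_SIZE - page_off;
                struct page *page;
                int status;

                if (bytes > count)
                        bytes = count;
                page = ll_getpage(inode, pos, 1, 1 /* locked */);
                if (!page)
                        break;
                status = ll_write_one_page(file, page, page_off, bytes, buf);
                page_cache_release(page);
                if (status < 0)
                        break;
                pos += status;
                buf += status;
                count -= status;
        }
#endif
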
/*
 * return an up to date page:
 *  - if locked is true then it is returned locked
 *  - if create is true the corresponding disk blocks are created
 *  - page is held, i.e. caller must release the page
 *
 * modeled on NFS code.
 */
struct page *ll_getpage(struct inode *inode, unsigned long offset,
                        int create, int locked)
{
        struct page *page;
        int index;
        int err;

        ENTRY;

        offset = offset & PAGE_CACHE_MASK;
        CDEBUG(D_INFO, "ino: %ld, offset %ld, create %d, locked %d\n",
               inode->i_ino, offset, create, locked);
        index = offset >> PAGE_CACHE_SHIFT;

        page = grab_cache_page(&inode->i_data, index);

        /* Yuck, no page */
        if (! page) {
                printk(KERN_WARNING " grab_cache_page says no dice ...\n");
                EXIT;
                return NULL;
        }

        /* PDEBUG(page, "GETPAGE: got page - before reading\n"); */
        /* now check if the data in the page is up to date */
        if ( Page_Uptodate(page)) {
                if (!locked) {
                        if (PageLocked(page))
                                obd_unlock_page(page);
                } else {
                        printk("file %s, line %d: expecting locked page\n",
                               __FILE__, __LINE__);
                }
                EXIT;
                return page;
        }


#ifdef EXT2_OBD_DEBUG
        if ((obd_debug_level & D_INFO) && ll_find_page_index(inode, index)) {
                CDEBUG(D_INFO, "OVERWRITE: found dirty page %p, index %ld\n",
                       page, page->index);
        }
#endif

        err = ll_brw(READ, inode, page, create);

        if ( err ) {
                SetPageError(page);
                obd_unlock_page(page);
                EXIT;
                return page;
        }

        if ( !locked )
                obd_unlock_page(page);
        SetPageUptodate(page);
        /* PDEBUG(page,"GETPAGE - after reading"); */
        EXIT;
        return page;
} /* ll_getpage */


void ll_truncate(struct inode *inode)
{
        struct obdo *oa;
        int err;
        ENTRY;

        //ll_dequeue_pages(inode);

        oa = obdo_alloc();
        if ( !oa ) {
                /* XXX This would give an inconsistent FS, so deal with it as
                 * best we can for now - an obdo on the stack is not pretty.
                 */
                struct obdo obdo;

                printk(__FUNCTION__ ": obdo_alloc failed - using stack!\n");

                obdo.o_valid = OBD_MD_FLNOTOBD;
                ll_from_inode(&obdo, inode);

                err = obd_punch(IID(inode), &obdo, 0, obdo.o_size);
        } else {
                oa->o_valid = OBD_MD_FLNOTOBD;
                ll_from_inode(oa, inode);

                CDEBUG(D_INFO, "calling punch for %ld (%Lu bytes at 0)\n",
                       (long)oa->o_id, oa->o_size);
                err = obd_punch(IID(inode), oa, oa->o_size, 0);

                obdo_free(oa);
        }

        if (err) {
                printk(__FUNCTION__ ": obd_truncate fails (%d)\n", err);
                EXIT;
                return;
        }
        EXIT;
} /* ll_truncate */

struct address_space_operations ll_aops = {
        readpage: ll_readpage,
        writepage: ll_writepage,
        sync_page: block_sync_page,
        prepare_write: ll_prepare_write,
        commit_write: ll_commit_write,
        bmap: NULL
};


struct address_space_operations ll_dir_aops = {
        readpage: ll_dir_readpage
};
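
/*
 * For illustration only: these operation tables are typically wired up when
 * the client inode is set up (in this tree that happens elsewhere in llite,
 * not in this file), roughly along these lines for a regular file vs. a
 * directory:
 */
#if 0
        if (S_ISREG(inode->i_mode))
                inode->i_mapping->a_ops = &ll_aops;
        else if (S_ISDIR(inode->i_mode))
                inode->i_mapping->a_ops = &ll_dir_aops;
#endif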