1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
11 * linux/fs/minix/dir.c
14 * Copyright (C) 1991, 1992 Linus Torvalds
16 * ext2 directory handling functions
18 * Big-endian to little-endian byte-swapping/bitmaps by
19 * David S. Miller (davem@caip.rutgers.edu), 1995
21 * All code that works with directory layout had been switched to pagecache
24 * Adapted for Lustre Light
25 * Copyright (C) 2002, Cluster File Systems, Inc.
30 #include <linux/ext2_fs.h>
31 #include <linux/pagemap.h>
33 #include <linux/version.h>
34 #include <linux/smp_lock.h>
35 #include <asm/uaccess.h>
36 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
37 #include <linux/locks.h> // for wait_on_buffer
39 #include <linux/buffer_head.h> // for wait_on_buffer
42 #define DEBUG_SUBSYSTEM S_LLITE
44 #include <linux/obd_support.h>
45 #include <linux/obd_class.h>
46 #include <linux/lustre_lib.h>
47 #include <linux/lustre_idl.h>
48 #include <linux/lustre_mds.h>
49 #include <linux/lustre_lite.h>
50 #include <linux/lustre_dlm.h>
/* Directory pages use the on-disk ext2 entry layout directly. */
52 typedef struct ext2_dir_entry_2 ext2_dirent;
/*
 * PG_checked flags a directory page whose entries have already been
 * validated by ext2_check_page(), so validation runs at most once per page.
 */
54 #define PageChecked(page) test_bit(PG_checked, &(page)->flags)
55 #define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
/*
 * ->prepare_write() method installed in ll_dir_aops below; callers in this
 * file invoke it through page->mapping->a_ops->prepare_write(NULL, ...).
 */
58 static int ll_dir_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
63 /* returns the page unlocked, but with a reference */
/*
 * Address-space ->readpage() for Lustre directories: pages wholly beyond
 * i_size are served as zeroes; otherwise the page contents are fetched
 * from the MDS under a PR DLM lock taken via an IT_READDIR intent.
 */
64 static int ll_dir_readpage(struct file *file, struct page *page)
66 struct inode *inode = page->mapping->host;
67 struct ll_sb_info *sbi = ll_i2sbi(inode);
71 struct ptlrpc_request *request;
72 struct lustre_handle lockh;
73 struct mds_body *body;
74 struct lookup_intent it = { .it_op = IT_READDIR };
/* Page lies entirely past EOF: zero-fill it, no need to contact the MDS.
 * NOTE(review): mixes PAGE_CACHE_SIZE with PAGE_SHIFT — harmless only if
 * they are equal-sized on this kernel; confirm. */
78 if ((inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT <= page->index){
79 memset(kmap(page), 0, PAGE_CACHE_SIZE);
81 GOTO(readpage_out, rc);
/* Enqueue a PR lock on the directory before reading its pages. */
84 rc = mdc_enqueue(&sbi->ll_mdc_conn, LDLM_MDSINTENT, &it, LCK_PR,
85 inode, NULL, &lockh, NULL, 0, inode, sizeof(*inode));
/* The intent carries the enqueue reply; release it once consumed. */
86 request = (struct ptlrpc_request *)it.it_data;
88 ptlrpc_req_finished(request);
90 CERROR("lock enqueue: err: %d\n", rc);
94 ldlm_lock_dump((void *)(unsigned long)lockh.addr);
/* Another reader may have populated the page while we waited for the lock. */
96 if (PageUptodate(page)) {
97 CERROR("Explain this please?\n");
98 GOTO(readpage_out, rc);
101 offset = page->index << PAGE_SHIFT;
/* Fetch the directory page body from the MDS. */
103 rc = mdc_readpage(&sbi->ll_mdc_conn, inode->i_ino,
104 S_IFDIR, offset, buf, &request);
107 body = lustre_msg_buf(request->rq_repmsg, 0);
/* Refresh the cached directory size from the MDS reply. */
111 inode->i_size = body->size;
113 ptlrpc_req_finished(request);
118 SetPageUptodate(page);
/* Drop the PR lock taken above; failure is logged but not propagated here. */
121 rc = ll_unlock(LCK_PR, &lockh);
123 CERROR("ll_unlock: err: %d\n", rc);
125 } /* ll_dir_readpage */
/*
 * Address-space operations for Lustre directory inodes: pages are read
 * from the MDS (ll_dir_readpage) and modified in the page cache via
 * prepare_write/commit-style updates.
 */
127 struct address_space_operations ll_dir_aops = {
128 readpage: ll_dir_readpage,
129 prepare_write: ll_dir_prepare_write
132 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/*
 * 2.4 variant: walk the page's buffer ring and wait for any buffer with
 * outstanding I/O that is not yet up to date.
 */
133 int waitfor_one_page(struct page *page)
136 struct buffer_head *bh, *head = page->buffers;
141 if (buffer_req(bh) && !buffer_uptodate(bh))
143 } while ((bh = bh->b_this_page) != head);
/* 2.5+ variant: page-level wait, buffers are no longer walked directly. */
147 int waitfor_one_page(struct page *page)
149 wait_on_page_locked(page);
155 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
156 * more robust, but we have what we have
/* Directory chunk size == filesystem block size; entries never span chunks. */
158 static inline unsigned ext2_chunk_size(struct inode *inode)
160 return inode->i_sb->s_blocksize;
/*
 * Drop the reference taken by ext2_get_page().  NOTE(review): upstream
 * ext2 also kunmap()s the page here — confirm the kmap/kunmap pairing.
 */
163 static inline void ext2_put_page(struct page *page)
166 page_cache_release(page);
/* Number of page-cache pages needed to hold the directory, rounding up. */
169 static inline unsigned long dir_pages(struct inode *inode)
171 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
174 extern void set_page_clean(struct page *page);
/*
 * Finish an in-page directory update: bump i_version, extend i_size if the
 * write grew the directory, and mark the page up to date and clean.  The
 * usual commit_write path is deliberately bypassed (see commented calls).
 */
176 static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
178 struct inode *dir = page->mapping->host;
179 loff_t new_size = (page->index << PAGE_CACHE_SHIFT) + to;
/* Invalidate cached readdir positions (see f_version check in ll_readdir). */
182 dir->i_version = ++event;
183 if (new_size > dir->i_size)
184 dir->i_size = new_size;
185 SetPageUptodate(page);
186 set_page_clean(page);
188 //page->mapping->a_ops->commit_write(NULL, page, from, to);
190 // err = waitfor_one_page(page);
/*
 * Validate every entry on a directory page: rec_len sane, large enough for
 * the name, and not crossing a chunk boundary.  On success the tail of a
 * partial last page is padded with empty chunk-sized entries and the page
 * is marked PG_checked; on failure the specific defect is logged.
 */
194 static void ext2_check_page(struct page *page)
196 struct inode *dir = page->mapping->host;
197 unsigned chunk_size = ext2_chunk_size(dir);
198 char *kaddr = page_address(page);
199 // u32 max_inumber = le32_to_cpu(sb->u.ext2_sb.s_es->s_inodes_count);
200 unsigned offs, rec_len;
201 unsigned limit = PAGE_CACHE_SIZE;
/* Last page of the directory: only i_size % PAGE_CACHE_SIZE bytes valid. */
205 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
206 limit = dir->i_size & ~PAGE_CACHE_MASK;
/* i_size must be chunk-aligned; complain if not. */
207 if (limit & (chunk_size - 1)) {
208 CERROR("limit %d dir size %lld index %ld\n",
209 limit, dir->i_size, page->index);
/* Pad the unused tail of the page with empty whole-chunk entries. */
212 for (offs = limit; offs<PAGE_CACHE_SIZE; offs += chunk_size) {
213 ext2_dirent *p = (ext2_dirent*)(kaddr + offs);
214 p->rec_len = cpu_to_le16(chunk_size);
/* Walk the entries; each check below jumps to a labelled error path. */
221 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
222 p = (ext2_dirent *)(kaddr + offs);
223 rec_len = le16_to_cpu(p->rec_len);
225 if (rec_len < EXT2_DIR_REC_LEN(1))
229 if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
/* XOR trick: high bits differ iff the entry crosses a chunk boundary. */
231 if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
233 // if (le32_to_cpu(p->inode) > max_inumber)
239 SetPageChecked(page);
242 /* Too bad, we had an error */
245 CERROR("ext2_check_page"
246 "size of directory #%lu is not a multiple of chunk size\n",
251 error = "rec_len is smaller than minimal";
254 error = "unaligned directory entry";
257 error = "rec_len is too small for name_len";
260 error = "directory entry across blocks";
263 // error = "inode out of bounds";
265 CERROR("ext2_check_page: bad entry in directory #%lu: %s - "
266 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
267 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
268 (unsigned long) le32_to_cpu(p->inode),
269 rec_len, p->name_len);
/* Separate report for an entry overrunning the end of the page. */
272 p = (ext2_dirent *)(kaddr + offs);
273 CERROR("ext2_check_page"
274 "entry in directory #%lu spans the page boundary"
275 "offset=%lu, inode=%lu",
276 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
277 (unsigned long) le32_to_cpu(p->inode));
/* NOTE(review): the page is marked checked even on the error path here. */
279 SetPageChecked(page);
/*
 * Read directory page n through the page cache (which invokes
 * ll_dir_readpage via the aops filler) and validate it once with
 * ext2_check_page().  Returns ERR_PTR(-EIO) on failure; pair successful
 * calls with ext2_put_page().
 */
284 static struct page * ext2_get_page(struct inode *dir, unsigned long n)
286 struct address_space *mapping = dir->i_mapping;
287 struct page *page = read_cache_page(mapping, n,
288 (filler_t*)mapping->a_ops->readpage, NULL);
292 if (!PageUptodate(page))
294 if (!PageChecked(page))
295 ext2_check_page(page);
303 return ERR_PTR(-EIO);
307 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
309 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
/* Compare a candidate name against an on-disk entry: length first, then bytes. */
311 static inline int ext2_match (int len, const char * const name,
312 struct ext2_dir_entry_2 * de)
314 if (len != de->name_len)
318 return !memcmp(name, de->name, len);
322 * p is at least 6 bytes before the end of page
/* Advance to the next entry by this entry's on-disk rec_len. */
324 static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
326 return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
/*
 * Snap a stale readdir offset to a real entry boundary: start from the
 * beginning of the enclosing chunk (offset & mask) and walk entries
 * forward until reaching or passing the requested offset.
 */
329 static inline unsigned
330 ext2_validate_entry(char *base, unsigned offset, unsigned mask)
332 ext2_dirent *de = (ext2_dirent*)(base + offset);
334 ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
334 while ((char*)p < (char*)de)
335 p = ext2_next_entry(p);
336 return (char *)p - base;
/* Map on-disk EXT2_FT_* file types to the DT_* values filldir expects. */
339 static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
340 [EXT2_FT_UNKNOWN] DT_UNKNOWN,
341 [EXT2_FT_REG_FILE] DT_REG,
342 [EXT2_FT_DIR] DT_DIR,
343 [EXT2_FT_CHRDEV] DT_CHR,
344 [EXT2_FT_BLKDEV] DT_BLK,
345 [EXT2_FT_FIFO] DT_FIFO,
346 [EXT2_FT_SOCK] DT_SOCK,
347 [EXT2_FT_SYMLINK] DT_LNK,
/*
 * Map a directory entry's file type to an S_IF* mode format, used by
 * ll_inode_by_name().  NOTE(review): the array is sized by DT_WHT+1 but
 * both the designators here and the index at the call site
 * (de->file_type) are EXT2_FT_* values — the name "dt2fmt" is misleading;
 * confirm which namespace is intended.
 */
350 static unsigned int ll_dt2fmt[DT_WHT + 1] = {
352 [EXT2_FT_REG_FILE] S_IFREG,
353 [EXT2_FT_DIR] S_IFDIR,
354 [EXT2_FT_CHRDEV] S_IFCHR,
355 [EXT2_FT_BLKDEV] S_IFBLK,
356 [EXT2_FT_FIFO] S_IFIFO,
357 [EXT2_FT_SOCK] S_IFSOCK,
358 [EXT2_FT_SYMLINK] S_IFLNK
/* Inverse map: i_mode format bits (shifted by S_SHIFT) to EXT2_FT_* codes. */
362 static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
363 [S_IFREG >> S_SHIFT] EXT2_FT_REG_FILE,
364 [S_IFDIR >> S_SHIFT] EXT2_FT_DIR,
365 [S_IFCHR >> S_SHIFT] EXT2_FT_CHRDEV,
366 [S_IFBLK >> S_SHIFT] EXT2_FT_BLKDEV,
367 [S_IFIFO >> S_SHIFT] EXT2_FT_FIFO,
368 [S_IFSOCK >> S_SHIFT] EXT2_FT_SOCK,
369 [S_IFLNK >> S_SHIFT] EXT2_FT_SYMLINK,
/* Stamp an entry's file_type field from the inode's mode format bits. */
372 static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
374 mode_t mode = inode->i_mode;
375 de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
/*
 * ->readdir(): iterate directory pages from f_pos, emitting each live
 * entry through filldir.  If i_version changed since the last call the
 * resumed offset is re-validated against real entry boundaries.
 */
378 int ll_readdir(struct file * filp, void * dirent, filldir_t filldir)
380 loff_t pos = filp->f_pos;
381 struct inode *inode = filp->f_dentry->d_inode;
382 // XXX struct super_block *sb = inode->i_sb;
383 unsigned offset = pos & ~PAGE_CACHE_MASK;
384 unsigned long n = pos >> PAGE_CACHE_SHIFT;
385 unsigned long npages = dir_pages(inode);
386 unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
387 unsigned char *types = NULL;
/* Directory was modified since this fd last read it: offset may be stale. */
388 int need_revalidate = (filp->f_version != inode->i_version);
/* Nothing left to read past the last possible entry. */
391 if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
394 types = ext2_filetype_table;
396 for ( ; n < npages; n++, offset = 0) {
399 struct page *page = ext2_get_page(inode, n);
401 /* size might have been updated by mdc_readpage */
402 npages = dir_pages(inode);
406 kaddr = page_address(page);
/* Snap the resumed offset onto an entry boundary (see above). */
407 if (need_revalidate) {
408 offset = ext2_validate_entry(kaddr, offset, chunk_mask);
411 de = (ext2_dirent *)(kaddr+offset);
412 limit = kaddr + PAGE_CACHE_SIZE - EXT2_DIR_REC_LEN(1);
413 for ( ;(char*)de <= limit; de = ext2_next_entry(de))
416 unsigned char d_type = DT_UNKNOWN;
418 if (types && de->file_type < EXT2_FT_MAX)
419 d_type = types[de->file_type];
421 offset = (char *)de - kaddr;
/* filldir returns non-zero when the user buffer is full; stop then. */
422 over = filldir(dirent, de->name, de->name_len,
423 (n<<PAGE_CACHE_SHIFT) | offset,
424 le32_to_cpu(de->inode), d_type);
/* Remember where we stopped and which directory version it refers to. */
434 filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
435 filp->f_version = inode->i_version;
443 * finds an entry in the specified directory with the wanted name. It
444 * returns the page in which the entry was found, and the entry itself
445 * (as a parameter - res_dir). Page is returned mapped and unlocked.
446 * Entry is guaranteed to be valid.
448 struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
449 struct dentry *dentry, struct page ** res_page)
451 const char *name = dentry->d_name.name;
452 int namelen = dentry->d_name.len;
453 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
454 unsigned long start, n;
455 unsigned long npages = dir_pages(dir);
456 struct page *page = NULL;
/* Lookup-start caching from ext2 is disabled in this port. */
462 // start = dir->u.ext2_i.i_dir_start_lookup;
469 page = ext2_get_page(dir, n);
471 kaddr = page_address(page);
472 de = (ext2_dirent *) kaddr;
/* Stop once there is no room left on the page for a matching record. */
473 kaddr += PAGE_CACHE_SIZE - reclen;
474 while ((char *) de <= kaddr) {
475 if (ext2_match (namelen, name, de))
477 de = ext2_next_entry(de);
/* Wrap around the page list until we are back at the starting page. */
483 } while (n != start);
488 // dir->u.ext2_i.i_dir_start_lookup = n;
/*
 * Return the ".." entry of a directory: the second entry on page 0
 * (the first is ".").  *p receives the referenced page for the caller
 * to release.
 */
492 struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
494 struct page *page = ext2_get_page(dir, 0);
495 ext2_dirent *de = NULL;
498 de = ext2_next_entry((ext2_dirent *) page_address(page));
/*
 * Resolve a name in a directory to its inode number; *type receives the
 * S_IF* format derived from the entry's file_type via ll_dt2fmt.
 */
504 obd_id ll_inode_by_name(struct inode * dir, struct dentry *dentry, int *type)
507 struct ext2_dir_entry_2 * de;
510 de = ext2_find_entry (dir, dentry, &page);
512 res = le32_to_cpu(de->inode);
513 *type = ll_dt2fmt[de->file_type];
515 page_cache_release(page);
520 /* Releases the page */
/*
 * Repoint an existing directory entry at a different inode (rename
 * target overwrite): prepare the affected byte range, rewrite inode
 * number and type, touch mtime/ctime, then commit the chunk.
 */
521 void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
522 struct page *page, struct inode *inode)
524 unsigned from = (char *) de - (char *) page_address(page);
525 unsigned to = from + le16_to_cpu(de->rec_len);
529 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
532 de->inode = cpu_to_le32(inode->i_ino);
533 ext2_set_de_type (de, inode);
534 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
535 err = ext2_commit_chunk(page, from, to);
/*
 * Add a directory entry for dentry pointing at inode.  Scans existing
 * pages for a hole (an unused entry, or one with enough slack after its
 * name) and splits it; the scan runs to n == npages so the directory can
 * grow by one page.  Fails if the name already exists.
 */
543 int ll_add_link (struct dentry *dentry, struct inode *inode)
545 struct inode *dir = dentry->d_parent->d_inode;
546 const char *name = dentry->d_name.name;
547 int namelen = dentry->d_name.len;
548 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
549 unsigned short rec_len, name_len;
550 struct page *page = NULL;
552 unsigned long npages = dir_pages(dir);
558 /* We take care of directory expansion in the same loop */
559 for (n = 0; n <= npages; n++) {
560 page = ext2_get_page(dir, n);
564 kaddr = page_address(page);
565 de = (ext2_dirent *)kaddr;
566 kaddr += PAGE_CACHE_SIZE - reclen;
567 while ((char *)de <= kaddr) {
/* Duplicate name: the entry already exists. */
569 if (ext2_match (namelen, name, de))
/* name_len here is the record length needed for the existing name. */
571 name_len = EXT2_DIR_REC_LEN(de->name_len);
572 rec_len = le16_to_cpu(de->rec_len);
/* A zero rec_len on the brand-new last page would loop forever. */
573 if ( n==npages && rec_len == 0) {
574 CERROR("Fatal dir behaviour\n");
/* Free (deleted) entry big enough to reuse as-is. */
577 if (!de->inode && rec_len >= reclen)
/* Live entry with enough slack after its name to split off. */
579 if (rec_len >= name_len + reclen)
581 de = (ext2_dirent *) ((char *) de + rec_len);
589 from = (char*)de - (char*)page_address(page);
592 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
/* Split: shrink the existing record and carve the new one from its tail. */
596 ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
597 de1->rec_len = cpu_to_le16(rec_len - name_len);
598 de->rec_len = cpu_to_le16(name_len);
601 de->name_len = namelen;
602 memcpy (de->name, name, namelen);
603 de->inode = cpu_to_le32(inode->i_ino);
604 ext2_set_de_type (de, inode);
605 CDEBUG(D_INODE, "type set to %o\n", de->file_type);
606 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
607 err = ext2_commit_chunk(page, from, to);
609 // change_inode happens with the commit_chunk
610 /* XXX OFFSET_CACHE */
621 * ext2_delete_entry deletes a directory entry by merging it with the
622 * previous entry. Page is up-to-date. Releases the page.
624 int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
626 struct address_space *mapping = page->mapping;
627 struct inode *inode = mapping->host;
628 char *kaddr = page_address(page);
/* Start of the chunk containing the doomed entry; end of the entry itself. */
629 unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
630 unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
631 ext2_dirent * pde = NULL;
632 ext2_dirent * de = (ext2_dirent *) (kaddr + from);
/* Walk the chunk to locate the entry immediately preceding "dir". */
635 while ((char*)de < (char*)dir) {
637 de = ext2_next_entry(de);
640 from = (char*)pde - (char*)page_address(page);
642 err = mapping->a_ops->prepare_write(NULL, page, from, to);
/* Absorb the deleted entry into its predecessor's rec_len. */
646 pde->rec_len = cpu_to_le16(to-from);
648 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
649 err = ext2_commit_chunk(page, from, to);
656 * Set the first fragment of directory.
/*
 * Write the initial "." and ".." entries into page 0 of a new directory;
 * ".." takes all remaining space in the first chunk.
 */
658 int ext2_make_empty(struct inode *inode, struct inode *parent)
660 struct address_space *mapping = inode->i_mapping;
661 struct page *page = grab_cache_page(mapping, 0);
662 unsigned chunk_size = ext2_chunk_size(inode);
663 struct ext2_dir_entry_2 * de;
674 err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
/* "." entry: points at the directory itself. */
678 de = (struct ext2_dir_entry_2 *) base;
680 de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
681 memcpy (de->name, ".\0\0", 4);
682 de->inode = cpu_to_le32(inode->i_ino);
683 ext2_set_de_type (de, inode);
/* ".." entry: points at the parent, spans the rest of the chunk. */
685 de = (struct ext2_dir_entry_2 *) (base + EXT2_DIR_REC_LEN(1));
687 de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
688 de->inode = cpu_to_le32(parent->i_ino);
689 memcpy (de->name, "..\0", 4);
690 ext2_set_de_type (de, inode);
692 err = ext2_commit_chunk(page, 0, chunk_size);
696 page_cache_release(page);
702 * routine to check that the specified directory is empty (for rmdir)
704 int ext2_empty_dir (struct inode * inode)
706 struct page *page = NULL;
707 unsigned long i, npages = dir_pages(inode);
709 for (i = 0; i < npages; i++) {
712 page = ext2_get_page(inode, i);
717 kaddr = page_address(page);
718 de = (ext2_dirent *)kaddr;
719 kaddr += PAGE_CACHE_SIZE-EXT2_DIR_REC_LEN(1);
/* Any live entry other than "." and ".." means the directory is not empty. */
721 while ((char *)de <= kaddr) {
722 if (de->inode != 0) {
723 /* check for . and .. */
724 if (de->name[0] != '.')
726 if (de->name_len > 2)
/* name_len < 2 can only be ".", which must reference this inode. */
728 if (de->name_len < 2) {
730 cpu_to_le32(inode->i_ino))
732 } else if (de->name[1] != '.')
735 de = ext2_next_entry(de);
/*
 * File operations for Lustre directories; directory reads go through the
 * generic page-cache-backed readdir path.
 */
746 struct file_operations ll_dir_operations = {
747 read: generic_read_dir,