3 * Copyright (C) 1992, 1993, 1994, 1995
4 * Remy Card (card@masi.ibp.fr)
5 * Laboratoire MASI - Institut Blaise Pascal
6 * Universite Pierre et Marie Curie (Paris VI)
10 * linux/fs/minix/dir.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * ext2 directory handling functions
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
20 * All code that works with directory layout had been switched to pagecache
23 * Adapted for Lustre Light
24 * Copyright (C) 2002, Cluster File Systems, Inc.
29 #include <linux/ext2_fs.h>
30 #include <linux/pagemap.h>
32 #include <linux/locks.h>
33 #include <asm/uaccess.h>
35 #define DEBUG_SUBSYSTEM S_LLIGHT
37 #include <linux/obd_support.h>
38 #include <linux/lustre_lib.h>
39 #include <linux/lustre_idl.h>
40 #include <linux/lustre_mds.h>
41 #include <linux/lustre_light.h>
/* Shorthand for the on-disk ext2 directory entry layout used throughout. */
43 typedef struct ext2_dir_entry_2 ext2_dirent;
/* PG_checked marks a directory page whose entries have passed
 * ext2_check_page(); these wrappers predate generic PageChecked helpers. */
45 #define PageChecked(page) test_bit(PG_checked, &(page)->flags)
46 #define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
/* a_ops->prepare_write hook for directory pages.  The body is elided in
 * this listing — presumably a trivial stub, since directory updates are
 * sent to the MDS rather than written back locally; TODO confirm. */
49 static int ll_dir_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
54 /* returns the page unlocked, but with a reference */
/* Fill one pagecache page of a directory by fetching its contents from
 * the metadata server (mdc_readpage).  Pages wholly beyond i_size are
 * zero-filled locally without an RPC.
 * NOTE(review): several lines are elided in this listing, so the exact
 * control flow between the visible statements is partly inferred. */
55 static int ll_dir_readpage(struct file *file, struct page *page)
57 struct inode *inode = page->mapping->host;
58 struct ll_sb_info *sbi = ll_i2sbi(inode);
62 struct ptlrep_hdr *hdr;
/* Page index at/after the rounded-up size: nothing on the MDS to read. */
66 if ( ((inode->i_size + PAGE_CACHE_SIZE -1)>>PAGE_SHIFT)
68 memset(kmap(page), 0, PAGE_CACHE_SIZE);
/* Someone else may have brought the page uptodate while we raced. */
73 if (Page_Uptodate(page)) {
/* NOTE(review): uses PAGE_SHIFT here while the rest of this file uses
 * PAGE_CACHE_SHIFT — equivalent on this kernel config, but confirm. */
78 offset = page->index << PAGE_SHIFT;
80 rc = mdc_readpage(sbi->ll_peer_ptr, inode->i_ino, S_IFDIR, offset, buf,
/* Propagate a nonzero MDS-side status from the reply header as the error. */
88 if ((rc = hdr->status)) {
93 /* PDEBUG(page, "READ"); */
95 SetPageUptodate(page);
97 obd_unlock_page(page);
100 } /* ll_dir_readpage */
/* Address-space operations for Lustre Light directories: pages are
 * populated from the MDS (ll_dir_readpage), not a local block device. */
102 struct address_space_operations ll_dir_aops = {
103 readpage: ll_dir_readpage,
104 prepare_write: ll_dir_prepare_write
/* Wait for buffer I/O on @page and report whether any requested buffer
 * failed to become uptodate (i.e. an I/O error occurred).
 * NOTE(review): the wait/locking lines are elided in this listing. */
107 int waitfor_one_page(struct page *page)
110 struct buffer_head *bh, *head = page->buffers;
115 if (buffer_req(bh) && !buffer_uptodate(bh))
117 } while ((bh = bh->b_this_page) != head);
122 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
123 * more robust, but we have what we have
/* Size of an atomic directory chunk.  The on-disk-blocksize variant is
 * commented out; the (elided) return presumably yields PAGE_CACHE_SIZE,
 * matching the page-granular MDS transfers — TODO confirm. */
125 static inline unsigned ext2_chunk_size(struct inode *inode)
127 //return inode->i_sb->s_blocksize;
/* Release a directory page obtained via ext2_get_page(): drop the
 * pagecache reference (the matching kunmap line is elided above). */
131 static inline void ext2_put_page(struct page *page)
134 page_cache_release(page);
/* Number of pagecache pages covering the directory, rounding up so a
 * trailing partial page is counted. */
137 static inline unsigned long dir_pages(struct inode *inode)
139 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
142 extern void set_page_clean(struct page *page);
/* Finish an in-core update of directory chunk [from, to) on @page: bump
 * the directory version, update i_size, mark the page uptodate and
 * clean.  No local writeback is done (commit_write/waitfor_one_page are
 * commented out) — the change reaches the MDS by other means. */
144 static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
146 struct inode *dir = page->mapping->host;
149 dir->i_version = ++event;
/* NOTE(review): as shown this sets i_size unconditionally to the chunk
 * end; a guarding "only if it grew" test may exist on an elided line —
 * confirm before relying on this for mid-directory commits. */
150 dir->i_size = (page->index << PAGE_CACHE_SHIFT) + to;
151 SetPageUptodate(page);
152 set_page_clean(page);
154 //page->mapping->a_ops->commit_write(NULL, page, from, to);
156 // err = waitfor_one_page(page);
/* Validate every directory entry on @page (rec_len sane, entries do not
 * straddle chunk boundaries) and pad the tail of a final partial page
 * with empty entries.  On success the page is marked PG_checked; the
 * error labels below log diagnostics for each failure mode.
 * NOTE(review): many lines (gotos, loop close braces) are elided. */
160 static void ext2_check_page(struct page *page)
162 struct inode *dir = page->mapping->host;
163 unsigned chunk_size = ext2_chunk_size(dir);
164 char *kaddr = page_address(page);
165 // u32 max_inumber = le32_to_cpu(sb->u.ext2_sb.s_es->s_inodes_count);
166 unsigned offs, rec_len;
167 unsigned limit = PAGE_CACHE_SIZE;
/* Last page of the directory: only i_size % PAGE_CACHE_SIZE bytes are
 * valid; stamp the remainder with empty chunk-sized entries. */
171 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
172 limit = dir->i_size & ~PAGE_CACHE_MASK;
173 if (limit & (chunk_size - 1))
175 for (offs = limit; offs<PAGE_CACHE_SIZE; offs += chunk_size) {
176 ext2_dirent *p = (ext2_dirent*)(kaddr + offs);
177 p->rec_len = cpu_to_le16(chunk_size);
/* Walk every entry up to the last position a minimal entry could start. */
182 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
183 p = (ext2_dirent *)(kaddr + offs);
184 rec_len = le16_to_cpu(p->rec_len);
186 if (rec_len < EXT2_DIR_REC_LEN(1))
190 if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
/* Entry must not cross a chunk boundary: first and last byte in chunk. */
192 if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
194 // if (le32_to_cpu(p->inode) > max_inumber)
200 SetPageChecked(page);
203 /* Too bad, we had an error */
206 printk("ext2_check_page"
207 "size of directory #%lu is not a multiple of chunk size",
212 error = "rec_len is smaller than minimal";
215 error = "unaligned directory entry";
218 error = "rec_len is too small for name_len";
221 error = "directory entry across blocks";
224 // error = "inode out of bounds";
226 printk("ext2_check_page: bad entry in directory #%lu: %s - "
227 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
228 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
229 (unsigned long) le32_to_cpu(p->inode),
230 rec_len, p->name_len);
/* Entry ran past the end of the valid area of the page. */
233 p = (ext2_dirent *)(kaddr + offs);
234 printk("ext2_check_page"
235 "entry in directory #%lu spans the page boundary"
236 "offset=%lu, inode=%lu",
237 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
238 (unsigned long) le32_to_cpu(p->inode));
/* Mark checked even on failure so the bad page is not re-validated. */
240 SetPageChecked(page);
/* Look up (reading via ll_dir_readpage if needed) page @n of directory
 * @dir and validate it once with ext2_check_page().  Returns a mapped,
 * referenced page, or ERR_PTR(-EIO) on failure; pair with
 * ext2_put_page().  Some error-path lines are elided here. */
244 static struct page * ext2_get_page(struct inode *dir, unsigned long n)
246 struct address_space *mapping = dir->i_mapping;
247 struct page *page = read_cache_page(mapping, n,
248 (filler_t*)mapping->a_ops->readpage, NULL);
252 if (!Page_Uptodate(page))
254 if (!PageChecked(page))
255 ext2_check_page(page);
263 return ERR_PTR(-EIO);
267 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
269 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
/* Compare a candidate name of length @len against entry @de; the elided
 * middle lines presumably also reject entries with a zero inode. */
271 static inline int ext2_match (int len, const char * const name,
272 struct ext2_dir_entry_2 * de)
274 if (len != de->name_len)
278 return !memcmp(name, de->name, len);
282 * p is at least 6 bytes before the end of page
/* Advance to the next directory entry by the current entry's rec_len. */
284 static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
286 return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
/* Re-derive a valid entry offset after a directory changed underneath a
 * readdir: restart at the chunk boundary (offset & mask) and walk
 * forward entry by entry until reaching/overshooting the old offset. */
289 static inline unsigned
290 ext2_validate_entry(char *base, unsigned offset, unsigned mask)
292 ext2_dirent *de = (ext2_dirent*)(base + offset);
293 ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
294 while ((char*)p < (char*)de)
295 p = ext2_next_entry(p);
296 return (char *)p - base;
/* Map on-disk EXT2_FT_* file-type codes to the DT_* codes that the
 * filldir callback expects (GNU-style designated index initializers). */
299 static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
300 [EXT2_FT_UNKNOWN] DT_UNKNOWN,
301 [EXT2_FT_REG_FILE] DT_REG,
302 [EXT2_FT_DIR] DT_DIR,
303 [EXT2_FT_CHRDEV] DT_CHR,
304 [EXT2_FT_BLKDEV] DT_BLK,
305 [EXT2_FT_FIFO] DT_FIFO,
306 [EXT2_FT_SOCK] DT_SOCK,
307 [EXT2_FT_SYMLINK] DT_LNK,
/* Map on-disk file-type codes to S_IF* mode bits (see ll_inode_by_name,
 * which indexes this with de->file_type).
 * NOTE(review): the array is sized by DT_WHT+1 but the designators are
 * EXT2_FT_* constants; the two code spaces happen to coincide for these
 * values, but the mismatch is worth confirming/cleaning up. */
310 static unsigned int ll_dt2fmt[DT_WHT + 1] = {
312 [EXT2_FT_REG_FILE] S_IFREG,
313 [EXT2_FT_DIR] S_IFDIR,
314 [EXT2_FT_CHRDEV] S_IFCHR,
315 [EXT2_FT_BLKDEV] S_IFBLK,
316 [EXT2_FT_FIFO] S_IFIFO,
317 [EXT2_FT_SOCK] S_IFSOCK,
318 [EXT2_FT_SYMLINK] S_IFLNK
/* Inverse of the table above: map (mode & S_IFMT) >> S_SHIFT to the
 * on-disk EXT2_FT_* code stored in each directory entry. */
322 static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
323 [S_IFREG >> S_SHIFT] EXT2_FT_REG_FILE,
324 [S_IFDIR >> S_SHIFT] EXT2_FT_DIR,
325 [S_IFCHR >> S_SHIFT] EXT2_FT_CHRDEV,
326 [S_IFBLK >> S_SHIFT] EXT2_FT_BLKDEV,
327 [S_IFIFO >> S_SHIFT] EXT2_FT_FIFO,
328 [S_IFSOCK >> S_SHIFT] EXT2_FT_SOCK,
329 [S_IFLNK >> S_SHIFT] EXT2_FT_SYMLINK,
/* Stamp @de's file_type field from @inode's i_mode via the mode table. */
332 static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
334 mode_t mode = inode->i_mode;
335 de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
/* readdir implementation: walk directory pages from f_pos, emitting each
 * live entry through @filldir.  If the directory version changed since
 * the last call, re-validate the resume offset against entry boundaries.
 * NOTE(review): numerous lines (error checks, loop exits, the page put)
 * are elided in this listing. */
339 new_ll_readdir (struct file * filp, void * dirent, filldir_t filldir)
341 loff_t pos = filp->f_pos;
342 struct inode *inode = filp->f_dentry->d_inode;
343 // XXX struct super_block *sb = inode->i_sb;
344 unsigned offset = pos & ~PAGE_CACHE_MASK;
345 unsigned long n = pos >> PAGE_CACHE_SHIFT;
346 unsigned long npages = dir_pages(inode);
347 unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
348 unsigned char *types = NULL;
/* i_version is bumped on every directory modification (commit_chunk). */
349 int need_revalidate = (filp->f_version != inode->i_version);
/* Past the last position where even a minimal entry could start: done. */
351 if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
354 types = ext2_filetype_table;
356 for ( ; n < npages; n++, offset = 0) {
359 struct page *page = ext2_get_page(inode, n);
363 kaddr = page_address(page);
/* Directory changed under us: snap offset back to an entry boundary. */
364 if (need_revalidate) {
365 offset = ext2_validate_entry(kaddr, offset, chunk_mask);
368 de = (ext2_dirent *)(kaddr+offset);
369 limit = kaddr + PAGE_CACHE_SIZE - EXT2_DIR_REC_LEN(1);
370 for ( ;(char*)de <= limit; de = ext2_next_entry(de))
373 unsigned char d_type = DT_UNKNOWN;
375 if (types && de->file_type < EXT2_FT_MAX)
376 d_type = types[de->file_type];
378 offset = (char *)de - kaddr;
/* Cookie encodes (page index, byte offset) so readdir can resume. */
379 over = filldir(dirent, de->name, de->name_len,
380 (n<<PAGE_CACHE_SHIFT) | offset,
381 le32_to_cpu(de->inode), d_type);
391 filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
392 filp->f_version = inode->i_version;
400 * finds an entry in the specified directory with the wanted name. It
401 * returns the page in which the entry was found, and the entry itself
402 * (as a parameter - res_dir). Page is returned mapped and unlocked.
403 * Entry is guaranteed to be valid.
/* Scan directory pages for an entry matching @dentry's name.  Starts at
 * a cached lookup page (commented out here, so presumably page 0) and
 * wraps around until returning to the start page.  On success *res_page
 * holds the mapped page containing the returned entry.
 * NOTE(review): several lines (page error handling, wrap logic, the
 * "found" path) are elided in this listing. */
405 struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
406 struct dentry *dentry, struct page ** res_page)
408 const char *name = dentry->d_name.name;
409 int namelen = dentry->d_name.len;
410 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
411 unsigned long start, n;
412 unsigned long npages = dir_pages(dir);
413 struct page *page = NULL;
419 // start = dir->u.ext2_i.i_dir_start_lookup;
426 page = ext2_get_page(dir, n);
428 kaddr = page_address(page);
429 de = (ext2_dirent *) kaddr;
/* Last byte a candidate entry of this size could legally start at. */
430 kaddr += PAGE_CACHE_SIZE - reclen;
431 while ((char *) de <= kaddr) {
432 if (ext2_match (namelen, name, de))
434 de = ext2_next_entry(de);
440 } while (n != start);
445 // dir->u.ext2_i.i_dir_start_lookup = n;
/* Return the ".." entry: the second entry on page 0 (entry after "."),
 * leaving the mapped page in *p for the caller to release. */
449 struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
451 struct page *page = ext2_get_page(dir, 0);
452 ext2_dirent *de = NULL;
455 de = ext2_next_entry((ext2_dirent *) page_address(page));
/* Resolve @dentry's name within @dir to an inode number; also report the
 * file's S_IF* format through *type (translated from the on-disk
 * file_type code).  Returns 0 (elided path) when the name is absent. */
461 ino_t ll_inode_by_name(struct inode * dir, struct dentry *dentry, int *type)
464 struct ext2_dir_entry_2 * de;
467 de = ext2_find_entry (dir, dentry, &page);
469 res = le32_to_cpu(de->inode);
470 *type = ll_dt2fmt[de->file_type];
472 page_cache_release(page);
477 /* Releases the page */
/* Repoint existing entry @de (on @page, within @dir) at @inode: rewrite
 * the inode number and file type in place, update the directory's
 * timestamps, and commit the modified chunk.  Used by rename-style
 * operations.  Releases the page (release lines elided). */
478 void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
479 struct page *page, struct inode *inode)
481 unsigned from = (char *) de - (char *) page_address(page);
482 unsigned to = from + le16_to_cpu(de->rec_len);
486 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
489 de->inode = cpu_to_le32(inode->i_ino);
490 ext2_set_de_type (de, inode);
491 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
492 err = ext2_commit_chunk(page, from, to);
/* Insert a directory entry for @dentry pointing at @inode.  Scans all
 * pages (plus one extra iteration, n == npages, which grows the
 * directory) for either an unused entry big enough, or a live entry
 * with enough slack after its name to be split in two.
 * NOTE(review): many lines (goto labels, the found/got_it paths, page
 * release, split condition plumbing) are elided in this listing. */
500 int ll_add_link (struct dentry *dentry, struct inode *inode)
502 struct inode *dir = dentry->d_parent->d_inode;
503 const char *name = dentry->d_name.name;
504 int namelen = dentry->d_name.len;
505 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
506 unsigned short rec_len, name_len;
507 struct page *page = NULL;
509 unsigned long npages = dir_pages(dir);
515 /* We take care of directory expansion in the same loop */
516 for (n = 0; n <= npages; n++) {
517 page = ext2_get_page(dir, n);
521 kaddr = page_address(page);
522 de = (ext2_dirent *)kaddr;
523 kaddr += PAGE_CACHE_SIZE - reclen;
524 while ((char *)de <= kaddr) {
/* Name already present: fail (EEXIST path elided). */
526 if (ext2_match (namelen, name, de))
/* name_len here is the *space consumed* by the existing entry's name,
 * not the raw name length — both are EXT2_DIR_REC_LEN()-rounded. */
528 name_len = EXT2_DIR_REC_LEN(de->name_len);
529 rec_len = le16_to_cpu(de->rec_len);
/* A zero rec_len on the brand-new expansion page would loop forever. */
530 if ( n==npages && rec_len == 0) {
531 printk("Fatal dir behaviour\n");
/* Case 1: dead entry with enough room — reuse it whole. */
534 if (!de->inode && rec_len >= reclen)
/* Case 2: live entry with enough slack after its name — split it. */
536 if (rec_len >= name_len + reclen)
538 de = (ext2_dirent *) ((char *) de + rec_len);
546 from = (char*)de - (char*)page_address(page);
549 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
/* Splitting a live entry: carve the tail into a new entry de1. */
553 ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
554 de1->rec_len = cpu_to_le16(rec_len - name_len);
555 de->rec_len = cpu_to_le16(name_len);
558 de->name_len = namelen;
559 memcpy (de->name, name, namelen);
560 de->inode = cpu_to_le32(inode->i_ino);
561 ext2_set_de_type (de, inode);
562 CDEBUG(D_INODE, "type set to %o\n", de->file_type);
563 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
564 err = ext2_commit_chunk(page, from, to);
566 // change_inode happens with the commit_chunk
567 /* XXX OFFSET_CACHE */
578 * ext2_delete_entry deletes a directory entry by merging it with the
579 * previous entry. Page is up-to-date. Releases the page.
/* Delete entry @dir (the dirent pointer, despite the name) by walking
 * entries from the start of its chunk to find the predecessor @pde,
 * then extending pde->rec_len to swallow the deleted entry.  If there
 * is no predecessor, the (elided) lines presumably just clear the
 * entry's inode instead — confirm.  Releases the page. */
581 int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
583 struct address_space *mapping = page->mapping;
584 struct inode *inode = mapping->host;
585 char *kaddr = page_address(page);
/* Start of the chunk containing the doomed entry. */
586 unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
587 unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
588 ext2_dirent * pde = NULL;
589 ext2_dirent * de = (ext2_dirent *) (kaddr + from);
592 while ((char*)de < (char*)dir) {
594 de = ext2_next_entry(de);
597 from = (char*)pde - (char*)page_address(page);
599 err = mapping->a_ops->prepare_write(NULL, page, from, to);
/* Merge: predecessor's rec_len now spans through the deleted entry. */
603 pde->rec_len = cpu_to_le16(to-from);
605 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
606 err = ext2_commit_chunk(page, from, to);
613 * Set the first fragment of directory.
/* Initialize a fresh directory: write the "." and ".." entries into the
 * first chunk of page 0.  "." consumes a minimal record; ".." gets the
 * remainder of the chunk.  (The name_len assignments for both entries
 * are on elided lines.) */
615 int ext2_make_empty(struct inode *inode, struct inode *parent)
617 struct address_space *mapping = inode->i_mapping;
618 struct page *page = grab_cache_page(mapping, 0);
619 unsigned chunk_size = ext2_chunk_size(inode);
620 struct ext2_dir_entry_2 * de;
627 err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
631 base = page_address(page);
633 de = (struct ext2_dir_entry_2 *) base;
635 de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
636 memcpy (de->name, ".\0\0", 4);
637 de->inode = cpu_to_le32(inode->i_ino);
638 ext2_set_de_type (de, inode);
640 de = (struct ext2_dir_entry_2 *) (base + EXT2_DIR_REC_LEN(1));
/* ".." owns all remaining bytes of the chunk. */
642 de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
643 de->inode = cpu_to_le32(parent->i_ino);
644 memcpy (de->name, "..\0", 4);
/* NOTE(review): sets ".."'s file type from @inode, not @parent — both
 * are directories, so the result is the same, but it reads oddly. */
645 ext2_set_de_type (de, inode);
647 err = ext2_commit_chunk(page, 0, chunk_size);
650 page_cache_release(page);
656 * routine to check that the specified directory is empty (for rmdir)
/* Check whether @inode (a directory) contains only the "." and ".."
 * entries, for rmdir.  Any live entry other than those makes it
 * non-empty; "." is also verified to reference the directory itself.
 * NOTE(review): return statements and the not-empty exit are elided. */
658 int ext2_empty_dir (struct inode * inode)
660 struct page *page = NULL;
661 unsigned long i, npages = dir_pages(inode);
663 for (i = 0; i < npages; i++) {
666 page = ext2_get_page(inode, i);
671 kaddr = page_address(page);
672 de = (ext2_dirent *)kaddr;
673 kaddr += PAGE_CACHE_SIZE-EXT2_DIR_REC_LEN(1);
675 while ((char *)de <= kaddr) {
676 if (de->inode != 0) {
677 /* check for . and .. */
678 if (de->name[0] != '.')
680 if (de->name_len > 2)
/* name_len < 2 means the name is exactly "."; it must point at self. */
682 if (de->name_len < 2) {
684 cpu_to_le32(inode->i_ino))
686 } else if (de->name[1] != '.')
689 de = ext2_next_entry(de);
/* file_operations for Lustre Light directories: generic read rejection
 * plus the pagecache-backed readdir above. */
700 struct file_operations ll_dir_operations = {
701 read: generic_read_dir,
702 readdir: new_ll_readdir