1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
11 * linux/fs/minix/dir.c
14 * Copyright (C) 1991, 1992 Linus Torvalds
16 * ext2 directory handling functions
18 * Big-endian to little-endian byte-swapping/bitmaps by
19 * David S. Miller (davem@caip.rutgers.edu), 1995
21 * All code that works with directory layout had been switched to pagecache
24 * Adapted for Lustre Light
25 * Copyright (C) 2002-2003, Cluster File Systems, Inc.
30 #include <linux/ext2_fs.h>
31 #include <linux/pagemap.h>
33 #include <linux/version.h>
34 #include <linux/smp_lock.h>
35 #include <asm/uaccess.h>
36 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
37 #include <linux/locks.h> // for wait_on_buffer
39 #include <linux/buffer_head.h> // for wait_on_buffer
42 #define DEBUG_SUBSYSTEM S_LLITE
44 #include <linux/obd_support.h>
45 #include <linux/obd_class.h>
46 #include <linux/lustre_lib.h>
47 #include <linux/lustre_idl.h>
48 #include <linux/lustre_mds.h>
49 #include <linux/lustre_lite.h>
50 #include <linux/lustre_dlm.h>
/* Shorthand for the on-disk ext2 directory entry; Lustre reuses the ext2
 * directory page format for directories cached from the MDS. */
52 typedef struct ext2_dir_entry_2 ext2_dirent;
/* Compat macros (older kernels lack these): the PG_checked bit records
 * that ext2_check_page() has already validated this directory page. */
54 #define PageChecked(page) test_bit(PG_checked, &(page)->flags)
55 #define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
/* a_ops->prepare_write hook for directory pages.
 * NOTE(review): the body is truncated in this extraction -- only the trace
 * line is visible; presumably the full source just returns 0 (directory
 * writes are committed via ext2_commit_chunk()).  Confirm upstream. */
58 static int ll_dir_prepare_write(struct file *file, struct page *page,
59 unsigned from, unsigned to)
61 CDEBUG(D_VFSTRACE, "VFS Op:\n");
65 /* returns the page unlocked, but with a reference */
/* Fill one directory page from the MDS: take a PR DLM lock on the
 * directory, fetch the page via mdc_readpage(), update i_size from the
 * reply body, then drop the lock.
 * NOTE(review): interior lines (declarations of rc/offset, error branches,
 * closing braces) are missing from this extraction; the visible control
 * flow is incomplete. */
66 static int ll_dir_readpage(struct file *file, struct page *page)
68 struct inode *inode = page->mapping->host;
69 struct ll_sb_info *sbi = ll_i2sbi(inode);
72 struct ptlrpc_request *request;
73 struct lustre_handle lockh;
74 struct mds_body *body;
75 struct lookup_intent it = { .it_op = IT_READDIR };
76 struct mdc_op_data data;
80 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
81 inode->i_generation, inode);
/* Page beyond EOF: hand back an all-zero page instead of asking the MDS.
 * NOTE(review): this uses PAGE_SHIFT while dir_pages() below uses
 * PAGE_CACHE_SHIFT -- inconsistent if they ever differ; worth confirming. */
82 if ((inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT <= page->index){
83 /* XXX why do we need this exactly, and why do we think that
84 * an all-zero directory page is useful?
86 CERROR("memsetting dir page %lu to zero (size %lld)\n",
87 page->index, inode->i_size);
88 memset(kmap(page), 0, PAGE_CACHE_SIZE);
90 GOTO(readpage_out, rc);
/* Enqueue a protective read (PR) lock on the directory before reading. */
93 ll_prepare_mdc_op_data(&data, inode, NULL, NULL, 0, 0);
95 rc = mdc_enqueue(&sbi->ll_mdc_conn, LDLM_PLAIN, &it, LCK_PR,
96 &data, &lockh, NULL, 0,
97 ldlm_completion_ast, ll_mdc_blocking_ast, inode);
/* The enqueue reply request rides in the intent; release it here since
 * only the lock itself is needed. */
98 request = (struct ptlrpc_request *)it.it_data;
100 ptlrpc_req_finished(request);
102 CERROR("lock enqueue: err: %d\n", rc);
106 ldlm_lock_dump_handle(D_OTHER, &lockh);
/* Someone may have filled the page while we waited for the lock. */
108 if (PageUptodate(page)) {
109 CERROR("Explain this please?\n");
110 GOTO(readpage_out, rc);
113 offset = page->index << PAGE_SHIFT;
114 rc = mdc_readpage(&sbi->ll_mdc_conn, inode->i_ino,
115 S_IFDIR, offset, page, &request);
117 body = lustre_msg_buf(request->rq_repmsg, 0, sizeof (*body));
118 LASSERT (body != NULL); /* checked by mdc_readpage() */
119 LASSERT_REPSWABBED (request, 0); /* swabbed by mdc_readpage() */
/* Directory size is authoritative on the MDS; refresh our copy. */
121 inode->i_size = body->size;
123 ptlrpc_req_finished(request);
128 SetPageUptodate(page);
131 ll_unlock(LCK_PR, &lockh);
133 CERROR("ll_unlock: err: %d\n", rc);
/* Address-space operations for Lustre directory inodes: pages are read
 * from the MDS (ll_dir_readpage) and written via the prepare_write hook.
 * NOTE(review): closing brace/terminator lost in extraction. */
137 struct address_space_operations ll_dir_aops = {
138 readpage: ll_dir_readpage,
139 prepare_write: ll_dir_prepare_write
/* Wait for all I/O on one page to complete, with per-kernel-version
 * implementations.  NOTE(review): both bodies are truncated in this
 * extraction (the 2.4 variant is missing its buffer wait loop setup and
 * return; the 2.5 variant its braces). */
142 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3))
143 int waitfor_one_page(struct page *page)
/* 2.4: walk the page's buffer_head ring and wait on any in-flight buffer. */
146 struct buffer_head *bh, *head = page->buffers;
151 if (buffer_req(bh) && !buffer_uptodate(bh))
153 } while ((bh = bh->b_this_page) != head);
156 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
157 int waitfor_one_page(struct page *page)
/* 2.5+: the page lock tracks I/O completion directly. */
159 wait_on_page_locked(page);
165 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
166 * more robust, but we have what we have
168 static inline unsigned ext2_chunk_size(struct inode *inode)
170 return inode->i_sb->s_blocksize;
/* Drop the reference taken by ll_get_dir_page()/read_cache_page().
 * NOTE(review): an interior line is missing from this extraction --
 * presumably kunmap(page) precedes the release, matching the kmap done
 * by callers; confirm against the original source. */
173 static inline void ext2_put_page(struct page *page)
176 page_cache_release(page);
179 static inline unsigned long dir_pages(struct inode *inode)
181 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
184 extern void set_page_clean(struct page *page);
/* Finish a modification of the directory chunk [from, to) on @page:
 * bump the directory's version, extend i_size if the write grew the
 * directory, and mark the page uptodate/clean.
 * NOTE(review): interior lines (return value handling, closing brace)
 * are missing from this extraction. */
186 static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
188 struct inode *dir = page->mapping->host;
189 loff_t new_size = (page->index << PAGE_CACHE_SHIFT) + to;
192 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* 2.4: i_version is driven by the global `event` counter. */
193 dir->i_version = ++event;
195 if (new_size > dir->i_size)
196 dir->i_size = new_size;
197 SetPageUptodate(page);
198 set_page_clean(page);
200 //page->mapping->a_ops->commit_write(NULL, page, from, to);
202 // err = waitfor_one_page(page);
/* Validate every directory entry on @page (rec_len sane, aligned, large
 * enough for name_len, not spanning a chunk boundary) and mark the page
 * checked.  Bad entries fall through to the error-reporting tail.
 * NOTE(review): interior lines (goto labels, blank padding, closing
 * braces) are missing from this extraction; control flow shown here is
 * incomplete. */
206 static void ext2_check_page(struct page *page)
208 struct inode *dir = page->mapping->host;
209 unsigned chunk_size = ext2_chunk_size(dir);
210 char *kaddr = page_address(page);
211 // u32 max_inumber = le32_to_cpu(sb->u.ext2_sb.s_es->s_inodes_count);
212 unsigned offs, rec_len;
213 unsigned limit = PAGE_CACHE_SIZE;
/* Last page of the directory: only i_size % PAGE_CACHE_SIZE bytes are
 * valid; the tail past `limit` gets stamped with empty whole-chunk
 * entries so later walkers never step off the end. */
217 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
218 limit = dir->i_size & ~PAGE_CACHE_MASK;
219 if (limit & (chunk_size - 1)) {
220 CERROR("limit %d dir size %lld index %ld\n",
221 limit, dir->i_size, page->index);
224 for (offs = limit; offs<PAGE_CACHE_SIZE; offs += chunk_size) {
225 ext2_dirent *p = (ext2_dirent*)(kaddr + offs);
226 p->rec_len = cpu_to_le16(chunk_size);
/* Walk the entries; each check jumps to its error label when violated. */
233 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
234 p = (ext2_dirent *)(kaddr + offs);
235 rec_len = le16_to_cpu(p->rec_len);
237 if (rec_len < EXT2_DIR_REC_LEN(1))
241 if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
/* Entry may not straddle a chunk boundary: first and last byte must
 * live in the same chunk. */
243 if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
245 // if (le32_to_cpu(p->inode) > max_inumber)
251 SetPageChecked(page);
254 /* Too bad, we had an error */
257 CERROR("ext2_check_page"
258 "size of directory #%lu is not a multiple of chunk size\n",
263 error = "rec_len is smaller than minimal";
266 error = "unaligned directory entry";
269 error = "rec_len is too small for name_len";
272 error = "directory entry across blocks";
275 // error = "inode out of bounds";
277 CERROR("ext2_check_page: bad entry in directory #%lu: %s - "
278 "offset=%lu+%u, inode=%lu, rec_len=%d, name_len=%d",
279 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT), offs,
280 (unsigned long) le32_to_cpu(p->inode),
281 rec_len, p->name_len);
/* Separate report for an entry whose rec_len runs past the page end. */
284 p = (ext2_dirent *)(kaddr + offs);
285 CERROR("ext2_check_page"
286 "entry in directory #%lu spans the page boundary"
287 "offset=%lu, inode=%lu",
288 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
289 (unsigned long) le32_to_cpu(p->inode));
291 SetPageChecked(page);
/* Read directory page @n through the pagecache (filling it with
 * ll_dir_readpage via a_ops), validate it once with ext2_check_page(),
 * and return it; on failure return ERR_PTR(-EIO).
 * NOTE(review): the success return, kmap, and error-path page release
 * are missing from this extraction. */
296 static struct page *ll_get_dir_page(struct inode *dir, unsigned long n)
298 struct address_space *mapping = dir->i_mapping;
299 struct page *page = read_cache_page(mapping, n,
300 (filler_t*)mapping->a_ops->readpage, NULL);
304 if (!PageUptodate(page))
306 if (!PageChecked(page))
307 ext2_check_page(page);
315 return ERR_PTR(-EIO);
319 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
321 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
323 static inline int ext2_match (int len, const char * const name,
324 struct ext2_dir_entry_2 * de)
326 if (len != de->name_len)
330 return !memcmp(name, de->name, len);
334 * p is at least 6 bytes before the end of page
336 static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
338 return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
341 static inline unsigned
342 ext2_validate_entry(char *base, unsigned offset, unsigned mask)
344 ext2_dirent *de = (ext2_dirent*)(base + offset);
345 ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
346 while ((char*)p < (char*)de)
347 p = ext2_next_entry(p);
348 return (char *)p - base;
/* Map on-disk EXT2_FT_* file-type codes to the DT_* values that
 * filldir() expects (old GCC designated-initializer syntax without '=').
 * NOTE(review): closing `};` lost in extraction. */
351 static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
352 [EXT2_FT_UNKNOWN] DT_UNKNOWN,
353 [EXT2_FT_REG_FILE] DT_REG,
354 [EXT2_FT_DIR] DT_DIR,
355 [EXT2_FT_CHRDEV] DT_CHR,
356 [EXT2_FT_BLKDEV] DT_BLK,
357 [EXT2_FT_FIFO] DT_FIFO,
358 [EXT2_FT_SOCK] DT_SOCK,
359 [EXT2_FT_SYMLINK] DT_LNK,
/* Map on-disk EXT2_FT_* codes (used as indices, despite the DT_WHT+1
 * sizing -- DT_WHT+1 >= EXT2_FT_MAX so the table is large enough) to
 * S_IF* mode-format bits; consumed by ll_inode_by_name().
 * NOTE(review): closing `};` lost in extraction. */
362 static unsigned int ll_dt2fmt[DT_WHT + 1] = {
364 [EXT2_FT_REG_FILE] S_IFREG,
365 [EXT2_FT_DIR] S_IFDIR,
366 [EXT2_FT_CHRDEV] S_IFCHR,
367 [EXT2_FT_BLKDEV] S_IFBLK,
368 [EXT2_FT_FIFO] S_IFIFO,
369 [EXT2_FT_SOCK] S_IFSOCK,
370 [EXT2_FT_SYMLINK] S_IFLNK
/* Inverse mapping: i_mode format bits (shifted down by S_SHIFT) to the
 * on-disk EXT2_FT_* code; used by ext2_set_de_type().
 * NOTE(review): closing `};` lost in extraction. */
374 static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
375 [S_IFREG >> S_SHIFT] EXT2_FT_REG_FILE,
376 [S_IFDIR >> S_SHIFT] EXT2_FT_DIR,
377 [S_IFCHR >> S_SHIFT] EXT2_FT_CHRDEV,
378 [S_IFBLK >> S_SHIFT] EXT2_FT_BLKDEV,
379 [S_IFIFO >> S_SHIFT] EXT2_FT_FIFO,
380 [S_IFSOCK >> S_SHIFT] EXT2_FT_SOCK,
381 [S_IFLNK >> S_SHIFT] EXT2_FT_SYMLINK,
384 static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
386 mode_t mode = inode->i_mode;
387 de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
/* readdir(2) for Lustre directories: walk the directory's pagecache
 * pages (fetched from the MDS by ll_dir_readpage) and feed each live
 * entry to @filldir, resuming from filp->f_pos.
 * NOTE(review): interior lines (local declarations of page/kaddr/de/
 * limit/over, loop braces, filldir-abort handling, RETURN) are missing
 * from this extraction. */
390 int ll_readdir(struct file * filp, void * dirent, filldir_t filldir)
392 loff_t pos = filp->f_pos;
393 struct inode *inode = filp->f_dentry->d_inode;
394 // XXX struct super_block *sb = inode->i_sb;
395 unsigned offset = pos & ~PAGE_CACHE_MASK;
396 unsigned long n = pos >> PAGE_CACHE_SHIFT;
397 unsigned long npages = dir_pages(inode);
398 unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
399 unsigned char *types = NULL;
/* If the directory changed since our last pass, f_pos may point into
 * the middle of an entry and must be re-snapped below. */
400 int need_revalidate = (filp->f_version != inode->i_version);
403 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
404 inode->i_generation, inode);
405 if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
408 types = ext2_filetype_table;
410 for ( ; n < npages; n++, offset = 0) {
415 CDEBUG(D_EXT2, "reading %lu of dir %lu page %lu, size %llu\n",
416 PAGE_CACHE_SIZE, inode->i_ino, n, inode->i_size);
417 page = ll_get_dir_page(inode, n);
419 /* size might have been updated by mdc_readpage */
420 npages = dir_pages(inode);
424 kaddr = page_address(page);
425 if (need_revalidate) {
426 offset = ext2_validate_entry(kaddr, offset, chunk_mask);
429 de = (ext2_dirent *)(kaddr+offset);
430 limit = kaddr + PAGE_CACHE_SIZE - EXT2_DIR_REC_LEN(1);
431 for ( ;(char*)de <= limit; de = ext2_next_entry(de))
434 unsigned char d_type = DT_UNKNOWN;
436 if (types && de->file_type < EXT2_FT_MAX)
437 d_type = types[de->file_type];
439 offset = (char *)de - kaddr;
/* filldir's offset cookie encodes (page number, in-page offset). */
440 over = filldir(dirent, de->name, de->name_len,
441 (n<<PAGE_CACHE_SHIFT) | offset,
442 le32_to_cpu(de->inode), d_type);
/* Remember where we stopped so the next readdir call resumes here. */
452 filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
453 filp->f_version = inode->i_version;
461 * finds an entry in the specified directory with the wanted name. It
462 * returns the page in which the entry was found, and the entry itself
463 * (as a parameter - res_dir). Page is returned mapped and unlocked.
464 * Entry is guaranteed to be valid.
/* NOTE(review): interior lines (locals kaddr/de, match label, page
 * release on miss, wraparound `n = 0`, NULL return) are missing from
 * this extraction. */
466 struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
467 struct dentry *dentry, struct page ** res_page)
469 const char *name = dentry->d_name.name;
470 int namelen = dentry->d_name.len;
471 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
472 unsigned long start, n;
473 unsigned long npages = dir_pages(dir);
474 struct page *page = NULL;
/* The per-inode lookup-start cache from ext2 is disabled here. */
480 // start = dir->u.ext2_i.i_dir_start_lookup;
487 page = ll_get_dir_page(dir, n);
489 kaddr = page_address(page);
490 de = (ext2_dirent *) kaddr;
/* Stop early enough that a full candidate entry still fits on the page. */
491 kaddr += PAGE_CACHE_SIZE - reclen;
492 while ((char *) de <= kaddr) {
493 if (ext2_match (namelen, name, de))
495 de = ext2_next_entry(de);
501 } while (n != start);
506 // dir->u.ext2_i.i_dir_start_lookup = n;
/* Return the ".." entry of @dir (always the second entry on page 0) and
 * hand the still-referenced page back through @p.
 * NOTE(review): the IS_ERR(page) guard, `*p = page;` assignment, and
 * return are missing from this extraction. */
510 struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
512 struct page *page = ll_get_dir_page(dir, 0);
513 ext2_dirent *de = NULL;
516 de = ext2_next_entry((ext2_dirent *) page_address(page));
/* Look up @dentry in @dir and return its inode number (0 on miss),
 * storing the S_IF* format bits through @type via the ll_dt2fmt table.
 * NOTE(review): locals (page, res), the found/not-found branch, and the
 * return statement are missing from this extraction. */
522 obd_id ll_inode_by_name(struct inode * dir, struct dentry *dentry, int *type)
525 struct ext2_dir_entry_2 * de;
528 de = ext2_find_entry (dir, dentry, &page);
530 res = le32_to_cpu(de->inode);
531 *type = ll_dt2fmt[de->file_type];
533 page_cache_release(page);
538 /* Releases the page */
/* Repoint existing entry @de on @page at @inode: rewrite its inode
 * number and type byte in place, then commit the chunk and touch the
 * directory's ctime/mtime.
 * NOTE(review): error handling after prepare_write, the lock/unlock of
 * the page, and the final ext2_put_page are missing from this
 * extraction. */
539 void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
540 struct page *page, struct inode *inode)
542 unsigned from = (char *) de - (char *) page_address(page);
543 unsigned to = from + le16_to_cpu(de->rec_len);
547 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
550 de->inode = cpu_to_le32(inode->i_ino);
551 ext2_set_de_type (de, inode);
552 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
553 err = ext2_commit_chunk(page, from, to);
/* Insert a directory entry for @dentry pointing at @inode into the
 * parent directory: scan pages for a slot (an unused entry, or an entry
 * with enough slack after its name), split the slot if needed, fill in
 * the new entry, and commit the chunk.
 * NOTE(review): interior lines (locals kaddr/de/err/n/from/to, the
 * got_it label, -EEXIST handling when ext2_match hits, loop braces, and
 * the final put/return path) are missing from this extraction. */
561 int ll_add_link (struct dentry *dentry, struct inode *inode)
563 struct inode *dir = dentry->d_parent->d_inode;
564 const char *name = dentry->d_name.name;
565 int namelen = dentry->d_name.len;
566 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
567 unsigned short rec_len, name_len;
568 struct page *page = NULL;
570 unsigned long npages = dir_pages(dir);
576 /* We take care of directory expansion in the same loop */
577 for (n = 0; n <= npages; n++) {
578 page = ll_get_dir_page(dir, n);
582 kaddr = page_address(page);
583 de = (ext2_dirent *)kaddr;
584 kaddr += PAGE_CACHE_SIZE - reclen;
585 while ((char *)de <= kaddr) {
/* Name already present: caller sees an error (ext2 returns -EEXIST). */
587 if (ext2_match (namelen, name, de))
/* name_len = space the existing entry actually needs;
 * rec_len = space it currently occupies. */
589 name_len = EXT2_DIR_REC_LEN(de->name_len);
590 rec_len = le16_to_cpu(de->rec_len);
/* rec_len == 0 past the last page means the zero-filled expansion
 * area was not stamped -- structural corruption. */
591 if ( n==npages && rec_len == 0) {
592 CERROR("Fatal dir behaviour\n");
/* Slot found: either a dead entry big enough, or live entry with
 * enough slack to split. */
595 if (!de->inode && rec_len >= reclen)
597 if (rec_len >= name_len + reclen)
599 de = (ext2_dirent *) ((char *) de + rec_len);
607 from = (char*)de - (char*)page_address(page);
610 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
/* Live entry with slack: split it, giving the tail to the new entry. */
614 ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
615 de1->rec_len = cpu_to_le16(rec_len - name_len);
616 de->rec_len = cpu_to_le16(name_len);
619 de->name_len = namelen;
620 memcpy (de->name, name, namelen);
621 de->inode = cpu_to_le32(inode->i_ino);
622 ext2_set_de_type (de, inode);
623 CDEBUG(D_INODE, "type set to %o\n", de->file_type);
624 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
625 err = ext2_commit_chunk(page, from, to);
627 // change_inode happens with the commit_chunk
628 /* XXX OFFSET_CACHE */
639 * ext2_delete_entry deletes a directory entry by merging it with the
640 * previous entry. Page is up-to-date. Releases the page.
/* NOTE(review): interior lines (the pde update inside the walk loop,
 * clearing dir->inode when there is no previous entry, error handling,
 * and the final put/return) are missing from this extraction. */
642 int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
644 struct address_space *mapping = page->mapping;
645 struct inode *inode = mapping->host;
646 char *kaddr = page_address(page);
/* Start of the chunk containing the doomed entry; merging never
 * crosses a chunk boundary. */
647 unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
648 unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
649 ext2_dirent * pde = NULL;
650 ext2_dirent * de = (ext2_dirent *) (kaddr + from);
/* Walk from the chunk start to find the entry preceding @dir. */
653 while ((char*)de < (char*)dir) {
655 de = ext2_next_entry(de);
658 from = (char*)pde - (char*)page_address(page);
660 err = mapping->a_ops->prepare_write(NULL, page, from, to);
/* Grow the previous entry's rec_len over the deleted one. */
664 pde->rec_len = cpu_to_le16(to-from);
666 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
667 err = ext2_commit_chunk(page, from, to);
674 * Set the first fragment of directory.
/* Write the initial "." and ".." entries into a freshly created
 * directory's first chunk.
 * NOTE(review): interior lines (page NULL check, `base = kmap(page)`,
 * name_len assignments, error handling, kunmap, return) are missing
 * from this extraction. */
676 int ext2_make_empty(struct inode *inode, struct inode *parent)
678 struct address_space *mapping = inode->i_mapping;
679 struct page *page = grab_cache_page(mapping, 0);
680 unsigned chunk_size = ext2_chunk_size(inode);
681 struct ext2_dir_entry_2 * de;
692 err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
/* "." entry points at the directory itself. */
696 de = (struct ext2_dir_entry_2 *) base;
698 de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
699 memcpy (de->name, ".\0\0", 4);
700 de->inode = cpu_to_le32(inode->i_ino);
701 ext2_set_de_type (de, inode);
/* ".." entry points at the parent and absorbs the rest of the chunk. */
703 de = (struct ext2_dir_entry_2 *) (base + EXT2_DIR_REC_LEN(1));
705 de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
706 de->inode = cpu_to_le32(parent->i_ino);
707 memcpy (de->name, "..\0", 4);
708 ext2_set_de_type (de, inode);
710 err = ext2_commit_chunk(page, 0, chunk_size);
714 page_cache_release(page);
720 * routine to check that the specified directory is empty (for rmdir)
/* Returns nonzero iff @inode contains only the "." and ".." entries.
 * NOTE(review): interior lines (locals kaddr/de, IS_ERR check on the
 * page, the not_empty label with its put_page, inner-loop braces, and
 * the returns) are missing from this extraction. */
722 int ext2_empty_dir (struct inode * inode)
724 struct page *page = NULL;
725 unsigned long i, npages = dir_pages(inode);
727 for (i = 0; i < npages; i++) {
730 page = ll_get_dir_page(inode, i);
735 kaddr = page_address(page);
736 de = (ext2_dirent *)kaddr;
737 kaddr += PAGE_CACHE_SIZE-EXT2_DIR_REC_LEN(1);
739 while ((char *)de <= kaddr) {
740 if (de->inode != 0) {
741 /* check for . and .. */
/* Any live entry not named "." or ".." makes the directory non-empty;
 * "." must also point back at this inode. */
742 if (de->name[0] != '.')
744 if (de->name_len > 2)
746 if (de->name_len < 2) {
748 cpu_to_le32(inode->i_ino))
750 } else if (de->name[1] != '.')
753 de = ext2_next_entry(de);
/* ioctl handler for directory files.  Currently implements
 * IOC_MDC_LOOKUP: resolve a name to attributes via mdc_getattr_name()
 * and copy mode/uid/gid back to userspace in the obd_ioctl_data blob.
 * NOTE(review): interior lines (the switch statement and its braces,
 * fid/valid/buf/filename declarations, the out label, default-case
 * return, and RETURNs) are missing from this extraction. */
764 static int ll_dir_ioctl(struct inode *inode, struct file *file,
765 unsigned int cmd, unsigned long arg)
767 struct ll_sb_info *sbi = ll_i2sbi(inode);
768 struct obd_ioctl_data *data;
770 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%u\n", inode->i_ino,
771 inode->i_generation, inode, cmd);
/* Punt tty ioctls ('T'-group) back to the VFS with -ENOTTY. */
773 if ((cmd & 0xffffff00) == ((int)'T') << 8) /* tty ioctls */
777 case IOC_MDC_LOOKUP: {
778 struct ptlrpc_request *request = NULL;
781 struct mds_body *body;
783 int namelen, rc, err, len = 0;
/* Pull the variable-length ioctl payload (buf/len) in from userspace. */
786 rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
791 filename = data->ioc_inlbuf1;
792 namelen = data->ioc_inllen1;
795 CERROR("IOC_MDC_LOOKUP missing filename\n");
796 GOTO(out, rc = -EINVAL);
799 valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE;
800 ll_inode2fid(&fid, inode);
801 rc = mdc_getattr_name(&sbi->ll_mdc_conn, &fid,
802 filename, namelen, valid, 0, &request);
804 CERROR("mdc_getattr_name: %d\n", rc);
808 body = lustre_msg_buf(request->rq_repmsg, 0, sizeof (*body));
809 LASSERT(body != NULL); /* checked by mdc_getattr_name */
810 LASSERT_REPSWABBED(request, 0);/* swabbed by mdc_getattr_name */
812 /* surely there's a better way -phik */
813 data->ioc_obdo1.o_mode = body->mode;
814 data->ioc_obdo1.o_uid = body->uid;
815 data->ioc_obdo1.o_gid = body->gid;
817 ptlrpc_req_finished(request);
/* Hand the filled-in blob back to userspace. */
819 err = copy_to_user((void *)arg, buf, len);
821 GOTO(out, rc = -EFAULT);
825 obd_ioctl_freedata(buf, len);
829 CERROR("unrecognized ioctl %#x\n", cmd);
/* Directory open(): directories share the regular-file open path. */
int ll_dir_open(struct inode *inode, struct file *file)
{
        return ll_file_open(inode, file);
}
/* Directory release(): directories share the regular-file release path. */
int ll_dir_release(struct inode *inode, struct file *file)
{
        return ll_file_release(inode, file);
}
/* File operations for Lustre directories.  NOTE(review): entries such as
 * open/readdir/ioctl and the closing `};` are missing from this
 * extraction -- presumably they bind ll_dir_open, ll_readdir, and
 * ll_dir_ioctl defined above. */
844 struct file_operations ll_dir_operations = {
846 release: ll_dir_release,
847 read: generic_read_dir,