lustre/llite/dir.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/dir.c
 *  linux/fs/ext2/dir.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext2 directory handling functions
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 *  All code that works with directory layout had been switched to pagecache
 *  and moved here. AV
 *
 *  Adapted for Lustre Light
 *  Copyright (C) 2002-2003, Cluster File Systems, Inc.
 *
 */

#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/version.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
# include <linux/locks.h>   // for wait_on_buffer
#else
# include <linux/buffer_head.h>   // for wait_on_buffer
#endif

#define DEBUG_SUBSYSTEM S_LLITE

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <lustre/lustre_idl.h>
#include <lustre_lite.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include "llite_internal.h"

#ifdef HAVE_PG_FS_MISC
#define PageChecked(page)        test_bit(PG_fs_misc, &(page)->flags)
#define SetPageChecked(page)     set_bit(PG_fs_misc, &(page)->flags)
#endif
57
58 /*
59  * (new) readdir implementation overview.
60  *
61  * Original lustre readdir implementation cached exact copy of raw directory
62  * pages on the client. These pages were indexed in client page cache by
63  * logical offset in the directory file. This design, while very simple and
64  * intuitive had some inherent problems:
65  *
66  *     . it implies that byte offset to the directory entry serves as a
67  *     telldir(3)/seekdir(3) cookie, but that offset is not stable: in
68  *     ext3/htree directory entries may move due to splits, and more
69  *     importantly,
70  *
71  *     . it is incompatible with the design of split directories for cmd3,
72  *     that assumes that names are distributed across nodes based on their
73  *     hash, and so readdir should be done in hash order.
74  *
75  * New readdir implementation does readdir in hash order, and uses hash of a
76  * file name as a telldir/seekdir cookie. This led to number of complications:
77  *
78  *     . hash is not unique, so it cannot be used to index cached directory
79  *     pages on the client (note, that it requires a whole pageful of hash
80  *     collided entries to cause two pages to have identical hashes);
81  *
82  *     . hash is not unique, so it cannot, strictly speaking, be used as an
83  *     entry cookie. ext3/htree has the same problem and lustre implementation
84  *     mimics their solution: seekdir(hash) positions directory at the first
85  *     entry with the given hash.
86  *
87  * Client side.
88  *
89  * 0. caching
90  *
91  * Client caches directory pages using hash of the first entry as an index. As
92  * noted above hash is not unique, so this solution doesn't work as is:
93  * special processing is needed for "page hash chains" (i.e., sequences of
94  * pages filled with entries all having the same hash value).
95  *
96  * First, such chains have to be detected. To this end, server returns to the
97  * client the hash of the first entry on the page next to one returned. When
98  * client detects that this hash is the same as hash of the first entry on the
99  * returned page, page hash collision has to be handled. Pages in the
100  * hash chain, except first one, are termed "overflow pages".
101  *
102  * Solution to index uniqueness problem is to not cache overflow
103  * pages. Instead, when page hash collision is detected, all overflow pages
104  * from emerging chain are immediately requested from the server and placed in
105  * a special data structure (struct ll_dir_chain). This data structure is used
106  * by ll_readdir() to process entries from overflow pages. When readdir
107  * invocation finishes, overflow pages are discarded. If page hash collision
108  * chain weren't completely processed, next call to readdir will again detect
109  * page hash collision, again read overflow pages in, process next portion of
110  * entries and again discard the pages. This is not as wasteful as it looks,
111  * because, given reasonable hash, page hash collisions are extremely rare.
112  *
113  * 1. directory positioning
114  *
115  * When seekdir(hash) is called, original
116  *
117  *
118  *
119  *
120  *
121  *
122  *
123  *
124  * Server.
125  *
126  * identification of and access to overflow pages
127  *
128  * page format
129  *
130  *
131  *
132  *
133  *
134  */
135
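/*
 * Directory pages are cached with the bitwise complement of their starting
 * hash as the page index, so that radix_tree_gang_lookup() in
 * ll_dir_page_locate() finds the cached page whose starting hash is the
 * largest one not exceeding the hash being looked up.
 */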
static __u32 hash_x_index(__u32 value)
{
        return ((__u32)~0) - value;
}

/* returns the page unlocked, but with a reference */
static int ll_dir_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ptlrpc_request *request;
        struct mdt_body *body;
        struct obd_capa *oc;
        __u64 hash;
        int rc;
        ENTRY;

        hash = hash_x_index(page->index);
        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) off %lu\n",
               inode->i_ino, inode->i_generation, inode, (unsigned long)hash);

        oc = ll_mdscapa_get(inode);
        rc = md_readpage(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode),
                         oc, hash, page, &request);
        capa_put(oc);
        if (!rc) {
                body = lustre_msg_buf(request->rq_repmsg, REPLY_REC_OFF,
                                      sizeof(*body));
                /* Checked by mdc_readpage() */
                LASSERT(body != NULL);

                /* Swabbed by mdc_readpage() */
                LASSERT_REPSWABBED(request, REPLY_REC_OFF);

                if (body->valid & OBD_MD_FLSIZE)
                        inode->i_size = body->size;
                SetPageUptodate(page);
        }
        ptlrpc_req_finished(request);

        unlock_page(page);
        EXIT;
        return rc;
}

struct address_space_operations ll_dir_aops = {
        .readpage  = ll_dir_readpage,
};

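/*
 * Number of pages spanned by the directory, computed from i_size (which
 * ll_dir_readpage() updates from the MDS reply when OBD_MD_FLSIZE is set).
 */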
static inline unsigned long dir_pages(struct inode *inode)
{
        return (inode->i_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
}

static inline unsigned ll_chunk_size(struct inode *inode)
{
        return inode->i_sb->s_blocksize;
}

static void ll_check_page(struct inode *dir, struct page *page)
{
        /* XXX: check page format later */
        SetPageChecked(page);
}

static inline void ll_put_page(struct page *page)
{
        kunmap(page);
        page_cache_release(page);
}

/*
 * Find, kmap and return the cached page that covers the given hash.  On
 * success *start and *end are set to the page's hash range.  Returns NULL
 * if no usable page is cached, or ERR_PTR(-EIO) if a cached page is found
 * but is not up to date.  The caller must release the page with
 * ll_put_page().
 */
static struct page *ll_dir_page_locate(struct inode *dir, unsigned long hash,
                                       __u32 *start, __u32 *end)
{
        struct address_space *mapping = dir->i_mapping;
        /*
         * Complement of hash is used as an index so that
         * radix_tree_gang_lookup() can be used to find a page with starting
         * hash _smaller_ than the one we are looking for.
         */
        unsigned long offset = hash_x_index(hash);
        struct page *page;
        int found;

        spin_lock_irq(&mapping->tree_lock);
        found = radix_tree_gang_lookup(&mapping->page_tree,
                                       (void **)&page, offset, 1);
        if (found > 0) {
                struct lu_dirpage *dp;

                page_cache_get(page);
                spin_unlock_irq(&mapping->tree_lock);
                /*
                 * In contrast to find_lock_page() we are sure that directory
                 * page cannot be truncated (while DLM lock is held) and,
                 * hence, can avoid restart.
                 *
                 * In fact, page cannot be locked here at all, because
                 * ll_dir_readpage() does synchronous io.
                 */
                wait_on_page(page);
                if (PageUptodate(page)) {
                        dp = kmap(page);
                        *start = le32_to_cpu(dp->ldp_hash_start);
                        *end   = le32_to_cpu(dp->ldp_hash_end);
                        LASSERT(*start <= hash);
                        if (hash > *end || (*end != *start && hash == *end)) {
                                kunmap(page);
                                lock_page(page);
                                ll_truncate_complete_page(page);
                                unlock_page(page);
                                page_cache_release(page);
                                page = NULL;
                        }
                } else {
                        page_cache_release(page);
                        page = ERR_PTR(-EIO);
                }

        } else {
                spin_unlock_irq(&mapping->tree_lock);
                page = NULL;
        }
        return page;
}

/*
 * Chain of hash overflow pages.
 */
struct ll_dir_chain {
        /* XXX something. Later */
};

static void ll_dir_chain_init(struct ll_dir_chain *chain)
{
}

static void ll_dir_chain_fini(struct ll_dir_chain *chain)
{
}

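/*
 * Return the kmapped directory page covering @hash.  A PR UPDATE DLM lock on
 * the directory is matched (or enqueued if none is cached) before the page
 * cache is consulted; a cached page is reused when possible, otherwise the
 * page is read from the MDS via ll_dir_readpage().  With @exact set, a cached
 * page whose starting hash differs from @hash is considered stale and is
 * re-fetched.  The page must be released with ll_put_page().
 */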
static struct page *ll_get_dir_page(struct inode *dir, __u32 hash, int exact,
                                    struct ll_dir_chain *chain)
{
        ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
        struct address_space *mapping = dir->i_mapping;
        struct lustre_handle lockh;
        struct lu_dirpage *dp;
        struct page *page;
        ldlm_mode_t mode;
        int rc;
        __u32 start;
        __u32 end;

        mode = LCK_PR;
        rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
                           ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
        if (!rc) {
                struct lookup_intent it = { .it_op = IT_READDIR };
                struct ptlrpc_request *request;
                struct md_op_data *op_data;

                op_data = ll_prep_md_op_data(NULL, dir, NULL, NULL, 0, 0,
                                             LUSTRE_OPC_ANY);
                if (IS_ERR(op_data))
                        return (void *)op_data;

                rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, LDLM_IBITS, &it,
                                mode, op_data, &lockh, NULL, 0,
                                ldlm_completion_ast, ll_md_blocking_ast, dir,
                                0);

                ll_finish_md_op_data(op_data);

                request = (struct ptlrpc_request *)it.d.lustre.it_data;
                if (request)
                        ptlrpc_req_finished(request);
                if (rc < 0) {
                        CERROR("lock enqueue: rc: %d\n", rc);
                        return ERR_PTR(rc);
                }
        } else {
                /* for cross-ref object, l_ast_data of the lock may not be set,
                 * we reset it here */
                md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie, dir);
        }
        ldlm_lock_dump_handle(D_OTHER, &lockh);

        page = ll_dir_page_locate(dir, hash, &start, &end);
        if (IS_ERR(page))
                GOTO(out_unlock, page);

        if (page != NULL) {
                /*
                 * XXX nikita: not entirely correct handling of a corner case:
                 * suppose hash chain of entries with hash value HASH crosses
                 * border between pages P0 and P1. First both P0 and P1 are
                 * cached, seekdir() is called for some entry from the P0 part
                 * of the chain. Later P0 goes out of cache. telldir(HASH)
                 * happens and finds P1, as it starts with matching hash
                 * value. Remaining entries from P0 part of the chain are
                 * skipped. (Is that really a bug?)
                 *
                 * Possible solutions: 0. don't cache P1 in such a case, handle
                 * it as an "overflow" page. 1. invalidate all pages at
                 * once. 2. use HASH|1 as an index for P1.
                 */
                if (exact && hash != start) {
                        /*
                         * readdir asked for a page starting _exactly_ from
                         * given hash, but cache contains stale page, with
                         * entries with smaller hash values. Stale page should
                         * be invalidated, and new one fetched.
                         */
                        CWARN("Stale readpage page %p: %#lx != %#lx\n", page,
                              (unsigned long)hash, (unsigned long)start);
                        lock_page(page);
                        ll_truncate_complete_page(page);
                        unlock_page(page);
                        page_cache_release(page);
                } else
                        GOTO(hash_collision, page);
        }

        page = read_cache_page(mapping, hash_x_index(hash),
                               (filler_t*)mapping->a_ops->readpage, NULL);
        if (IS_ERR(page))
                GOTO(out_unlock, page);

        wait_on_page(page);
        (void)kmap(page);
        if (!PageUptodate(page))
                goto fail;
        if (!PageChecked(page))
                ll_check_page(dir, page);
        if (PageError(page))
                goto fail;
hash_collision:
        dp = page_address(page);

        start = le32_to_cpu(dp->ldp_hash_start);
        end   = le32_to_cpu(dp->ldp_hash_end);
        if (end == start) {
                LASSERT(start == hash);
                CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end);
                /*
                 * Fetch whole overflow chain...
                 *
                 * XXX not yet.
                 */
                goto fail;
        }
out_unlock:
        ldlm_lock_decref(&lockh, mode);
        return page;

fail:
        ll_put_page(page);
        page = ERR_PTR(-EIO);
        goto out_unlock;
}

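/*
 * Iterate over directory entries in hash order.  filp->f_pos holds the hash
 * cookie of the next entry to return; DIR_END_OFF marks end-of-directory.
 * Pages are fetched with ll_get_dir_page() and each page's ldp_hash_end
 * provides the cookie for the following page.
 */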
int ll_readdir(struct file *filp, void *cookie, filldir_t filldir)
{
        struct inode         *inode = filp->f_dentry->d_inode;
        struct ll_inode_info *info  = ll_i2info(inode);
        struct ll_sb_info    *sbi   = ll_i2sbi(inode);
        __u32                 pos   = filp->f_pos;
        struct page          *page;
        struct ll_dir_chain   chain;
        int rc;
        int done;
        int shift;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu\n",
               inode->i_ino, inode->i_generation, inode,
               (unsigned long)pos, inode->i_size);

        if (pos == DIR_END_OFF)
                /*
                 * end-of-file.
                 */
                RETURN(0);

        rc    = 0;
        done  = 0;
        shift = 0;
        ll_dir_chain_init(&chain);

        page = ll_get_dir_page(inode, pos, 0, &chain);

        while (rc == 0 && !done) {
                struct lu_dirpage *dp;
                struct lu_dirent  *ent;

                if (!IS_ERR(page)) {
                        /*
                         * If page is empty (end of directory is reached),
                         * use this value.
                         */
                        __u32 hash = DIR_END_OFF;
                        __u32 next;

                        dp = page_address(page);
                        for (ent = lu_dirent_start(dp); ent != NULL && !done;
                             ent = lu_dirent_next(ent)) {
                                char          *name;
                                int            namelen;
                                struct lu_fid  fid;
                                ino_t          ino;

                                /*
                                 * XXX: implement correct swabbing here.
                                 */

                                hash    = le32_to_cpu(ent->lde_hash);
                                namelen = le16_to_cpu(ent->lde_namelen);

                                if (hash < pos)
                                        /*
                                         * Skip until we find target hash
                                         * value.
                                         */
                                        continue;

                                if (namelen == 0)
                                        /*
                                         * Skip dummy record.
                                         */
                                        continue;

                                fid  = ent->lde_fid;
                                name = ent->lde_name;
                                fid_le_to_cpu(&fid, &fid);
                                ino  = ll_fid_build_ino(sbi, &fid);

                                done = filldir(cookie, name, namelen,
                                               (loff_t)hash, ino, DT_UNKNOWN);
                        }
                        next = le32_to_cpu(dp->ldp_hash_end);
                        ll_put_page(page);
                        if (!done) {
                                pos = next;
                                if (pos == DIR_END_OFF)
                                        /*
                                         * End of directory reached.
                                         */
                                        done = 1;
                                else if (1 /* chain is exhausted */)
                                        /*
                                         * Normal case: continue to the next
                                         * page.
                                         */
                                        page = ll_get_dir_page(inode, pos, 1,
                                                               &chain);
                                else {
                                        /*
                                         * go into overflow page.
                                         */
                                }
                        } else
                                pos = hash;
                } else {
                        rc = PTR_ERR(page);
                        CERROR("error reading dir "DFID" at %lu: rc %d\n",
                               PFID(&info->lli_fid), (unsigned long)pos, rc);
                }
        }

        filp->f_pos = (loff_t)(__s32)pos;
        filp->f_version = inode->i_version;
        touch_atime(filp->f_vfsmnt, filp->f_dentry);

        ll_dir_chain_fini(&chain);

        RETURN(rc);
}

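/* Copy the quota control fields shared by struct if_quotactl and
 * struct obd_quotactl. */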
#define QCTL_COPY(out, in)              \
do {                                    \
        Q_COPY(out, in, qc_cmd);        \
        Q_COPY(out, in, qc_type);       \
        Q_COPY(out, in, qc_id);         \
        Q_COPY(out, in, qc_stat);       \
        Q_COPY(out, in, qc_dqinfo);     \
        Q_COPY(out, in, qc_dqblk);      \
} while (0)

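/*
 * Set the default striping for a directory by sending the lov_user_md to the
 * MDS with md_setattr().  The lump comes from userspace in host order and is
 * swabbed to little endian before it is sent.
 */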
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct md_op_data *op_data;
        struct ptlrpc_request *req = NULL;
        int rc = 0;

        /*
         * This is coming from userspace, so should be in
         * local endian.  But the MDS would like it in little
         * endian, so we swab it before we send it.
         */
        if (lump->lmm_magic != LOV_USER_MAGIC)
                RETURN(-EINVAL);

        if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC))
                lustre_swab_lov_user_md(lump);

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                     LUSTRE_OPC_ANY);
        if (IS_ERR(op_data))
                RETURN(PTR_ERR(op_data));

        /* swabbing is done in lov_setstripe() on server side */
        rc = md_setattr(sbi->ll_md_exp, op_data, lump, sizeof(*lump),
                        NULL, 0, &req);
        ll_finish_md_op_data(op_data);
        ptlrpc_req_finished(req);
        if (rc) {
                if (rc != -EPERM && rc != -EACCES)
                        CERROR("mdc_setattr fails: rc = %d\n", rc);
        }
        return rc;
}

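/*
 * Fetch the directory's striping EA from the MDS.  On success *lmmp points
 * into the reply buffer of *request, which the caller must release with
 * ptlrpc_req_finished(); -ENODATA is returned when the directory has no
 * striping EA.
 */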
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
                     int *lmm_size, struct ptlrpc_request **request)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct mdt_body   *body;
        struct lov_mds_md *lmm = NULL;
        struct ptlrpc_request *req = NULL;
        int rc, lmmsize;
        struct obd_capa *oc;

        rc = ll_get_max_mdsize(sbi, &lmmsize);
        if (rc)
                RETURN(rc);

        oc = ll_mdscapa_get(inode);
        rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode),
                        oc, OBD_MD_FLEASIZE | OBD_MD_FLDIREA,
                        lmmsize, &req);
        capa_put(oc);
        if (rc < 0) {
                CDEBUG(D_INFO, "md_getattr failed on inode "
                       "%lu/%u: rc %d\n", inode->i_ino,
                       inode->i_generation, rc);
                GOTO(out, rc);
        }

        body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
        LASSERT(body != NULL); /* checked by md_getattr_name */
        /* swabbed by mdc_getattr_name */
        LASSERT_REPSWABBED(req, REPLY_REC_OFF);

        lmmsize = body->eadatasize;

        if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
            lmmsize == 0) {
                GOTO(out, rc = -ENODATA);
        }

        lmm = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF + 1, lmmsize);
        LASSERT(lmm != NULL);
        LASSERT_REPSWABBED(req, REPLY_REC_OFF + 1);

        /*
         * This is coming from the MDS, so is probably in
         * little endian.  We convert it to host endian before
         * passing it to userspace.
         */
        if (lmm->lmm_magic == __swab32(LOV_MAGIC)) {
                lustre_swab_lov_user_md((struct lov_user_md *)lmm);
                lustre_swab_lov_user_md_objects((struct lov_user_md *)lmm);
        }
out:
        *lmmp = lmm;
        *lmm_size = lmmsize;
        *request = req;
        return rc;
}

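/*
 * ioctl handler for directories: striping get/set, quota control, remote ACL
 * and other Lustre-specific ioctls are dispatched from here; anything
 * unrecognized is passed on to obd_iocontrol().
 */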
static int ll_dir_ioctl(struct inode *inode, struct file *file,
                        unsigned int cmd, unsigned long arg)
{
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct obd_ioctl_data *data;
        ENTRY;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
               inode->i_ino, inode->i_generation, inode, cmd);

        /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
        if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
                return -ENOTTY;

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
        switch(cmd) {
        case EXT3_IOC_GETFLAGS:
        case EXT3_IOC_SETFLAGS:
                RETURN(ll_iocontrol(inode, file, cmd, arg));
        case EXT3_IOC_GETVERSION_OLD:
        case EXT3_IOC_GETVERSION:
                RETURN(put_user(inode->i_generation, (int *)arg));
        /* We need to special case any other ioctls we want to handle,
         * to send them to the MDS/OST as appropriate and to properly
         * network encode the arg field.
        case EXT3_IOC_SETVERSION_OLD:
        case EXT3_IOC_SETVERSION:
        */
        case IOC_MDC_LOOKUP: {
                struct ptlrpc_request *request = NULL;
                int namelen, rc, len = 0;
                char *buf = NULL;
                char *filename;
                struct obd_capa *oc;

                rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
                if (rc)
                        RETURN(rc);
                data = (void *)buf;

                filename = data->ioc_inlbuf1;
                namelen = data->ioc_inllen1;

                if (namelen < 1) {
                        CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
                        GOTO(out, rc = -EINVAL);
                }

                oc = ll_mdscapa_get(inode);
                rc = md_getattr_name(sbi->ll_md_exp, ll_inode2fid(inode), oc,
                                     filename, namelen, OBD_MD_FLID, 0,
                                     &request);
                capa_put(oc);
                if (rc < 0) {
                        CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
                        GOTO(out, rc);
                }

                ptlrpc_req_finished(request);

                EXIT;
        out:
                obd_ioctl_freedata(buf, len);
                return rc;
        }
        case LL_IOC_LOV_SETSTRIPE: {
                struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
                int rc = 0;

                LASSERT(sizeof(lum) == sizeof(*lump));
                LASSERT(sizeof(lum.lmm_objects[0]) ==
                        sizeof(lump->lmm_objects[0]));
                rc = copy_from_user(&lum, lump, sizeof(lum));
                if (rc)
                        RETURN(-EFAULT);

                rc = ll_dir_setstripe(inode, &lum);

                RETURN(rc);
        }
        case LL_IOC_OBD_STATFS:
                RETURN(ll_obd_statfs(inode, (void *)arg));
        case LL_IOC_LOV_GETSTRIPE:
        case LL_IOC_MDC_GETINFO:
        case IOC_MDC_GETFILEINFO:
        case IOC_MDC_GETFILESTRIPE: {
                struct ptlrpc_request *request = NULL;
                struct lov_user_md *lump;
                struct lov_mds_md *lmm = NULL;
                struct mdt_body *body;
                char *filename = NULL;
                int rc, lmmsize;

                if (cmd == IOC_MDC_GETFILEINFO ||
                    cmd == IOC_MDC_GETFILESTRIPE) {
                        filename = getname((const char *)arg);
                        if (IS_ERR(filename))
                                RETURN(PTR_ERR(filename));

                        rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
                                                      &lmmsize, &request);
                } else {
                        rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
                }

                if (request) {
                        body = lustre_msg_buf(request->rq_repmsg,
                                              REPLY_REC_OFF, sizeof(*body));
                        LASSERT(body != NULL); /* checked by md_getattr_name */
                        /* swabbed by md_getattr_name */
                        LASSERT_REPSWABBED(request, REPLY_REC_OFF);
                } else {
                        GOTO(out_req, rc);
                }

                if (rc < 0) {
                        if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
                                               cmd == LL_IOC_MDC_GETINFO))
                                GOTO(skip_lmm, rc = 0);
                        else
                                GOTO(out_req, rc);
                }

                if (cmd == IOC_MDC_GETFILESTRIPE ||
                    cmd == LL_IOC_LOV_GETSTRIPE) {
                        lump = (struct lov_user_md *)arg;
                } else {
                        struct lov_user_mds_data *lmdp;
                        lmdp = (struct lov_user_mds_data *)arg;
                        lump = &lmdp->lmd_lmm;
                }
                rc = copy_to_user(lump, lmm, lmmsize);
                if (rc)
                        GOTO(out_lmm, rc = -EFAULT);
        skip_lmm:
                if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
                        struct lov_user_mds_data *lmdp;
                        lstat_t st = { 0 };

                        st.st_dev     = inode->i_sb->s_dev;
                        st.st_mode    = body->mode;
                        st.st_nlink   = body->nlink;
                        st.st_uid     = body->uid;
                        st.st_gid     = body->gid;
                        st.st_rdev    = body->rdev;
                        st.st_size    = body->size;
                        st.st_blksize = CFS_PAGE_SIZE;
                        st.st_blocks  = body->blocks;
                        st.st_atime   = body->atime;
                        st.st_mtime   = body->mtime;
                        st.st_ctime   = body->ctime;
                        st.st_ino     = inode->i_ino;

                        lmdp = (struct lov_user_mds_data *)arg;
                        rc = copy_to_user(&lmdp->lmd_st, &st, sizeof(st));
                        if (rc)
                                GOTO(out_lmm, rc = -EFAULT);
                }

                EXIT;
        out_lmm:
                if (lmm && lmm->lmm_magic == LOV_MAGIC_JOIN)
                        OBD_FREE(lmm, lmmsize);
        out_req:
                ptlrpc_req_finished(request);
                if (filename)
                        putname(filename);
                return rc;
        }
        case IOC_LOV_GETINFO: {
                struct lov_user_mds_data *lumd;
                struct lov_stripe_md *lsm;
                struct lov_user_md *lum;
                struct lov_mds_md *lmm;
                int lmmsize;
                lstat_t st;
                int rc;

                lumd = (struct lov_user_mds_data *)arg;
                lum = &lumd->lmd_lmm;

                rc = ll_get_max_mdsize(sbi, &lmmsize);
                if (rc)
                        RETURN(rc);

                OBD_ALLOC(lmm, lmmsize);
                rc = copy_from_user(lmm, lum, lmmsize);
                if (rc)
                        GOTO(free_lmm, rc = -EFAULT);

                rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
                if (rc < 0)
                        GOTO(free_lmm, rc = -ENOMEM);

                rc = obd_checkmd(sbi->ll_dt_exp, sbi->ll_md_exp, lsm);
                if (rc)
                        GOTO(free_lsm, rc);

                /* Perform glimpse_size operation. */
                memset(&st, 0, sizeof(st));

                rc = ll_glimpse_ioctl(sbi, lsm, &st);
                if (rc)
                        GOTO(free_lsm, rc);

                rc = copy_to_user(&lumd->lmd_st, &st, sizeof(st));
                if (rc)
                        GOTO(free_lsm, rc = -EFAULT);

                EXIT;
        free_lsm:
                obd_free_memmd(sbi->ll_dt_exp, &lsm);
        free_lmm:
                OBD_FREE(lmm, lmmsize);
                return rc;
        }
        case OBD_IOC_LLOG_CATINFO: {
                struct ptlrpc_request *req = NULL;
                char *buf = NULL;
                int rc, len = 0;
                char *bufs[3] = { NULL }, *str;
                int lens[3] = { sizeof(struct ptlrpc_body) };
                int size[2] = { sizeof(struct ptlrpc_body) };

                rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
                if (rc)
                        RETURN(rc);
                data = (void *)buf;

                if (!data->ioc_inlbuf1) {
                        obd_ioctl_freedata(buf, len);
                        RETURN(-EINVAL);
                }

                lens[REQ_REC_OFF] = data->ioc_inllen1;
                bufs[REQ_REC_OFF] = data->ioc_inlbuf1;
                if (data->ioc_inllen2) {
                        lens[REQ_REC_OFF + 1] = data->ioc_inllen2;
                        bufs[REQ_REC_OFF + 1] = data->ioc_inlbuf2;
                } else {
                        lens[REQ_REC_OFF + 1] = 0;
                        bufs[REQ_REC_OFF + 1] = NULL;
                }

                req = ptlrpc_prep_req(sbi2mdc(sbi)->cl_import,
                                      LUSTRE_LOG_VERSION, LLOG_CATINFO, 3, lens,
                                      bufs);
                if (!req)
                        GOTO(out_catinfo, rc = -ENOMEM);

                size[REPLY_REC_OFF] = data->ioc_plen1;
                ptlrpc_req_set_repsize(req, 2, size);

                rc = ptlrpc_queue_wait(req);
                if (!rc) {
                        str = lustre_msg_string(req->rq_repmsg, REPLY_REC_OFF,
                                                data->ioc_plen1);
                        rc = copy_to_user(data->ioc_pbuf1, str, data->ioc_plen1);
                }
                ptlrpc_req_finished(req);
        out_catinfo:
                obd_ioctl_freedata(buf, len);
                RETURN(rc);
        }
        case OBD_IOC_QUOTACHECK: {
                struct obd_quotactl *oqctl;
                int rc, error = 0;

                if (!capable(CAP_SYS_ADMIN))
                        RETURN(-EPERM);

                OBD_ALLOC_PTR(oqctl);
                if (!oqctl)
                        RETURN(-ENOMEM);
                oqctl->qc_type = arg;
                rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
                if (rc < 0) {
                        CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
                        error = rc;
                }

                rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
                if (rc < 0)
                        CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);

                OBD_FREE_PTR(oqctl);
                return error ?: rc;
        }
        case OBD_IOC_POLL_QUOTACHECK: {
                struct if_quotacheck *check;
                int rc;

                if (!capable(CAP_SYS_ADMIN))
                        RETURN(-EPERM);

                OBD_ALLOC_PTR(check);
                if (!check)
                        RETURN(-ENOMEM);

                rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
                                   NULL);
                if (rc) {
                        CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
                        if (copy_to_user((void *)arg, check, sizeof(*check)))
                                rc = -EFAULT;
                        GOTO(out_poll, rc);
                }

                rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
                                   NULL);
                if (rc) {
                        CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
                        if (copy_to_user((void *)arg, check, sizeof(*check)))
                                rc = -EFAULT;
                        GOTO(out_poll, rc);
                }
        out_poll:
                OBD_FREE_PTR(check);
                RETURN(rc);
        }
#ifdef HAVE_QUOTA_SUPPORT
        case OBD_IOC_QUOTACTL: {
                struct if_quotactl *qctl;
                struct obd_quotactl *oqctl;

                int cmd, type, id, rc = 0;

                OBD_ALLOC_PTR(qctl);
                if (!qctl)
                        RETURN(-ENOMEM);

                OBD_ALLOC_PTR(oqctl);
                if (!oqctl) {
                        OBD_FREE_PTR(qctl);
                        RETURN(-ENOMEM);
                }
                if (copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
                        GOTO(out_quotactl, rc = -EFAULT);

                cmd = qctl->qc_cmd;
                type = qctl->qc_type;
                id = qctl->qc_id;
                switch (cmd) {
                case Q_QUOTAON:
                case Q_QUOTAOFF:
                case Q_SETQUOTA:
                case Q_SETINFO:
                        if (!capable(CAP_SYS_ADMIN))
                                GOTO(out_quotactl, rc = -EPERM);
                        break;
                case Q_GETQUOTA:
                        if (((type == USRQUOTA && current->euid != id) ||
                             (type == GRPQUOTA && !in_egroup_p(id))) &&
                            !capable(CAP_SYS_ADMIN))
                                GOTO(out_quotactl, rc = -EPERM);

                        /* XXX: dqb_valid is borrowed as a flag to mark that
                         *      only mds quota is wanted */
                        if (qctl->qc_dqblk.dqb_valid)
                                qctl->obd_uuid = sbi->ll_md_exp->exp_obd->
                                                        u.cli.cl_target_uuid;
                        break;
                case Q_GETINFO:
                        break;
                default:
                        CERROR("unsupported quotactl op: %#x\n", cmd);
                        GOTO(out_quotactl, rc = -ENOTTY);
                }

                QCTL_COPY(oqctl, qctl);

                if (qctl->obd_uuid.uuid[0]) {
                        struct obd_device *obd;
                        struct obd_uuid *uuid = &qctl->obd_uuid;

                        obd = class_find_client_notype(uuid,
                                         &sbi->ll_dt_exp->exp_obd->obd_uuid);
                        if (!obd)
                                GOTO(out_quotactl, rc = -ENOENT);

                        if (cmd == Q_GETINFO)
                                oqctl->qc_cmd = Q_GETOINFO;
                        else if (cmd == Q_GETQUOTA)
                                oqctl->qc_cmd = Q_GETOQUOTA;
                        else
                                GOTO(out_quotactl, rc = -EINVAL);

                        if (sbi->ll_md_exp->exp_obd == obd) {
                                rc = obd_quotactl(sbi->ll_md_exp, oqctl);
                        } else {
                                int i;
                                struct obd_export *exp;
                                struct lov_obd *lov = &sbi->ll_dt_exp->
                                                            exp_obd->u.lov;

                                for (i = 0; i < lov->desc.ld_tgt_count; i++) {
                                        if (!lov->lov_tgts[i] ||
                                            !lov->lov_tgts[i]->ltd_active)
                                                continue;
                                        exp = lov->lov_tgts[i]->ltd_exp;
                                        if (exp->exp_obd == obd) {
                                                rc = obd_quotactl(exp, oqctl);
                                                break;
                                        }
                                }
                        }

                        oqctl->qc_cmd = cmd;
                        QCTL_COPY(qctl, oqctl);

                        if (copy_to_user((void *)arg, qctl, sizeof(*qctl)))
                                rc = -EFAULT;

                        GOTO(out_quotactl, rc);
                }

                rc = obd_quotactl(sbi->ll_md_exp, oqctl);
                if (rc && rc != -EBUSY && cmd == Q_QUOTAON) {
                        oqctl->qc_cmd = Q_QUOTAOFF;
                        obd_quotactl(sbi->ll_md_exp, oqctl);
                }

                QCTL_COPY(qctl, oqctl);

                if (copy_to_user((void *)arg, qctl, sizeof(*qctl)))
                        rc = -EFAULT;
        out_quotactl:
                OBD_FREE_PTR(qctl);
                OBD_FREE_PTR(oqctl);
                RETURN(rc);
        }
#endif /* HAVE_QUOTA_SUPPORT */
        case OBD_IOC_GETNAME: {
                struct obd_device *obd = class_exp2obd(sbi->ll_dt_exp);
                if (!obd)
                        RETURN(-EFAULT);
                if (copy_to_user((void *)arg, obd->obd_name,
                                strlen(obd->obd_name) + 1))
                        RETURN(-EFAULT);
                RETURN(0);
        }
        case LL_IOC_FLUSHCTX:
                RETURN(ll_flush_ctx(inode));
        case LL_IOC_GETFACL: {
                struct rmtacl_ioctl_data ioc;

                if (copy_from_user(&ioc, (void *)arg, sizeof(ioc)))
                        RETURN(-EFAULT);

                RETURN(ll_ioctl_getfacl(inode, &ioc));
        }
        case LL_IOC_SETFACL: {
                struct rmtacl_ioctl_data ioc;

                if (copy_from_user(&ioc, (void *)arg, sizeof(ioc)))
                        RETURN(-EFAULT);

                RETURN(ll_ioctl_setfacl(inode, &ioc));
        }
        default:
                RETURN(obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg));
        }
}

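/* Directory open and release reuse the regular file open/release paths. */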
int ll_dir_open(struct inode *inode, struct file *file)
{
        ENTRY;
        RETURN(ll_file_open(inode, file));
}

int ll_dir_release(struct inode *inode, struct file *file)
{
        ENTRY;
        RETURN(ll_file_release(inode, file));
}

struct file_operations ll_dir_operations = {
        .open     = ll_dir_open,
        .release  = ll_dir_release,
        .read     = generic_read_dir,
        .readdir  = ll_readdir,
        .ioctl    = ll_dir_ioctl
};