1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/llite/dir.c
37  *
38  * Directory code for lustre client.
39  */
40
41 #include <linux/fs.h>
42 #include <linux/pagemap.h>
43 #include <linux/mm.h>
44 #include <linux/version.h>
45 #include <asm/uaccess.h>
46 #include <linux/buffer_head.h>   // for wait_on_buffer
47 #include <linux/pagevec.h>
48
49 #define DEBUG_SUBSYSTEM S_LLITE
50
51 #include <lustre/lustre_idl.h>
52 #include <obd_support.h>
53 #include <obd_class.h>
54 #include <lustre_lib.h>
56 #include <lustre_lite.h>
57 #include <lustre_dlm.h>
58 #include <lustre_fid.h>
59 #include "llite_internal.h"
60
61 /*
62  * (new) readdir implementation overview.
63  *
64  * The original Lustre readdir implementation cached an exact copy of raw directory
65  * pages on the client. These pages were indexed in the client page cache by
66  * logical offset in the directory file. This design, while very simple and
67  * intuitive, had some inherent problems:
68  *
69  *     . it implies that byte offset to the directory entry serves as a
70  *     telldir(3)/seekdir(3) cookie, but that offset is not stable: in
71  *     ext3/htree directory entries may move due to splits, and more
72  *     importantly,
73  *
74  *     . it is incompatible with the design of split directories for cmd3,
75  *     that assumes that names are distributed across nodes based on their
76  *     hash, and so readdir should be done in hash order.
77  *
78  * The new readdir implementation does readdir in hash order, and uses the hash of
79  * a file name as a telldir/seekdir cookie. This leads to a number of complications:
80  *
81  *     . hash is not unique, so it cannot be used to index cached directory
82  *     pages on the client (note that it requires a whole pageful of hash
83  *     collided entries to cause two pages to have identical hashes);
84  *
85  *     . hash is not unique, so it cannot, strictly speaking, be used as an
86  *     entry cookie. ext3/htree has the same problem, and the Lustre
87  *     implementation mimics its solution: seekdir(hash) positions the directory
88  *     at the first entry with the given hash.
89  *
90  * Client side.
91  *
92  * 0. caching
93  *
94  * The client caches directory pages using the hash of the first entry as an index.
95  * As noted above, the hash is not unique, so this solution doesn't work as is:
96  * special processing is needed for "page hash chains" (i.e., sequences of
97  * pages filled with entries all having the same hash value).
98  *
99  * First, such chains have to be detected. To this end, the server returns to the
100  * client the hash of the first entry on the page following the one returned. When
101  * the client detects that this hash is the same as the hash of the first entry on
102  * the returned page, a page hash collision has to be handled. Pages in the
103  * hash chain, except the first one, are termed "overflow pages".
104  *
105  * The solution to the index uniqueness problem is to not cache overflow
106  * pages. Instead, when page hash collision is detected, all overflow pages
107  * from emerging chain are immediately requested from the server and placed in
108  * a special data structure (struct ll_dir_chain). This data structure is used
109  * by ll_readdir() to process entries from overflow pages. When readdir
110  * invocation finishes, overflow pages are discarded. If the page hash collision
111  * chain wasn't completely processed, the next call to readdir will again detect
112  * page hash collision, again read overflow pages in, process next portion of
113  * entries and again discard the pages. This is not as wasteful as it looks,
114  * because, given reasonable hash, page hash collisions are extremely rare.
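 *
 * For example, suppose enough entries hash to the same value H to fill more
 * than one page, so that the server returns a sequence of pages like
 *
 *     [P0: H..H] [P1: H..H] [P2: H..H2]
 *
 * (an illustrative layout, not a real trace). P0 is cached normally, indexed
 * by H, while P1, and any further page whose first entry also hashes to H,
 * is an "overflow page" and lives only in struct ll_dir_chain for the
 * duration of a single readdir() invocation.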
115  *
116  * 1. directory positioning
117  *
118  * When seekdir(hash) is called, original
119  *
127  * Server.
128  *
129  * identification of and access to overflow pages
130  *
131  * page format
132  *
133  * Pages in the MDS_READPAGE RPC are packed in LU_PAGE_SIZE units, and each page
134  * contains an lu_dirpage header which describes the start/end hash and whether
135  * the page is empty (contains no dir entry) or its hash collides with the next
136  * page. After the client receives the reply, several such pages are integrated
137  * into one dir page of CFS_PAGE_SIZE (if CFS_PAGE_SIZE is greater than
138  * LU_PAGE_SIZE), and the lu_dirpage for this integrated page is adjusted.
139  *
140  */
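
/*
 * A rough sketch of the client-side flow described above, expressed with the
 * functions defined in this file (the names are real; the control flow below
 * is simplified and omits locking, error handling and the 32-bit API cases):
 *
 *    ll_readdir(filp, cookie, filldir)
 *      ll_dir_read(inode, &pos, cookie, filldir)
 *        ll_get_dir_page(dir, hash, chain)       // take PR lock on UPDATE bit
 *          ll_dir_page_locate(dir, &hash, ...)   // page cache lookup at
 *                                                // hash_x_index(hash)
 *          read_cache_page(mapping, ...,
 *                          ll_dir_filler, &hash) // cache miss: MDS_READPAGE RPC
 *        filldir() for each lu_dirent on the returned page
 */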
141
142 /* returns the page unlocked, but with a reference */
143 static int ll_dir_filler(void *_hash, struct page *page0)
144 {
145         struct inode *inode = page0->mapping->host;
146         int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
147         struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
148         struct ptlrpc_request *request;
149         struct mdt_body *body;
150         struct md_op_data *op_data;
151         __u64 hash = *((__u64 *)_hash);
152         struct page **page_pool;
153         struct page *page;
154 #ifndef HAVE_ADD_TO_PAGE_CACHE_LRU
155         struct pagevec lru_pvec;
156 #endif
157         struct lu_dirpage *dp;
158         int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> CFS_PAGE_SHIFT;
159         int nrdpgs = 0; /* number of pages actually read */
160         int npages;
161         int i;
162         int rc;
163         ENTRY;
164
165         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash "LPU64"\n",
166                inode->i_ino, inode->i_generation, inode, hash);
167
168         LASSERT(max_pages > 0 && max_pages <= PTLRPC_MAX_BRW_PAGES);
169
170         OBD_ALLOC(page_pool, sizeof(page) * max_pages);
171         if (page_pool != NULL) {
172                 page_pool[0] = page0;
173         } else {
174                 page_pool = &page0;
175                 max_pages = 1;
176         }
177         for (npages = 1; npages < max_pages; npages++) {
178                 page = page_cache_alloc_cold(inode->i_mapping);
179                 if (!page)
180                         break;
181                 page_pool[npages] = page;
182         }
183
184         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
185                                      LUSTRE_OPC_ANY, NULL);
186         op_data->op_npages = npages;
187         op_data->op_offset = hash;
188         rc = md_readpage(exp, op_data, page_pool, &request);
189         ll_finish_md_op_data(op_data);
190         if (rc == 0) {
191                 body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
192                 /* Checked by mdc_readpage() */
193                 LASSERT(body != NULL);
194
195                 if (body->valid & OBD_MD_FLSIZE)
196                         cl_isize_write(inode, body->size);
197
198                 nrdpgs = (request->rq_bulk->bd_nob_transferred+CFS_PAGE_SIZE-1)
199                          >> CFS_PAGE_SHIFT;
200                 SetPageUptodate(page0);
201         }
202         unlock_page(page0);
203         ptlrpc_req_finished(request);
204
205         CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);
206
207         ll_pagevec_init(&lru_pvec, 0);
208         for (i = 1; i < npages; i++) {
209                 unsigned long offset;
210                 int ret;
211
212                 page = page_pool[i];
213
214                 if (rc < 0 || i >= nrdpgs) {
215                         page_cache_release(page);
216                         continue;
217                 }
218
219                 SetPageUptodate(page);
220
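                /* Each extra page read ahead is inserted into the page cache
                 * indexed by the hash of its first entry, taken from the
                 * lu_dirpage header filled in by the server. */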
221                 dp = cfs_kmap(page);
222                 hash = le64_to_cpu(dp->ldp_hash_start);
223                 cfs_kunmap(page);
224
225                 offset = hash_x_index(hash, hash64);
226
227                 prefetchw(&page->flags);
228                 ret = ll_add_to_page_cache_lru(page, inode->i_mapping, offset,
229                                                GFP_KERNEL);
230                 if (ret == 0) {
231                         unlock_page(page);
232                         if (ll_pagevec_add(&lru_pvec, page) == 0)
233                                 ll_pagevec_lru_add_file(&lru_pvec);
234                 } else {
235                         CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
236                                " %d\n", offset, ret);
237                 }
238                 page_cache_release(page);
239         }
240         ll_pagevec_lru_add_file(&lru_pvec);
241
242         if (page_pool != &page0)
243                 OBD_FREE(page_pool, sizeof(struct page *) * max_pages);
244         EXIT;
245         return rc;
246 }
247
248 static void ll_check_page(struct inode *dir, struct page *page)
249 {
250         /* XXX: check page format later */
251         SetPageChecked(page);
252 }
253
254 void ll_release_page(struct page *page, int remove)
255 {
256         kunmap(page);
257         if (remove) {
258                 lock_page(page);
259                 if (likely(page->mapping != NULL))
260                         truncate_complete_page(page->mapping, page);
261                 unlock_page(page);
262         }
263         page_cache_release(page);
264 }
265
266 /*
267  * Find, kmap, and return the page that contains the given hash.
268  */
269 static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
270                                        __u64 *start, __u64 *end)
271 {
272         int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
273         struct address_space *mapping = dir->i_mapping;
274         /*
275          * Complement of hash is used as an index so that
276          * radix_tree_gang_lookup() can be used to find a page with starting
277          * hash _smaller_ than the one we are looking for.
278          */
279         unsigned long offset = hash_x_index(*hash, hash64);
280         struct page *page;
281         int found;
282
283         TREE_READ_LOCK_IRQ(mapping);
284         found = radix_tree_gang_lookup(&mapping->page_tree,
285                                        (void **)&page, offset, 1);
286         if (found > 0) {
287                 struct lu_dirpage *dp;
288
289                 page_cache_get(page);
290                 TREE_READ_UNLOCK_IRQ(mapping);
291                 /*
292                  * In contrast to find_lock_page() we are sure that directory
293                  * page cannot be truncated (while DLM lock is held) and,
294                  * hence, can avoid restart.
295                  *
296                  * In fact, page cannot be locked here at all, because
297                  * ll_dir_filler() does synchronous io.
298                  */
299                 wait_on_page(page);
300                 if (PageUptodate(page)) {
301                         dp = cfs_kmap(page);
302                         if (BITS_PER_LONG == 32 && hash64) {
303                                 *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
304                                 *end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
305                                 *hash  = *hash >> 32;
306                         } else {
307                                 *start = le64_to_cpu(dp->ldp_hash_start);
308                                 *end   = le64_to_cpu(dp->ldp_hash_end);
309                         }
310                         LASSERTF(*start <= *hash, "start = "LPX64",end = "
311                                  LPX64",hash = "LPX64"\n", *start, *end, *hash);
312                         CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash "LPU64"\n",
313                                offset, *start, *end, *hash);
314                         if (*hash > *end) {
315                                 ll_release_page(page, 0);
316                                 page = NULL;
317                         } else if (*end != *start && *hash == *end) {
318                                 /*
319                                  * upon hash collision, remove this page,
320                                  * otherwise put page reference, and
321                                  * ll_get_dir_page() will issue RPC to fetch
322                                  * the page we want.
323                                  */
324                                 ll_release_page(page,
325                                     le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
326                                 page = NULL;
327                         }
328                 } else {
329                         page_cache_release(page);
330                         page = ERR_PTR(-EIO);
331                 }
332
333         } else {
334                 TREE_READ_UNLOCK_IRQ(mapping);
335                 page = NULL;
336         }
337         return page;
338 }
339
340 struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
341                              struct ll_dir_chain *chain)
342 {
343         ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
344         struct address_space *mapping = dir->i_mapping;
345         struct lustre_handle lockh;
346         struct lu_dirpage *dp;
347         struct page *page;
348         ldlm_mode_t mode;
349         int rc;
350         __u64 start = 0;
351         __u64 end = 0;
352         __u64 lhash = hash;
353         struct ll_inode_info *lli = ll_i2info(dir);
354         int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
355
356         mode = LCK_PR;
357         rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
358                            ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
359         if (!rc) {
360                 struct ldlm_enqueue_info einfo = { LDLM_IBITS, mode,
361                        ll_md_blocking_ast, ldlm_completion_ast,
362                        NULL, NULL, dir };
363                 struct lookup_intent it = { .it_op = IT_READDIR };
364                 struct ptlrpc_request *request;
365                 struct md_op_data *op_data;
366
367                 op_data = ll_prep_md_op_data(NULL, dir, NULL, NULL, 0, 0,
368                                              LUSTRE_OPC_ANY, NULL);
369                 if (IS_ERR(op_data))
370                         return (void *)op_data;
371
372                 rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
373                                 op_data, &lockh, NULL, 0, NULL, 0);
374
375                 ll_finish_md_op_data(op_data);
376
377                 request = (struct ptlrpc_request *)it.d.lustre.it_data;
378                 if (request)
379                         ptlrpc_req_finished(request);
380                 if (rc < 0) {
381                         CERROR("lock enqueue: "DFID" at "LPU64": rc %d\n",
382                                PFID(ll_inode2fid(dir)), hash, rc);
383                         return ERR_PTR(rc);
384                 }
385         } else {
386                 /* for cross-ref object, l_ast_data of the lock may not be set,
387                  * we reset it here */
388                 md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
389                                  dir, NULL);
390         }
391         ldlm_lock_dump_handle(D_OTHER, &lockh);
392
393         mutex_lock(&lli->lli_readdir_mutex);
394         page = ll_dir_page_locate(dir, &lhash, &start, &end);
395         if (IS_ERR(page)) {
396                 CERROR("dir page locate: "DFID" at "LPU64": rc %ld\n",
397                        PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
398                 GOTO(out_unlock, page);
399         } else if (page != NULL) {
400                 /*
401                  * XXX nikita: not entirely correct handling of a corner case:
402                  * suppose hash chain of entries with hash value HASH crosses
403                  * border between pages P0 and P1. First both P0 and P1 are
404                  * cached, seekdir() is called for some entry from the P0 part
405                  * of the chain. Later P0 goes out of cache. telldir(HASH)
406                  * happens and finds P1, as it starts with matching hash
407                  * value. Remaining entries from P0 part of the chain are
408                  * skipped. (Is that really a bug?)
409                  *
410                  * Possible solutions: 0. don't cache P1 in such a case, handle
411                  * it as an "overflow" page. 1. invalidate all pages at
412                  * once. 2. use HASH|1 as an index for P1.
413                  */
414                 GOTO(hash_collision, page);
415         }
416
417         page = read_cache_page(mapping, hash_x_index(hash, hash64),
418                                ll_dir_filler, &lhash);
419         if (IS_ERR(page)) {
420                 CERROR("read cache page: "DFID" at "LPU64": rc %ld\n",
421                        PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
422                 GOTO(out_unlock, page);
423         }
424
425         wait_on_page(page);
426         (void)kmap(page);
427         if (!PageUptodate(page)) {
428                 CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
429                        PFID(ll_inode2fid(dir)), hash, -5);
430                 goto fail;
431         }
432         if (!PageChecked(page))
433                 ll_check_page(dir, page);
434         if (PageError(page)) {
435                 CERROR("page error: "DFID" at "LPU64": rc %d\n",
436                        PFID(ll_inode2fid(dir)), hash, -5);
437                 goto fail;
438         }
439 hash_collision:
440         dp = page_address(page);
441         if (BITS_PER_LONG == 32 && hash64) {
442                 start = le64_to_cpu(dp->ldp_hash_start) >> 32;
443                 end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
444                 lhash = hash >> 32;
445         } else {
446                 start = le64_to_cpu(dp->ldp_hash_start);
447                 end   = le64_to_cpu(dp->ldp_hash_end);
448                 lhash = hash;
449         }
450         if (end == start) {
451                 LASSERT(start == lhash);
452                 CWARN("Page-wide hash collision: "LPU64"\n", end);
453                 if (BITS_PER_LONG == 32 && hash64)
454                         CWARN("Real page-wide hash collision at ["LPU64" "LPU64
455                               "] with hash "LPU64"\n",
456                               le64_to_cpu(dp->ldp_hash_start),
457                               le64_to_cpu(dp->ldp_hash_end), hash);
458                 /*
459                  * Fetch whole overflow chain...
460                  *
461                  * XXX not yet.
462                  */
463                 goto fail;
464         }
465 out_unlock:
466         mutex_unlock(&lli->lli_readdir_mutex);
467         ldlm_lock_decref(&lockh, mode);
468         return page;
469
470 fail:
471         ll_release_page(page, 1);
472         page = ERR_PTR(-EIO);
473         goto out_unlock;
474 }
475
476 int ll_dir_read(struct inode *inode, __u64 *_pos, void *cookie,
477                 filldir_t filldir)
478 {
479         struct ll_inode_info *info       = ll_i2info(inode);
480         struct ll_sb_info    *sbi        = ll_i2sbi(inode);
481         __u64                 pos        = *_pos;
482         int                   api32      = ll_need_32bit_api(sbi);
483         int                   hash64     = sbi->ll_flags & LL_SBI_64BIT_HASH;
484         struct page          *page;
485         struct ll_dir_chain   chain;
486         int                   done = 0;
487         int                   rc = 0;
488         ENTRY;
489
490         ll_dir_chain_init(&chain);
491
492         page = ll_get_dir_page(inode, pos, &chain);
493
494         while (rc == 0 && !done) {
495                 struct lu_dirpage *dp;
496                 struct lu_dirent  *ent;
497
498                 if (!IS_ERR(page)) {
499                         /*
500                          * If page is empty (end of directory is reached),
501                          * use this value.
502                          */
503                         __u64 hash = MDS_DIR_END_OFF;
504                         __u64 next;
505
506                         dp = page_address(page);
507                         for (ent = lu_dirent_start(dp); ent != NULL && !done;
508                              ent = lu_dirent_next(ent)) {
509                                 __u16          type;
510                                 int            namelen;
511                                 struct lu_fid  fid;
512                                 __u64          lhash;
513                                 __u64          ino;
514
515                                 /*
516                                  * XXX: implement correct swabbing here.
517                                  */
518
519                                 hash = le64_to_cpu(ent->lde_hash);
520                                 if (hash < pos)
521                                         /*
522                                          * Skip until we find target hash
523                                          * value.
524                                          */
525                                         continue;
526
527                                 namelen = le16_to_cpu(ent->lde_namelen);
528                                 if (namelen == 0)
529                                         /*
530                                          * Skip dummy record.
531                                          */
532                                         continue;
533
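                                /* With the 32-bit userspace API on top of
                                 * 64-bit directory hashes, only the top 32
                                 * bits of the hash fit into the cookie passed
                                 * to filldir(), hence the shift below. */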
534                                 if (api32 && hash64)
535                                         lhash = hash >> 32;
536                                 else
537                                         lhash = hash;
538                                 fid_le_to_cpu(&fid, &ent->lde_fid);
539                                 ino = cl_fid_build_ino(&fid, api32);
540                                 type = ll_dirent_type_get(ent);
541                                 /* For 'll_nfs_get_name_filldir()', it will try
542                                  * to access the 'ent' through its 'lde_name',
543                                  * so the parameter 'name' for 'filldir()' must
544                                  * be part of the 'ent'. */
545                                 done = filldir(cookie, ent->lde_name, namelen,
546                                                lhash, ino, type);
547                         }
548                         next = le64_to_cpu(dp->ldp_hash_end);
549                         if (!done) {
550                                 pos = next;
551                                 if (pos == MDS_DIR_END_OFF) {
552                                         /*
553                                          * End of directory reached.
554                                          */
555                                         done = 1;
556                                         ll_release_page(page, 0);
557                                 } else if (1 /* chain is exhausted*/) {
558                                         /*
559                                          * Normal case: continue to the next
560                                          * page.
561                                          */
562                                         ll_release_page(page,
563                                             le32_to_cpu(dp->ldp_flags) &
564                                                         LDF_COLLIDE);
565                                         next = pos;
566                                         page = ll_get_dir_page(inode, pos,
567                                                                &chain);
568                                 } else {
569                                         /*
570                                          * go into overflow page.
571                                          */
572                                         LASSERT(le32_to_cpu(dp->ldp_flags) &
573                                                 LDF_COLLIDE);
574                                         ll_release_page(page, 1);
575                                 }
576                         } else {
577                                 pos = hash;
578                                 ll_release_page(page, 0);
579                         }
580                 } else {
581                         rc = PTR_ERR(page);
582                         CERROR("error reading dir "DFID" at %lu: rc %d\n",
583                                PFID(&info->lli_fid), (unsigned long)pos, rc);
584                 }
585         }
586
587         *_pos = pos;
588         ll_dir_chain_fini(&chain);
589         RETURN(rc);
590 }
591
592 static int ll_readdir(struct file *filp, void *cookie, filldir_t filldir)
593 {
594         struct inode            *inode  = filp->f_dentry->d_inode;
595         struct ll_file_data     *lfd    = LUSTRE_FPRIVATE(filp);
596         struct ll_sb_info       *sbi    = ll_i2sbi(inode);
597         __u64                   pos     = lfd->lfd_pos;
598         int                     hash64  = sbi->ll_flags & LL_SBI_64BIT_HASH;
599         int                     api32   = ll_need_32bit_api(sbi);
600         int                     rc;
601 #ifdef HAVE_TOUCH_ATIME_1ARG
602         struct path             path;
603 #endif
604         ENTRY;
605
606         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu "
607                " 32bit_api %d\n", inode->i_ino, inode->i_generation,
608                inode, (unsigned long)pos, i_size_read(inode), api32);
609
610         if (pos == MDS_DIR_END_OFF)
611                 /*
612                  * end-of-file.
613                  */
614                 GOTO(out, rc = 0);
615
616         rc = ll_dir_read(inode, &pos, cookie, filldir);
617         lfd->lfd_pos = pos;
618         if (pos == MDS_DIR_END_OFF) {
619                 if (api32)
620                         filp->f_pos = LL_DIR_END_OFF_32BIT;
621                 else
622                         filp->f_pos = LL_DIR_END_OFF;
623         } else {
624                 if (api32 && hash64)
625                         filp->f_pos = pos >> 32;
626                 else
627                         filp->f_pos = pos;
628         }
629         filp->f_version = inode->i_version;
630 #ifdef HAVE_TOUCH_ATIME_1ARG
631         path.mnt = filp->f_vfsmnt;
632         path.dentry = filp->f_dentry;
633         touch_atime(&path);
634 #else
635         touch_atime(filp->f_vfsmnt, filp->f_dentry);
636 #endif
637
638 out:
639         if (!rc)
640                 ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
641
642         RETURN(rc);
643 }
644
645 int ll_send_mgc_param(struct obd_export *mgc, char *string)
646 {
647         struct mgs_send_param *msp;
648         int rc = 0;
649
650         OBD_ALLOC_PTR(msp);
651         if (!msp)
652                 return -ENOMEM;
653
654         strncpy(msp->mgs_param, string, MGS_PARAM_MAXLEN);
655         rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
656                                 sizeof(struct mgs_send_param), msp, NULL);
657         if (rc)
658                 CERROR("Failed to set parameter: %d\n", rc);
659         OBD_FREE_PTR(msp);
660
661         return rc;
662 }
663
664 int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
665                      int set_default)
666 {
667         struct ll_sb_info *sbi = ll_i2sbi(inode);
668         struct md_op_data *op_data;
669         struct ptlrpc_request *req = NULL;
670         int rc = 0;
671         struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
672         struct obd_device *mgc = lsi->lsi_mgc;
673         int lum_size;
674         ENTRY;
675
676         if (lump != NULL) {
677                 /*
678                  * This is coming from userspace, so should be in
679                  * local endian.  But the MDS would like it in little
680                  * endian, so we swab it before we send it.
681                  */
682                 switch (lump->lmm_magic) {
683                 case LOV_USER_MAGIC_V1: {
684                         if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
685                                 lustre_swab_lov_user_md_v1(lump);
686                         lum_size = sizeof(struct lov_user_md_v1);
687                         break;
688                         }
689                 case LOV_USER_MAGIC_V3: {
690                         if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
691                                 lustre_swab_lov_user_md_v3(
692                                         (struct lov_user_md_v3 *)lump);
693                         lum_size = sizeof(struct lov_user_md_v3);
694                         break;
695                         }
696                 default: {
697                         CDEBUG(D_IOCTL, "bad userland LOV MAGIC:"
698                                         " %#08x != %#08x nor %#08x\n",
699                                         lump->lmm_magic, LOV_USER_MAGIC_V1,
700                                         LOV_USER_MAGIC_V3);
701                         RETURN(-EINVAL);
702                         }
703                }
704         } else {
705                 lum_size = sizeof(struct lov_user_md_v1);
706         }
707
708         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
709                                      LUSTRE_OPC_ANY, NULL);
710         if (IS_ERR(op_data))
711                 RETURN(PTR_ERR(op_data));
712
713         /* swabbing is done in lov_setstripe() on server side */
714         rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
715                         NULL, 0, &req, NULL);
716         ll_finish_md_op_data(op_data);
717         ptlrpc_req_finished(req);
718         if (rc) {
719                 if (rc != -EPERM && rc != -EACCES)
720                         CERROR("mdc_setattr fails: rc = %d\n", rc);
721         }
722
723         /* In the following we use the fact that LOV_USER_MAGIC_V1 and
724          * LOV_USER_MAGIC_V3 have the same initial fields, so we do not
725          * need to make the distinction between the two versions. */
726         if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
727                 char *param = NULL;
728                 char *buf;
729
730                 OBD_ALLOC(param, MGS_PARAM_MAXLEN);
731                 if (param == NULL)
732                         GOTO(end, rc = -ENOMEM);
733
734                 buf = param;
735                 /* Get fsname and assume devname to be -MDT0000. */
736                 ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
737                 strcat(buf, "-MDT0000.lov");
738                 buf += strlen(buf);
739
740                 /* Set root stripesize */
741                 sprintf(buf, ".stripesize=%u",
742                         lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
743                 rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
744                 if (rc)
745                         GOTO(end, rc);
746
747                 /* Set root stripecount */
748                 sprintf(buf, ".stripecount=%hd",
749                         lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
750                 rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
751                 if (rc)
752                         GOTO(end, rc);
753
754                 /* Set root stripeoffset */
755                 sprintf(buf, ".stripeoffset=%hd",
756                         lump ? le16_to_cpu(lump->lmm_stripe_offset) :
757                         (typeof(lump->lmm_stripe_offset))(-1));
758                 rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
759
760 end:
761                 if (param != NULL)
762                         OBD_FREE(param, MGS_PARAM_MAXLEN);
763         }
764         RETURN(rc);
765 }
766
767 int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
768                      int *lmm_size, struct ptlrpc_request **request)
769 {
770         struct ll_sb_info *sbi = ll_i2sbi(inode);
771         struct mdt_body   *body;
772         struct lov_mds_md *lmm = NULL;
773         struct ptlrpc_request *req = NULL;
774         int rc, lmmsize;
775         struct md_op_data *op_data;
776
777         rc = ll_get_max_mdsize(sbi, &lmmsize);
778         if (rc)
779                 RETURN(rc);
780
781         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
782                                      0, lmmsize, LUSTRE_OPC_ANY,
783                                      NULL);
784         if (IS_ERR(op_data))
785                 RETURN(PTR_ERR(op_data));
786
787         op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
788         rc = md_getattr(sbi->ll_md_exp, op_data, &req);
789         ll_finish_md_op_data(op_data);
790         if (rc < 0) {
791                 CDEBUG(D_INFO, "md_getattr failed on inode "
792                        "%lu/%u: rc %d\n", inode->i_ino,
793                        inode->i_generation, rc);
794                 GOTO(out, rc);
795         }
796
797         body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
798         LASSERT(body != NULL);
799
800         lmmsize = body->eadatasize;
801
802         if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
803             lmmsize == 0) {
804                 GOTO(out, rc = -ENODATA);
805         }
806
807         lmm = req_capsule_server_sized_get(&req->rq_pill,
808                                            &RMF_MDT_MD, lmmsize);
809         LASSERT(lmm != NULL);
810
811         /*
812          * This is coming from the MDS, so is probably in
813          * little endian.  We convert it to host endian before
814          * passing it to userspace.
815          */
816         /* We don't swab objects for directories */
817         switch (le32_to_cpu(lmm->lmm_magic)) {
818         case LOV_MAGIC_V1:
819                 if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
820                         lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
821                 break;
822         case LOV_MAGIC_V3:
823                 if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
824                         lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
825                 break;
826         default:
827                 CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
828                 rc = -EPROTO;
829         }
830 out:
831         *lmmp = lmm;
832         *lmm_size = lmmsize;
833         *request = req;
834         return rc;
835 }
836
837 /*
838  *  Get MDT index for the inode.
839  */
840 int ll_get_mdt_idx(struct inode *inode)
841 {
842         struct ll_sb_info *sbi = ll_i2sbi(inode);
843         struct md_op_data *op_data;
844         int rc, mdtidx;
845         ENTRY;
846
847         op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
848                                      0, LUSTRE_OPC_ANY, NULL);
849         if (IS_ERR(op_data))
850                 RETURN(PTR_ERR(op_data));
851
852         op_data->op_flags |= MF_GET_MDT_IDX;
853         rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
854         mdtidx = op_data->op_mds;
855         ll_finish_md_op_data(op_data);
856         if (rc < 0) {
857                 CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
858                 RETURN(rc);
859         }
860         return mdtidx;
861 }
862
863 static int copy_and_ioctl(int cmd, struct obd_export *exp, void *data, int len)
864 {
865         void *ptr;
866         int rc;
867
868         OBD_ALLOC(ptr, len);
869         if (ptr == NULL)
870                 return -ENOMEM;
871         if (cfs_copy_from_user(ptr, data, len)) {
872                 OBD_FREE(ptr, len);
873                 return -EFAULT;
874         }
875         rc = obd_iocontrol(cmd, exp, len, ptr, NULL);
876         OBD_FREE(ptr, len);
877         return rc;
878 }
879
880 static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
881 {
882         int cmd = qctl->qc_cmd;
883         int type = qctl->qc_type;
884         int id = qctl->qc_id;
885         int valid = qctl->qc_valid;
886         int rc = 0;
887         ENTRY;
888
889         switch (cmd) {
890         case LUSTRE_Q_INVALIDATE:
891         case LUSTRE_Q_FINVALIDATE:
892         case Q_QUOTAON:
893         case Q_QUOTAOFF:
894         case Q_SETQUOTA:
895         case Q_SETINFO:
896                 if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
897                     sbi->ll_flags & LL_SBI_RMT_CLIENT)
898                         RETURN(-EPERM);
899                 break;
900         case Q_GETQUOTA:
901                 if (((type == USRQUOTA && cfs_curproc_euid() != id) ||
902                      (type == GRPQUOTA && !in_egroup_p(id))) &&
903                     (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
904                      sbi->ll_flags & LL_SBI_RMT_CLIENT))
905                         RETURN(-EPERM);
906                 break;
907         case Q_GETINFO:
908                 break;
909         default:
910                 CERROR("unsupported quotactl op: %#x\n", cmd);
911                 RETURN(-ENOTTY);
912         }
913
914         if (valid != QC_GENERAL) {
915                 if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
916                         RETURN(-EOPNOTSUPP);
917
918                 if (cmd == Q_GETINFO)
919                         qctl->qc_cmd = Q_GETOINFO;
920                 else if (cmd == Q_GETQUOTA)
921                         qctl->qc_cmd = Q_GETOQUOTA;
922                 else
923                         RETURN(-EINVAL);
924
925                 switch (valid) {
926                 case QC_MDTIDX:
927                         rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
928                                            sizeof(*qctl), qctl, NULL);
929                         break;
930                 case QC_OSTIDX:
931                         rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
932                                            sizeof(*qctl), qctl, NULL);
933                         break;
934                 case QC_UUID:
935                         rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
936                                            sizeof(*qctl), qctl, NULL);
937                         if (rc == -EAGAIN)
938                                 rc = obd_iocontrol(OBD_IOC_QUOTACTL,
939                                                    sbi->ll_dt_exp,
940                                                    sizeof(*qctl), qctl, NULL);
941                         break;
942                 default:
943                         rc = -EINVAL;
944                         break;
945                 }
946
947                 if (rc)
948                         RETURN(rc);
949
950                 qctl->qc_cmd = cmd;
951         } else {
952                 struct obd_quotactl *oqctl;
953
954                 OBD_ALLOC_PTR(oqctl);
955                 if (oqctl == NULL)
956                         RETURN(-ENOMEM);
957
958                 QCTL_COPY(oqctl, qctl);
959                 rc = obd_quotactl(sbi->ll_md_exp, oqctl);
960                 if (rc) {
961                         if (rc != -EALREADY && cmd == Q_QUOTAON) {
962                                 oqctl->qc_cmd = Q_QUOTAOFF;
963                                 obd_quotactl(sbi->ll_md_exp, oqctl);
964                         }
965                         OBD_FREE_PTR(oqctl);
966                         RETURN(rc);
967                 }
968                 /* If QIF_SPACE is not set, client should collect the
969                  * space usage from OSSs by itself */
970                 if (cmd == Q_GETQUOTA &&
971                     !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
972                     !oqctl->qc_dqblk.dqb_curspace) {
973                         struct obd_quotactl *oqctl_tmp;
974
975                         OBD_ALLOC_PTR(oqctl_tmp);
976                         if (oqctl_tmp == NULL)
977                                 GOTO(out, rc = -ENOMEM);
978
979                         oqctl_tmp->qc_cmd = Q_GETOQUOTA;
980                         oqctl_tmp->qc_id = oqctl->qc_id;
981                         oqctl_tmp->qc_type = oqctl->qc_type;
982
983                         /* collect space usage from OSTs */
984                         oqctl_tmp->qc_dqblk.dqb_curspace = 0;
985                         rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
986                         if (!rc || rc == -EREMOTEIO) {
987                                 oqctl->qc_dqblk.dqb_curspace =
988                                         oqctl_tmp->qc_dqblk.dqb_curspace;
989                                 oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
990                         }
991
992                         /* collect space & inode usage from MDTs */
993                         oqctl_tmp->qc_dqblk.dqb_curspace = 0;
994                         oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
995                         rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
996                         if (!rc || rc == -EREMOTEIO) {
997                                 oqctl->qc_dqblk.dqb_curspace +=
998                                         oqctl_tmp->qc_dqblk.dqb_curspace;
999                                 oqctl->qc_dqblk.dqb_curinodes =
1000                                         oqctl_tmp->qc_dqblk.dqb_curinodes;
1001                                 oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
1002                         } else {
1003                                 oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
1004                         }
1005
1006                         OBD_FREE_PTR(oqctl_tmp);
1007                 }
1008 out:
1009                 QCTL_COPY(qctl, oqctl);
1010                 OBD_FREE_PTR(oqctl);
1011         }
1012
1013         RETURN(rc);
1014 }
1015
1016 static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1017 {
1018         struct inode *inode = file->f_dentry->d_inode;
1019         struct ll_sb_info *sbi = ll_i2sbi(inode);
1020         struct obd_ioctl_data *data;
1021         int rc = 0;
1022         ENTRY;
1023
1024         CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
1025                inode->i_ino, inode->i_generation, inode, cmd);
1026
1027         /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
1028         if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
1029                 return -ENOTTY;
1030
1031         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
1032         switch(cmd) {
1033         case FSFILT_IOC_GETFLAGS:
1034         case FSFILT_IOC_SETFLAGS:
1035                 RETURN(ll_iocontrol(inode, file, cmd, arg));
1036         case FSFILT_IOC_GETVERSION_OLD:
1037         case FSFILT_IOC_GETVERSION:
1038                 RETURN(put_user(inode->i_generation, (int *)arg));
1039         /* We need to special case any other ioctls we want to handle,
1040          * to send them to the MDS/OST as appropriate and to properly
1041          * network encode the arg field.
1042         case FSFILT_IOC_SETVERSION_OLD:
1043         case FSFILT_IOC_SETVERSION:
1044         */
1045         case LL_IOC_GET_MDTIDX: {
1046                 int mdtidx;
1047
1048                 mdtidx = ll_get_mdt_idx(inode);
1049                 if (mdtidx < 0)
1050                         RETURN(mdtidx);
1051
1052                 if (put_user((int)mdtidx, (int*)arg))
1053                         RETURN(-EFAULT);
1054
1055                 return 0;
1056         }
1057         case IOC_MDC_LOOKUP: {
1058                 struct ptlrpc_request *request = NULL;
1059                 int namelen, len = 0;
1060                 char *buf = NULL;
1061                 char *filename;
1062                 struct md_op_data *op_data;
1063
1064                 rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
1065                 if (rc)
1066                         RETURN(rc);
1067                 data = (void *)buf;
1068
1069                 filename = data->ioc_inlbuf1;
1070                 namelen = strlen(filename);
1071
1072                 if (namelen < 1) {
1073                         CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
1074                         GOTO(out_free, rc = -EINVAL);
1075                 }
1076
1077                 op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
1078                                              0, LUSTRE_OPC_ANY, NULL);
1079                 if (IS_ERR(op_data))
1080                         GOTO(out_free, rc = PTR_ERR(op_data));
1081
1082                 op_data->op_valid = OBD_MD_FLID;
1083                 rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
1084                 ll_finish_md_op_data(op_data);
1085                 if (rc < 0) {
1086                         CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
1087                         GOTO(out_free, rc);
1088                 }
1089                 ptlrpc_req_finished(request);
1090                 EXIT;
1091 out_free:
1092                 obd_ioctl_freedata(buf, len);
1093                 return rc;
1094         }
1095         case LL_IOC_LOV_SETSTRIPE: {
1096                 struct lov_user_md_v3 lumv3;
1097                 struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
1098                 struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
1099                 struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
1100
1101                 int set_default = 0;
1102
1103                 LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
1104                 LASSERT(sizeof(lumv3.lmm_objects[0]) ==
1105                         sizeof(lumv3p->lmm_objects[0]));
1106                 /* first try with v1 which is smaller than v3 */
1107                 if (cfs_copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
1108                         RETURN(-EFAULT);
1109
1110                 if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
1111                         if (cfs_copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
1112                                 RETURN(-EFAULT);
1113                 }
1114
1115                 if (inode->i_sb->s_root == file->f_dentry)
1116                         set_default = 1;
1117
1118                 /* in v1 and v3 cases lumv1 points to data */
1119                 rc = ll_dir_setstripe(inode, lumv1, set_default);
1120
1121                 RETURN(rc);
1122         }
1123         case LL_IOC_OBD_STATFS:
1124                 RETURN(ll_obd_statfs(inode, (void *)arg));
1125         case LL_IOC_LOV_GETSTRIPE:
1126         case LL_IOC_MDC_GETINFO:
1127         case IOC_MDC_GETFILEINFO:
1128         case IOC_MDC_GETFILESTRIPE: {
1129                 struct ptlrpc_request *request = NULL;
1130                 struct lov_user_md *lump;
1131                 struct lov_mds_md *lmm = NULL;
1132                 struct mdt_body *body;
1133                 char *filename = NULL;
1134                 int lmmsize;
1135
1136                 if (cmd == IOC_MDC_GETFILEINFO ||
1137                     cmd == IOC_MDC_GETFILESTRIPE) {
1138                         filename = getname((const char *)arg);
1139                         if (IS_ERR(filename))
1140                                 RETURN(PTR_ERR(filename));
1141
1142                         rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
1143                                                       &lmmsize, &request);
1144                 } else {
1145                         rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
1146                 }
1147
1148                 if (request) {
1149                         body = req_capsule_server_get(&request->rq_pill,
1150                                                       &RMF_MDT_BODY);
1151                         LASSERT(body != NULL);
1152                 } else {
1153                         GOTO(out_req, rc);
1154                 }
1155
1156                 if (rc < 0) {
1157                         if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
1158                                                cmd == LL_IOC_MDC_GETINFO))
1159                                 GOTO(skip_lmm, rc = 0);
1160                         else
1161                                 GOTO(out_req, rc);
1162                 }
1163
1164                 if (cmd == IOC_MDC_GETFILESTRIPE ||
1165                     cmd == LL_IOC_LOV_GETSTRIPE) {
1166                         lump = (struct lov_user_md *)arg;
1167                 } else {
1168                         struct lov_user_mds_data *lmdp;
1169                         lmdp = (struct lov_user_mds_data *)arg;
1170                         lump = &lmdp->lmd_lmm;
1171                 }
1172                 if (cfs_copy_to_user(lump, lmm, lmmsize)) {
1173                         if (cfs_copy_to_user(lump, lmm, sizeof(*lump)))
1174                                 GOTO(out_req, rc = -EFAULT);
1175                         rc = -EOVERFLOW;
1176                 }
1177         skip_lmm:
1178                 if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
1179                         struct lov_user_mds_data *lmdp;
1180                         lstat_t st = { 0 };
1181
1182                         st.st_dev     = inode->i_sb->s_dev;
1183                         st.st_mode    = body->mode;
1184                         st.st_nlink   = body->nlink;
1185                         st.st_uid     = body->uid;
1186                         st.st_gid     = body->gid;
1187                         st.st_rdev    = body->rdev;
1188                         st.st_size    = body->size;
1189                         st.st_blksize = CFS_PAGE_SIZE;
1190                         st.st_blocks  = body->blocks;
1191                         st.st_atime   = body->atime;
1192                         st.st_mtime   = body->mtime;
1193                         st.st_ctime   = body->ctime;
1194                         st.st_ino     = inode->i_ino;
1195
1196                         lmdp = (struct lov_user_mds_data *)arg;
1197                         if (cfs_copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
1198                                 GOTO(out_req, rc = -EFAULT);
1199                 }
1200
1201                 EXIT;
1202         out_req:
1203                 ptlrpc_req_finished(request);
1204                 if (filename)
1205                         putname(filename);
1206                 return rc;
1207         }
1208         case IOC_LOV_GETINFO: {
1209                 struct lov_user_mds_data *lumd;
1210                 struct lov_stripe_md *lsm;
1211                 struct lov_user_md *lum;
1212                 struct lov_mds_md *lmm;
1213                 int lmmsize;
1214                 lstat_t st;
1215
1216                 lumd = (struct lov_user_mds_data *)arg;
1217                 lum = &lumd->lmd_lmm;
1218
1219                 rc = ll_get_max_mdsize(sbi, &lmmsize);
1220                 if (rc)
1221                         RETURN(rc);
1222
1223                 OBD_ALLOC_LARGE(lmm, lmmsize);
                if (lmm == NULL)
                        RETURN(-ENOMEM);
1224                 if (cfs_copy_from_user(lmm, lum, lmmsize))
1225                         GOTO(free_lmm, rc = -EFAULT);
1226
1227                 switch (lmm->lmm_magic) {
1228                 case LOV_USER_MAGIC_V1:
1229                         if (LOV_USER_MAGIC_V1 == cpu_to_le32(LOV_USER_MAGIC_V1))
1230                                 break;
1231                         /* swab objects first so that stripes num will be sane */
1232                         lustre_swab_lov_user_md_objects(
1233                                 ((struct lov_user_md_v1 *)lmm)->lmm_objects,
1234                                 ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
1235                         lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
1236                         break;
1237                 case LOV_USER_MAGIC_V3:
1238                         if (LOV_USER_MAGIC_V3 == cpu_to_le32(LOV_USER_MAGIC_V3))
1239                                 break;
1240                         /* swab objects first so that stripes num will be sane */
1241                         lustre_swab_lov_user_md_objects(
1242                                 ((struct lov_user_md_v3 *)lmm)->lmm_objects,
1243                                 ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
1244                         lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
1245                         break;
1246                 default:
1247                         GOTO(free_lmm, rc = -EINVAL);
1248                 }
1249
1250                 rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
1251                 if (rc < 0)
1252                         GOTO(free_lmm, rc = -ENOMEM);
1253
1254                 /* Perform glimpse_size operation. */
1255                 memset(&st, 0, sizeof(st));
1256
1257                 rc = ll_glimpse_ioctl(sbi, lsm, &st);
1258                 if (rc)
1259                         GOTO(free_lsm, rc);
1260
1261                 if (cfs_copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
1262                         GOTO(free_lsm, rc = -EFAULT);
1263
1264                 EXIT;
1265         free_lsm:
1266                 obd_free_memmd(sbi->ll_dt_exp, &lsm);
1267         free_lmm:
1268                 OBD_FREE_LARGE(lmm, lmmsize);
1269                 return rc;
1270         }
1271         case OBD_IOC_LLOG_CATINFO: {
1272                 RETURN(-EOPNOTSUPP);
1273         }
1274         case OBD_IOC_QUOTACHECK: {
1275                 struct obd_quotactl *oqctl;
1276                 int error = 0;
1277
1278                 if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
1279                     sbi->ll_flags & LL_SBI_RMT_CLIENT)
1280                         RETURN(-EPERM);
1281
1282                 OBD_ALLOC_PTR(oqctl);
1283                 if (!oqctl)
1284                         RETURN(-ENOMEM);
1285                 oqctl->qc_type = arg;
1286                 rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
1287                 if (rc < 0) {
1288                         CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
1289                         error = rc;
1290                 }
1291
1292                 rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
1293                 if (rc < 0)
1294                         CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
1295
1296                 OBD_FREE_PTR(oqctl);
1297                 return error ?: rc;
1298         }
        case OBD_IOC_POLL_QUOTACHECK: {
                struct if_quotacheck *check;

                if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
                    sbi->ll_flags & LL_SBI_RMT_CLIENT)
                        RETURN(-EPERM);

                OBD_ALLOC_PTR(check);
                if (!check)
                        RETURN(-ENOMEM);

                rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
                                   NULL);
                if (rc) {
                        CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
                        if (cfs_copy_to_user((void *)arg, check,
                                             sizeof(*check)))
                                CDEBUG(D_QUOTA, "cfs_copy_to_user failed\n");
                        GOTO(out_poll, rc);
                }

                rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
                                   NULL);
                if (rc) {
                        CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
                        if (cfs_copy_to_user((void *)arg, check,
                                             sizeof(*check)))
                                CDEBUG(D_QUOTA, "cfs_copy_to_user failed\n");
                        GOTO(out_poll, rc);
                }
        out_poll:
                OBD_FREE_PTR(check);
                RETURN(rc);
        }
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
        case LL_IOC_QUOTACTL_18: {
                /* Copy the old 1.x quota struct into the current struct for
                 * internal use, then copy the result back into the old
                 * format struct, for 1.8 compatibility. */
                struct if_quotactl_18 *qctl_18;
                struct if_quotactl *qctl_20;

                OBD_ALLOC_PTR(qctl_18);
                if (!qctl_18)
                        RETURN(-ENOMEM);

                OBD_ALLOC_PTR(qctl_20);
                if (!qctl_20)
                        GOTO(out_quotactl_18, rc = -ENOMEM);

                if (cfs_copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18)))
                        GOTO(out_quotactl_20, rc = -EFAULT);

                QCTL_COPY(qctl_20, qctl_18);
                qctl_20->qc_idx = 0;

                /* XXX: dqb_valid was borrowed as a flag to mark that
                 *      only MDS quota is wanted */
                if (qctl_18->qc_cmd == Q_GETQUOTA &&
                    qctl_18->qc_dqblk.dqb_valid) {
                        qctl_20->qc_valid = QC_MDTIDX;
                        qctl_20->qc_dqblk.dqb_valid = 0;
                } else if (qctl_18->obd_uuid.uuid[0] != '\0') {
                        qctl_20->qc_valid = QC_UUID;
                        qctl_20->obd_uuid = qctl_18->obd_uuid;
                } else {
                        qctl_20->qc_valid = QC_GENERAL;
                }

                rc = quotactl_ioctl(sbi, qctl_20);

                if (rc == 0) {
                        QCTL_COPY(qctl_18, qctl_20);
                        qctl_18->obd_uuid = qctl_20->obd_uuid;

                        if (cfs_copy_to_user((void *)arg, qctl_18,
                                             sizeof(*qctl_18)))
                                rc = -EFAULT;
                }

        out_quotactl_20:
                OBD_FREE_PTR(qctl_20);
        out_quotactl_18:
                OBD_FREE_PTR(qctl_18);
                RETURN(rc);
        }
#else
#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
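        /* Current quota control interface: copy the if_quotactl request in
         * from user space, hand it to quotactl_ioctl(), and copy the result
         * back out on success. */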
        case LL_IOC_QUOTACTL: {
                struct if_quotactl *qctl;

                OBD_ALLOC_PTR(qctl);
                if (!qctl)
                        RETURN(-ENOMEM);

                if (cfs_copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
                        GOTO(out_quotactl, rc = -EFAULT);

                rc = quotactl_ioctl(sbi, qctl);

                if (rc == 0 &&
                    cfs_copy_to_user((void *)arg, qctl, sizeof(*qctl)))
                        rc = -EFAULT;

        out_quotactl:
                OBD_FREE_PTR(qctl);
                RETURN(rc);
        }
        case OBD_IOC_GETDTNAME:
        case OBD_IOC_GETMDNAME:
                RETURN(ll_get_obd_name(inode, cmd, arg));
        case LL_IOC_FLUSHCTX:
                RETURN(ll_flush_ctx(inode));
#ifdef CONFIG_FS_POSIX_ACL
        case LL_IOC_RMTACL: {
                if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
                    inode == inode->i_sb->s_root->d_inode) {
                        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                        LASSERT(fd != NULL);
                        rc = rct_add(&sbi->ll_rct, cfs_curproc_pid(), arg);
                        if (!rc)
                                fd->fd_flags |= LL_FILE_RMTACL;
                        RETURN(rc);
                } else {
                        RETURN(0);
                }
        }
#endif
        case LL_IOC_GETOBDCOUNT: {
                int count, vallen;
                struct obd_export *exp;

                if (cfs_copy_from_user(&count, (int *)arg, sizeof(int)))
                        RETURN(-EFAULT);

                /* get ost count when count is zero, get mdt count otherwise */
                exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
                vallen = sizeof(count);
                rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
                                  KEY_TGT_COUNT, &vallen, &count, NULL);
                if (rc) {
                        CERROR("get target count failed: %d\n", rc);
                        RETURN(rc);
                }

                if (cfs_copy_to_user((int *)arg, &count, sizeof(int)))
                        RETURN(-EFAULT);

                RETURN(0);
        }
        case LL_IOC_PATH2FID:
                if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
                                     sizeof(struct lu_fid)))
                        RETURN(-EFAULT);
                RETURN(0);
        case LL_IOC_GET_CONNECT_FLAGS: {
                RETURN(obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL,
                                     (void *)arg));
        }
        case OBD_IOC_CHANGELOG_SEND:
        case OBD_IOC_CHANGELOG_CLEAR:
                rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
                                    sizeof(struct ioc_changelog));
                RETURN(rc);
        case OBD_IOC_FID2PATH:
                RETURN(ll_fid2path(inode, (void *)arg));
        case LL_IOC_HSM_CT_START:
                rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
                                    sizeof(struct lustre_kernelcomm));
                RETURN(rc);

        default:
                RETURN(obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
                                     (void *)arg));
        }
}

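/*
 * llseek() for directories.  SEEK_END is expressed relative to the end of
 * the directory hash space: LL_DIR_END_OFF, or LL_DIR_END_OFF_32BIT when the
 * 32-bit API is in use.  If a 32-bit client seeks in a directory served with
 * 64-bit hashes (LL_SBI_64BIT_HASH), the 32-bit offset is shifted back into
 * the high word of fd->lfd_pos.
 */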
static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
{
        struct inode *inode = file->f_mapping->host;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int api32 = ll_need_32bit_api(sbi);
        loff_t ret = -EINVAL;
        ENTRY;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
                case SEEK_SET:
                        break;
                case SEEK_CUR:
                        offset += file->f_pos;
                        break;
                case SEEK_END:
                        if (offset > 0)
                                GOTO(out, ret);
                        if (api32)
                                offset += LL_DIR_END_OFF_32BIT;
                        else
                                offset += LL_DIR_END_OFF;
                        break;
                default:
                        GOTO(out, ret);
        }

        if (offset >= 0 &&
            ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
             (!api32 && offset <= LL_DIR_END_OFF))) {
                if (offset != file->f_pos) {
                        if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
                            (!api32 && offset == LL_DIR_END_OFF))
                                fd->lfd_pos = MDS_DIR_END_OFF;
                        else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
                                fd->lfd_pos = offset << 32;
                        else
                                fd->lfd_pos = offset;
                        file->f_pos = offset;
                        file->f_version = 0;
                }
                ret = offset;
        }
        GOTO(out, ret);

out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}

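/*
 * Directory open and release are delegated to the regular file open/release
 * paths, so the per-open ll_file_data used by ll_dir_seek() above is set up
 * and torn down the same way as for regular files.
 */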
int ll_dir_open(struct inode *inode, struct file *file)
{
        ENTRY;
        RETURN(ll_file_open(inode, file));
}

int ll_dir_release(struct inode *inode, struct file *file)
{
        ENTRY;
        RETURN(ll_file_release(inode, file));
}

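/*
 * Directory file operations.  Seeking goes through the hash-aware
 * ll_dir_seek() above, and generic_read_dir() rejects plain read(2) on a
 * directory with -EISDIR.
 */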
struct file_operations ll_dir_operations = {
        .llseek           = ll_dir_seek,
        .open             = ll_dir_open,
        .release          = ll_dir_release,
        .read             = generic_read_dir,
        .readdir          = ll_readdir,
        .unlocked_ioctl   = ll_dir_ioctl,
        .fsync            = ll_fsync,
};