/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/* Directory code for lustre client. */

#include <linux/pagemap.h>
#include <linux/version.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <linux/buffer_head.h> // for wait_on_buffer
#include <linux/pagevec.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <lustre/lustre_idl.h>
#include <lustre_lite.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include "llite_internal.h"

#ifndef HAVE_PAGE_CHECKED
#ifdef HAVE_PG_FS_MISC
#define PageChecked(page) test_bit(PG_fs_misc, &(page)->flags)
#define SetPageChecked(page) set_bit(PG_fs_misc, &(page)->flags)
#else
#error PageChecked or PageFsMisc not defined in kernel
#endif
#endif
/*
 * (new) readdir implementation overview.
 *
 * The original lustre readdir implementation cached an exact copy of raw
 * directory pages on the client. These pages were indexed in the client page
 * cache by the logical offset in the directory file. This design, while very
 * simple and intuitive, had some inherent problems:
 *
 *     . it implies that the byte offset to a directory entry serves as a
 *       telldir(3)/seekdir(3) cookie, but that offset is not stable: in
 *       ext3/htree directory entries may move due to splits, and more
 *
 *     . it is incompatible with the design of split directories for cmd3,
 *       which assumes that names are distributed across nodes based on their
 *       hash, and so readdir should be done in hash order.
 *
 * The new readdir implementation does readdir in hash order, and uses the
 * hash of a file name as a telldir/seekdir cookie. This leads to a number of
 * complications:
 *
 *     . the hash is not unique, so it cannot be used to index cached
 *       directory pages on the client (note that it requires a whole pageful
 *       of hash-collided entries to cause two pages to have identical
 *       hashes);
 *
 *     . the hash is not unique, so it cannot, strictly speaking, be used as
 *       an entry cookie. ext3/htree has the same problem and the lustre
 *       implementation mimics its solution: seekdir(hash) positions the
 *       directory at the first entry with the given hash.
 *
 * The client caches directory pages using the hash of the first entry as an
 * index. As noted above, the hash is not unique, so this solution doesn't
 * work as is: special processing is needed for "page hash chains" (i.e.,
 * sequences of pages filled with entries all having the same hash value).
 *
 * First, such chains have to be detected. To this end, the server returns to
 * the client the hash of the first entry on the page next to the one
 * returned. When the client detects that this hash is the same as the hash
 * of the first entry on the returned page, a page hash collision has to be
 * handled. Pages in the hash chain, except the first one, are termed
 * "overflow pages".
 *
 * The solution to the index uniqueness problem is to not cache overflow
 * pages. Instead, when a page hash collision is detected, all overflow pages
 * from the emerging chain are immediately requested from the server and
 * placed in a special data structure (struct ll_dir_chain). This data
 * structure is used by ll_readdir() to process entries from overflow pages.
 * When a readdir invocation finishes, the overflow pages are discarded. If
 * the page hash collision chain wasn't completely processed, the next call
 * to readdir will again detect the page hash collision, again read the
 * overflow pages in, process the next portion of entries and again discard
 * the pages. This is not as wasteful as it looks, because, given a
 * reasonable hash, page hash collisions are extremely rare.
 *
 * 1. directory positioning
 *
 * When seekdir(hash) is called, original
 *
 * identification of and access to overflow pages
 *
 * A page in an MDS_READPAGE RPC is packed in LU_PAGE_SIZE units, and each
 * page contains a header, lu_dirpage, which describes the start/end hash and
 * whether the page is empty (contains no dir entry) or whether its hash
 * collides with the next page. After the client receives the reply, several
 * pages will be integrated into one dir page of CFS_PAGE_SIZE (if
 * CFS_PAGE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage for this
 * integrated page will be adjusted.
 */
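/*
 * Minimal illustrative sketch (not part of the original implementation; the
 * helper name is hypothetical).  It relies only on the lu_dirpage header and
 * the LDF_COLLIDE flag that the code below already uses: as the surrounding
 * code suggests, the server sets LDF_COLLIDE when entries with this page's
 * last hash continue onto the following page, i.e. when a "page hash chain"
 * with overflow pages (which must not be cached) begins here.
 */
static inline int ll_dirpage_collides_sketch(const struct lu_dirpage *dp)
{
        /* ldp_hash_start/ldp_hash_end give the hash range covered by the
         * page; LDF_COLLIDE marks the case where the last hash continues on
         * the next (overflow) page. */
        return le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE;
}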
/* returns the page unlocked, but with a reference */
static int ll_dir_readpage(struct file *file, struct page *page0)
        struct inode *inode = page0->mapping->host;
        int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
        struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
        struct ptlrpc_request *request;
        struct mdt_body *body;
        struct md_op_data *op_data;
        struct page **page_pool;
#ifndef HAVE_ADD_TO_PAGE_CACHE_LRU
        struct pagevec lru_pvec;
        struct lu_dirpage *dp;
        int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> CFS_PAGE_SHIFT;
        int nrdpgs = 0; /* number of pages actually read */
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

        hash = fd->fd_dir.lfd_next;
        hash = ll_i2info(inode)->lli_sa_pos;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash "LPU64"\n",
               inode->i_ino, inode->i_generation, inode, hash);

        LASSERT(max_pages > 0 && max_pages <= PTLRPC_MAX_BRW_PAGES);

        OBD_ALLOC(page_pool, sizeof(page) * max_pages);
        if (page_pool != NULL) {
                page_pool[0] = page0;
        for (npages = 1; npages < max_pages; npages++) {
                page = page_cache_alloc_cold(inode->i_mapping);
                page_pool[npages] = page;

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                     LUSTRE_OPC_ANY, NULL);
        op_data->op_npages = npages;
        op_data->op_offset = hash;
        rc = md_readpage(exp, op_data, page_pool, &request);
        ll_finish_md_op_data(op_data);

        body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
        /* Checked by mdc_readpage() */
        LASSERT(body != NULL);

        if (body->valid & OBD_MD_FLSIZE)
                cl_isize_write(inode, body->size);
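        /* bd_nob_transferred is the number of bytes the bulk reply actually
         * carried; rounding up to CFS_PAGE_SIZE gives the number of
         * directory pages that now contain valid data. */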
        nrdpgs = (request->rq_bulk->bd_nob_transferred+CFS_PAGE_SIZE-1)
        SetPageUptodate(page0);

        ptlrpc_req_finished(request);

        CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);

        ll_pagevec_init(&lru_pvec, 0);
        for (i = 1; i < npages; i++) {
                unsigned long offset;

                if (rc < 0 || i >= nrdpgs) {
                        page_cache_release(page);
                SetPageUptodate(page);

                hash = le64_to_cpu(dp->ldp_hash_start);
                offset = hash_x_index(hash, hash64);

                prefetchw(&page->flags);
                ret = ll_add_to_page_cache_lru(page, inode->i_mapping, offset,
                        page_cache_get(page);
                        if (ll_pagevec_add(&lru_pvec, page) == 0)
                                ll_pagevec_lru_add_file(&lru_pvec);
                        CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
                               " %d\n", offset, ret);
                        page_cache_release(page);
        ll_pagevec_lru_add_file(&lru_pvec);

        if (page_pool != &page0)
                OBD_FREE(page_pool, sizeof(struct page *) * max_pages);
#ifndef MS_HAS_NEW_AOPS
struct address_space_operations ll_dir_aops = {
        .readpage = ll_dir_readpage,
#else
struct address_space_operations_ext ll_dir_aops = {
        .orig_aops.readpage = ll_dir_readpage,
#endif

static void ll_check_page(struct inode *dir, struct page *page)
        /* XXX: check page format later */
        SetPageChecked(page);

void ll_release_page(struct page *page, int remove)
        if (likely(page->mapping != NULL))
                truncate_complete_page(page->mapping, page);
        page_cache_release(page);
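/*
 * Minimal sketch (hypothetical helper, not the real hash_x_index()) of the
 * hash-to-page-index mapping that ll_dir_page_locate() below depends on.
 * Because radix_tree_gang_lookup() returns the entry with the smallest index
 * that is >= the requested one, storing pages under the bitwise complement of
 * their starting hash makes such a lookup return the cached page whose start
 * hash is the largest value <= the hash being searched for.  The 32-bit
 * handling mirrors the BITS_PER_LONG == 32 && hash64 cases elsewhere in this
 * file.
 */
static inline unsigned long ll_hash_to_index_sketch(__u64 hash, int hash64)
{
        if (BITS_PER_LONG == 32 && hash64)
                hash >>= 32;    /* 32-bit clients only use the top half */
        return ~0UL - (unsigned long)hash;
}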
/*
 * Find, kmap and return page that contains given hash.
 */
static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
                                       __u64 *start, __u64 *end)
        int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
        struct address_space *mapping = dir->i_mapping;
        /*
         * Complement of hash is used as an index so that
         * radix_tree_gang_lookup() can be used to find a page with starting
         * hash _smaller_ than one we are looking for.
         */
        unsigned long offset = hash_x_index(*hash, hash64);

        TREE_READ_LOCK_IRQ(mapping);
        found = radix_tree_gang_lookup(&mapping->page_tree,
                                       (void **)&page, offset, 1);
                struct lu_dirpage *dp;

                page_cache_get(page);
                TREE_READ_UNLOCK_IRQ(mapping);
                /*
                 * In contrast to find_lock_page() we are sure that directory
                 * page cannot be truncated (while DLM lock is held) and,
                 * hence, can avoid restart.
                 *
                 * In fact, page cannot be locked here at all, because
                 * ll_dir_readpage() does synchronous io.
                 */
                if (PageUptodate(page)) {
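                        /* On a 32-bit client using 64-bit directory hashes,
                         * positions carry only the top 32 bits of the full
                         * hash, so shift the page's start/end hashes down to
                         * make them comparable with *hash. */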
                        if (BITS_PER_LONG == 32 && hash64) {
                                *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
                                *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
                        } else {
                                *start = le64_to_cpu(dp->ldp_hash_start);
                                *end = le64_to_cpu(dp->ldp_hash_end);
                        }
                        LASSERTF(*start <= *hash, "start = "LPX64",end = "
                                 LPX64",hash = "LPX64"\n", *start, *end, *hash);
                        CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash "LPU64"\n",
                               offset, *start, *end, *hash);
                                ll_release_page(page, 0);
                        } else if (*end != *start && *hash == *end) {
                                /*
                                 * upon hash collision, remove this page,
                                 * otherwise put page reference, and
                                 * ll_get_dir_page() will issue RPC to fetch
                                 */
                                ll_release_page(page,
                                        le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
                        page_cache_release(page);
                        page = ERR_PTR(-EIO);
        TREE_READ_UNLOCK_IRQ(mapping);
struct page *ll_get_dir_page(struct file *filp, struct inode *dir, __u64 hash,
                             struct ll_dir_chain *chain)
        ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
        struct address_space *mapping = dir->i_mapping;
        struct lustre_handle lockh;
        struct lu_dirpage *dp;
        struct ll_inode_info *lli = ll_i2info(dir);
        int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;

        rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
                           ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
                struct ldlm_enqueue_info einfo = { LDLM_IBITS, mode,
                        ll_md_blocking_ast, ldlm_completion_ast,
                struct lookup_intent it = { .it_op = IT_READDIR };
                struct ptlrpc_request *request;
                struct md_op_data *op_data;

                op_data = ll_prep_md_op_data(NULL, dir, NULL, NULL, 0, 0,
                                             LUSTRE_OPC_ANY, NULL);
                        return (void *)op_data;

                rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
                                op_data, &lockh, NULL, 0, NULL, 0);

                ll_finish_md_op_data(op_data);

                request = (struct ptlrpc_request *)it.d.lustre.it_data;
                        ptlrpc_req_finished(request);
                        CERROR("lock enqueue: "DFID" at "LPU64": rc %d\n",
                               PFID(ll_inode2fid(dir)), hash, rc);

        /* for cross-ref object, l_ast_data of the lock may not be set,
         * we reset it here */
        md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
        ldlm_lock_dump_handle(D_OTHER, &lockh);

        cfs_down(&lli->lli_readdir_sem);
        page = ll_dir_page_locate(dir, &lhash, &start, &end);
                CERROR("dir page locate: "DFID" at "LPU64": rc %ld\n",
                       PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
                GOTO(out_unlock, page);
        } else if (page != NULL) {
                /*
                 * XXX nikita: not entirely correct handling of a corner case:
                 * suppose a hash chain of entries with hash value HASH crosses
                 * the border between pages P0 and P1. First both P0 and P1 are
                 * cached, and seekdir() is called for some entry from the P0
                 * part of the chain. Later P0 goes out of cache. telldir(HASH)
                 * happens and finds P1, as it starts with a matching hash
                 * value. The remaining entries from the P0 part of the chain
                 * are skipped. (Is that really a bug?)
                 *
                 * Possible solutions: 0. don't cache P1 in such a case, handle
                 * it as an "overflow" page; 1. invalidate all pages at
                 * once; 2. use HASH|1 as an index for P1.
                 */
                GOTO(hash_collision, page);
        page = read_cache_page(mapping, hash_x_index(hash, hash64),
                               (filler_t*)mapping->a_ops->readpage, filp);
                CERROR("read cache page: "DFID" at "LPU64": rc %ld\n",
                       PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
                GOTO(out_unlock, page);

        if (!PageUptodate(page)) {
                CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
                       PFID(ll_inode2fid(dir)), hash, -5);
        if (!PageChecked(page))
                ll_check_page(dir, page);
        if (PageError(page)) {
                CERROR("page error: "DFID" at "LPU64": rc %d\n",
                       PFID(ll_inode2fid(dir)), hash, -5);

        dp = page_address(page);
        if (BITS_PER_LONG == 32 && hash64) {
                start = le64_to_cpu(dp->ldp_hash_start) >> 32;
                end = le64_to_cpu(dp->ldp_hash_end) >> 32;
        } else {
                start = le64_to_cpu(dp->ldp_hash_start);
                end = le64_to_cpu(dp->ldp_hash_end);
        }
        LASSERT(start == lhash);
        CWARN("Page-wide hash collision: "LPU64"\n", end);
        if (BITS_PER_LONG == 32 && hash64)
                CWARN("Real page-wide hash collision at ["LPU64" "LPU64
                      "] with hash "LPU64"\n",
                      le64_to_cpu(dp->ldp_hash_start),
                      le64_to_cpu(dp->ldp_hash_end), hash);
        /*
         * Fetch whole overflow chain...
         */
        cfs_up(&lli->lli_readdir_sem);
        ldlm_lock_decref(&lockh, mode);
        ll_release_page(page, 1);
        page = ERR_PTR(-EIO);
int ll_readdir(struct file *filp, void *cookie, filldir_t filldir)
        struct inode *inode = filp->f_dentry->d_inode;
        struct ll_inode_info *info = ll_i2info(inode);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
        __u64 pos = fd->fd_dir.lfd_pos;
        int api32 = ll_need_32bit_api(sbi);
        int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
        struct ll_dir_chain chain;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
               inode->i_ino, inode->i_generation, inode,
               (unsigned long)pos, i_size_read(inode), api32);

        if (pos == MDS_DIR_END_OFF)

        ll_dir_chain_init(&chain);

        fd->fd_dir.lfd_next = pos;
        page = ll_get_dir_page(filp, inode, pos, &chain);

        while (rc == 0 && !done) {
                struct lu_dirpage *dp;
                struct lu_dirent *ent;

                /*
                 * If page is empty (end of directory is reached),
                 */
                __u64 hash = MDS_DIR_END_OFF;

                dp = page_address(page);
                for (ent = lu_dirent_start(dp); ent != NULL && !done;
                     ent = lu_dirent_next(ent)) {
                        /*
                         * XXX: implement correct swabbing here.
                         */
                        hash = le64_to_cpu(ent->lde_hash);
                        /*
                         * Skip until we find the target hash.
                         */
                        namelen = le16_to_cpu(ent->lde_namelen);
                        fid_le_to_cpu(&fid, &ent->lde_fid);
                        ino = cl_fid_build_ino(&fid, api32);
                        type = ll_dirent_type_get(ent);
                        /* ll_nfs_get_name_filldir() accesses 'ent' through
                         * its 'lde_name', so the 'name' parameter passed to
                         * filldir() must point into 'ent'. */
                        done = filldir(cookie, ent->lde_name, namelen,
                next = le64_to_cpu(dp->ldp_hash_end);
                if (pos == MDS_DIR_END_OFF) {
                        /*
                         * End of directory reached.
                         */
                        ll_release_page(page, 0);
                } else if (1 /* chain is exhausted */) {
                        /*
                         * Normal case: continue to the next
                         */
                        ll_release_page(page,
                                        le32_to_cpu(dp->ldp_flags) &
                        fd->fd_dir.lfd_next = pos;
                        page = ll_get_dir_page(filp, inode, pos,
                        /*
                         * go into overflow page.
                         */
                        LASSERT(le32_to_cpu(dp->ldp_flags) &
                        ll_release_page(page, 1);
                ll_release_page(page, 0);
                CERROR("error reading dir "DFID" at %lu: rc %d\n",
                       PFID(&info->lli_fid), (unsigned long)pos, rc);

        fd->fd_dir.lfd_pos = pos;
        if (pos == MDS_DIR_END_OFF) {
                        filp->f_pos = LL_DIR_END_OFF_32BIT;
                        filp->f_pos = LL_DIR_END_OFF;
                        filp->f_pos = pos >> 32;
        filp->f_version = inode->i_version;
        touch_atime(filp->f_vfsmnt, filp->f_dentry);

        ll_dir_chain_fini(&chain);
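/*
 * Minimal sketch (hypothetical helper, not part of the original code) of the
 * position encoding performed at the end of ll_readdir() above and undone in
 * ll_dir_seek() below: MDS_DIR_END_OFF maps to the API-specific end-of-dir
 * marker, and 32-bit readdir callers on a 64-bit-hash filesystem only see the
 * top 32 bits of the hash in f_pos, while the full value stays in
 * fd->fd_dir.lfd_pos.
 */
static inline loff_t ll_dir_pos_to_f_pos_sketch(__u64 pos, int api32, int hash64)
{
        if (pos == MDS_DIR_END_OFF)
                return api32 ? LL_DIR_END_OFF_32BIT : LL_DIR_END_OFF;
        if (api32 && hash64)
                return pos >> 32;       /* ll_dir_seek() shifts it back up */
        return pos;
}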
int ll_send_mgc_param(struct obd_export *mgc, char *string)
        struct mgs_send_param *msp;

        strncpy(msp->mgs_param, string, MGS_PARAM_MAXLEN);
        rc = obd_set_info_async(mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
                                sizeof(struct mgs_send_param), msp, NULL);
                CERROR("Failed to set parameter: %d\n", rc);

char *ll_get_fsname(struct inode *inode)
        struct lustre_sb_info *lsi = s2lsi(inode->i_sb);

        OBD_ALLOC(fsname, MGS_PARAM_MAXLEN);
        len = strlen(lsi->lsi_lmd->lmd_profile);
        ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
        if (ptr && (strcmp(ptr, "-client") == 0))
        strncpy(fsname, lsi->lsi_lmd->lmd_profile, len);
int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct md_op_data *op_data;
        struct ptlrpc_request *req = NULL;
        struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
        struct obd_device *mgc = lsi->lsi_mgc;
        char *fsname = NULL, *param = NULL;

        /*
         * This is coming from userspace, so it should be in
         * local endian. But the MDS would like it in little
         * endian, so we swab it before we send it.
         */
        switch (lump->lmm_magic) {
        case LOV_USER_MAGIC_V1: {
                if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
                        lustre_swab_lov_user_md_v1(lump);
                lum_size = sizeof(struct lov_user_md_v1);
        case LOV_USER_MAGIC_V3: {
                if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
                        lustre_swab_lov_user_md_v3(
                                (struct lov_user_md_v3 *)lump);
                lum_size = sizeof(struct lov_user_md_v3);
                CDEBUG(D_IOCTL, "bad userland LOV MAGIC:"
                       " %#08x != %#08x nor %#08x\n",
                       lump->lmm_magic, LOV_USER_MAGIC_V1,
                lum_size = sizeof(struct lov_user_md_v1);

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
                                     LUSTRE_OPC_ANY, NULL);
                RETURN(PTR_ERR(op_data));

        /* swabbing is done in lov_setstripe() on server side */
        rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
                        NULL, 0, &req, NULL);
        ll_finish_md_op_data(op_data);
        ptlrpc_req_finished(req);
                if (rc != -EPERM && rc != -EACCES)
                        CERROR("mdc_setattr fails: rc = %d\n", rc);

        /* In the following we use the fact that LOV_USER_MAGIC_V1 and
         * LOV_USER_MAGIC_V3 have the same initial fields, so we do not
         * need to make the distinction between the two versions. */
        if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
                OBD_ALLOC(param, MGS_PARAM_MAXLEN);

                /* Get fsname and assume devname to be -MDT0000. */
                fsname = ll_get_fsname(inode);

                /* Set root stripesize */
                sprintf(param, "%s-MDT0000.lov.stripesize=%u", fsname,
                        lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
                rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);

                /* Set root stripecount */
                sprintf(param, "%s-MDT0000.lov.stripecount=%hd", fsname,
                        lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
                rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);

                /* Set root stripeoffset */
                sprintf(param, "%s-MDT0000.lov.stripeoffset=%hd", fsname,
                        lump ? le16_to_cpu(lump->lmm_stripe_offset) :
                        (typeof(lump->lmm_stripe_offset))(-1));
                rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);

        OBD_FREE(fsname, MGS_PARAM_MAXLEN);
        OBD_FREE(param, MGS_PARAM_MAXLEN);
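/*
 * For illustration only (hypothetical values): with a filesystem named
 * "lustre", the sprintf() calls above produce MGS parameter strings such as
 *
 *     lustre-MDT0000.lov.stripesize=1048576
 *     lustre-MDT0000.lov.stripecount=2
 *     lustre-MDT0000.lov.stripeoffset=-1
 *
 * which ll_send_mgc_param() then forwards to the MGS via KEY_SET_INFO.
 */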
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
                     int *lmm_size, struct ptlrpc_request **request)
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct mdt_body *body;
        struct lov_mds_md *lmm = NULL;
        struct ptlrpc_request *req = NULL;
        struct md_op_data *op_data;

        rc = ll_get_max_mdsize(sbi, &lmmsize);

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
                                     0, lmmsize, LUSTRE_OPC_ANY,
                RETURN(PTR_ERR(op_data));

        op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
        rc = md_getattr(sbi->ll_md_exp, op_data, &req);
        ll_finish_md_op_data(op_data);
                CDEBUG(D_INFO, "md_getattr failed on inode "
                       "%lu/%u: rc %d\n", inode->i_ino,
                       inode->i_generation, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
        LASSERT(body != NULL);

        lmmsize = body->eadatasize;

        if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
                GOTO(out, rc = -ENODATA);

        lmm = req_capsule_server_sized_get(&req->rq_pill,
                                           &RMF_MDT_MD, lmmsize);
        LASSERT(lmm != NULL);

        /*
         * This is coming from the MDS, so it is probably in
         * little endian. We convert it to host endian before
         * passing it to userspace.
         */
        /* We don't swab objects for directories */
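        /* Note: a check of the form LOV_MAGIC != cpu_to_le32(LOV_MAGIC), as
         * used below, is only true on big-endian hosts, where the on-wire
         * little-endian layout differs from host byte order and therefore
         * needs to be swabbed. */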
        switch (le32_to_cpu(lmm->lmm_magic)) {
                if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
                        lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
                if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
                        lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
                CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
/*
 * Get MDT index for the inode.
 */
int ll_get_mdt_idx(struct inode *inode)
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct md_op_data *op_data;

        op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
                                     0, LUSTRE_OPC_ANY, NULL);
                RETURN(PTR_ERR(op_data));

        op_data->op_valid |= OBD_MD_MDTIDX;
        rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
        mdtidx = op_data->op_mds;
        ll_finish_md_op_data(op_data);
                CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);

static int copy_and_ioctl(int cmd, struct obd_export *exp, void *data, int len)
        if (cfs_copy_from_user(ptr, data, len)) {
        rc = obd_iocontrol(cmd, exp, len, data, NULL);
static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
        int cmd = qctl->qc_cmd;
        int type = qctl->qc_type;
        int id = qctl->qc_id;
        int valid = qctl->qc_valid;

        case LUSTRE_Q_INVALIDATE:
        case LUSTRE_Q_FINVALIDATE:
                if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
                    sbi->ll_flags & LL_SBI_RMT_CLIENT)
                if (((type == USRQUOTA && cfs_curproc_euid() != id) ||
                     (type == GRPQUOTA && !in_egroup_p(id))) &&
                    (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
                     sbi->ll_flags & LL_SBI_RMT_CLIENT))
                CERROR("unsupported quotactl op: %#x\n", cmd);

        if (valid != QC_GENERAL) {
                if (sbi->ll_flags & LL_SBI_RMT_CLIENT)

                if (cmd == Q_GETINFO)
                        qctl->qc_cmd = Q_GETOINFO;
                else if (cmd == Q_GETQUOTA)
                        qctl->qc_cmd = Q_GETOQUOTA;

                        rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
                                           sizeof(*qctl), qctl, NULL);
                        rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
                                           sizeof(*qctl), qctl, NULL);
                        rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
                                           sizeof(*qctl), qctl, NULL);
                        rc = obd_iocontrol(OBD_IOC_QUOTACTL,
                                           sizeof(*qctl), qctl, NULL);

        struct obd_quotactl *oqctl;

        OBD_ALLOC_PTR(oqctl);

        QCTL_COPY(oqctl, qctl);
        rc = obd_quotactl(sbi->ll_md_exp, oqctl);
                if (rc != -EALREADY && cmd == Q_QUOTAON) {
                        oqctl->qc_cmd = Q_QUOTAOFF;
                        obd_quotactl(sbi->ll_md_exp, oqctl);

        /* If QIF_SPACE is not set, client should collect the
         * space usage from OSSs by itself */
        if (cmd == Q_GETQUOTA &&
            !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
            !oqctl->qc_dqblk.dqb_curspace) {
                struct obd_quotactl *oqctl_tmp;

                OBD_ALLOC_PTR(oqctl_tmp);
                if (oqctl_tmp == NULL)
                        GOTO(out, rc = -ENOMEM);

                oqctl_tmp->qc_cmd = Q_GETOQUOTA;
                oqctl_tmp->qc_id = oqctl->qc_id;
                oqctl_tmp->qc_type = oqctl->qc_type;

                /* collect space usage from OSTs */
                oqctl_tmp->qc_dqblk.dqb_curspace = 0;
                rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
                if (!rc || rc == -EREMOTEIO) {
                        oqctl->qc_dqblk.dqb_curspace =
                                oqctl_tmp->qc_dqblk.dqb_curspace;
                        oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;

                /* collect space & inode usage from MDTs */
                oqctl_tmp->qc_dqblk.dqb_curspace = 0;
                oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
                rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
                if (!rc || rc == -EREMOTEIO) {
                        oqctl->qc_dqblk.dqb_curspace +=
                                oqctl_tmp->qc_dqblk.dqb_curspace;
                        oqctl->qc_dqblk.dqb_curinodes =
                                oqctl_tmp->qc_dqblk.dqb_curinodes;
                        oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
                        oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;

                OBD_FREE_PTR(oqctl_tmp);

        QCTL_COPY(qctl, oqctl);
        OBD_FREE_PTR(oqctl);
#ifdef HAVE_UNLOCKED_IOCTL
static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#else
static int ll_dir_ioctl(struct inode *unuse, struct file *file,
                        unsigned int cmd, unsigned long arg)
#endif
        struct inode *inode = file->f_dentry->d_inode;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct obd_ioctl_data *data;

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
               inode->i_ino, inode->i_generation, inode, cmd);

        /* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
        if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);

        case FSFILT_IOC_GETFLAGS:
        case FSFILT_IOC_SETFLAGS:
                RETURN(ll_iocontrol(inode, file, cmd, arg));
        case FSFILT_IOC_GETVERSION_OLD:
        case FSFILT_IOC_GETVERSION:
                RETURN(put_user(inode->i_generation, (int *)arg));
        /* We need to special case any other ioctls we want to handle,
         * to send them to the MDS/OST as appropriate and to properly
         * network encode the arg field.
        case FSFILT_IOC_SETVERSION_OLD:
        case FSFILT_IOC_SETVERSION:
        */
        case LL_IOC_GET_MDTIDX: {
                mdtidx = ll_get_mdt_idx(inode);
                if (put_user((int)mdtidx, (int*)arg))

        case IOC_MDC_LOOKUP: {
                struct ptlrpc_request *request = NULL;
                int namelen, len = 0;
                struct md_op_data *op_data;

                rc = obd_ioctl_getdata(&buf, &len, (void *)arg);

                filename = data->ioc_inlbuf1;
                namelen = strlen(filename);
                        CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
                        GOTO(out_free, rc = -EINVAL);

                op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
                                             0, LUSTRE_OPC_ANY, NULL);
                if (IS_ERR(op_data))
                        GOTO(out_free, rc = PTR_ERR(op_data));

                op_data->op_valid = OBD_MD_FLID;
                rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
                ll_finish_md_op_data(op_data);
                        CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
                ptlrpc_req_finished(request);
                obd_ioctl_freedata(buf, len);
        case LL_IOC_LOV_SETSTRIPE: {
                struct lov_user_md_v3 lumv3;
                struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
                struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
                struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
                int set_default = 0;

                LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
                LASSERT(sizeof(lumv3.lmm_objects[0]) ==
                        sizeof(lumv3p->lmm_objects[0]));
                /* first try with v1 which is smaller than v3 */
                if (cfs_copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))

                if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
                        if (cfs_copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))

                if (inode->i_sb->s_root == file->f_dentry)

                /* in v1 and v3 cases lumv1 points to data */
                rc = ll_dir_setstripe(inode, lumv1, set_default);

        case LL_IOC_OBD_STATFS:
                RETURN(ll_obd_statfs(inode, (void *)arg));
        case LL_IOC_LOV_GETSTRIPE:
        case LL_IOC_MDC_GETINFO:
        case IOC_MDC_GETFILEINFO:
        case IOC_MDC_GETFILESTRIPE: {
                struct ptlrpc_request *request = NULL;
                struct lov_user_md *lump;
                struct lov_mds_md *lmm = NULL;
                struct mdt_body *body;
                char *filename = NULL;

                if (cmd == IOC_MDC_GETFILEINFO ||
                    cmd == IOC_MDC_GETFILESTRIPE) {
                        filename = getname((const char *)arg);
                        if (IS_ERR(filename))
                                RETURN(PTR_ERR(filename));

                        rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
                                                      &lmmsize, &request);
                        rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);

                body = req_capsule_server_get(&request->rq_pill,
                LASSERT(body != NULL);

                if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
                                       cmd == LL_IOC_MDC_GETINFO))
                        GOTO(skip_lmm, rc = 0);

                if (cmd == IOC_MDC_GETFILESTRIPE ||
                    cmd == LL_IOC_LOV_GETSTRIPE) {
                        lump = (struct lov_user_md *)arg;
                        struct lov_user_mds_data *lmdp;

                        lmdp = (struct lov_user_mds_data *)arg;
                        lump = &lmdp->lmd_lmm;
                if (cfs_copy_to_user(lump, lmm, lmmsize)) {
                        if (cfs_copy_to_user(lump, lmm, sizeof(*lump)))
                                GOTO(out_req, rc = -EFAULT);

                if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
                        struct lov_user_mds_data *lmdp;

                        st.st_dev     = inode->i_sb->s_dev;
                        st.st_mode    = body->mode;
                        st.st_nlink   = body->nlink;
                        st.st_uid     = body->uid;
                        st.st_gid     = body->gid;
                        st.st_rdev    = body->rdev;
                        st.st_size    = body->size;
                        st.st_blksize = CFS_PAGE_SIZE;
                        st.st_blocks  = body->blocks;
                        st.st_atime   = body->atime;
                        st.st_mtime   = body->mtime;
                        st.st_ctime   = body->ctime;
                        st.st_ino     = inode->i_ino;

                        lmdp = (struct lov_user_mds_data *)arg;
                        if (cfs_copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
                                GOTO(out_req, rc = -EFAULT);

                ptlrpc_req_finished(request);
        case IOC_LOV_GETINFO: {
                struct lov_user_mds_data *lumd;
                struct lov_stripe_md *lsm;
                struct lov_user_md *lum;
                struct lov_mds_md *lmm;

                lumd = (struct lov_user_mds_data *)arg;
                lum = &lumd->lmd_lmm;

                rc = ll_get_max_mdsize(sbi, &lmmsize);

                OBD_ALLOC_LARGE(lmm, lmmsize);
                if (cfs_copy_from_user(lmm, lum, lmmsize))
                        GOTO(free_lmm, rc = -EFAULT);

                switch (lmm->lmm_magic) {
                case LOV_USER_MAGIC_V1:
                        if (LOV_USER_MAGIC_V1 == cpu_to_le32(LOV_USER_MAGIC_V1))
                        /* swab objects first so that stripes num will be sane */
                        lustre_swab_lov_user_md_objects(
                                ((struct lov_user_md_v1 *)lmm)->lmm_objects,
                                ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
                        lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
                case LOV_USER_MAGIC_V3:
                        if (LOV_USER_MAGIC_V3 == cpu_to_le32(LOV_USER_MAGIC_V3))
                        /* swab objects first so that stripes num will be sane */
                        lustre_swab_lov_user_md_objects(
                                ((struct lov_user_md_v3 *)lmm)->lmm_objects,
                                ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
                        lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
                        GOTO(free_lmm, rc = -EINVAL);

                rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
                        GOTO(free_lmm, rc = -ENOMEM);

                /* Perform glimpse_size operation. */
                memset(&st, 0, sizeof(st));

                rc = ll_glimpse_ioctl(sbi, lsm, &st);

                if (cfs_copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
                        GOTO(free_lsm, rc = -EFAULT);

                obd_free_memmd(sbi->ll_dt_exp, &lsm);
                OBD_FREE_LARGE(lmm, lmmsize);
        case OBD_IOC_LLOG_CATINFO: {
                struct ptlrpc_request *req = NULL;

                rc = obd_ioctl_getdata(&buf, &len, (void *)arg);

                if (!data->ioc_inlbuf1) {
                        obd_ioctl_freedata(buf, len);

                req = ptlrpc_request_alloc(sbi2mdc(sbi)->cl_import,
                        GOTO(out_catinfo, rc = -ENOMEM);

                req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
                req_capsule_set_size(&req->rq_pill, &RMF_STRING, RCL_CLIENT,

                rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION, LLOG_CATINFO);
                        ptlrpc_request_free(req);
                        GOTO(out_catinfo, rc);

                str = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
                memcpy(str, data->ioc_inlbuf1, data->ioc_inllen1);
                if (data->ioc_inllen2) {
                        str = req_capsule_client_get(&req->rq_pill,
                        memcpy(str, data->ioc_inlbuf2, data->ioc_inllen2);

                req_capsule_set_size(&req->rq_pill, &RMF_STRING, RCL_SERVER,
                ptlrpc_request_set_replen(req);

                rc = ptlrpc_queue_wait(req);
                        str = req_capsule_server_get(&req->rq_pill,
                        if (cfs_copy_to_user(data->ioc_pbuf1, str,
                ptlrpc_req_finished(req);
                obd_ioctl_freedata(buf, len);
        case OBD_IOC_QUOTACHECK: {
                struct obd_quotactl *oqctl;

                if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
                    sbi->ll_flags & LL_SBI_RMT_CLIENT)

                OBD_ALLOC_PTR(oqctl);
                oqctl->qc_type = arg;
                rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
                        CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);

                rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
                        CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);

                OBD_FREE_PTR(oqctl);
        case OBD_IOC_POLL_QUOTACHECK: {
                struct if_quotacheck *check;

                if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
                    sbi->ll_flags & LL_SBI_RMT_CLIENT)

                OBD_ALLOC_PTR(check);

                rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
                        CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
                        if (cfs_copy_to_user((void *)arg, check,
                                CDEBUG(D_QUOTA, "cfs_copy_to_user failed\n");

                rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
                        CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
                        if (cfs_copy_to_user((void *)arg, check,
                                CDEBUG(D_QUOTA, "cfs_copy_to_user failed\n");

                OBD_FREE_PTR(check);
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2,7,50,0)
        case LL_IOC_QUOTACTL_18: {
                /* copy the old 1.x quota struct for internal use, then copy
                 * back into old format struct. For 1.8 compatibility. */
                struct if_quotactl_18 *qctl_18;
                struct if_quotactl *qctl_20;

                OBD_ALLOC_PTR(qctl_18);

                OBD_ALLOC_PTR(qctl_20);
                        GOTO(out_quotactl_18, rc = -ENOMEM);

                if (cfs_copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18)))
                        GOTO(out_quotactl_20, rc = -ENOMEM);

                QCTL_COPY(qctl_20, qctl_18);
                qctl_20->qc_idx = 0;

                /* XXX: dqb_valid was borrowed as a flag to mark that
                 * only mds quota is wanted */
                if (qctl_18->qc_cmd == Q_GETQUOTA &&
                    qctl_18->qc_dqblk.dqb_valid) {
                        qctl_20->qc_valid = QC_MDTIDX;
                        qctl_20->qc_dqblk.dqb_valid = 0;
                } else if (qctl_18->obd_uuid.uuid[0] != '\0') {
                        qctl_20->qc_valid = QC_UUID;
                        qctl_20->obd_uuid = qctl_18->obd_uuid;
                        qctl_20->qc_valid = QC_GENERAL;

                rc = quotactl_ioctl(sbi, qctl_20);

                QCTL_COPY(qctl_18, qctl_20);
                qctl_18->obd_uuid = qctl_20->obd_uuid;

                if (cfs_copy_to_user((void *)arg, qctl_18,

                OBD_FREE_PTR(qctl_20);
                OBD_FREE_PTR(qctl_18);
#else
#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2,7,50,0) */
        case LL_IOC_QUOTACTL: {
                struct if_quotactl *qctl;

                OBD_ALLOC_PTR(qctl);

                if (cfs_copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
                        GOTO(out_quotactl, rc = -EFAULT);

                rc = quotactl_ioctl(sbi, qctl);

                if (rc == 0 && cfs_copy_to_user((void *)arg, qctl, sizeof(*qctl)))

        case OBD_IOC_GETDTNAME:
        case OBD_IOC_GETMDNAME:
                RETURN(ll_get_obd_name(inode, cmd, arg));
        case LL_IOC_FLUSHCTX:
                RETURN(ll_flush_ctx(inode));
#ifdef CONFIG_FS_POSIX_ACL
        case LL_IOC_RMTACL: {
                if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
                    inode == inode->i_sb->s_root->d_inode) {
                        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                        LASSERT(fd != NULL);
                        rc = rct_add(&sbi->ll_rct, cfs_curproc_pid(), arg);
                                fd->fd_flags |= LL_FILE_RMTACL;
        case LL_IOC_GETOBDCOUNT: {
                struct obd_export *exp;

                if (cfs_copy_from_user(&count, (int *)arg, sizeof(int)))

                /* get ost count when count is zero, get mdt count otherwise */
                exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
                vallen = sizeof(count);
                rc = obd_get_info(exp, sizeof(KEY_TGT_COUNT), KEY_TGT_COUNT,
                                  &vallen, &count, NULL);
                        CERROR("get target count failed: %d\n", rc);

                if (cfs_copy_to_user((int *)arg, &count, sizeof(int)))

        case LL_IOC_PATH2FID:
                if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
                                     sizeof(struct lu_fid)))
        case LL_IOC_GET_CONNECT_FLAGS: {
                RETURN(obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg));
        case OBD_IOC_CHANGELOG_SEND:
        case OBD_IOC_CHANGELOG_CLEAR:
                rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
                                    sizeof(struct ioc_changelog));
        case OBD_IOC_FID2PATH:
                RETURN(ll_fid2path(ll_i2mdexp(inode), (void *)arg));
        case LL_IOC_HSM_CT_START:
                rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
                                    sizeof(struct lustre_kernelcomm));

                RETURN(obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg));
static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
        struct inode *inode = file->f_mapping->host;
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int api32 = ll_need_32bit_api(sbi);
        loff_t ret = -EINVAL;

        cfs_mutex_lock(&inode->i_mutex);
                offset += file->f_pos;
                        offset += LL_DIR_END_OFF_32BIT;
                        offset += LL_DIR_END_OFF;
            ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
             (!api32 && offset <= LL_DIR_END_OFF))) {
                if (offset != file->f_pos) {
                        if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
                            (!api32 && offset == LL_DIR_END_OFF))
                                fd->fd_dir.lfd_pos = MDS_DIR_END_OFF;
                        else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
                                fd->fd_dir.lfd_pos = offset << 32;
                        else
                                fd->fd_dir.lfd_pos = offset;
                        file->f_pos = offset;
                        file->f_version = 0;

        cfs_mutex_unlock(&inode->i_mutex);

int ll_dir_open(struct inode *inode, struct file *file)
        RETURN(ll_file_open(inode, file));

int ll_dir_release(struct inode *inode, struct file *file)
        RETURN(ll_file_release(inode, file));

struct file_operations ll_dir_operations = {
        .llseek         = ll_dir_seek,
        .open           = ll_dir_open,
        .release        = ll_dir_release,
        .read           = generic_read_dir,
        .readdir        = ll_readdir,
#ifdef HAVE_UNLOCKED_IOCTL
        .unlocked_ioctl = ll_dir_ioctl,
#else
        .ioctl          = ll_dir_ioctl,
#endif
};