/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Andreas Dilger <adilger@clusterfs.com>
+ * GPL HEADER START
*
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/file.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Andreas Dilger <adilger@clusterfs.com>
*/
#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/pagemap.h>
#include <linux/file.h>
#include "llite_internal.h"
+#include <lustre/ll_fiemap.h>
+
+#include "cl_object.h"
-/* also used by llite/special.c:ll_special_open() */
struct ll_file_data *ll_file_data_get(void)
{
struct ll_file_data *fd;
- OBD_SLAB_ALLOC_PTR(fd, ll_file_data_slab);
+ OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
return fd;
}
struct ptlrpc_request *req = NULL;
struct obd_device *obd = class_exp2obd(exp);
int epoch_close = 1;
- int seq_end = 0, rc;
+ int rc;
ENTRY;
if (obd == NULL) {
ll_prepare_close(inode, op_data, och);
epoch_close = (op_data->op_flags & MF_EPOCH_CLOSE);
rc = md_close(md_exp, op_data, och->och_mod, &req);
- if (rc != -EAGAIN)
- seq_end = 1;
-
if (rc == -EAGAIN) {
/* This close must have the epoch closed. */
LASSERT(exp->exp_connect_flags & OBD_CONNECT_SOM);
LASSERT(epoch_close);
/* MDS has instructed us to obtain Size-on-MDS attribute from
* OSTs and send setattr to back to MDS. */
- rc = ll_sizeonmds_update(inode, och->och_mod,
- &och->och_fh, op_data->op_ioepoch);
+ rc = ll_sizeonmds_update(inode, &och->och_fh,
+ op_data->op_ioepoch);
if (rc) {
CERROR("inode %lu mdc Size-on-MDS update failed: "
"rc = %d\n", inode->i_ino, rc);
EXIT;
out:
-
+
if ((exp->exp_connect_flags & OBD_CONNECT_SOM) && !epoch_close &&
S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
ll_queue_done_writing(inode, LLIF_DONE_WRITING);
} else {
- if (seq_end)
- ptlrpc_close_replay_seq(req);
md_clear_open_replay_data(md_exp, och);
/* Free @och if it is not waiting for DONE_WRITING. */
och->och_fh.cookie = DEAD_HANDLE_MAGIC;
ENTRY;
/* clear group lock, if present */
- if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
- rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP,
- &fd->fd_cwlockh);
- }
+ if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
+ ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
/* Let's see if we have good enough OPEN lock on the file and if
we can skip talking to MDS */
struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm = lli->lli_smd;
int rc;
-
ENTRY;
+
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
- /* don't do anything for / */
- if (inode->i_sb->s_root == file->f_dentry)
- RETURN(0);
+#ifdef CONFIG_FS_POSIX_ACL
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ inode == inode->i_sb->s_root->d_inode) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(fd != NULL);
+ if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
+ fd->fd_flags &= ~LL_FILE_RMTACL;
+ rct_del(&sbi->ll_rct, cfs_curproc_pid());
+ et_search_free(&sbi->ll_et, cfs_curproc_pid());
+ }
+ }
+#endif
- ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
+ if (inode->i_sb->s_root != file->f_dentry)
+ ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
fd = LUSTRE_FPRIVATE(file);
LASSERT(fd != NULL);
- /* don't do anything for / */
+        /* The last ref on @file may not be held by the pid that owns the
+         * statahead. Different processes can open the same dir; "ll_opendir_key"
+         * marks that it is this process that should stop the statahead thread. */
+ if (lli->lli_opendir_key == fd && lli->lli_opendir_pid != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
+
if (inode->i_sb->s_root == file->f_dentry) {
LUSTRE_FPRIVATE(file) = NULL;
ll_file_data_put(fd);
RETURN(0);
}
-
+
if (lsm)
lov_test_and_clear_async_rc(lsm);
lli->lli_async_rc = 0;
struct md_op_data *op_data;
struct ptlrpc_request *req;
int rc;
+ ENTRY;
if (!parent)
RETURN(-ENOENT);
/* reason for keep own exit path - don`t flood log
* with messages with -ESTALE errors.
*/
- if (!it_disposition(itp, DISP_OPEN_OPEN) ||
+ if (!it_disposition(itp, DISP_OPEN_OPEN) ||
it_open_error(DISP_OPEN_OPEN, itp))
GOTO(out, rc);
ll_release_openhandle(file->f_dentry, itp);
- GOTO(out_stale, rc);
+ GOTO(out, rc);
}
if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
if (itp->d.lustre.it_lock_mode)
md_set_lock_data(sbi->ll_md_exp,
- &itp->d.lustre.it_lock_handle,
+ &itp->d.lustre.it_lock_handle,
file->f_dentry->d_inode);
- rc = ll_prep_inode(&file->f_dentry->d_inode, req, DLM_REPLY_REC_OFF,
- NULL);
+ rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL);
out:
ptlrpc_req_finished(itp->d.lustre.it_data);
-
-out_stale:
it_clear_disposition(itp, DISP_ENQ_COMPLETE);
ll_intent_drop_lock(itp);
LASSERT(och);
- body = lustre_msg_buf(req->rq_repmsg, DLM_REPLY_REC_OFF, sizeof(*body));
- /* reply already checked out */
- LASSERT(body != NULL);
- /* and swabbed in md_enqueue */
- LASSERT(lustre_rep_swabbed(req, DLM_REPLY_REC_OFF));
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL); /* reply already checked out */
memcpy(&och->och_fh, &body->handle, sizeof(body->handle));
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
if (rc)
RETURN(rc);
- body = lustre_msg_buf(req->rq_repmsg,
- DLM_REPLY_REC_OFF, sizeof(*body));
-
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
if ((it->it_flags & FMODE_WRITE) &&
(body->valid & OBD_MD_FLSIZE))
- {
CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID"\n",
lli->lli_ioepoch, PFID(&lli->lli_fid));
- }
}
LUSTRE_FPRIVATE(file) = fd;
struct obd_client_handle **och_p;
__u64 *och_usecount;
struct ll_file_data *fd;
- int rc = 0;
+ int rc = 0, opendir_set = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
inode->i_generation, inode, file->f_flags);
- /* don't do anything for / */
- if (inode->i_sb->s_root == file->f_dentry)
- RETURN(0);
-
#ifdef HAVE_VFS_INTENT_PATCHES
it = file->f_it;
#else
if (fd == NULL)
RETURN(-ENOMEM);
- /* don't do anything for / */
+ fd->fd_file = file;
+ if (S_ISDIR(inode->i_mode)) {
+again:
+ spin_lock(&lli->lli_lock);
+ if (lli->lli_opendir_key == NULL && lli->lli_opendir_pid == 0) {
+ LASSERT(lli->lli_sai == NULL);
+ lli->lli_opendir_key = fd;
+ lli->lli_opendir_pid = cfs_curproc_pid();
+ opendir_set = 1;
+ } else if (unlikely(lli->lli_opendir_pid == cfs_curproc_pid() &&
+ lli->lli_opendir_key != NULL)) {
+ /* Two cases for this:
+ * (1) The same process open such directory many times.
+ * (2) The old process opened the directory, and exited
+ * before its children processes. Then new process
+ * with the same pid opens such directory before the
+ * old process's children processes exit.
+ * reset stat ahead for such cases. */
+ spin_unlock(&lli->lli_lock);
+ CDEBUG(D_INFO, "Conflict statahead for %.*s "DFID
+ " reset it.\n", file->f_dentry->d_name.len,
+ file->f_dentry->d_name.name,
+ PFID(&lli->lli_fid));
+ ll_stop_statahead(inode, lli->lli_opendir_key);
+ goto again;
+ }
+ spin_unlock(&lli->lli_lock);
+ }
+
if (inode->i_sb->s_root == file->f_dentry) {
LUSTRE_FPRIVATE(file) = fd;
RETURN(0);
it = &oit;
}
+restart:
/* Let's see if we have file open on MDS already. */
if (it->it_flags & FMODE_WRITE) {
och_p = &lli->lli_mds_write_och;
och_p = &lli->lli_mds_read_och;
och_usecount = &lli->lli_open_fd_read_count;
}
-
+
down(&lli->lli_och_sem);
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
let's close it somehow. This will decref request. */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
+ up(&lli->lli_och_sem);
ll_file_data_put(fd);
- GOTO(out_och_free, rc);
- }
+ GOTO(out_openerr, rc);
+ }
ll_release_openhandle(file->f_dentry, it);
- lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
+ lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
LPROC_LL_OPEN);
}
(*och_usecount)++;
rc = ll_local_open(file, it, fd, NULL);
if (rc) {
+ (*och_usecount)--;
up(&lli->lli_och_sem);
ll_file_data_put(fd);
- RETURN(rc);
+ GOTO(out_openerr, rc);
}
} else {
LASSERT(*och_usecount == 0);
- OBD_ALLOC(*och_p, sizeof (struct obd_client_handle));
- if (!*och_p) {
- ll_file_data_put(fd);
- GOTO(out_och_free, rc = -ENOMEM);
- }
- (*och_usecount)++;
if (!it->d.lustre.it_disposition) {
- it->it_flags |= O_CHECK_STALE;
+ /* We cannot just request lock handle now, new ELC code
+ means that one of other OPEN locks for this file
+ could be cancelled, and since blocking ast handler
+ would attempt to grab och_sem as well, that would
+ result in a deadlock */
+ up(&lli->lli_och_sem);
+ it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
- it->it_flags &= ~O_CHECK_STALE;
+ it->it_create_mode &= ~M_CHECK_STALE;
if (rc) {
ll_file_data_put(fd);
- GOTO(out_och_free, rc);
+ GOTO(out_openerr, rc);
}
/* Got some error? Release the request */
md_set_lock_data(ll_i2sbi(inode)->ll_md_exp,
&it->d.lustre.it_lock_handle,
file->f_dentry->d_inode);
+ goto restart;
+ }
+ OBD_ALLOC(*och_p, sizeof (struct obd_client_handle));
+ if (!*och_p) {
+ ll_file_data_put(fd);
+ GOTO(out_och_free, rc = -ENOMEM);
}
+ (*och_usecount)++;
req = it->d.lustre.it_data;
/* md_intent_lock() didn't get a request ref if there was an
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
rc = ll_local_open(file, it, fd, *och_p);
if (rc) {
- up(&lli->lli_och_sem);
ll_file_data_put(fd);
GOTO(out_och_free, rc);
}
(*och_usecount)--;
}
up(&lli->lli_och_sem);
+out_openerr:
+ if (opendir_set != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
}
return rc;
}
-/* Fills the obdo with the attributes for the inode defined by lsm */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo)
+/* Fills the obdo with the attributes for the lsm */
+static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
+ struct obd_capa *capa, struct obdo *obdo)
{
struct ptlrpc_request_set *set;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
+ struct obd_info oinfo = { { { 0 } } };
+ int rc;
- struct obd_info oinfo = { { { 0 } } };
- int rc;
ENTRY;
LASSERT(lsm != NULL);
OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
OBD_MD_FLGROUP;
- oinfo.oi_capa = ll_mdscapa_get(inode);
+ oinfo.oi_capa = capa;
set = ptlrpc_prep_set();
if (set == NULL) {
CERROR("can't allocate ptlrpc set\n");
rc = -ENOMEM;
} else {
- rc = obd_getattr_async(ll_i2dtexp(inode), &oinfo, set);
+ rc = obd_getattr_async(exp, &oinfo, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
}
- capa_put(oinfo.oi_capa);
- if (rc)
- RETURN(rc);
-
- oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE);
-
- obdo_refresh_inode(inode, oinfo.oi_oa, oinfo.oi_oa->o_valid);
- CDEBUG(D_INODE, "objid "LPX64" size %Lu, blocks %lu, blksize %lu\n",
- lli->lli_smd->lsm_object_id, i_size_read(inode),
- inode->i_blocks, inode->i_blksize);
- RETURN(0);
-}
-
-static inline void ll_remove_suid(struct inode *inode)
-{
- unsigned int mode;
-
- /* set S_IGID if S_IXGRP is set, and always set S_ISUID */
- mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
-
- /* was any of the uid bits set? */
- mode &= inode->i_mode;
- if (mode && !capable(CAP_FSETID)) {
- inode->i_mode &= ~mode;
- // XXX careful here - we cannot change the size
- }
+ if (rc == 0)
+ oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME | OBD_MD_FLSIZE);
+ RETURN(rc);
}
-static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
+/* Fills the obdo with the attributes for the inode defined by lsm */
+int ll_inode_getattr(struct inode *inode, struct obdo *obdo)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct obd_export *exp = ll_i2dtexp(inode);
- struct {
- char name[16];
- struct ldlm_lock *lock;
- struct lov_stripe_md *lsm;
- } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
- __u32 stripe, vallen = sizeof(stripe);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *capa = ll_mdscapa_get(inode);
int rc;
ENTRY;
- if (lsm->lsm_stripe_count == 1)
- GOTO(check, stripe = 0);
-
- /* get our offset in the lov */
- rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe);
- if (rc != 0) {
- CERROR("obd_get_info: rc = %d\n", rc);
- RETURN(rc);
- }
- LASSERT(stripe < lsm->lsm_stripe_count);
-
-check:
- if (lsm->lsm_oinfo[stripe]->loi_id != lock->l_resource->lr_name.name[0]||
- lsm->lsm_oinfo[stripe]->loi_gr != lock->l_resource->lr_name.name[2]){
- LDLM_ERROR(lock, "resource doesn't match object "LPU64"/"LPU64,
- lsm->lsm_oinfo[stripe]->loi_id,
- lsm->lsm_oinfo[stripe]->loi_gr);
- RETURN(-ELDLM_NO_LOCK_DATA);
+ rc = ll_lsm_getattr(lli->lli_smd, ll_i2dtexp(inode), capa, obdo);
+ capa_put(capa);
+ if (rc == 0) {
+ obdo_refresh_inode(inode, obdo, obdo->o_valid);
+ CDEBUG(D_INODE,
+ "objid "LPX64" size %Lu, blocks %llu, blksize %lu\n",
+ lli->lli_smd->lsm_object_id, i_size_read(inode),
+ (unsigned long long)inode->i_blocks,
+ (unsigned long)ll_inode_blksize(inode));
}
-
- RETURN(stripe);
+ RETURN(rc);
}
-/* Flush the page cache for an extent as its canceled. When we're on an LOV,
- * we get a lock cancellation for each stripe, so we have to map the obd's
- * region back onto the stripes in the file that it held.
- *
- * No one can dirty the extent until we've finished our work and they can
- * enqueue another lock. The DLM protects us from ll_file_read/write here,
- * but other kernel actors could have pages locked.
- *
- * Called with the DLM lock held. */
-void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
- struct ldlm_lock *lock, __u32 stripe)
+int ll_merge_lvb(struct inode *inode)
{
- ldlm_policy_data_t tmpex;
- unsigned long start, end, count, skip, i, j;
- struct page *page;
- int rc, rc2, discard = lock->l_flags & LDLM_FL_DISCARD_DATA;
- struct lustre_handle lockh;
- struct address_space *mapping = inode->i_mapping;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ost_lvb lvb;
+ int rc;
ENTRY;
- tmpex = lock->l_policy_data;
- CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
- inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
- i_size_read(inode));
-
- /* our locks are page granular thanks to osc_enqueue, we invalidate the
- * whole page. */
- if ((tmpex.l_extent.start & ~CFS_PAGE_MASK) != 0 ||
- ((tmpex.l_extent.end + 1) & ~CFS_PAGE_MASK) != 0)
- LDLM_ERROR(lock, "lock not aligned on PAGE_SIZE %lu",
- CFS_PAGE_SIZE);
- LASSERT((tmpex.l_extent.start & ~CFS_PAGE_MASK) == 0);
- LASSERT(((tmpex.l_extent.end + 1) & ~CFS_PAGE_MASK) == 0);
-
- count = ~0;
- skip = 0;
- start = tmpex.l_extent.start >> CFS_PAGE_SHIFT;
- end = tmpex.l_extent.end >> CFS_PAGE_SHIFT;
- if (lsm->lsm_stripe_count > 1) {
- count = lsm->lsm_stripe_size >> CFS_PAGE_SHIFT;
- skip = (lsm->lsm_stripe_count - 1) * count;
- start += start/count * skip + stripe * count;
- if (end != ~0)
- end += end/count * skip + stripe * count;
- }
- if (end < tmpex.l_extent.end >> CFS_PAGE_SHIFT)
- end = ~0;
-
- i = i_size_read(inode) ? (__u64)(i_size_read(inode) - 1) >>
- CFS_PAGE_SHIFT : 0;
- if (i < end)
- end = i;
-
- CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
- "count: %lu skip: %lu end: %lu%s\n", start, start % count,
- count, skip, end, discard ? " (DISCARDING)" : "");
-
- /* walk through the vmas on the inode and tear down mmaped pages that
- * intersect with the lock. this stops immediately if there are no
- * mmap()ed regions of the file. This is not efficient at all and
- * should be short lived. We'll associate mmap()ed pages with the lock
- * and will be able to find them directly */
- for (i = start; i <= end; i += (j + skip)) {
- j = min(count - (i % count), end - i + 1);
- LASSERT(j > 0);
- LASSERT(mapping);
- if (ll_teardown_mmaps(mapping,
- (__u64)i << CFS_PAGE_SHIFT,
- ((__u64)(i+j) << CFS_PAGE_SHIFT) - 1) )
- break;
- }
- /* this is the simplistic implementation of page eviction at
- * cancelation. It is careful to get races with other page
- * lockers handled correctly. fixes from bug 20 will make it
- * more efficient by associating locks with pages and with
- * batching writeback under the lock explicitly. */
- for (i = start, j = start % count; i <= end;
- j++, i++, tmpex.l_extent.start += CFS_PAGE_SIZE) {
- if (j == count) {
- CDEBUG(D_PAGE, "skip index %lu to %lu\n", i, i + skip);
- i += skip;
- j = 0;
- if (i > end)
- break;
- }
- LASSERTF(tmpex.l_extent.start< lock->l_policy_data.l_extent.end,
- LPU64" >= "LPU64" start %lu i %lu end %lu\n",
- tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
- start, i, end);
-
- if (!mapping_has_pages(mapping)) {
- CDEBUG(D_INODE|D_PAGE, "nothing left\n");
- break;
- }
-
- cond_resched();
+ ll_inode_size_lock(inode, 1);
+ inode_init_lvb(inode, &lvb);
+ rc = obd_merge_lvb(sbi->ll_dt_exp, lli->lli_smd, &lvb, 0);
+ i_size_write(inode, lvb.lvb_size);
+ inode->i_blocks = lvb.lvb_blocks;
- page = find_get_page(mapping, i);
- if (page == NULL)
- continue;
- LL_CDEBUG_PAGE(D_PAGE, page, "lock page idx %lu ext "LPU64"\n",
- i, tmpex.l_extent.start);
- lock_page(page);
-
- /* page->mapping to check with racing against teardown */
- if (!discard && clear_page_dirty_for_io(page)) {
- rc = ll_call_writepage(inode, page);
- /* either waiting for io to complete or reacquiring
- * the lock that the failed writepage released */
- lock_page(page);
- wait_on_page_writeback(page);
- if (rc != 0) {
- CERROR("writepage inode %lu(%p) of page %p "
- "failed: %d\n", inode->i_ino, inode,
- page, rc);
- if (rc == -ENOSPC)
- set_bit(AS_ENOSPC, &mapping->flags);
- else
- set_bit(AS_EIO, &mapping->flags);
- }
- }
+ LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
+ LTIME_S(inode->i_atime) = lvb.lvb_atime;
+ LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
+ ll_inode_size_unlock(inode, 1);
- tmpex.l_extent.end = tmpex.l_extent.start + CFS_PAGE_SIZE - 1;
- /* check to see if another DLM lock covers this page b=2765 */
- rc2 = ldlm_lock_match(lock->l_resource->lr_namespace,
- LDLM_FL_BLOCK_GRANTED|LDLM_FL_CBPENDING |
- LDLM_FL_TEST_LOCK,
- &lock->l_resource->lr_name, LDLM_EXTENT,
- &tmpex, LCK_PR | LCK_PW, &lockh);
-
- if (rc2 <= 0 && page->mapping != NULL) {
- struct ll_async_page *llap = llap_cast_private(page);
- /* checking again to account for writeback's
- * lock_page() */
- LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
- if (llap)
- ll_ra_accounting(llap, mapping);
- ll_truncate_complete_page(page);
- }
- unlock_page(page);
- page_cache_release(page);
- }
- LASSERTF(tmpex.l_extent.start <=
- (lock->l_policy_data.l_extent.end == ~0ULL ? ~0ULL :
- lock->l_policy_data.l_extent.end + 1),
- "loop too long "LPU64" > "LPU64" start %lu i %lu end %lu\n",
- tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
- start, i, end);
- EXIT;
+ RETURN(rc);
}
-static int ll_extent_lock_callback(struct ldlm_lock *lock,
- struct ldlm_lock_desc *new, void *data,
- int flag)
+int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
+ lstat_t *st)
{
- struct lustre_handle lockh = { 0 };
+ struct obdo obdo = { 0 };
int rc;
- ENTRY;
- if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
- LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
- LBUG();
- }
-
- switch (flag) {
- case LDLM_CB_BLOCKING:
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- if (rc != ELDLM_OK)
- CERROR("ldlm_cli_cancel failed: %d\n", rc);
- break;
- case LDLM_CB_CANCELING: {
- struct inode *inode;
- struct ll_inode_info *lli;
- struct lov_stripe_md *lsm;
- int stripe;
- __u64 kms;
-
- /* This lock wasn't granted, don't try to evict pages */
- if (lock->l_req_mode != lock->l_granted_mode)
- RETURN(0);
-
- inode = ll_inode_from_lock(lock);
- if (inode == NULL)
- RETURN(0);
- lli = ll_i2info(inode);
- if (lli == NULL)
- goto iput;
- if (lli->lli_smd == NULL)
- goto iput;
- lsm = lli->lli_smd;
-
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- goto iput;
-
- ll_pgcache_remove_extent(inode, lsm, lock, stripe);
-
- lov_stripe_lock(lsm);
- lock_res_and_lock(lock);
- kms = ldlm_extent_shift_kms(lock,
- lsm->lsm_oinfo[stripe]->loi_kms);
-
- if (lsm->lsm_oinfo[stripe]->loi_kms != kms)
- LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
- lsm->lsm_oinfo[stripe]->loi_kms, kms);
- lsm->lsm_oinfo[stripe]->loi_kms = kms;
- unlock_res_and_lock(lock);
- lov_stripe_unlock(lsm);
- iput:
- iput(inode);
- break;
- }
- default:
- LBUG();
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, &obdo);
+ if (rc == 0) {
+ st->st_size = obdo.o_size;
+ st->st_blocks = obdo.o_blocks;
+ st->st_mtime = obdo.o_mtime;
+ st->st_atime = obdo.o_atime;
+ st->st_ctime = obdo.o_ctime;
}
-
- RETURN(0);
+ return rc;
}
-#if 0
-int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+void ll_io_init(struct cl_io *io, const struct file *file, int write)
{
- /* XXX ALLOCATE - 160 bytes */
- struct inode *inode = ll_inode_from_lock(lock);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lustre_handle lockh = { 0 };
- struct ost_lvb *lvb;
- int stripe;
- ENTRY;
-
- if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV)) {
- LBUG(); /* not expecting any blocked async locks yet */
- LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
- "lock, returning");
- ldlm_lock_dump(D_OTHER, lock, 0);
- ldlm_reprocess_all(lock->l_resource);
- RETURN(0);
- }
-
- LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
-
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- goto iput;
-
- if (lock->l_lvb_len) {
- struct lov_stripe_md *lsm = lli->lli_smd;
- __u64 kms;
- lvb = lock->l_lvb_data;
- lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- lock_res_and_lock(lock);
- ll_inode_size_lock(inode, 1);
- kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
- kms = ldlm_extent_shift_kms(NULL, kms);
- if (lsm->lsm_oinfo[stripe].loi_kms != kms)
- LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
- lsm->lsm_oinfo[stripe].loi_kms, kms);
- lsm->lsm_oinfo[stripe].loi_kms = kms;
- ll_inode_size_unlock(inode, 1);
- unlock_res_and_lock(lock);
+ LASSERT(fd != NULL);
+ memset(io, 0, sizeof *io);
+ io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
+ if (write)
+ io->u.ci_wr.wr_append = file->f_flags & O_APPEND;
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_lockreq = CILR_MAYBE;
+ if (fd->fd_flags & LL_FILE_IGNORE_LOCK ||
+ sbi->ll_flags & LL_SBI_NOLCK) {
+ io->ci_lockreq = CILR_NEVER;
+ io->ci_no_srvlock = 1;
+ } else if (file->f_flags & O_APPEND) {
+ io->ci_lockreq = CILR_MANDATORY;
}
-
-iput:
- iput(inode);
- wake_up(&lock->l_waitq);
-
- ldlm_lock2handle(lock, &lockh);
- ldlm_lock_decref(&lockh, LCK_PR);
- RETURN(0);
}
-#endif
-static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
+static ssize_t ll_file_io_generic(const struct lu_env *env,
+ struct ccc_io_args *args, struct file *file,
+ enum cl_io_type iot, loff_t *ppos, size_t count)
{
- struct ptlrpc_request *req = reqp;
- struct inode *inode = ll_inode_from_lock(lock);
- struct ll_inode_info *lli;
- struct lov_stripe_md *lsm;
- struct ost_lvb *lvb;
- int rc, stripe;
- int size[2] = { sizeof(struct ptlrpc_body), sizeof(*lvb) };
+ struct cl_io *io;
+ ssize_t result;
ENTRY;
- if (inode == NULL)
- GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
- lli = ll_i2info(inode);
- if (lli == NULL)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
- lsm = lli->lli_smd;
- if (lsm == NULL)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+ io = &ccc_env_info(env)->cti_io;
+ ll_io_init(io, file, iot == CIT_WRITE);
- /* First, find out which stripe index this lock corresponds to. */
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+ if (iot == CIT_READ)
+ io->u.ci_rd.rd_is_sendfile = args->cia_is_sendfile;
- rc = lustre_pack_reply(req, 2, size, NULL);
- if (rc)
- GOTO(iput, rc);
+ if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
+ if (cl_io_is_sendfile(io)) {
+ vio->u.read.cui_actor = args->cia_actor;
+ vio->u.read.cui_target = args->cia_target;
+ } else {
+ cio->cui_iov = args->cia_iov;
+ cio->cui_nrsegs = args->cia_nrsegs;
+#ifndef HAVE_FILE_WRITEV
+ cio->cui_iocb = args->cia_iocb;
+#endif
+ }
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ result = cl_io_loop(env, io);
+ } else
+ /* cl_io_rw_init() handled IO */
+ result = io->ci_result;
+ if (io->ci_nob > 0) {
+ result = io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos;
+ }
+ cl_io_fini(env, io);
+ RETURN(result);
+}
- lvb = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*lvb));
- lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe]->loi_kms;
- lvb->lvb_mtime = LTIME_S(inode->i_mtime);
- lvb->lvb_atime = LTIME_S(inode->i_atime);
- lvb->lvb_ctime = LTIME_S(inode->i_ctime);
- LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64
- " atime "LPU64", mtime "LPU64", ctime "LPU64,
- i_size_read(inode), stripe, lvb->lvb_size, lvb->lvb_mtime,
- lvb->lvb_atime, lvb->lvb_ctime);
- iput:
- iput(inode);
+/*
+ * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
+ */
+static int ll_file_get_iov_count(const struct iovec *iov,
+ unsigned long *nr_segs, size_t *count)
+{
+ size_t cnt = 0;
+ unsigned long seg;
- out:
- /* These errors are normal races, so we don't want to fill the console
- * with messages by calling ptlrpc_error() */
- if (rc == -ELDLM_NO_LOCK_DATA)
- lustre_pack_reply(req, 1, NULL, NULL);
+ for (seg = 0; seg < *nr_segs; seg++) {
+ const struct iovec *iv = &iov[seg];
- req->rq_status = rc;
- return rc;
+ /*
+ * If any segment has a negative length, or the cumulative
+ * length ever wraps negative then return -EINVAL.
+ */
+ cnt += iv->iov_len;
+ if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
+ return -EINVAL;
+ if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
+ continue;
+ if (seg == 0)
+ return -EFAULT;
+ *nr_segs = seg;
+ cnt -= iv->iov_len; /* This segment is no good */
+ break;
+ }
+ *count = cnt;
+ return 0;
}
-static void ll_merge_lvb(struct inode *inode)
+#ifdef HAVE_FILE_READV
+static ssize_t ll_file_readv(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ost_lvb lvb;
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- ll_inode_size_lock(inode, 1);
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(sbi->ll_dt_exp, lli->lli_smd, &lvb, 0);
- i_size_write(inode, lvb.lvb_size);
- inode->i_blocks = lvb.lvb_blocks;
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
- ll_inode_size_unlock(inode, 1);
- EXIT;
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_is_sendfile = 0;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ result = ll_file_io_generic(env, args, file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-int ll_local_size(struct inode *inode)
+static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
{
- ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lustre_handle lockh = { 0 };
- int flags = 0;
- int rc;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (lli->lli_smd->lsm_stripe_count == 0)
- RETURN(0);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- rc = obd_match(sbi->ll_dt_exp, lli->lli_smd, LDLM_EXTENT,
- &policy, LCK_PR, &flags, inode, &lockh);
- if (rc < 0)
- RETURN(rc);
- else if (rc == 0)
- RETURN(-ENODATA);
-
- ll_merge_lvb(inode);
- obd_cancel(sbi->ll_dt_exp, lli->lli_smd, LCK_PR, &lockh);
- RETURN(0);
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ result = ll_file_readv(file, local_iov, 1, ppos);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
- lstat_t *st)
+#else
+static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
- struct lustre_handle lockh = { 0 };
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- struct ost_lvb lvb;
- int rc;
-
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = LCK_PR;
- einfo.ei_cb_bl = ll_extent_lock_callback;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = NULL;
-
- oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
- oinfo.oi_lockh = &lockh;
- oinfo.oi_md = lsm;
- oinfo.oi_flags = LDLM_FL_HAS_INTENT;
-
- rc = obd_enqueue_rqset(sbi->ll_dt_exp, &oinfo, &einfo);
- if (rc == -ENOENT)
- RETURN(rc);
- if (rc != 0) {
- CERROR("obd_enqueue returned rc %d, "
- "returning -EIO\n", rc);
- RETURN(rc > 0 ? -EIO : rc);
- }
-
- lov_stripe_lock(lsm);
- memset(&lvb, 0, sizeof(lvb));
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 0);
- st->st_size = lvb.lvb_size;
- st->st_blocks = lvb.lvb_blocks;
- st->st_mtime = lvb.lvb_mtime;
- st->st_atime = lvb.lvb_atime;
- st->st_ctime = lvb.lvb_ctime;
- lov_stripe_unlock(lsm);
-
- RETURN(rc);
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_is_sendfile = 0;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ args->cia_iocb = iocb;
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/* NB: obd_merge_lvb will prefer locally cached writes if they extend the
- * file (because it prefers KMS over RSS when larger) */
-int ll_glimpse_size(struct inode *inode, int ast_flags)
+static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lustre_handle lockh = { 0 };
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- int rc;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- RETURN(0);
-
- CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
-
- if (!lli->lli_smd) {
- CDEBUG(D_DLMTRACE, "No objects for inode %lu\n", inode->i_ino);
- RETURN(0);
- }
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- /* NOTE: this looks like DLM lock request, but it may not be one. Due
- * to LDLM_FL_HAS_INTENT flag, this is glimpse request, that
- * won't revoke any conflicting DLM locks held. Instead,
- * ll_glimpse_callback() will be called on each client
- * holding a DLM lock against this file, and resulting size
- * will be returned for each stripe. DLM lock on [0, EOF] is
- * acquired only if there were no conflicting locks. */
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = LCK_PR;
- einfo.ei_cb_bl = ll_extent_lock_callback;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = inode;
-
- oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
- oinfo.oi_lockh = &lockh;
- oinfo.oi_md = lli->lli_smd;
- oinfo.oi_flags = ast_flags | LDLM_FL_HAS_INTENT;
-
- rc = obd_enqueue_rqset(sbi->ll_dt_exp, &oinfo, &einfo);
- if (rc == -ENOENT)
- RETURN(rc);
- if (rc != 0) {
- CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
- RETURN(rc > 0 ? -EIO : rc);
- }
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_left = count;
- ll_merge_lvb(inode);
+ result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
- CDEBUG(D_DLMTRACE, "glimpse: size: %llu, blocks: %lu\n",
- i_size_read(inode), inode->i_blocks);
-
- RETURN(rc);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
+#endif
-int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm, int mode,
- ldlm_policy_data_t *policy, struct lustre_handle *lockh,
- int ast_flags)
+/*
+ * Write to a file (through the page cache).
+ */
+#ifdef HAVE_FILE_WRITEV
+static ssize_t ll_file_writev(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ost_lvb lvb;
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- int rc;
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- LASSERT(!lustre_handle_is_used(lockh));
- LASSERT(lsm != NULL);
-
- /* don't drop the mmapped file to LRU */
- if (mapping_mapped(inode->i_mapping))
- ast_flags |= LDLM_FL_NO_LRU;
-
- /* XXX phil: can we do this? won't it screw the file size up? */
- if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
- (sbi->ll_flags & LL_SBI_NOLCK))
- RETURN(0);
-
- CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
- inode->i_ino, policy->l_extent.start, policy->l_extent.end);
-
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = mode;
- einfo.ei_cb_bl = ll_extent_lock_callback;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = inode;
-
- oinfo.oi_policy = *policy;
- oinfo.oi_lockh = lockh;
- oinfo.oi_md = lsm;
- oinfo.oi_flags = ast_flags;
-
- rc = obd_enqueue(sbi->ll_dt_exp, &oinfo, &einfo, NULL);
- *policy = oinfo.oi_policy;
- if (rc > 0)
- rc = -EIO;
-
- ll_inode_size_lock(inode, 1);
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 1);
-
- if (policy->l_extent.start == 0 &&
- policy->l_extent.end == OBD_OBJECT_EOF) {
- /* vmtruncate()->ll_truncate() first sets the i_size and then
- * the kms under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size from the kms before the truncating path has
- * updated the kms. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order. */
- i_size_write(inode, lvb.lvb_size);
- CDEBUG(D_INODE, "inode=%lu, updating i_size %llu\n",
- inode->i_ino, i_size_read(inode));
- }
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
- if (rc == 0) {
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
- }
- ll_inode_size_unlock(inode, 1);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- RETURN(rc);
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ result = ll_file_io_generic(env, args, file, CIT_WRITE, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm, int mode,
- struct lustre_handle *lockh)
+static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
+ loff_t *ppos)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ ssize_t result;
+ int refcheck;
ENTRY;
- /* XXX phil: can we do this? won't it screw the file size up? */
- if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
- (sbi->ll_flags & LL_SBI_NOLCK))
- RETURN(0);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- rc = obd_cancel(sbi->ll_dt_exp, lsm, mode, lockh);
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
- RETURN(rc);
+ result = ll_file_writev(file, local_iov, 1, ppos);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
- loff_t *ppos)
+#else /* AIO stuff */
+static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_lock_tree tree;
- struct ll_lock_tree_node *node;
- struct ost_lvb lvb;
- struct ll_ra_read bead;
- int rc, ra = 0;
- loff_t end;
- ssize_t retval, chunk, sum = 0;
-
- __u64 kms;
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
- /* "If nbyte is 0, read() will return 0 and have no other results."
- * -- Single Unix Spec */
- if (count == 0)
- RETURN(0);
-
- ll_stats_ops_tally(sbi, LPROC_LL_READ_BYTES, count);
-
- if (!lsm) {
- /* Read on file with no objects should return zero-filled
- * buffers up to file size (we can get non-zero sizes with
- * mknod + truncate, then opening file for read. This is a
- * common pattern in NFS case, it seems). Bug 6243 */
- int notzeroed;
- /* Since there are no objects on OSTs, we have nothing to get
- * lock on and so we are forced to access inode->i_size
- * unguarded */
-
- /* Read beyond end of file */
- if (*ppos >= i_size_read(inode))
- RETURN(0);
-
- if (count > i_size_read(inode) - *ppos)
- count = i_size_read(inode) - *ppos;
- /* Make sure to correctly adjust the file pos pointer for
- * EFAULT case */
- notzeroed = clear_user(buf, count);
- count -= notzeroed;
- *ppos += count;
- if (!count)
- RETURN(-EFAULT);
- RETURN(count);
- }
-
-repeat:
- if (sbi->ll_max_rw_chunk != 0) {
- /* first, let's know the end of the current stripe */
- end = *ppos;
- obd_extent_calc(sbi->ll_dt_exp, lsm, OBD_CALC_STRIPE_END,
- (obd_off *)&end);
-
- /* correct, the end is beyond the request */
- if (end > *ppos + count - 1)
- end = *ppos + count - 1;
-
- /* and chunk shouldn't be too large even if striping is wide */
- if (end - *ppos > sbi->ll_max_rw_chunk)
- end = *ppos + sbi->ll_max_rw_chunk - 1;
- } else {
- end = *ppos + count - 1;
- }
-
- node = ll_node_from_inode(inode, *ppos, end, LCK_PR);
- if (IS_ERR(node)){
- GOTO(out, retval = PTR_ERR(node));
- }
-
- tree.lt_fd = LUSTRE_FPRIVATE(file);
- rc = ll_tree_lock(&tree, node, buf, count,
- file->f_flags & O_NONBLOCK ? LDLM_FL_BLOCK_NOWAIT :0);
- if (rc != 0)
- GOTO(out, retval = rc);
- ll_inode_size_lock(inode, 1);
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being read and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock acquired by
- * ll_tree_lock() above, because to change class, other client has to
- * take DLM lock conflicting with our lock. Also, any updates to
- * ->i_size by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 1);
- kms = lvb.lvb_size;
- if (*ppos + count - 1 > kms) {
- /* A glimpse is necessary to determine whether we return a
- * short read (B) or some zeroes at the end of the buffer (C) */
- ll_inode_size_unlock(inode, 1);
- retval = ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
- if (retval) {
- ll_tree_unlock(&tree);
- goto out;
- }
- } else {
- /* region is within kms and, hence, within real file size (A).
- * We need to increase i_size to cover the read region so that
- * generic_file_read() will do its job, but that doesn't mean
- * the kms size is _correct_, it is only the _minimum_ size.
- * If someone does a stat they will get the correct size which
- * will always be >= the kms value here. b=11081 */
- if (i_size_read(inode) < kms)
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode, 1);
- }
-
- chunk = end - *ppos + 1;
- CDEBUG(D_INODE, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
- inode->i_ino, chunk, *ppos, i_size_read(inode));
-
- /* turn off the kernel's read-ahead */
- file->f_ra.ra_pages = 0;
-
- /* initialize read-ahead window once per syscall */
- if (ra == 0) {
- ra = 1;
- bead.lrr_start = *ppos >> CFS_PAGE_SHIFT;
- bead.lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- ll_ra_read_in(file, &bead);
- }
-
- /* BUG: 5972 */
- file_accessed(file);
- retval = generic_file_read(file, buf, chunk, ppos);
- ll_rw_stats_tally(sbi, current->pid, file, count, 0);
-
- ll_tree_unlock(&tree);
-
- if (retval > 0) {
- buf += retval;
- count -= retval;
- sum += retval;
- if (retval == chunk && count > 0)
- goto repeat;
- }
-
- out:
- if (ra != 0)
- ll_ra_read_ex(file, &bead);
- retval = (sum > 0) ? sum : retval;
- RETURN(retval);
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ args->cia_iocb = iocb;
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/*
- * Write to a file (through the page cache).
- */
static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- struct ll_lock_tree tree;
- struct ll_lock_tree_node *node;
- loff_t maxbytes = ll_file_maxbytes(inode);
- loff_t lock_start, lock_end, end;
- ssize_t retval, chunk, sum = 0;
- int rc;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
-
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
-
- /* POSIX, but surprised the VFS doesn't check this already */
- if (count == 0)
- RETURN(0);
-
- /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
- * called on the file, don't fail the below assertion (bug 2388). */
- if (file->f_flags & O_LOV_DELAY_CREATE &&
- ll_i2info(inode)->lli_smd == NULL)
- RETURN(-EBADF);
-
- LASSERT(ll_i2info(inode)->lli_smd != NULL);
-
- down(&ll_i2info(inode)->lli_write_sem);
-
-repeat:
- chunk = 0; /* just to fix gcc's warning */
- end = *ppos + count - 1;
-
- if (file->f_flags & O_APPEND) {
- lock_start = 0;
- lock_end = OBD_OBJECT_EOF;
- } else if (sbi->ll_max_rw_chunk != 0) {
- /* first, let's know the end of the current stripe */
- end = *ppos;
- obd_extent_calc(sbi->ll_dt_exp, lsm, OBD_CALC_STRIPE_END,
- (obd_off *)&end);
-
- /* correct, the end is beyond the request */
- if (end > *ppos + count - 1)
- end = *ppos + count - 1;
-
- /* and chunk shouldn't be too large even if striping is wide */
- if (end - *ppos > sbi->ll_max_rw_chunk)
- end = *ppos + sbi->ll_max_rw_chunk - 1;
- lock_start = *ppos;
- lock_end = end;
- } else {
- lock_start = *ppos;
- lock_end = *ppos + count - 1;
- }
- node = ll_node_from_inode(inode, lock_start, lock_end, LCK_PW);
-
- if (IS_ERR(node))
- GOTO(out, retval = PTR_ERR(node));
-
- tree.lt_fd = LUSTRE_FPRIVATE(file);
- rc = ll_tree_lock(&tree, node, buf, count,
- file->f_flags & O_NONBLOCK ? LDLM_FL_BLOCK_NOWAIT :0);
- if (rc != 0)
- GOTO(out, retval = rc);
-
- /* This is ok, g_f_w will overwrite this under i_sem if it races
- * with a local truncate, it just makes our maxbyte checking easier.
- * The i_size value gets updated in ll_extent_lock() as a consequence
- * of the [0,EOF] extent lock we requested above. */
- if (file->f_flags & O_APPEND) {
- *ppos = i_size_read(inode);
- end = *ppos + count - 1;
- }
-
- if (*ppos >= maxbytes) {
- send_sig(SIGXFSZ, current, 0);
- GOTO(out_unlock, retval = -EFBIG);
- }
- if (*ppos + count > maxbytes)
- count = maxbytes - *ppos;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- /* generic_file_write handles O_APPEND after getting i_mutex */
- chunk = end - *ppos + 1;
- CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
- inode->i_ino, chunk, *ppos);
- retval = generic_file_write(file, buf, chunk, ppos);
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid, file, count, 1);
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_left = count;
-out_unlock:
- ll_tree_unlock(&tree);
+ result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
-out:
- if (retval > 0) {
- buf += retval;
- count -= retval;
- sum += retval;
- if (retval == chunk && count > 0)
- goto repeat;
- }
-
- up(&ll_i2info(inode)->lli_write_sem);
-
- retval = (sum > 0) ? sum : retval;
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
- retval > 0 ? retval : 0);
- RETURN(retval);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
+#endif
+
/*
* Send file content (through pagecache) somewhere with helper
static ssize_t ll_file_sendfile(struct file *in_file, loff_t *ppos,size_t count,
read_actor_t actor, void *target)
{
- struct inode *inode = in_file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct ll_lock_tree tree;
- struct ll_lock_tree_node *node;
- struct ost_lvb lvb;
- struct ll_ra_read bead;
- int rc;
- ssize_t retval;
- __u64 kms;
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
-
- /* "If nbyte is 0, read() will return 0 and have no other results."
- * -- Single Unix Spec */
- if (count == 0)
- RETURN(0);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_READ_BYTES, count);
- /* turn off the kernel's read-ahead */
- in_file->f_ra.ra_pages = 0;
-
- /* File with no objects, nothing to lock */
- if (!lsm)
- RETURN(generic_file_sendfile(in_file, ppos, count, actor, target));
-
- node = ll_node_from_inode(inode, *ppos, *ppos + count - 1, LCK_PR);
- if (IS_ERR(node))
- RETURN(PTR_ERR(node));
-
- tree.lt_fd = LUSTRE_FPRIVATE(in_file);
- rc = ll_tree_lock(&tree, node, NULL, count,
- in_file->f_flags & O_NONBLOCK?LDLM_FL_BLOCK_NOWAIT:0);
- if (rc != 0)
- RETURN(rc);
- ll_inode_size_lock(inode, 1);
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being read and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock acquired by
- * ll_tree_lock() above, because to change class, other client has to
- * take DLM lock conflicting with our lock. Also, any updates to
- * ->i_size by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(ll_i2sbi(inode)->ll_dt_exp, lsm, &lvb, 1);
- kms = lvb.lvb_size;
- if (*ppos + count - 1 > kms) {
- /* A glimpse is necessary to determine whether we return a
- * short read (B) or some zeroes at the end of the buffer (C) */
- ll_inode_size_unlock(inode, 1);
- retval = ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
- if (retval)
- goto out;
- } else {
- /* region is within kms and, hence, within real file size (A) */
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode, 1);
- }
-
- CDEBUG(D_INFO, "Send ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
- inode->i_ino, count, *ppos, i_size_read(inode));
-
- bead.lrr_start = *ppos >> CFS_PAGE_SHIFT;
- bead.lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- ll_ra_read_in(in_file, &bead);
- /* BUG: 5972 */
- file_accessed(in_file);
- retval = generic_file_sendfile(in_file, ppos, count, actor, target);
- ll_ra_read_ex(in_file, &bead);
-
- out:
- ll_tree_unlock(&tree);
- RETURN(retval);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_is_sendfile = 1;
+ args->cia_target = target;
+ args->cia_actor = actor;
+ result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct ll_inode_info *lli = ll_i2info(inode);
struct obd_export *exp = ll_i2dtexp(inode);
struct ll_recreate_obj ucreatp;
struct obd_trans_info oti = { 0 };
struct lov_stripe_md *lsm, *lsm2;
ENTRY;
- if (!capable (CAP_SYS_ADMIN))
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
- rc = copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
- sizeof(struct ll_recreate_obj));
- if (rc) {
+ if (copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
+ sizeof(struct ll_recreate_obj)))
RETURN(-EFAULT);
- }
+
OBDO_ALLOC(oa);
if (oa == NULL)
RETURN(-ENOMEM);
- down(&lli->lli_size_sem);
- lsm = lli->lli_smd;
+ ll_inode_size_lock(inode, 0);
+ lsm = ll_i2info(inode)->lli_smd;
if (lsm == NULL)
GOTO(out, rc = -ENOENT);
lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
OBD_MD_FLMTIME | OBD_MD_FLCTIME);
- oti.oti_objid = NULL;
memcpy(lsm2, lsm, lsm_size);
rc = obd_create(exp, oa, &lsm2, &oti);
OBD_FREE(lsm2, lsm_size);
GOTO(out, rc);
out:
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
OBDO_FREE(oa);
return rc;
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
int flags, struct lov_user_md *lum, int lum_size)
{
- struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
int rc = 0;
ENTRY;
- down(&lli->lli_size_sem);
- lsm = lli->lli_smd;
+ ll_inode_size_lock(inode, 0);
+ lsm = ll_i2info(inode)->lli_smd;
if (lsm) {
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
RETURN(-EEXIST);
ll_release_openhandle(file->f_dentry, &oit);
out:
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
ll_intent_release(&oit);
RETURN(rc);
out_req_free:
goto out;
}
-int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
- struct lov_mds_md **lmmp, int *lmm_size,
+int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
+ struct lov_mds_md **lmmp, int *lmm_size,
struct ptlrpc_request **request)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
oc = ll_mdscapa_get(inode);
rc = md_getattr_name(sbi->ll_md_exp, ll_inode2fid(inode),
oc, filename, strlen(filename) + 1,
- OBD_MD_FLEASIZE | OBD_MD_FLDIREA, lmmsize, &req);
+ OBD_MD_FLEASIZE | OBD_MD_FLDIREA, lmmsize,
+ ll_i2suppgid(inode), &req);
capa_put(oc);
if (rc < 0) {
CDEBUG(D_INFO, "md_getattr_name failed "
GOTO(out, rc);
}
- body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
LASSERT(body != NULL); /* checked by mdc_getattr_name */
- /* swabbed by mdc_getattr_name */
- LASSERT(lustre_rep_swabbed(req, REPLY_REC_OFF));
lmmsize = body->eadatasize;
GOTO(out, rc = -ENODATA);
}
- lmm = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF + 1, lmmsize);
+ lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
LASSERT(lmm != NULL);
- LASSERT(lustre_rep_swabbed(req, REPLY_REC_OFF + 1));
+
+ if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
+ (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3)) &&
+ (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_JOIN))) {
+ GOTO(out, rc = -EPROTO);
+ }
/*
* This is coming from the MDS, so is probably in
* little endian. We convert it to host endian before
* passing it to userspace.
*/
- if (lmm->lmm_magic == __swab32(LOV_MAGIC)) {
- lustre_swab_lov_user_md((struct lov_user_md *)lmm);
- lustre_swab_lov_user_md_objects((struct lov_user_md *)lmm);
- } else if (lmm->lmm_magic == __swab32(LOV_MAGIC_JOIN)) {
- lustre_swab_lov_user_md_join((struct lov_user_md_join *)lmm);
+ if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC)) {
+ /* if this function is called for a directory, we should
+ * avoid swabbing non-existent lsm objects */
+ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
+ lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
+ if (S_ISREG(body->mode))
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v1 *)lmm)->lmm_objects,
+ ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
+ } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
+ lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
+ if (S_ISREG(body->mode))
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v3 *)lmm)->lmm_objects,
+ ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
+ } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_JOIN)) {
+ lustre_swab_lov_user_md_join((struct lov_user_md_join *)lmm);
+ }
}
if (lmm->lmm_magic == LOV_MAGIC_JOIN) {
int rc;
ENTRY;
- if (!capable (CAP_SYS_ADMIN))
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
OBD_ALLOC(lump, lum_size);
if (lump == NULL) {
RETURN(-ENOMEM);
}
- rc = copy_from_user(lump, (struct lov_user_md *)arg, lum_size);
- if (rc) {
+ if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
OBD_FREE(lump, lum_size);
RETURN(-EFAULT);
}
static int ll_lov_setstripe(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
+ struct lov_user_md_v3 lumv3;
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+ struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
+ struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
+ int lum_size;
int rc;
int flags = FMODE_WRITE;
ENTRY;
- /* Bug 1152: copy properly when this is no longer true */
- LASSERT(sizeof(lum) == sizeof(*lump));
- LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
- rc = copy_from_user(&lum, lump, sizeof(lum));
- if (rc)
+ /* first try with v1 which is smaller than v3 */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(lumv1, lumv1p, lum_size))
RETURN(-EFAULT);
- rc = ll_lov_setstripe_ea_info(inode, file, flags, &lum, sizeof(lum));
+ if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
+ lum_size = sizeof(struct lov_user_md_v3);
+ if (copy_from_user(&lumv3, lumv3p, lum_size))
+ RETURN(-EFAULT);
+ }
+
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size);
if (rc == 0) {
- put_user(0, &lump->lmm_stripe_count);
+ put_user(0, &lumv1p->lmm_stripe_count);
rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
- 0, ll_i2info(inode)->lli_smd, lump);
+ 0, ll_i2info(inode)->lli_smd,
+ (void *)arg);
}
RETURN(rc);
}
(void *)arg);
}
-static int ll_get_grouplock(struct inode *inode, struct file *file,
- unsigned long arg)
+int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- ldlm_policy_data_t policy = { .l_extent = { .start = 0,
- .end = OBD_OBJECT_EOF}};
- struct lustre_handle lockh = { 0 };
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int flags = 0, rc;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
+ int rc;
ENTRY;
+ spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ CERROR("group lock already existed with gid %lu\n",
+ fd->fd_grouplock.cg_gid);
+ spin_unlock(&lli->lli_lock);
RETURN(-EINVAL);
}
+ LASSERT(fd->fd_grouplock.cg_lock == NULL);
+ spin_unlock(&lli->lli_lock);
- policy.l_extent.gid = arg;
- if (file->f_flags & O_NONBLOCK)
- flags = LDLM_FL_BLOCK_NOWAIT;
-
- rc = ll_extent_lock(fd, inode, lsm, LCK_GROUP, &policy, &lockh, flags);
+ rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
RETURN(rc);
- fd->fd_flags |= LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK;
- fd->fd_gid = arg;
- memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
+ spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ spin_unlock(&lli->lli_lock);
+ CERROR("another thread just won the race\n");
+ cl_put_grouplock(&grouplock);
+ RETURN(-EINVAL);
+ }
+
+ fd->fd_flags |= (LL_FILE_GROUP_LOCKED | LL_FILE_IGNORE_LOCK);
+ fd->fd_grouplock = grouplock;
+ spin_unlock(&lli->lli_lock);
+ CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
RETURN(0);
}
-static int ll_put_grouplock(struct inode *inode, struct file *file,
- unsigned long arg)
+int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int rc;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
ENTRY;
+ spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- /* Ugh, it's already unlocked. */
+ spin_unlock(&lli->lli_lock);
+ CERROR("no group lock held\n");
RETURN(-EINVAL);
}
+ LASSERT(fd->fd_grouplock.cg_lock != NULL);
- if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
+ if (fd->fd_grouplock.cg_gid != arg) {
+ CERROR("group lock %lu doesn't match current id %lu\n",
+ arg, fd->fd_grouplock.cg_gid);
+ spin_unlock(&lli->lli_lock);
RETURN(-EINVAL);
+ }
- fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
-
- rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
- if (rc)
- RETURN(rc);
-
- fd->fd_gid = 0;
- memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
+ grouplock = fd->fd_grouplock;
+ fd->fd_grouplock.cg_env = NULL;
+ fd->fd_grouplock.cg_lock = NULL;
+ fd->fd_grouplock.cg_gid = 0;
+ fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED | LL_FILE_IGNORE_LOCK);
+ spin_unlock(&lli->lli_lock);
+ cl_put_grouplock(&grouplock);
+ CDEBUG(D_INFO, "group lock %lu released\n", arg);
RETURN(0);
}
+#if LUSTRE_FIX >= 50
static int join_sanity_check(struct inode *head, struct inode *tail)
{
ENTRY;
{
struct dentry *tail_dentry = tail_filp->f_dentry;
struct lookup_intent oit = {.it_op = IT_OPEN,
- .it_flags = head_filp->f_flags|O_JOIN_FILE};
+ .it_flags = head_filp->f_flags,
+ .it_create_mode = M_JOIN_FILE};
struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_CW,
- ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL };
+ ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
struct lustre_handle lockh;
struct md_op_data *op_data;
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- rc = md_enqueue(ll_i2mdexp(head_inode), &einfo, &oit,
- op_data, &lockh, NULL, 0, 0);
+ rc = md_enqueue(ll_i2mdexp(head_inode), &einfo, &oit,
+ op_data, &lockh, NULL, 0, NULL, 0);
ll_finish_md_op_data(op_data);
if (rc < 0)
ldlm_lock_decref(&lockh, oit.d.lustre.it_lock_mode);
oit.d.lustre.it_lock_mode = 0;
}
+ ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
+ it_clear_disposition(&oit, DISP_ENQ_COMPLETE);
ll_release_openhandle(head_filp->f_dentry, &oit);
out:
ll_intent_release(&oit);
struct file *tail_filp, *first_filp, *second_filp;
struct ll_lock_tree first_tree, second_tree;
struct ll_lock_tree_node *first_node, *second_node;
- struct ll_inode_info *hlli = ll_i2info(head), *tlli;
+ struct ll_inode_info *hlli = ll_i2info(head);
int rc = 0, cleanup_phase = 0;
ENTRY;
}
tail = igrab(tail_filp->f_dentry->d_inode);
- tlli = ll_i2info(tail);
tail_dentry = tail_filp->f_dentry;
LASSERT(tail_dentry);
cleanup_phase = 1;
}
RETURN(rc);
}
+#endif /* LUSTRE_FIX >= 50 */
+/**
+ * Close inode open handle
+ *
+ * \param dentry [in] dentry which contains the inode
+ * \param it [in,out] intent which contains open info and result
+ *
+ * \retval 0 success
+ * \retval <0 failure
+ */
int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
{
struct inode *inode = dentry->d_inode;
inode, och);
out:
/* this one is in place of ll_file_open */
- ptlrpc_req_finished(it->d.lustre.it_data);
+ if (it_disposition(it, DISP_ENQ_OPEN_REF))
+ ptlrpc_req_finished(it->d.lustre.it_data);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
RETURN(rc);
}
+/**
+ * Get size for inode for which FIEMAP mapping is requested.
+ * Make the FIEMAP get_info call and returns the result.
+ *
+ * \param inode     [in]     inode whose extent mapping is requested
+ * \param fiemap    [in,out] fiemap buffer; header filled in by the caller,
+ *                           mapped extents filled in on success
+ * \param num_bytes [in]     total size of \a fiemap including extent array
+ *
+ * \retval 0 success
+ * \retval <0 failure
+ */
+int ll_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+              int num_bytes)
+{
+        struct obd_export *exp = ll_i2dtexp(inode);
+        struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
+        struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
+        int vallen = num_bytes;
+        int rc;
+        ENTRY;
+
+        /* A file with no objects has no striping metadata to map;
+         * callers (ll_file_ioctl) do not check lli_smd before calling. */
+        if (lsm == NULL)
+                RETURN(-ENOENT);
+
+        /* If the stripe_count > 1 and the application does not understand
+         * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
+         */
+        if (lsm->lsm_stripe_count > 1 &&
+            !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER))
+                RETURN(-EOPNOTSUPP); /* RETURN, not return: pair with ENTRY */
+
+        fm_key.oa.o_id = lsm->lsm_object_id;
+        fm_key.oa.o_gr = lsm->lsm_object_gr;
+        fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+        obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLFID | OBD_MD_FLGROUP |
+                        OBD_MD_FLSIZE);
+
+        /* If filesize is 0, then there would be no objects for mapping */
+        if (fm_key.oa.o_size == 0) {
+                fiemap->fm_mapped_extents = 0;
+                RETURN(0);
+        }
+
+        memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap));
+
+        rc = obd_get_info(exp, sizeof(fm_key), &fm_key, &vallen, fiemap, lsm);
+        if (rc)
+                CERROR("obd_get_info failed: rc = %d\n", rc);
+
+        RETURN(rc);
+}
+
int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
RETURN(ll_lov_getstripe(inode, arg));
case LL_IOC_RECREATE_OBJ:
RETURN(ll_lov_recreate_obj(inode, file, arg));
+        case EXT3_IOC_FIEMAP: {
+                struct ll_user_fiemap *fiemap_s;
+                size_t num_bytes, ret_bytes;
+                unsigned int extent_count;
+                int rc = 0;
+
+                /* Get the extent count so we can calculate the size of
+                 * required fiemap buffer */
+                if (get_user(extent_count,
+                    &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
+                        RETURN(-EFAULT);
+
+                /* extent_count is user-controlled: reject counts whose size
+                 * computation would overflow, which would undersize the
+                 * allocation below and overflow the heap on copy-out. */
+                if (extent_count > (~(size_t)0 - sizeof(*fiemap_s)) /
+                                   sizeof(struct ll_fiemap_extent))
+                        RETURN(-EINVAL);
+
+                num_bytes = sizeof(*fiemap_s) + (extent_count *
+                            sizeof(struct ll_fiemap_extent));
+                OBD_VMALLOC(fiemap_s, num_bytes);
+                if (fiemap_s == NULL)
+                        RETURN(-ENOMEM);
+
+                if (copy_from_user(fiemap_s,(struct ll_user_fiemap __user *)arg,
+                                   sizeof(*fiemap_s)))
+                        GOTO(error, rc = -EFAULT);
+
+                if (fiemap_s->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
+                        /* Tell userspace which flags we do understand, then
+                         * fail the call per the FIEMAP flag protocol. */
+                        fiemap_s->fm_flags = fiemap_s->fm_flags &
+                                             ~LUSTRE_FIEMAP_FLAGS_COMPAT;
+                        if (copy_to_user((char *)arg, fiemap_s,
+                                         sizeof(*fiemap_s)))
+                                GOTO(error, rc = -EFAULT);
+
+                        GOTO(error, rc = -EBADR);
+                }
+
+                /* If fm_extent_count is non-zero, read the first extent since
+                 * it is used to calculate end_offset and device from previous
+                 * fiemap call. */
+                if (extent_count) {
+                        if (copy_from_user(&fiemap_s->fm_extents[0],
+                            (char __user *)arg + sizeof(*fiemap_s),
+                            sizeof(struct ll_fiemap_extent)))
+                                GOTO(error, rc = -EFAULT);
+                }
+
+                if (fiemap_s->fm_flags & FIEMAP_FLAG_SYNC) {
+                        /* Use the outer rc: a shadowing local declaration
+                         * here would leave the outer rc at 0, so the error:
+                         * path would report success on writeback failure. */
+                        rc = filemap_fdatawrite(inode->i_mapping);
+                        if (rc)
+                                GOTO(error, rc);
+                }
+
+                rc = ll_fiemap(inode, fiemap_s, num_bytes);
+                if (rc)
+                        GOTO(error, rc);
+
+                ret_bytes = sizeof(struct ll_user_fiemap);
+
+                if (extent_count != 0)
+                        ret_bytes += (fiemap_s->fm_mapped_extents *
+                                      sizeof(struct ll_fiemap_extent));
+
+                if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+                        rc = -EFAULT;
+
+error:
+                OBD_VFREE(fiemap_s, num_bytes);
+                RETURN(rc);
+        }
case EXT3_IOC_GETFLAGS:
case EXT3_IOC_SETFLAGS:
RETURN(ll_iocontrol(inode, file, cmd, arg));
case EXT3_IOC_GETVERSION:
RETURN(put_user(inode->i_generation, (int *)arg));
case LL_IOC_JOIN: {
+#if LUSTRE_FIX >= 50
+ /* Allow file join in beta builds for debugging */
char *ftail;
int rc;
rc = ll_file_join(inode, file, ftail);
putname(ftail);
RETURN(rc);
+#else
+ CWARN("file join is not supported in this version of Lustre\n");
+ RETURN(-ENOTTY);
+#endif
}
case LL_IOC_GROUP_LOCK:
RETURN(ll_get_grouplock(inode, file, arg));
*/
case LL_IOC_FLUSHCTX:
RETURN(ll_flush_ctx(inode));
- case LL_IOC_GETFACL: {
- struct rmtacl_ioctl_data ioc;
-
- if (copy_from_user(&ioc, (void *)arg, sizeof(ioc)))
+ case LL_IOC_PATH2FID: {
+ if (copy_to_user((void *)arg, &ll_i2info(inode)->lli_fid,
+ sizeof(struct lu_fid)))
RETURN(-EFAULT);
- RETURN(ll_ioctl_getfacl(inode, &ioc));
- }
- case LL_IOC_SETFACL: {
- struct rmtacl_ioctl_data ioc;
-
- if (copy_from_user(&ioc, (void *)arg, sizeof(ioc)))
- RETURN(-EFAULT);
-
- RETURN(ll_ioctl_setfacl(inode, &ioc));
+ RETURN(0);
}
default: {
int err;
- if (LLIOC_STOP ==
+ if (LLIOC_STOP ==
ll_iocontrol_call(inode, file, cmd, arg, &err))
RETURN(err);
loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
loff_t retval;
ENTRY;
retval = offset + ((origin == 2) ? i_size_read(inode) :
if (file->f_flags & O_NONBLOCK)
nonblock = LDLM_FL_BLOCK_NOWAIT;
- if (lsm != NULL) {
- rc = ll_glimpse_size(inode, nonblock);
- if (rc != 0)
- RETURN(rc);
- }
+ rc = cl_glimpse_size(inode);
+ if (rc != 0)
+ RETURN(rc);
ll_inode_size_lock(inode, 0);
offset += i_size_read(inode);
if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
if (offset != file->f_pos) {
file->f_pos = offset;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- file->f_reada = 0;
- file->f_version = ++event;
-#endif
}
retval = offset;
}
-
+
RETURN(retval);
}
if (data && lsm) {
struct obdo *oa;
-
+
OBDO_ALLOC(oa);
if (!oa)
RETURN(rc ? rc : -ENOMEM);
{
struct inode *inode = file->f_dentry->d_inode;
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ldlm_res_id res_id =
- { .name = { fid_seq(ll_inode2fid(inode)),
- fid_oid(ll_inode2fid(inode)),
- fid_ver(ll_inode2fid(inode)),
- LDLM_FLOCK} };
- struct ldlm_enqueue_info einfo = { LDLM_FLOCK, 0, NULL,
- ldlm_flock_completion_ast, NULL, file_lock };
+ struct ldlm_enqueue_info einfo = { .ei_type = LDLM_FLOCK,
+ .ei_cb_cp =ldlm_flock_completion_ast,
+ .ei_cbdata = file_lock };
+ struct md_op_data *op_data;
struct lustre_handle lockh = {0};
ldlm_policy_data_t flock;
int flags = 0;
inode->i_ino, file_lock);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
-
+
if (file_lock->fl_flags & FL_FLOCK) {
LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
/* set missing params for flock() calls */
break;
default:
CERROR("unknown fcntl lock type: %d\n", file_lock->fl_type);
- LBUG();
+ RETURN (-EINVAL);
}
switch (cmd) {
break;
default:
CERROR("unknown fcntl lock command: %d\n", cmd);
- LBUG();
+ RETURN (-EINVAL);
}
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
+
CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#x, mode=%u, "
"start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
flags, einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
- rc = ldlm_cli_enqueue(sbi->ll_md_exp, NULL, &einfo, &res_id,
- &flock, &flags, NULL, 0, NULL, &lockh, 0);
- if ((file_lock->fl_flags & FL_FLOCK) && (rc == 0))
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
+ op_data, &lockh, &flock, 0, NULL /* req */, flags);
+
+ ll_finish_md_op_data(op_data);
+
+ if ((file_lock->fl_flags & FL_FLOCK) &&
+ (rc == 0 || file_lock->fl_type == F_UNLCK))
ll_flock_lock_file_wait(file, file_lock, (cmd == F_SETLKW));
#ifdef HAVE_F_OP_FLOCK
- if ((file_lock->fl_flags & FL_POSIX) && (rc == 0) &&
+ if ((file_lock->fl_flags & FL_POSIX) &&
+ (rc == 0 || file_lock->fl_type == F_UNLCK) &&
!(flags & LDLM_FL_TEST_LOCK))
posix_lock_file_wait(file, file_lock);
#endif
return 0;
}
-int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
+int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
+ __u64 ibits)
{
struct inode *inode = dentry->d_inode;
struct ptlrpc_request *req = NULL;
struct ll_sb_info *sbi;
struct obd_export *exp;
- int rc;
+ int rc = 0;
ENTRY;
if (!inode) {
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- oit.it_flags |= O_CHECK_STALE;
+ oit.it_create_mode |= M_CHECK_STALE;
rc = md_intent_lock(exp, op_data, NULL, 0,
/* we are not interested in name
based lookup */
&oit, 0, &req,
ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
- oit.it_flags &= ~O_CHECK_STALE;
+ oit.it_create_mode &= ~M_CHECK_STALE;
if (rc < 0) {
rc = ll_inode_revalidate_fini(inode, rc);
GOTO (out, rc);
}
- rc = ll_revalidate_it_finish(req, DLM_REPLY_REC_OFF, &oit, dentry);
+ rc = ll_revalidate_it_finish(req, &oit, dentry);
if (rc != 0) {
ll_intent_release(&oit);
GOTO(out, rc);
here to preserve get_cwd functionality on 2.6.
Bug 10503 */
if (!dentry->d_inode->i_nlink) {
+ spin_lock(&ll_lookup_lock);
spin_lock(&dcache_lock);
ll_drop_dentry(dentry);
spin_unlock(&dcache_lock);
+ spin_unlock(&ll_lookup_lock);
}
ll_lookup_finish_locks(&oit, dentry);
- } else if (!ll_have_md_lock(dentry->d_inode, MDS_INODELOCK_UPDATE)) {
+ } else if (!ll_have_md_lock(dentry->d_inode, ibits)) {
+
struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
obd_valid valid = OBD_MD_FLGETATTR;
struct obd_capa *oc;
RETURN(rc);
}
- rc = ll_prep_inode(&inode, req, REPLY_REC_OFF,
- NULL);
- if (rc)
- GOTO(out, rc);
+ rc = ll_prep_inode(&inode, req, NULL);
}
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+/**
+ * Revalidate a dentry's MDS attributes and, when the file has objects,
+ * refresh the client-visible size with a glimpse.
+ *
+ * \param dentry [in] dentry to revalidate
+ * \param it     [in] intent describing the operation being validated
+ *
+ * \retval 0 success
+ * \retval <0 failure from revalidation or glimpse
+ */
+int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
+{
+ int rc;
+ ENTRY;
+
+ rc = __ll_inode_revalidate_it(dentry, it, MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LOOKUP);
 /* if object not yet allocated, don't validate size */
- if (ll_i2info(inode)->lli_smd == NULL)
- GOTO(out, rc = 0);
+ if (rc == 0 && ll_i2info(dentry->d_inode)->lli_smd == NULL)
+ RETURN(0);
- /* ll_glimpse_size will prefer locally cached writes if they extend
+ /* cl_glimpse_size will prefer locally cached writes if they extend
 * the file */
- rc = ll_glimpse_size(inode, 0);
- EXIT;
-out:
- ptlrpc_req_finished(req);
- return rc;
+
+ /* only glimpse when revalidation succeeded; errors propagate as-is */
+ if (rc == 0)
+ rc = cl_glimpse_size(dentry->d_inode);
+
+ RETURN(rc);
 }
int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
{
-        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), mask %o\n",
-               inode->i_ino, inode->i_generation, inode, mask);
+        int rc = 0;
+        ENTRY;
+
+        /* as root inode are NOT getting validated in lookup operation,
+         * need to do it before permission check. */
+
+        if (inode == inode->i_sb->s_root->d_inode) {
+                struct lookup_intent it = { .it_op = IT_GETATTR };
+
+                rc = __ll_inode_revalidate_it(inode->i_sb->s_root, &it,
+                                              MDS_INODELOCK_LOOKUP);
+                if (rc)
+                        RETURN(rc);
+        }
+
+        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
+               inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
+
         if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
-                return lustre_check_remote_perm(inode, mask);
+                /* use RETURN, not return: ENTRY above needs a matching EXIT
+                 * trace, which the bare return would silently skip */
+                RETURN(lustre_check_remote_perm(inode, mask));
-
+
         ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
-        return generic_permission(inode, mask, lustre_check_acl);
+        rc = generic_permission(inode, mask, lustre_check_acl);
+
+        RETURN(rc);
}
#else
int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
check_capabilities:
if (!(mask & MAY_EXEC) ||
(inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
- if (capable(CAP_DAC_OVERRIDE))
+ if (cfs_capable(CFS_CAP_DAC_OVERRIDE))
return 0;
- if (capable(CAP_DAC_READ_SEARCH) && ((mask == MAY_READ) ||
+ if (cfs_capable(CFS_CAP_DAC_READ_SEARCH) && ((mask == MAY_READ) ||
(S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))))
return 0;
-
+
return -EACCES;
}
#endif
+/* Select the vectored-I/O file_operations members for this kernel:
+ * older kernels expose .readv/.writev, newer ones .aio_read/.aio_write.
+ * The METHOD macros name the struct member, the FUNCTION macros the
+ * llite implementation wired into the file_operations tables below. */
+#ifdef HAVE_FILE_READV
+#define READ_METHOD readv
+#define READ_FUNCTION ll_file_readv
+#define WRITE_METHOD writev
+#define WRITE_FUNCTION ll_file_writev
+#else
+#define READ_METHOD aio_read
+#define READ_FUNCTION ll_file_aio_read
+#define WRITE_METHOD aio_write
+#define WRITE_FUNCTION ll_file_aio_write
+#endif
+
/* -o localflock - only provides locally consistent flock locks */
struct file_operations ll_file_operations = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
struct file_operations ll_file_operations_flock = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
/* These are for -o noflock - to return ENOSYS on flock calls */
struct file_operations ll_file_operations_noflock = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
static struct llioc_ctl_data {
struct rw_semaphore ioc_sem;
struct list_head ioc_head;
-} llioc = {
- __RWSEM_INITIALIZER(llioc.ioc_sem),
+} llioc = {
+ __RWSEM_INITIALIZER(llioc.ioc_sem),
CFS_LIST_HEAD_INIT(llioc.ioc_head)
};
EXPORT_SYMBOL(ll_iocontrol_register);
EXPORT_SYMBOL(ll_iocontrol_unregister);
-enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
+enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg, int *rcp)
{
enum llioc_iter ret = LLIOC_CONT;
down_read(&llioc.ioc_sem);
list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
for (i = 0; i < data->iocd_count; i++) {
- if (cmd != data->iocd_cmd[i])
+ if (cmd != data->iocd_cmd[i])
continue;
ret = data->iocd_cb(inode, file, cmd, arg, data, &rc);