-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LLITE
#include <lustre_dlm.h>
#include <lustre_lite.h>
-#include <lustre_mdc.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include "llite_internal.h"
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
op_data->op_attr_blocks = inode->i_blocks;
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = inode->i_flags;
+ ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
+ ll_inode_to_ext_flags(inode->i_flags);
op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
- memcpy(&op_data->op_handle, fh, sizeof(op_data->op_handle));
+ if (fh)
+ op_data->op_handle = *fh;
op_data->op_capa1 = ll_mdscapa_get(inode);
}
+/**
+ * Closes the IO epoch and packs all the attributes into @op_data for
+ * the CLOSE rpc.
+ */
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle *och)
{
if (!(och->och_flags & FMODE_WRITE))
goto out;
- if (!(exp_connect_som(ll_i2mdexp(inode))) || !S_ISREG(inode->i_mode))
+ if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
else
- ll_epoch_close(inode, op_data, &och, 0);
+ ll_ioepoch_close(inode, op_data, &och, 0);
out:
ll_pack_inode2opdata(inode, op_data, &och->och_fh);
+ ll_prep_md_op_data(op_data, inode, NULL, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
EXIT;
}
GOTO(out, rc = 0);
}
- /*
- * here we check if this is forced umount. If so this is called on
- * canceling "open lock" and we do not call md_close() in this case, as
- * it will not be successful, as import is already deactivated.
- */
- if (obd->obd_force)
- GOTO(out, rc = 0);
-
OBD_ALLOC_PTR(op_data);
if (op_data == NULL)
GOTO(out, rc = -ENOMEM); // XXX We leak openhandle and request here.
LASSERT(epoch_close);
/* MDS has instructed us to obtain Size-on-MDS attribute from
* OSTs and send setattr to back to MDS. */
- rc = ll_sizeonmds_update(inode, &och->och_fh,
- op_data->op_ioepoch);
+ rc = ll_som_update(inode, op_data);
if (rc) {
CERROR("inode %lu mdc Size-on-MDS update failed: "
"rc = %d\n", inode->i_ino, rc);
EXIT;
out:
- if ((exp->exp_connect_flags & OBD_CONNECT_SOM) && !epoch_close &&
+ if (exp_connect_som(exp) && !epoch_close &&
S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
ll_queue_done_writing(inode, LLIF_DONE_WRITING);
} else {
och_usecount = &lli->lli_open_fd_read_count;
}
- down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (*och_usecount) { /* There are still users of this handle, so
skip freeing it. */
- up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
RETURN(0);
}
och=*och_p;
*och_p = NULL;
- up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
if (och) { /* There might be a race and somebody have freed this och
already */
struct inode *inode = file->f_dentry->d_inode;
ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
- down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
LASSERT(lli->lli_open_fd_read_count);
lli->lli_open_fd_read_count--;
}
- up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode,
RETURN(rc);
}
-int lov_test_and_clear_async_rc(struct lov_stripe_md *lsm);
-
/* While this returns an error code, fput() the caller does not, so we need
* to make every effort to clean up all of our state here. Also, applications
* rarely check close errors and even if an error is returned they will not
struct ll_file_data *fd;
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
int rc;
ENTRY;
/* The last ref on @file, maybe not the the owner pid of statahead.
* Different processes can open the same dir, "ll_opendir_key" means:
* it is me that should stop the statahead thread. */
- if (lli->lli_opendir_key == fd && lli->lli_opendir_pid != 0)
+ if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
+ lli->lli_opendir_pid != 0)
ll_stop_statahead(inode, lli->lli_opendir_key);
if (inode->i_sb->s_root == file->f_dentry) {
RETURN(0);
}
- if (lsm)
- lov_test_and_clear_async_rc(lsm);
- lli->lli_async_rc = 0;
+ if (!S_ISDIR(inode->i_mode)) {
+ lov_read_and_clear_async_rc(lli->lli_clob);
+ lli->lli_async_rc = 0;
+ }
rc = ll_md_close(sbi->ll_md_exp, inode, file);
+
+ if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
+ libcfs_debug_dumplog();
+
RETURN(rc);
}
const int len = file->f_dentry->d_name.len;
struct md_op_data *op_data;
struct ptlrpc_request *req;
+ __u32 opc = LUSTRE_OPC_ANY;
int rc;
ENTRY;
* makes a good candidate for using OPEN lock */
/* If lmmsize & lmm are not 0, we are just setting stripe info
* parameters. No need for the open lock */
- if (!lmm && !lmmsize)
+ if (lmm == NULL && lmmsize == 0) {
itp->it_flags |= MDS_OPEN_LOCK;
+ if (itp->it_flags & FMODE_WRITE)
+ opc = LUSTRE_OPC_CREATE;
+ }
op_data = ll_prep_md_op_data(NULL, parent->d_inode,
file->f_dentry->d_inode, name, len,
- O_RDWR, LUSTRE_OPC_ANY, NULL);
+ O_RDWR, opc, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
GOTO(out, rc);
}
+ if (it_disposition(itp, DISP_LOOKUP_NEG))
+ GOTO(out, rc = -ENOENT);
+
if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
GOTO(out, rc);
}
- if (itp->d.lustre.it_lock_mode)
- md_set_lock_data(sbi->ll_md_exp,
- &itp->d.lustre.it_lock_handle,
- file->f_dentry->d_inode);
-
rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL);
+ if (!rc && itp->d.lustre.it_lock_mode)
+ ll_set_lock_data(sbi->ll_md_exp, file->f_dentry->d_inode,
+ itp, NULL);
+
out:
ptlrpc_req_finished(itp->d.lustre.it_data);
it_clear_disposition(itp, DISP_ENQ_COMPLETE);
RETURN(rc);
}
+/**
+ * Assign an obtained @ioepoch to client's inode. No lock is needed, MDS does
+ * not believe attributes if several ioepoch holders exist. Attributes for
+ * the previous ioepoch are likewise skipped by the MDS if a new one is opened.
+ */
void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
{
if (ioepoch && lli->lli_ioepoch != ioepoch) {
/* Open a file, and (for the very first open) create objects on the OSTs at
* this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
- * creation or open until ll_lov_setstripe() ioctl is called. We grab
- * lli_open_sem to ensure no other process will create objects, send the
- * stripe MD to the MDS, or try to destroy the objects if that fails.
+ * creation or open until ll_lov_setstripe() ioctl is called.
*
* If we already have the stripe MD locally then we don't request it in
* md_open(), by passing a lmm_size = 0.
struct ll_inode_info *lli = ll_i2info(inode);
struct lookup_intent *it, oit = { .it_op = IT_OPEN,
.it_flags = file->f_flags };
- struct lov_stripe_md *lsm;
- struct ptlrpc_request *req = NULL;
- struct obd_client_handle **och_p;
- __u64 *och_usecount;
+ struct obd_client_handle **och_p = NULL;
+ __u64 *och_usecount = NULL;
struct ll_file_data *fd;
int rc = 0, opendir_set = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
inode->i_generation, inode, file->f_flags);
-#ifdef HAVE_VFS_INTENT_PATCHES
- it = file->f_it;
-#else
it = file->private_data; /* XXX: compat macro */
file->private_data = NULL; /* prevent ll_local_open assertion */
-#endif
fd = ll_file_data_get();
if (fd == NULL)
- RETURN(-ENOMEM);
+ GOTO(out_och_free, rc = -ENOMEM);
fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
-again:
- spin_lock(&lli->lli_lock);
- if (lli->lli_opendir_key == NULL && lli->lli_opendir_pid == 0) {
- LASSERT(lli->lli_sai == NULL);
+ cfs_spin_lock(&lli->lli_sa_lock);
+ if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
+ lli->lli_opendir_pid == 0) {
lli->lli_opendir_key = fd;
lli->lli_opendir_pid = cfs_curproc_pid();
opendir_set = 1;
- } else if (unlikely(lli->lli_opendir_pid == cfs_curproc_pid() &&
- lli->lli_opendir_key != NULL)) {
- /* Two cases for this:
- * (1) The same process open such directory many times.
- * (2) The old process opened the directory, and exited
- * before its children processes. Then new process
- * with the same pid opens such directory before the
- * old process's children processes exit.
- * reset stat ahead for such cases. */
- spin_unlock(&lli->lli_lock);
- CDEBUG(D_INFO, "Conflict statahead for %.*s "DFID
- " reset it.\n", file->f_dentry->d_name.len,
- file->f_dentry->d_name.name,
- PFID(&lli->lli_fid));
- ll_stop_statahead(inode, lli->lli_opendir_key);
- goto again;
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
if (inode->i_sb->s_root == file->f_dentry) {
* dentry_open after call to open_namei that checks permissions.
* Only nfsd_open call dentry_open directly without checking
* permissions and because of that this code below is safe. */
- if (oit.it_flags & FMODE_WRITE)
+ if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
/* We do not want O_EXCL here, presumably we opened the file
* already? XXX - NFS implications? */
oit.it_flags &= ~O_EXCL;
+ /* bug20584, if "it_flags" contains O_CREAT, the file will be
+ * created if necessary, then "IT_CREAT" should be set to keep
+ * consistent with it */
+ if (oit.it_flags & O_CREAT)
+ oit.it_op |= IT_CREAT;
+
it = &oit;
}
och_usecount = &lli->lli_open_fd_read_count;
}
- down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
let's close it somehow. This will decref request. */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
- up(&lli->lli_och_sem);
- ll_file_data_put(fd);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
+
ll_release_openhandle(file->f_dentry, it);
- lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
- LPROC_LL_OPEN);
}
(*och_usecount)++;
rc = ll_local_open(file, it, fd, NULL);
if (rc) {
(*och_usecount)--;
- up(&lli->lli_och_sem);
- ll_file_data_put(fd);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
} else {
/* We cannot just request lock handle now, new ELC code
means that one of other OPEN locks for this file
could be cancelled, and since blocking ast handler
- would attempt to grab och_sem as well, that would
+ would attempt to grab och_mutex as well, that would
result in a deadlock */
- up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
it->it_create_mode &= ~M_CHECK_STALE;
- if (rc) {
- ll_file_data_put(fd);
+ if (rc)
GOTO(out_openerr, rc);
- }
- /* Got some error? Release the request */
- if (it->d.lustre.it_status < 0) {
- req = it->d.lustre.it_data;
- ptlrpc_req_finished(req);
- }
- md_set_lock_data(ll_i2sbi(inode)->ll_md_exp,
- &it->d.lustre.it_lock_handle,
- file->f_dentry->d_inode);
goto restart;
}
OBD_ALLOC(*och_p, sizeof (struct obd_client_handle));
- if (!*och_p) {
- ll_file_data_put(fd);
+ if (!*och_p)
GOTO(out_och_free, rc = -ENOMEM);
- }
+
(*och_usecount)++;
- req = it->d.lustre.it_data;
/* md_intent_lock() didn't get a request ref if there was an
* open error, so don't do cleanup on the request here
/* XXX (green): Should not we bail out on any error here, not
* just open error? */
rc = it_open_error(DISP_OPEN_OPEN, it);
- if (rc) {
- ll_file_data_put(fd);
+ if (rc)
GOTO(out_och_free, rc);
- }
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
+ LASSERT(it_disposition(it, DISP_ENQ_OPEN_REF));
+
rc = ll_local_open(file, it, fd, *och_p);
- if (rc) {
- ll_file_data_put(fd);
+ if (rc)
GOTO(out_och_free, rc);
- }
}
- up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
+ fd = NULL;
- /* Must do this outside lli_och_sem lock to prevent deadlock where
+ /* Must do this outside lli_och_mutex lock to prevent deadlock where
different kind of OPEN lock for this same inode gets cancelled
by ldlm_cancel_lru */
if (!S_ISREG(inode->i_mode))
- GOTO(out, rc);
+ GOTO(out_och_free, rc);
ll_capa_open(inode);
- lsm = lli->lli_smd;
- if (lsm == NULL) {
+ if (!lli->lli_has_smd) {
if (file->f_flags & O_LOV_DELAY_CREATE ||
!(file->f_mode & FMODE_WRITE)) {
CDEBUG(D_INODE, "object creation was delayed\n");
- GOTO(out, rc);
+ GOTO(out_och_free, rc);
}
}
file->f_flags &= ~O_LOV_DELAY_CREATE;
- GOTO(out, rc);
-out:
- ptlrpc_req_finished(req);
- if (req)
- it_clear_disposition(it, DISP_ENQ_OPEN_REF);
+ GOTO(out_och_free, rc);
+
out_och_free:
+ if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
+ ptlrpc_req_finished(it->d.lustre.it_data);
+ it_clear_disposition(it, DISP_ENQ_OPEN_REF);
+ }
+
if (rc) {
- if (*och_p) {
+ if (och_p && *och_p) {
OBD_FREE(*och_p, sizeof (struct obd_client_handle));
*och_p = NULL; /* OBD_FREE writes some magic there */
(*och_usecount)--;
}
- up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
+
out_openerr:
if (opendir_set != 0)
ll_stop_statahead(inode, lli->lli_opendir_key);
+ if (fd != NULL)
+ ll_file_data_put(fd);
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
}
return rc;
/* Fills the obdo with the attributes for the lsm */
static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
- struct obd_capa *capa, struct obdo *obdo)
+ struct obd_capa *capa, struct obdo *obdo,
+ __u64 ioepoch, int sync)
{
struct ptlrpc_request_set *set;
struct obd_info oinfo = { { { 0 } } };
oinfo.oi_md = lsm;
oinfo.oi_oa = obdo;
oinfo.oi_oa->o_id = lsm->lsm_object_id;
- oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
+ oinfo.oi_oa->o_seq = lsm->lsm_object_seq;
oinfo.oi_oa->o_mode = S_IFREG;
+ oinfo.oi_oa->o_ioepoch = ioepoch;
oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLGROUP;
+ OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
+ OBD_MD_FLDATAVERSION;
oinfo.oi_capa = capa;
+ if (sync) {
+ oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
+ oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
+ }
set = ptlrpc_prep_set();
if (set == NULL) {
if (rc == 0)
oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE);
+ OBD_MD_FLCTIME | OBD_MD_FLSIZE |
+ OBD_MD_FLDATAVERSION);
RETURN(rc);
}
-/* Fills the obdo with the attributes for the inode defined by lsm */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo)
+/**
+ * Performs the getattr on the inode and updates its fields.
+ * If @sync != 0, perform the getattr under the server-side lock.
+ */
+int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
+ __u64 ioepoch, int sync)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_capa *capa = ll_mdscapa_get(inode);
- int rc;
- ENTRY;
-
- rc = ll_lsm_getattr(lli->lli_smd, ll_i2dtexp(inode), capa, obdo);
- capa_put(capa);
- if (rc == 0) {
- obdo_refresh_inode(inode, obdo, obdo->o_valid);
- CDEBUG(D_INODE,
- "objid "LPX64" size %Lu, blocks %llu, blksize %lu\n",
- lli->lli_smd->lsm_object_id, i_size_read(inode),
- (unsigned long long)inode->i_blocks,
- (unsigned long)ll_inode_blksize(inode));
- }
- RETURN(rc);
+ struct obd_capa *capa = ll_mdscapa_get(inode);
+ struct lov_stripe_md *lsm;
+ int rc;
+ ENTRY;
+
+ lsm = ccc_inode_lsm_get(inode);
+ rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
+ capa, obdo, ioepoch, sync);
+ capa_put(capa);
+ if (rc == 0) {
+ obdo_refresh_inode(inode, obdo, obdo->o_valid);
+ CDEBUG(D_INODE,
+ "objid "LPX64" size %llu, blocks %llu, blksize %lu\n",
+ lsm ? lsm->lsm_object_id : 0, i_size_read(inode),
+ (unsigned long long)inode->i_blocks,
+ (unsigned long)ll_inode_blksize(inode));
+ }
+ ccc_inode_lsm_put(inode, lsm);
+ RETURN(rc);
}
int ll_merge_lvb(struct inode *inode)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ost_lvb lvb;
- int rc;
-
- ENTRY;
-
- ll_inode_size_lock(inode, 1);
- inode_init_lvb(inode, &lvb);
- rc = obd_merge_lvb(sbi->ll_dt_exp, lli->lli_smd, &lvb, 0);
- i_size_write(inode, lvb.lvb_size);
- inode->i_blocks = lvb.lvb_blocks;
-
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
- ll_inode_size_unlock(inode, 1);
-
- RETURN(rc);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct lov_stripe_md *lsm;
+ struct ost_lvb lvb;
+ int rc = 0;
+
+ ENTRY;
+
+ lsm = ccc_inode_lsm_get(inode);
+ ll_inode_size_lock(inode);
+ inode_init_lvb(inode, &lvb);
+
+ /* merge the timestamps most recently obtained from the MDS with
+ the timestamps obtained from the OSTs */
+ lvb.lvb_atime = lli->lli_lvb.lvb_atime;
+ lvb.lvb_mtime = lli->lli_lvb.lvb_mtime;
+ lvb.lvb_ctime = lli->lli_lvb.lvb_ctime;
+ if (lsm != NULL) {
+ rc = obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 0);
+ cl_isize_write_nolock(inode, lvb.lvb_size);
+
+ CDEBUG(D_VFSTRACE, DFID" updating i_size "LPU64"\n",
+ PFID(&lli->lli_fid), lvb.lvb_size);
+ inode->i_blocks = lvb.lvb_blocks;
+
+ LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
+ LTIME_S(inode->i_atime) = lvb.lvb_atime;
+ LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
+ }
+ ll_inode_size_unlock(inode);
+ ccc_inode_lsm_put(inode, lsm);
+
+ RETURN(rc);
}
int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
struct obdo obdo = { 0 };
int rc;
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, &obdo);
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, &obdo, 0, 0);
if (rc == 0) {
st->st_size = obdo.o_size;
st->st_blocks = obdo.o_blocks;
void ll_io_init(struct cl_io *io, const struct file *file, int write)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct inode *inode = file->f_dentry->d_inode;
- LASSERT(fd != NULL);
- memset(io, 0, sizeof *io);
io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
- if (write)
- io->u.ci_wr.wr_append = file->f_flags & O_APPEND;
+ if (write) {
+ io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
+ io->u.ci_wr.wr_sync = file->f_flags & O_SYNC || IS_SYNC(inode);
+ }
io->ci_obj = ll_i2info(inode)->lli_clob;
io->ci_lockreq = CILR_MAYBE;
- if (fd->fd_flags & LL_FILE_IGNORE_LOCK ||
- sbi->ll_flags & LL_SBI_NOLCK) {
+ if (ll_file_nolock(file)) {
io->ci_lockreq = CILR_NEVER;
io->ci_no_srvlock = 1;
} else if (file->f_flags & O_APPEND) {
}
static ssize_t ll_file_io_generic(const struct lu_env *env,
- struct ccc_io_args *args, struct file *file,
+ struct vvp_io_args *args, struct file *file,
enum cl_io_type iot, loff_t *ppos, size_t count)
{
- struct cl_io *io;
- ssize_t result;
+ struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct cl_io *io;
+ ssize_t result;
ENTRY;
- io = &ccc_env_info(env)->cti_io;
+ io = ccc_env_thread_io(env);
ll_io_init(io, file, iot == CIT_WRITE);
- if (iot == CIT_READ)
- io->u.ci_rd.rd_is_sendfile = args->cia_is_sendfile;
-
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env);
struct ccc_io *cio = ccc_env_io(env);
- if (cl_io_is_sendfile(io)) {
- vio->u.read.cui_actor = args->cia_actor;
- vio->u.read.cui_target = args->cia_target;
- } else {
- cio->cui_iov = args->cia_iov;
- cio->cui_nrsegs = args->cia_nrsegs;
+ int write_mutex_locked = 0;
+
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ vio->cui_io_subtype = args->via_io_subtype;
+
+ switch (vio->cui_io_subtype) {
+ case IO_NORMAL:
+ cio->cui_iov = args->u.normal.via_iov;
+ cio->cui_nrsegs = args->u.normal.via_nrsegs;
+ cio->cui_tot_nrsegs = cio->cui_nrsegs;
#ifndef HAVE_FILE_WRITEV
- cio->cui_iocb = args->cia_iocb;
+ cio->cui_iocb = args->u.normal.via_iocb;
#endif
+ if ((iot == CIT_WRITE) &&
+ !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ if (cfs_mutex_lock_interruptible(&lli->
+ lli_write_mutex))
+ GOTO(out, result = -ERESTARTSYS);
+ write_mutex_locked = 1;
+ } else if (iot == CIT_READ) {
+ cfs_down_read(&lli->lli_trunc_sem);
+ }
+ break;
+ case IO_SENDFILE:
+ vio->u.sendfile.cui_actor = args->u.sendfile.via_actor;
+ vio->u.sendfile.cui_target = args->u.sendfile.via_target;
+ break;
+ case IO_SPLICE:
+ vio->u.splice.cui_pipe = args->u.splice.via_pipe;
+ vio->u.splice.cui_flags = args->u.splice.via_flags;
+ break;
+ default:
+ CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
+ LBUG();
}
- cio->cui_fd = LUSTRE_FPRIVATE(file);
result = cl_io_loop(env, io);
- } else
+ if (write_mutex_locked)
+ cfs_mutex_unlock(&lli->lli_write_mutex);
+ else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
+ cfs_up_read(&lli->lli_trunc_sem);
+ } else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
+ }
+
if (io->ci_nob > 0) {
result = io->ci_nob;
*ppos = io->u.ci_wr.wr.crw_pos;
}
+ GOTO(out, result);
+out:
cl_io_fini(env, io);
- RETURN(result);
+
+ if (iot == CIT_READ) {
+ if (result >= 0)
+ ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode),
+ LPROC_LL_READ_BYTES, result);
+ } else if (iot == CIT_WRITE) {
+ if (result >= 0) {
+ ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode),
+ LPROC_LL_WRITE_BYTES, result);
+ lli->lli_write_rc = 0;
+ } else {
+ lli->lli_write_rc = result;
+ }
+ }
+
+ return result;
}
unsigned long nr_segs, loff_t *ppos)
{
struct lu_env *env;
- struct ccc_io_args *args;
+ struct vvp_io_args *args;
size_t count;
ssize_t result;
int refcheck;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- args = &vvp_env_info(env)->vti_args;
- args->cia_is_sendfile = 0;
- args->cia_iov = (struct iovec *)iov;
- args->cia_nrsegs = nr_segs;
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+
result = ll_file_io_generic(env, args, file, CIT_READ, ppos, count);
cl_env_put(env, &refcheck);
RETURN(result);
unsigned long nr_segs, loff_t pos)
{
struct lu_env *env;
- struct ccc_io_args *args;
+ struct vvp_io_args *args;
size_t count;
ssize_t result;
int refcheck;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- args = &vvp_env_info(env)->vti_args;
- args->cia_is_sendfile = 0;
- args->cia_iov = (struct iovec *)iov;
- args->cia_nrsegs = nr_segs;
- args->cia_iocb = iocb;
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iocb = iocb;
+
result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
&iocb->ki_pos, count);
cl_env_put(env, &refcheck);
unsigned long nr_segs, loff_t *ppos)
{
struct lu_env *env;
- struct ccc_io_args *args;
+ struct vvp_io_args *args;
size_t count;
ssize_t result;
int refcheck;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- args = &vvp_env_info(env)->vti_args;
- args->cia_iov = (struct iovec *)iov;
- args->cia_nrsegs = nr_segs;
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+
result = ll_file_io_generic(env, args, file, CIT_WRITE, ppos, count);
cl_env_put(env, &refcheck);
RETURN(result);
unsigned long nr_segs, loff_t pos)
{
struct lu_env *env;
- struct ccc_io_args *args;
+ struct vvp_io_args *args;
size_t count;
ssize_t result;
int refcheck;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- args = &vvp_env_info(env)->vti_args;
- args->cia_iov = (struct iovec *)iov;
- args->cia_nrsegs = nr_segs;
- args->cia_iocb = iocb;
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iocb = iocb;
+
result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
&iocb->ki_pos, count);
cl_env_put(env, &refcheck);
#endif
+#ifdef HAVE_KERNEL_SENDFILE
/*
* Send file content (through pagecache) somewhere with helper
*/
read_actor_t actor, void *target)
{
struct lu_env *env;
- struct ccc_io_args *args;
+ struct vvp_io_args *args;
ssize_t result;
int refcheck;
ENTRY;
if (IS_ERR(env))
RETURN(PTR_ERR(env));
- args = &vvp_env_info(env)->vti_args;
- args->cia_is_sendfile = 1;
- args->cia_target = target;
- args->cia_actor = actor;
+ args = vvp_env_args(env, IO_SENDFILE);
+ args->u.sendfile.via_target = target;
+ args->u.sendfile.via_actor = actor;
+
result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
cl_env_put(env, &refcheck);
RETURN(result);
}
+#endif
-static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
- unsigned long arg)
+#ifdef HAVE_KERNEL_SPLICE_READ
+/*
+ * Send file content (through pagecache) somewhere with helper
+ */
+static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t count,
+ unsigned int flags)
{
- struct obd_export *exp = ll_i2dtexp(inode);
- struct ll_recreate_obj ucreatp;
- struct obd_trans_info oti = { 0 };
- struct obdo *oa = NULL;
- int lsm_size;
- int rc = 0;
- struct lov_stripe_md *lsm, *lsm2;
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- if (copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
- sizeof(struct ll_recreate_obj)))
- RETURN(-EFAULT);
+ args = vvp_env_args(env, IO_SPLICE);
+ args->u.splice.via_pipe = pipe;
+ args->u.splice.via_flags = flags;
- OBDO_ALLOC(oa);
- if (oa == NULL)
- RETURN(-ENOMEM);
+ result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
+}
+#endif
- ll_inode_size_lock(inode, 0);
- lsm = ll_i2info(inode)->lli_smd;
+static int ll_lov_recreate(struct inode *inode, obd_id id, obd_seq seq,
+ obd_count ost_idx)
+{
+ struct obd_export *exp = ll_i2dtexp(inode);
+ struct obd_trans_info oti = { 0 };
+ struct obdo *oa = NULL;
+ int lsm_size;
+ int rc = 0;
+ struct lov_stripe_md *lsm = NULL, *lsm2;
+ ENTRY;
+
+ OBDO_ALLOC(oa);
+ if (oa == NULL)
+ RETURN(-ENOMEM);
+
+ lsm = ccc_inode_lsm_get(inode);
if (lsm == NULL)
GOTO(out, rc = -ENOENT);
+
lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
(lsm->lsm_stripe_count));
- OBD_ALLOC(lsm2, lsm_size);
+ OBD_ALLOC_LARGE(lsm2, lsm_size);
if (lsm2 == NULL)
GOTO(out, rc = -ENOMEM);
- oa->o_id = ucreatp.lrc_id;
- oa->o_gr = ucreatp.lrc_group;
- oa->o_nlink = ucreatp.lrc_ost_idx;
+ oa->o_id = id;
+ oa->o_seq = seq;
+ oa->o_nlink = ost_idx;
oa->o_flags |= OBD_FL_RECREATE_OBJS;
oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
-
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+ obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
memcpy(lsm2, lsm, lsm_size);
- rc = obd_create(exp, oa, &lsm2, &oti);
+ ll_inode_size_lock(inode);
+ rc = obd_create(NULL, exp, oa, &lsm2, &oti);
+ ll_inode_size_unlock(inode);
- OBD_FREE(lsm2, lsm_size);
- GOTO(out, rc);
+ OBD_FREE_LARGE(lsm2, lsm_size);
+ GOTO(out, rc);
out:
- ll_inode_size_unlock(inode, 0);
- OBDO_FREE(oa);
- return rc;
+ ccc_inode_lsm_put(inode, lsm);
+ OBDO_FREE(oa);
+ return rc;
}
-int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
- int flags, struct lov_user_md *lum, int lum_size)
+static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
{
- struct lov_stripe_md *lsm;
- struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
- int rc = 0;
+ struct ll_recreate_obj ucreat;
ENTRY;
- ll_inode_size_lock(inode, 0);
- lsm = ll_i2info(inode)->lli_smd;
- if (lsm) {
- ll_inode_size_unlock(inode, 0);
- CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
- inode->i_ino);
- RETURN(-EEXIST);
- }
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
+ if (cfs_copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
+ sizeof(struct ll_recreate_obj)))
+ RETURN(-EFAULT);
+
+ RETURN(ll_lov_recreate(inode, ucreat.lrc_id, 0,
+ ucreat.lrc_ost_idx));
+}
+
+static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
+{
+ struct lu_fid fid;
+ obd_id id;
+ obd_count ost_idx;
+ ENTRY;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
+
+ if (cfs_copy_from_user(&fid, (struct lu_fid *)arg,
+ sizeof(struct lu_fid)))
+ RETURN(-EFAULT);
+
+ id = fid_oid(&fid) | ((fid_seq(&fid) & 0xffff) << 32);
+ ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
+ RETURN(ll_lov_recreate(inode, id, 0, ost_idx));
+}
+
+int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
+ int flags, struct lov_user_md *lum, int lum_size)
+{
+ struct lov_stripe_md *lsm = NULL;
+ struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
+ int rc = 0;
+ ENTRY;
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm != NULL) {
+ ccc_inode_lsm_put(inode, lsm);
+ CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
+ inode->i_ino);
+ RETURN(-EEXIST);
+ }
+
+ ll_inode_size_lock(inode);
rc = ll_intent_file_open(file, lum, lum_size, &oit);
if (rc)
GOTO(out, rc);
- if (it_disposition(&oit, DISP_LOOKUP_NEG))
- GOTO(out_req_free, rc = -ENOENT);
rc = oit.d.lustre.it_status;
if (rc < 0)
GOTO(out_req_free, rc);
ll_release_openhandle(file->f_dentry, &oit);
out:
- ll_inode_size_unlock(inode, 0);
- ll_intent_release(&oit);
- RETURN(rc);
+ ll_inode_size_unlock(inode);
+ ll_intent_release(&oit);
+ ccc_inode_lsm_put(inode, lsm);
+ RETURN(rc);
out_req_free:
- ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
- goto out;
+ ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
+ goto out;
}
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
struct mdt_body *body;
struct lov_mds_md *lmm = NULL;
struct ptlrpc_request *req = NULL;
- struct obd_capa *oc;
+ struct md_op_data *op_data;
int rc, lmmsize;
rc = ll_get_max_mdsize(sbi, &lmmsize);
if (rc)
RETURN(rc);
- oc = ll_mdscapa_get(inode);
- rc = md_getattr_name(sbi->ll_md_exp, ll_inode2fid(inode),
- oc, filename, strlen(filename) + 1,
- OBD_MD_FLEASIZE | OBD_MD_FLDIREA, lmmsize,
- ll_i2suppgid(inode), &req);
- capa_put(oc);
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
+ strlen(filename), lmmsize,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
+
+ op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
+ rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
if (rc < 0) {
CDEBUG(D_INFO, "md_getattr_name failed "
"on %s: rc %d\n", filename, rc);
LASSERT(lmm != NULL);
if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_JOIN))) {
+ (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
GOTO(out, rc = -EPROTO);
}
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v3 *)lmm)->lmm_objects,
((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
- } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_JOIN)) {
- lustre_swab_lov_user_md_join((struct lov_user_md_join *)lmm);
}
}
- if (lmm->lmm_magic == LOV_MAGIC_JOIN) {
- struct lov_stripe_md *lsm;
- struct lov_user_md_join *lmj;
- int lmj_size, i, aindex = 0;
-
- rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
- if (rc < 0)
- GOTO(out, rc = -ENOMEM);
- rc = obd_checkmd(sbi->ll_dt_exp, sbi->ll_md_exp, lsm);
- if (rc)
- GOTO(out_free_memmd, rc);
-
- lmj_size = sizeof(struct lov_user_md_join) +
- lsm->lsm_stripe_count *
- sizeof(struct lov_user_ost_data_join);
- OBD_ALLOC(lmj, lmj_size);
- if (!lmj)
- GOTO(out_free_memmd, rc = -ENOMEM);
-
- memcpy(lmj, lmm, sizeof(struct lov_user_md_join));
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_extent *lex =
- &lsm->lsm_array->lai_ext_array[aindex];
-
- if (lex->le_loi_idx + lex->le_stripe_count <= i)
- aindex ++;
- CDEBUG(D_INFO, "aindex %d i %d l_extent_start "
- LPU64" len %d\n", aindex, i,
- lex->le_start, (int)lex->le_len);
- lmj->lmm_objects[i].l_extent_start =
- lex->le_start;
-
- if ((int)lex->le_len == -1)
- lmj->lmm_objects[i].l_extent_end = -1;
- else
- lmj->lmm_objects[i].l_extent_end =
- lex->le_start + lex->le_len;
- lmj->lmm_objects[i].l_object_id =
- lsm->lsm_oinfo[i]->loi_id;
- lmj->lmm_objects[i].l_object_gr =
- lsm->lsm_oinfo[i]->loi_gr;
- lmj->lmm_objects[i].l_ost_gen =
- lsm->lsm_oinfo[i]->loi_ost_gen;
- lmj->lmm_objects[i].l_ost_idx =
- lsm->lsm_oinfo[i]->loi_ost_idx;
- }
- lmm = (struct lov_mds_md *)lmj;
- lmmsize = lmj_size;
-out_free_memmd:
- obd_free_memmd(sbi->ll_dt_exp, &lsm);
- }
out:
*lmmp = lmm;
*lmm_size = lmmsize;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
- OBD_ALLOC(lump, lum_size);
+ OBD_ALLOC_LARGE(lump, lum_size);
if (lump == NULL) {
RETURN(-ENOMEM);
}
- if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
- OBD_FREE(lump, lum_size);
+ if (cfs_copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
+ OBD_FREE_LARGE(lump, lum_size);
RETURN(-EFAULT);
}
rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
- OBD_FREE(lump, lum_size);
+ OBD_FREE_LARGE(lump, lum_size);
RETURN(rc);
}
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
- if (copy_from_user(lumv1, lumv1p, lum_size))
+ if (cfs_copy_from_user(lumv1, lumv1p, lum_size))
RETURN(-EFAULT);
if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
lum_size = sizeof(struct lov_user_md_v3);
- if (copy_from_user(&lumv3, lumv3p, lum_size))
+ if (cfs_copy_from_user(&lumv3, lumv3p, lum_size))
RETURN(-EFAULT);
}
rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size);
if (rc == 0) {
- put_user(0, &lumv1p->lmm_stripe_count);
- rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
- 0, ll_i2info(inode)->lli_smd,
- (void *)arg);
- }
- RETURN(rc);
+ struct lov_stripe_md *lsm;
+ put_user(0, &lumv1p->lmm_stripe_count);
+ lsm = ccc_inode_lsm_get(inode);
+ rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
+ 0, lsm, (void *)arg);
+ ccc_inode_lsm_put(inode, lsm);
+ }
+ RETURN(rc);
}
static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
{
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
-
- if (!lsm)
- RETURN(-ENODATA);
-
- return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0, lsm,
- (void *)arg);
+ struct lov_stripe_md *lsm;
+ int rc = -ENODATA;
+ ENTRY;
+
+ /* Hold a layout reference for the duration of the ioctl;
+ * -ENODATA when the file has no stripe. */
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm != NULL)
+ rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0,
+ lsm, (void *)arg);
+ /* put is called unconditionally; lsm may be NULL here. */
+ ccc_inode_lsm_put(inode, lsm);
+ RETURN(rc);
}
int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
int rc;
ENTRY;
- spin_lock(&lli->lli_lock);
+ if (ll_file_nolock(file))
+ RETURN(-EOPNOTSUPP);
+
+ cfs_spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- CERROR("group lock already existed with gid %lu\n",
+ CWARN("group lock already existed with gid %lu\n",
fd->fd_grouplock.cg_gid);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(-EINVAL);
}
LASSERT(fd->fd_grouplock.cg_lock == NULL);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
RETURN(rc);
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
CERROR("another thread just won the race\n");
cl_put_grouplock(&grouplock);
RETURN(-EINVAL);
}
- fd->fd_flags |= (LL_FILE_GROUP_LOCKED | LL_FILE_IGNORE_LOCK);
+ fd->fd_flags |= LL_FILE_GROUP_LOCKED;
fd->fd_grouplock = grouplock;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
RETURN(0);
struct ccc_grouplock grouplock;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- spin_unlock(&lli->lli_lock);
- CERROR("no group lock held\n");
+ cfs_spin_unlock(&lli->lli_lock);
+ CWARN("no group lock held\n");
RETURN(-EINVAL);
}
LASSERT(fd->fd_grouplock.cg_lock != NULL);
if (fd->fd_grouplock.cg_gid != arg) {
- CERROR("group lock %lu doesn't match current id %lu\n",
+ CWARN("group lock %lu doesn't match current id %lu\n",
arg, fd->fd_grouplock.cg_gid);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(-EINVAL);
}
grouplock = fd->fd_grouplock;
- fd->fd_grouplock.cg_env = NULL;
- fd->fd_grouplock.cg_lock = NULL;
- fd->fd_grouplock.cg_gid = 0;
- fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED | LL_FILE_IGNORE_LOCK);
- spin_unlock(&lli->lli_lock);
+ memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
+ fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
+ cfs_spin_unlock(&lli->lli_lock);
cl_put_grouplock(&grouplock);
CDEBUG(D_INFO, "group lock %lu released\n", arg);
RETURN(0);
}
-#if LUSTRE_FIX >= 50
-static int join_sanity_check(struct inode *head, struct inode *tail)
-{
- ENTRY;
- if ((ll_i2sbi(head)->ll_flags & LL_SBI_JOIN) == 0) {
- CERROR("server do not support join \n");
- RETURN(-EINVAL);
- }
- if (!S_ISREG(tail->i_mode) || !S_ISREG(head->i_mode)) {
- CERROR("tail ino %lu and ino head %lu must be regular\n",
- head->i_ino, tail->i_ino);
- RETURN(-EINVAL);
- }
- if (head->i_ino == tail->i_ino) {
- CERROR("file %lu can not be joined to itself \n", head->i_ino);
- RETURN(-EINVAL);
- }
- if (i_size_read(head) % JOIN_FILE_ALIGN) {
- CERROR("hsize %llu must be times of 64K\n", i_size_read(head));
- RETURN(-EINVAL);
- }
- RETURN(0);
-}
-
-static int join_file(struct inode *head_inode, struct file *head_filp,
- struct file *tail_filp)
-{
- struct dentry *tail_dentry = tail_filp->f_dentry;
- struct lookup_intent oit = {.it_op = IT_OPEN,
- .it_flags = head_filp->f_flags,
- .it_create_mode = M_JOIN_FILE};
- struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_CW,
- ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
-
- struct lustre_handle lockh;
- struct md_op_data *op_data;
- int rc;
- loff_t data;
- ENTRY;
-
- tail_dentry = tail_filp->f_dentry;
-
- data = i_size_read(head_inode);
- op_data = ll_prep_md_op_data(NULL, head_inode,
- tail_dentry->d_parent->d_inode,
- tail_dentry->d_name.name,
- tail_dentry->d_name.len, 0,
- LUSTRE_OPC_ANY, &data);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
-
- rc = md_enqueue(ll_i2mdexp(head_inode), &einfo, &oit,
- op_data, &lockh, NULL, 0, NULL, 0);
-
- ll_finish_md_op_data(op_data);
- if (rc < 0)
- GOTO(out, rc);
-
- rc = oit.d.lustre.it_status;
-
- if (rc < 0 || it_open_error(DISP_OPEN_OPEN, &oit)) {
- rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, &oit);
- ptlrpc_req_finished((struct ptlrpc_request *)
- oit.d.lustre.it_data);
- GOTO(out, rc);
- }
-
- if (oit.d.lustre.it_lock_mode) { /* If we got lock - release it right
- * away */
- ldlm_lock_decref(&lockh, oit.d.lustre.it_lock_mode);
- oit.d.lustre.it_lock_mode = 0;
- }
- ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
- it_clear_disposition(&oit, DISP_ENQ_COMPLETE);
- ll_release_openhandle(head_filp->f_dentry, &oit);
-out:
- ll_intent_release(&oit);
- RETURN(rc);
-}
-
-static int ll_file_join(struct inode *head, struct file *filp,
- char *filename_tail)
-{
- struct inode *tail = NULL, *first = NULL, *second = NULL;
- struct dentry *tail_dentry;
- struct file *tail_filp, *first_filp, *second_filp;
- struct ll_lock_tree first_tree, second_tree;
- struct ll_lock_tree_node *first_node, *second_node;
- struct ll_inode_info *hlli = ll_i2info(head);
- int rc = 0, cleanup_phase = 0;
- ENTRY;
-
- CDEBUG(D_VFSTRACE, "VFS Op:head=%lu/%u(%p) tail %s\n",
- head->i_ino, head->i_generation, head, filename_tail);
-
- tail_filp = filp_open(filename_tail, O_WRONLY, 0644);
- if (IS_ERR(tail_filp)) {
- CERROR("Can not open tail file %s", filename_tail);
- rc = PTR_ERR(tail_filp);
- GOTO(cleanup, rc);
- }
- tail = igrab(tail_filp->f_dentry->d_inode);
-
- tail_dentry = tail_filp->f_dentry;
- LASSERT(tail_dentry);
- cleanup_phase = 1;
-
- /*reorder the inode for lock sequence*/
- first = head->i_ino > tail->i_ino ? head : tail;
- second = head->i_ino > tail->i_ino ? tail : head;
- first_filp = head->i_ino > tail->i_ino ? filp : tail_filp;
- second_filp = head->i_ino > tail->i_ino ? tail_filp : filp;
-
- CDEBUG(D_INFO, "reorder object from %lu:%lu to %lu:%lu \n",
- head->i_ino, tail->i_ino, first->i_ino, second->i_ino);
- first_node = ll_node_from_inode(first, 0, OBD_OBJECT_EOF, LCK_EX);
- if (IS_ERR(first_node)){
- rc = PTR_ERR(first_node);
- GOTO(cleanup, rc);
- }
- first_tree.lt_fd = first_filp->private_data;
- rc = ll_tree_lock(&first_tree, first_node, NULL, 0, 0);
- if (rc != 0)
- GOTO(cleanup, rc);
- cleanup_phase = 2;
-
- second_node = ll_node_from_inode(second, 0, OBD_OBJECT_EOF, LCK_EX);
- if (IS_ERR(second_node)){
- rc = PTR_ERR(second_node);
- GOTO(cleanup, rc);
- }
- second_tree.lt_fd = second_filp->private_data;
- rc = ll_tree_lock(&second_tree, second_node, NULL, 0, 0);
- if (rc != 0)
- GOTO(cleanup, rc);
- cleanup_phase = 3;
-
- rc = join_sanity_check(head, tail);
- if (rc)
- GOTO(cleanup, rc);
-
- rc = join_file(head, filp, tail_filp);
- if (rc)
- GOTO(cleanup, rc);
-cleanup:
- switch (cleanup_phase) {
- case 3:
- ll_tree_unlock(&second_tree);
- obd_cancel_unused(ll_i2dtexp(second),
- ll_i2info(second)->lli_smd, 0, NULL);
- case 2:
- ll_tree_unlock(&first_tree);
- obd_cancel_unused(ll_i2dtexp(first),
- ll_i2info(first)->lli_smd, 0, NULL);
- case 1:
- filp_close(tail_filp, 0);
- if (tail)
- iput(tail);
- if (head && rc == 0) {
- obd_free_memmd(ll_i2sbi(head)->ll_dt_exp,
- &hlli->lli_smd);
- hlli->lli_smd = NULL;
- }
- case 0:
- break;
- default:
- CERROR("invalid cleanup_phase %d\n", cleanup_phase);
- LBUG();
- }
- RETURN(rc);
-}
-#endif /* LUSTRE_FIX >= 50 */
-
/**
* Close inode open handle
*
inode, och);
out:
/* this one is in place of ll_file_open */
- if (it_disposition(it, DISP_ENQ_OPEN_REF))
+ if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
ptlrpc_req_finished(it->d.lustre.it_data);
- it_clear_disposition(it, DISP_ENQ_OPEN_REF);
+ it_clear_disposition(it, DISP_ENQ_OPEN_REF);
+ }
RETURN(rc);
}
* Get size for inode for which FIEMAP mapping is requested.
* Make the FIEMAP get_info call and returns the result.
*/
-int ll_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
int num_bytes)
{
- struct obd_export *exp = ll_i2dtexp(inode);
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
+ struct obd_export *exp = ll_i2dtexp(inode);
+ struct lov_stripe_md *lsm = NULL;
struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
int vallen = num_bytes;
int rc;
ENTRY;
- /* If the stripe_count > 1 and the application does not understand
- * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
- */
- if (lsm->lsm_stripe_count > 1 &&
- !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER))
- return -EOPNOTSUPP;
+ /* Reject flags we do not understand; report back which flags are
+ * supported by clearing the unsupported bits (FIEMAP convention). */
+ if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
+ fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
+ return -EBADR;
+ }
+
+ /* FIEMAP_FLAG_SYNC: flush dirty pages before mapping extents. */
+ if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
+ rc = filemap_fdatawrite(inode->i_mapping);
+ if (rc)
+ return rc;
+ }
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm == NULL)
+ return -ENOENT;
+
+ /* If the stripe_count > 1 and the application does not understand
+ * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
+ */
+ if (lsm->lsm_stripe_count > 1 &&
+ !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER))
+ GOTO(out, rc = -EOPNOTSUPP);
fm_key.oa.o_id = lsm->lsm_object_id;
- fm_key.oa.o_gr = lsm->lsm_object_gr;
+ fm_key.oa.o_seq = lsm->lsm_object_seq;
fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
- obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLFID | OBD_MD_FLGROUP |
- OBD_MD_FLSIZE);
-
+ obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE);
+ obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid);
/* If filesize is 0, then there would be no objects for mapping */
if (fm_key.oa.o_size == 0) {
fiemap->fm_mapped_extents = 0;
- RETURN(0);
+ GOTO(out, rc = 0);
}
memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap));
- rc = obd_get_info(exp, sizeof(fm_key), &fm_key, &vallen, fiemap, lsm);
+ rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen,
+ fiemap, lsm);
if (rc)
CERROR("obd_get_info failed: rc = %d\n", rc);
+out:
+ /* Drop the layout reference taken above on all exit paths. */
+ ccc_inode_lsm_put(inode, lsm);
+ RETURN(rc);
}
int ll_fid2path(struct obd_export *exp, void *arg)
OBD_ALLOC_PTR(gfin);
if (gfin == NULL)
RETURN(-ENOMEM);
- if (copy_from_user(gfin, arg, sizeof(*gfin))) {
+ if (cfs_copy_from_user(gfin, arg, sizeof(*gfin))) {
OBD_FREE_PTR(gfin);
RETURN(-EFAULT);
}
rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
if (rc)
GOTO(gf_free, rc);
- if (copy_to_user(arg, gfout, outsize))
+ if (cfs_copy_to_user(arg, gfout, outsize))
rc = -EFAULT;
gf_free:
RETURN(rc);
}
-int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+/**
+ * FSFILT_IOC_FIEMAP handler: size and allocate a kernel fiemap buffer
+ * from the user-supplied extent count, run ll_do_fiemap() and copy the
+ * mapped extents back out.
+ *
+ * \retval 0 on success, negative errno on failure.
+ */
+static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
+{
+ struct ll_user_fiemap *fiemap_s;
+ size_t num_bytes, ret_bytes;
+ unsigned int extent_count;
+ int rc = 0;
+
+ /* Get the extent count so we can calculate the size of
+ * required fiemap buffer */
+ if (get_user(extent_count,
+ &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
+ RETURN(-EFAULT);
+
+ /* extent_count is user-controlled: refuse values that would make
+ * the buffer-size computation below wrap around, which would
+ * under-allocate fiemap_s and let later copies run off the end. */
+ if (extent_count >=
+ (SIZE_MAX - sizeof(*fiemap_s)) / sizeof(struct ll_fiemap_extent))
+ RETURN(-EINVAL);
+ num_bytes = sizeof(*fiemap_s) + (extent_count *
+ sizeof(struct ll_fiemap_extent));
+
+ OBD_ALLOC_LARGE(fiemap_s, num_bytes);
+ if (fiemap_s == NULL)
+ RETURN(-ENOMEM);
+
+ /* get the fiemap value */
+ if (cfs_copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
+ sizeof(*fiemap_s)))
+ GOTO(error, rc = -EFAULT);
+
+ /* If fm_extent_count is non-zero, read the first extent since
+ * it is used to calculate end_offset and device from previous
+ * fiemap call. */
+ if (extent_count) {
+ if (cfs_copy_from_user(&fiemap_s->fm_extents[0],
+ (char __user *)arg + sizeof(*fiemap_s),
+ sizeof(struct ll_fiemap_extent)))
+ GOTO(error, rc = -EFAULT);
+ }
+
+ rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
+ if (rc)
+ GOTO(error, rc);
+
+ /* Copy back the header plus however many extents were mapped. */
+ ret_bytes = sizeof(struct ll_user_fiemap);
+
+ if (extent_count != 0)
+ ret_bytes += (fiemap_s->fm_mapped_extents *
+ sizeof(struct ll_fiemap_extent));
+
+ if (cfs_copy_to_user((void *)arg, fiemap_s, ret_bytes))
+ rc = -EFAULT;
+
+error:
+ OBD_FREE_LARGE(fiemap_s, num_bytes);
+ RETURN(rc);
+}
+
+/*
+ * Read the data_version for inode.
+ *
+ * This value is computed using stripe object version on OST.
+ * Version is computed using server side locking.
+ *
+ * @param data_version [OUT] receives the version; set to 0 when the
+ * file has no objects (no layout).
+ * @param extent_lock Take extent lock. Not needed if a process is already
+ * holding the OST object group locks.
+ *
+ * @return 0 on success (including the no-layout case), -EOPNOTSUPP if
+ * the OSTs do not report OBD_MD_FLDATAVERSION, negative errno
+ * otherwise.
+ */
+static int ll_data_version(struct inode *inode, __u64 *data_version,
+ int extent_lock)
+{
+ struct lov_stripe_md *lsm = NULL;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obdo *obdo = NULL;
+ int rc;
+ ENTRY;
+
+ /* If no stripe, we consider version is 0. */
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm == NULL) {
+ *data_version = 0;
+ CDEBUG(D_INODE, "No object for inode\n");
+ RETURN(0);
+ }
+
+ OBD_ALLOC_PTR(obdo);
+ if (obdo == NULL) {
+ ccc_inode_lsm_put(inode, lsm);
+ RETURN(-ENOMEM);
+ }
+
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, obdo, 0, extent_lock);
+ if (!rc) {
+ /* Only trust o_data_version when the server marked it valid. */
+ if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
+ rc = -EOPNOTSUPP;
+ else
+ *data_version = obdo->o_data_version;
+ }
+
+ OBD_FREE_PTR(obdo);
+ ccc_inode_lsm_put(inode, lsm);
+
+ RETURN(rc);
+}
+
+long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = file->f_dentry->d_inode;
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags;
+
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
case LL_IOC_LOV_GETSTRIPE:
RETURN(ll_lov_getstripe(inode, arg));
case LL_IOC_RECREATE_OBJ:
- RETURN(ll_lov_recreate_obj(inode, file, arg));
- case EXT3_IOC_FIEMAP: {
- struct ll_user_fiemap *fiemap_s;
- size_t num_bytes, ret_bytes;
- unsigned int extent_count;
- int rc = 0;
-
- /* Get the extent count so we can calculate the size of
- * required fiemap buffer */
- if (get_user(extent_count,
- &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
- RETURN(-EFAULT);
- num_bytes = sizeof(*fiemap_s) + (extent_count *
- sizeof(struct ll_fiemap_extent));
- OBD_VMALLOC(fiemap_s, num_bytes);
- if (fiemap_s == NULL)
- RETURN(-ENOMEM);
-
- if (copy_from_user(fiemap_s,(struct ll_user_fiemap __user *)arg,
- sizeof(*fiemap_s)))
- GOTO(error, rc = -EFAULT);
-
- if (fiemap_s->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
- fiemap_s->fm_flags = fiemap_s->fm_flags &
- ~LUSTRE_FIEMAP_FLAGS_COMPAT;
- if (copy_to_user((char *)arg, fiemap_s,
- sizeof(*fiemap_s)))
- GOTO(error, rc = -EFAULT);
-
- GOTO(error, rc = -EBADR);
- }
-
- /* If fm_extent_count is non-zero, read the first extent since
- * it is used to calculate end_offset and device from previous
- * fiemap call. */
- if (extent_count) {
- if (copy_from_user(&fiemap_s->fm_extents[0],
- (char __user *)arg + sizeof(*fiemap_s),
- sizeof(struct ll_fiemap_extent)))
- GOTO(error, rc = -EFAULT);
- }
-
- if (fiemap_s->fm_flags & FIEMAP_FLAG_SYNC) {
- int rc;
-
- rc = filemap_fdatawrite(inode->i_mapping);
- if (rc)
- GOTO(error, rc);
- }
-
- rc = ll_fiemap(inode, fiemap_s, num_bytes);
- if (rc)
- GOTO(error, rc);
-
- ret_bytes = sizeof(struct ll_user_fiemap);
-
- if (extent_count != 0)
- ret_bytes += (fiemap_s->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent));
-
- if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
- rc = -EFAULT;
-
-error:
- OBD_VFREE(fiemap_s, num_bytes);
- RETURN(rc);
- }
- case EXT3_IOC_GETFLAGS:
- case EXT3_IOC_SETFLAGS:
+ RETURN(ll_lov_recreate_obj(inode, arg));
+ case LL_IOC_RECREATE_FID:
+ RETURN(ll_lov_recreate_fid(inode, arg));
+ case FSFILT_IOC_FIEMAP:
+ RETURN(ll_ioctl_fiemap(inode, arg));
+ case FSFILT_IOC_GETFLAGS:
+ case FSFILT_IOC_SETFLAGS:
RETURN(ll_iocontrol(inode, file, cmd, arg));
- case EXT3_IOC_GETVERSION_OLD:
- case EXT3_IOC_GETVERSION:
+ case FSFILT_IOC_GETVERSION_OLD:
+ case FSFILT_IOC_GETVERSION:
RETURN(put_user(inode->i_generation, (int *)arg));
- case LL_IOC_JOIN: {
-#if LUSTRE_FIX >= 50
- /* Allow file join in beta builds to allow debuggging */
- char *ftail;
- int rc;
-
- ftail = getname((const char *)arg);
- if (IS_ERR(ftail))
- RETURN(PTR_ERR(ftail));
- rc = ll_file_join(inode, file, ftail);
- putname(ftail);
- RETURN(rc);
-#else
- CWARN("file join is not supported in this version of Lustre\n");
- RETURN(-ENOTTY);
-#endif
- }
case LL_IOC_GROUP_LOCK:
RETURN(ll_get_grouplock(inode, file, arg));
case LL_IOC_GROUP_UNLOCK:
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
* network encode the arg field.
- case EXT3_IOC_SETVERSION_OLD:
- case EXT3_IOC_SETVERSION:
+ case FSFILT_IOC_SETVERSION_OLD:
+ case FSFILT_IOC_SETVERSION:
*/
case LL_IOC_FLUSHCTX:
RETURN(ll_flush_ctx(inode));
case LL_IOC_PATH2FID: {
- if (copy_to_user((void *)arg, &ll_i2info(inode)->lli_fid,
- sizeof(struct lu_fid)))
+ if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
RETURN(-EFAULT);
RETURN(0);
}
case OBD_IOC_FID2PATH:
RETURN(ll_fid2path(ll_i2mdexp(inode), (void *)arg));
+ case LL_IOC_DATA_VERSION: {
+ struct ioc_data_version idv;
+ int rc;
+ if (cfs_copy_from_user(&idv, (char *)arg, sizeof(idv)))
+ RETURN(-EFAULT);
+
+ rc = ll_data_version(inode, &idv.idv_version,
+ !(idv.idv_flags & LL_DV_NOFLUSH));
+
+ if (rc == 0 &&
+ cfs_copy_to_user((char *) arg, &idv, sizeof(idv)))
+ RETURN(-EFAULT);
+
+ RETURN(rc);
+ }
+
+ case LL_IOC_GET_MDTIDX: {
+ int mdtidx;
+
+ mdtidx = ll_get_mdt_idx(inode);
+ if (mdtidx < 0)
+ RETURN(mdtidx);
+
+ if (put_user((int)mdtidx, (int*)arg))
+ RETURN(-EFAULT);
+
+ RETURN(0);
+ }
+ case OBD_IOC_GETDTNAME:
+ case OBD_IOC_GETMDNAME:
+ RETURN(ll_get_obd_name(inode, cmd, arg));
default: {
int err;
ENTRY;
retval = offset + ((origin == 2) ? i_size_read(inode) :
(origin == 1) ? file->f_pos : 0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%Lu=%#Lx(%s)\n",
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%s)\n",
inode->i_ino, inode->i_generation, inode, retval, retval,
origin == 2 ? "SEEK_END": origin == 1 ? "SEEK_CUR" : "SEEK_SET");
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
if (origin == 2) { /* SEEK_END */
- int nonblock = 0, rc;
-
- if (file->f_flags & O_NONBLOCK)
- nonblock = LDLM_FL_BLOCK_NOWAIT;
+ int rc;
- rc = cl_glimpse_size(inode);
+ rc = ll_glimpse_size(inode);
if (rc != 0)
RETURN(rc);
- ll_inode_size_lock(inode, 0);
offset += i_size_read(inode);
- ll_inode_size_unlock(inode, 0);
} else if (origin == 1) { /* SEEK_CUR */
offset += file->f_pos;
}
RETURN(retval);
}
+/**
+ * .flush file operation: report asynchronous writeback errors recorded
+ * for this inode as -EIO at close(2) time.  @id is the VFS lock-owner
+ * token and is unused here.
+ */
+int ll_flush(struct file *file, fl_owner_t id)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc, err;
+
+ LASSERT(!S_ISDIR(inode->i_mode));
+
+ /* the application should know write failure already. */
+ if (lli->lli_write_rc)
+ return 0;
+
+ /* catch async errors that were recorded back when async writeback
+ * failed for pages in this mapping. */
+ rc = lli->lli_async_rc;
+ lli->lli_async_rc = 0;
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
+
+ /* Any recorded error is collapsed to -EIO for the application. */
+ return rc ? -EIO : 0;
+}
+
+/**
+ * Write out the byte range [start, end] of @inode's data by running a
+ * CIT_FSYNC cl_io.
+ *
+ * @mode selects the amount of work: one of CL_FSYNC_NONE, CL_FSYNC_LOCAL,
+ * CL_FSYNC_DISCARD or CL_FSYNC_ALL (any other value is rejected with
+ * -EINVAL).  Exact per-mode semantics are defined by the CIT_FSYNC
+ * implementation — see enum cl_fsync_mode.
+ *
+ * \retval number of pages written on success, negative errno on failure.
+ */
+int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
+ enum cl_fsync_mode mode)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct obd_capa *capa = NULL;
+ struct cl_fsync_io *fio;
+ int result;
+ ENTRY;
+
+ if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
+ mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
+ RETURN(-EINVAL);
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ capa = ll_osscapa_get(inode, CAPA_OPC_OSS_WRITE);
+
+ io = ccc_env_thread_io(env);
+ io->ci_obj = cl_i2info(inode)->lli_clob;
+ io->ci_ignore_layout = 1;
+
+ /* initialize parameters for sync */
+ fio = &io->u.ci_fsync;
+ fio->fi_capa = capa;
+ fio->fi_start = start;
+ fio->fi_end = end;
+ fio->fi_fid = ll_inode2fid(inode);
+ fio->fi_mode = mode;
+ fio->fi_nr_written = 0;
+
+ if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
+ result = cl_io_loop(env, io);
+ else
+ result = io->ci_result;
+ /* On success report the page count accumulated by the fsync io. */
+ if (result == 0)
+ result = fio->fi_nr_written;
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
+
+ capa_put(capa);
+
+ RETURN(result);
+}
+
+#ifdef HAVE_FILE_FSYNC_4ARGS
+int ll_fsync(struct file *file, loff_t start, loff_t end, int data)
+#elif defined(HAVE_FILE_FSYNC_2ARGS)
+int ll_fsync(struct file *file, int data)
+#else
int ll_fsync(struct file *file, struct dentry *dentry, int data)
+#endif
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file->f_dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
struct ptlrpc_request *req;
struct obd_capa *oc;
+ struct lov_stripe_md *lsm;
int rc, err;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
/* catch async errors that were recorded back when async writeback
* failed for pages in this mapping. */
- err = lli->lli_async_rc;
- lli->lli_async_rc = 0;
- if (rc == 0)
- rc = err;
- if (lsm) {
- err = lov_test_and_clear_async_rc(lsm);
+ if (!S_ISDIR(inode->i_mode)) {
+ err = lli->lli_async_rc;
+ lli->lli_async_rc = 0;
if (rc == 0)
rc = err;
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
}
oc = ll_mdscapa_get(inode);
if (!err)
ptlrpc_req_finished(req);
- if (data && lsm) {
- struct obdo *oa;
-
- OBDO_ALLOC(oa);
- if (!oa)
- RETURN(rc ? rc : -ENOMEM);
-
- oa->o_id = lsm->lsm_object_id;
- oa->o_gr = lsm->lsm_object_gr;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLGROUP);
-
- oc = ll_osscapa_get(inode, CAPA_OPC_OSS_WRITE);
- err = obd_sync(ll_i2sbi(inode)->ll_dt_exp, oa, lsm,
- 0, OBD_OBJECT_EOF, oc);
- capa_put(oc);
- if (!rc)
- rc = err;
- OBDO_FREE(oa);
- }
-
- RETURN(rc);
+ lsm = ccc_inode_lsm_get(inode);
+ if (data && lsm) {
+ err = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
+ CL_FSYNC_ALL);
+ if (rc == 0 && err < 0)
+ rc = err;
+ lli->lli_write_rc = rc < 0 ? rc : 0;
+ }
+ ccc_inode_lsm_put(inode, lsm);
+
+ RETURN(rc);
}
int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
.ei_cbdata = file_lock };
struct md_op_data *op_data;
struct lustre_handle lockh = {0};
- ldlm_policy_data_t flock;
+ ldlm_policy_data_t flock = {{0}};
int flags = 0;
int rc;
ENTRY;
if (file_lock->fl_flags & FL_FLOCK) {
LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
- /* set missing params for flock() calls */
- file_lock->fl_end = OFFSET_MAX;
- file_lock->fl_pid = current->tgid;
+ /* flocks are whole-file locks */
+ flock.l_flock.end = OFFSET_MAX;
+ /* For flocks the owner is determined by the local file descriptor */
+ flock.l_flock.owner = (unsigned long)file_lock->fl_file;
+ } else if (file_lock->fl_flags & FL_POSIX) {
+ flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
+ flock.l_flock.start = file_lock->fl_start;
+ flock.l_flock.end = file_lock->fl_end;
+ } else {
+ RETURN(-EINVAL);
}
flock.l_flock.pid = file_lock->fl_pid;
- flock.l_flock.start = file_lock->fl_start;
- flock.l_flock.end = file_lock->fl_end;
+
+ /* Somewhat ugly workaround for svc lockd.
+ * lockd installs custom fl_lmops->fl_compare_owner that checks
+ * for the fl_owner to be the same (which it always is on local node
+ * I guess between lockd processes) and then compares pid.
+ * As such we assign pid to the owner field to make it all work,
+ * conflict with normal locks is unlikely since pid space and
+ * pointer space for current->files are not intersecting */
+ if (file_lock->fl_lmops && file_lock->fl_lmops->fl_compare_owner)
+ flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
switch (file_lock->fl_type) {
case F_RDLCK:
einfo.ei_mode = LCK_PW;
break;
default:
- CERROR("unknown fcntl lock type: %d\n", file_lock->fl_type);
- RETURN (-EINVAL);
+ CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n",
+ file_lock->fl_type);
+ RETURN (-ENOTSUPP);
}
switch (cmd) {
if ((file_lock->fl_flags & FL_FLOCK) &&
(rc == 0 || file_lock->fl_type == F_UNLCK))
- ll_flock_lock_file_wait(file, file_lock, (cmd == F_SETLKW));
-#ifdef HAVE_F_OP_FLOCK
+ flock_lock_file_wait(file, file_lock);
if ((file_lock->fl_flags & FL_POSIX) &&
(rc == 0 || file_lock->fl_type == F_UNLCK) &&
!(flags & LDLM_FL_TEST_LOCK))
posix_lock_file_wait(file, file_lock);
-#endif
RETURN(rc);
}
RETURN(-ENOSYS);
}
-int ll_have_md_lock(struct inode *inode, __u64 bits)
+/**
+ * test if some locks matching bits and l_req_mode are acquired
+ * - bits can be in different locks
+ * - if found clear the common lock bits in *bits
+ * - the bits not found, are kept in *bits
+ * \param inode [IN]
+ * \param bits [IN] searched lock bits [IN]
+ * \param l_req_mode [IN] searched lock mode; LCK_MINMODE means
+ * "any of CR/CW/PR/PW"
+ * \retval boolean, true iff all bits are found
+ */
+int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
{
struct lustre_handle lockh;
- ldlm_policy_data_t policy = { .l_inodebits = {bits}};
+ ldlm_policy_data_t policy;
+ ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
+ (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
struct lu_fid *fid;
int flags;
+ int i;
ENTRY;
if (!inode)
RETURN(0);
fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
+ CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
+ ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
- if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh)) {
- RETURN(1);
+ /* Probe each requested bit separately: the bits may be granted
+ * by different locks. */
+ for (i = 0; i < MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
+ policy.l_inodebits.bits = *bits & (1 << i);
+ if (policy.l_inodebits.bits == 0)
+ continue;
+
+ if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
+ &policy, mode, &lockh)) {
+ struct ldlm_lock *lock;
+
+ lock = ldlm_handle2lock(&lockh);
+ if (lock) {
+ /* Clear every bit the matched lock covers,
+ * not just the one we probed for. */
+ *bits &=
+ ~(lock->l_policy_data.l_inodebits.bits);
+ LDLM_LOCK_PUT(lock);
+ } else {
+ *bits &= ~policy.l_inodebits.bits;
+ }
+ }
}
- RETURN(0);
+ RETURN(*bits == 0);
}
ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
{
struct inode *inode = dentry->d_inode;
struct ptlrpc_request *req = NULL;
- struct ll_sb_info *sbi;
struct obd_export *exp;
int rc = 0;
ENTRY;
CERROR("REPORT THIS LINE TO PETER\n");
RETURN(0);
}
- sbi = ll_i2sbi(inode);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n",
inode->i_ino, inode->i_generation, inode, dentry->d_name.name);
exp = ll_i2mdexp(inode);
+ /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC.
+ * But under CMD case, it caused some lock issues, should be fixed
+ * with new CMD ibits lock. See bug 12718 */
if (exp->exp_connect_flags & OBD_CONNECT_ATTRFID) {
struct lookup_intent oit = { .it_op = IT_GETATTR };
struct md_op_data *op_data;
+ if (ibits == MDS_INODELOCK_LOOKUP)
+ oit.it_op = IT_LOOKUP;
+
/* Call getattr by fid, so do not provide name at all. */
op_data = ll_prep_md_op_data(NULL, dentry->d_parent->d_inode,
dentry->d_inode, NULL, 0, 0,
do_lookup() -> ll_revalidate_it(). We cannot use d_drop
here to preserve get_cwd functionality on 2.6.
Bug 10503 */
- if (!dentry->d_inode->i_nlink) {
- spin_lock(&ll_lookup_lock);
- spin_lock(&dcache_lock);
- ll_drop_dentry(dentry);
- spin_unlock(&dcache_lock);
- spin_unlock(&ll_lookup_lock);
- }
+ if (!dentry->d_inode->i_nlink)
+ d_lustre_invalidate(dentry);
ll_lookup_finish_locks(&oit, dentry);
- } else if (!ll_have_md_lock(dentry->d_inode, ibits)) {
-
+ } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
obd_valid valid = OBD_MD_FLGETATTR;
- struct obd_capa *oc;
+ struct md_op_data *op_data;
int ealen = 0;
if (S_ISREG(inode->i_mode)) {
RETURN(rc);
valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE;
}
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
+ 0, ealen, LUSTRE_OPC_ANY,
+ NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
+
+ op_data->op_valid = valid;
/* Once OBD_CONNECT_ATTRFID is not supported, we can't find one
* capa for this inode. Because we only keep capas of dirs
* fresh. */
- oc = ll_mdscapa_get(inode);
- rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode), oc, valid,
- ealen, &req);
- capa_put(oc);
+ rc = md_getattr(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
if (rc) {
rc = ll_inode_revalidate_fini(inode, rc);
RETURN(rc);
return rc;
}
-int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
+int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
+ __u64 ibits)
{
+ struct inode *inode = dentry->d_inode;
int rc;
ENTRY;
- rc = __ll_inode_revalidate_it(dentry, it, MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LOOKUP);
+ rc = __ll_inode_revalidate_it(dentry, it, ibits);
/* if object not yet allocated, don't validate size */
- if (rc == 0 && ll_i2info(dentry->d_inode)->lli_smd == NULL)
+ if (rc == 0 && !ll_i2info(dentry->d_inode)->lli_has_smd) {
+ LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
+ LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
+ LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
RETURN(0);
+ }
- /* cl_glimpse_size will prefer locally cached writes if they extend
+ /* ll_glimpse_size will prefer locally cached writes if they extend
* the file */
if (rc == 0)
- rc = cl_glimpse_size(dentry->d_inode);
+ rc = ll_glimpse_size(inode);
RETURN(rc);
}
struct lookup_intent *it, struct kstat *stat)
{
struct inode *inode = de->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
int res = 0;
- res = ll_inode_revalidate_it(de, it);
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETATTR, 1);
+ res = ll_inode_revalidate_it(de, it, MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LOOKUP);
+ ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
if (res)
return res;
stat->dev = inode->i_sb->s_dev;
- stat->ino = inode->i_ino;
+ if (ll_need_32bit_api(sbi))
+ stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
+ else
+ stat->ino = inode->i_ino;
stat->mode = inode->i_mode;
stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
stat->blksize = 1 << inode->i_blkbits;
#endif
- ll_inode_size_lock(inode, 0);
stat->size = i_size_read(inode);
stat->blocks = inode->i_blocks;
- ll_inode_size_unlock(inode, 0);
return 0;
}
return ll_getattr_it(mnt, de, &it, stat);
}
-static
-int lustre_check_acl(struct inode *inode, int mask)
+#ifdef HAVE_LINUX_FIEMAP_H
+int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len)
+{
+ int rc;
+ size_t num_bytes;
+ struct ll_user_fiemap *fiemap;
+ unsigned int extent_count = fieinfo->fi_extents_max;
+
+ num_bytes = sizeof(*fiemap) + (extent_count *
+ sizeof(struct ll_fiemap_extent));
+ OBD_ALLOC_LARGE(fiemap, num_bytes);
+
+ if (fiemap == NULL)
+ RETURN(-ENOMEM);
+
+ fiemap->fm_flags = fieinfo->fi_flags;
+ fiemap->fm_extent_count = fieinfo->fi_extents_max;
+ fiemap->fm_start = start;
+ fiemap->fm_length = len;
+ memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
+ sizeof(struct ll_fiemap_extent));
+
+ rc = ll_do_fiemap(inode, fiemap, num_bytes);
+
+ fieinfo->fi_flags = fiemap->fm_flags;
+ fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
+ memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
+ fiemap->fm_mapped_extents * sizeof(struct ll_fiemap_extent));
+
+ OBD_FREE_LARGE(fiemap, num_bytes);
+ return rc;
+}
+#endif
+
+
+static int
+#ifdef HAVE_GENERIC_PERMISSION_4ARGS
+lustre_check_acl(struct inode *inode, int mask, unsigned int flags)
+#else
+lustre_check_acl(struct inode *inode, int mask)
+#endif
{
#ifdef CONFIG_FS_POSIX_ACL
struct ll_inode_info *lli = ll_i2info(inode);
int rc;
ENTRY;
- spin_lock(&lli->lli_lock);
+#ifdef HAVE_GENERIC_PERMISSION_4ARGS
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+#endif
+ cfs_spin_lock(&lli->lli_lock);
acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
if (!acl)
RETURN(-EAGAIN);
#endif
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
+#ifdef HAVE_GENERIC_PERMISSION_4ARGS
+int ll_inode_permission(struct inode *inode, int mask, unsigned int flags)
+#else
+# ifdef HAVE_INODE_PERMISION_2ARGS
+int ll_inode_permission(struct inode *inode, int mask)
+# else
int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
+# endif
+#endif
{
int rc = 0;
ENTRY;
+#ifdef HAVE_GENERIC_PERMISSION_4ARGS
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+#endif
+
/* as root inode are NOT getting validated in lookup operation,
* need to do it before permission check. */
return lustre_check_remote_perm(inode, mask);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
- rc = generic_permission(inode, mask, lustre_check_acl);
+ rc = ll_generic_permission(inode, mask, flags, lustre_check_acl);
RETURN(rc);
}
-#else
-int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
-{
- int mode = inode->i_mode;
- int rc;
-
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), mask %o\n",
- inode->i_ino, inode->i_generation, inode, mask);
-
- if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
- return lustre_check_remote_perm(inode, mask);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
-
- if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
- if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
- return -EACCES;
- if (current->fsuid == inode->i_uid) {
- mode >>= 6;
- } else if (1) {
- if (((mode >> 3) & mask & S_IRWXO) != mask)
- goto check_groups;
- rc = lustre_check_acl(inode, mask);
- if (rc == -EAGAIN)
- goto check_groups;
- if (rc == -EACCES)
- goto check_capabilities;
- return rc;
- } else {
-check_groups:
- if (in_group_p(inode->i_gid))
- mode >>= 3;
- }
- if ((mode & mask & S_IRWXO) == mask)
- return 0;
-
-check_capabilities:
- if (!(mask & MAY_EXEC) ||
- (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
- if (cfs_capable(CFS_CAP_DAC_OVERRIDE))
- return 0;
-
- if (cfs_capable(CFS_CAP_DAC_READ_SEARCH) && ((mask == MAY_READ) ||
- (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))))
- return 0;
-
- return -EACCES;
-}
-#endif
#ifdef HAVE_FILE_READV
#define READ_METHOD readv
.READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
.WRITE_METHOD = WRITE_FUNCTION,
- .ioctl = ll_file_ioctl,
+ .unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
+#ifdef HAVE_KERNEL_SENDFILE
.sendfile = ll_file_sendfile,
+#endif
+#ifdef HAVE_KERNEL_SPLICE_READ
+ .splice_read = ll_file_splice_read,
+#endif
.fsync = ll_fsync,
+ .flush = ll_flush
};
struct file_operations ll_file_operations_flock = {
.READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
.WRITE_METHOD = WRITE_FUNCTION,
- .ioctl = ll_file_ioctl,
+ .unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
+#ifdef HAVE_KERNEL_SENDFILE
.sendfile = ll_file_sendfile,
+#endif
+#ifdef HAVE_KERNEL_SPLICE_READ
+ .splice_read = ll_file_splice_read,
+#endif
.fsync = ll_fsync,
-#ifdef HAVE_F_OP_FLOCK
+ .flush = ll_flush,
.flock = ll_file_flock,
-#endif
.lock = ll_file_flock
};
.READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
.WRITE_METHOD = WRITE_FUNCTION,
- .ioctl = ll_file_ioctl,
+ .unlocked_ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
+#ifdef HAVE_KERNEL_SENDFILE
.sendfile = ll_file_sendfile,
+#endif
+#ifdef HAVE_KERNEL_SPLICE_READ
+ .splice_read = ll_file_splice_read,
+#endif
.fsync = ll_fsync,
-#ifdef HAVE_F_OP_FLOCK
+ .flush = ll_flush,
.flock = ll_file_noflock,
-#endif
.lock = ll_file_noflock
};
struct inode_operations ll_file_inode_operations = {
-#ifdef HAVE_VFS_INTENT_PATCHES
- .setattr_raw = ll_setattr_raw,
-#endif
.setattr = ll_setattr,
.truncate = ll_truncate,
.getattr = ll_getattr,
.getxattr = ll_getxattr,
.listxattr = ll_listxattr,
.removexattr = ll_removexattr,
+#ifdef HAVE_LINUX_FIEMAP_H
+ .fiemap = ll_fiemap,
+#endif
};
/* dynamic ioctl number support routins */
static struct llioc_ctl_data {
- struct rw_semaphore ioc_sem;
- struct list_head ioc_head;
+ cfs_rw_semaphore_t ioc_sem;
+ cfs_list_t ioc_head;
} llioc = {
__RWSEM_INITIALIZER(llioc.ioc_sem),
CFS_LIST_HEAD_INIT(llioc.ioc_head)
struct llioc_data {
- struct list_head iocd_list;
+ cfs_list_t iocd_list;
unsigned int iocd_size;
llioc_callback_t iocd_cb;
unsigned int iocd_count;
in_data->iocd_count = count;
memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
- down_write(&llioc.ioc_sem);
- list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- up_write(&llioc.ioc_sem);
+ cfs_down_write(&llioc.ioc_sem);
+ cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
+ cfs_up_write(&llioc.ioc_sem);
RETURN(in_data);
}
if (magic == NULL)
return;
- down_write(&llioc.ioc_sem);
- list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
+ cfs_down_write(&llioc.ioc_sem);
+ cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
if (tmp == magic) {
unsigned int size = tmp->iocd_size;
- list_del(&tmp->iocd_list);
- up_write(&llioc.ioc_sem);
+ cfs_list_del(&tmp->iocd_list);
+ cfs_up_write(&llioc.ioc_sem);
OBD_FREE(tmp, size);
return;
}
}
- up_write(&llioc.ioc_sem);
+ cfs_up_write(&llioc.ioc_sem);
CWARN("didn't find iocontrol register block with magic: %p\n", magic);
}
struct llioc_data *data;
int rc = -EINVAL, i;
- down_read(&llioc.ioc_sem);
- list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
+ cfs_down_read(&llioc.ioc_sem);
+ cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
for (i = 0; i < data->iocd_count; i++) {
if (cmd != data->iocd_cmd[i])
continue;
if (ret == LLIOC_STOP)
break;
}
- up_read(&llioc.ioc_sem);
+ cfs_up_read(&llioc.ioc_sem);
if (rcp)
*rcp = rc;
return ret;
}
+
+int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ int result;
+ ENTRY;
+
+ if (lli->lli_clob == NULL)
+ RETURN(0);
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ result = cl_conf_set(env, lli->lli_clob, conf);
+ cl_env_nested_put(&nest, env);
+ RETURN(result);
+}
+
+/**
+ * This function checks if there exists a LAYOUT lock on the client side,
+ * or enqueues it if it doesn't have one in cache.
+ *
+ * This function will not hold the layout lock, so it may be revoked any time
+ * after this function returns. Any operation that depends on the layout
+ * should be redone in that case.
+ *
+ * This function should be called before lov_io_init() to get an up-to-date
+ * layout version; the caller should save the version number, and after IO
+ * is finished, this function should be called again to verify that the
+ * layout was not changed during the IO.
+ */
+int ll_layout_refresh(struct inode *inode, __u32 *gen)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct md_op_data *op_data = NULL;
+ struct ptlrpc_request *req = NULL;
+ struct lookup_intent it = { .it_op = IT_LAYOUT };
+ struct lustre_handle lockh;
+ ldlm_mode_t mode;
+ struct cl_object_conf conf = { .coc_inode = inode,
+ .coc_validate_only = true };
+ int rc;
+ ENTRY;
+
+ *gen = 0;
+ if (!(ll_i2sbi(inode)->ll_flags & LL_SBI_LAYOUT_LOCK))
+ RETURN(0);
+
+ /* sanity checks */
+ LASSERT(fid_is_sane(ll_inode2fid(inode)));
+ LASSERT(S_ISREG(inode->i_mode));
+
+ /* The layout lock is usually cached on the local side, so try to
+ * match it before grabbing the layout lock mutex. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh);
+ if (mode != 0) { /* hit cached lock */
+ struct lov_stripe_md *lsm;
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm != NULL)
+ *gen = lsm->lsm_layout_gen;
+ ccc_inode_lsm_put(inode, lsm);
+ ldlm_lock_decref(&lockh, mode);
+
+ RETURN(0);
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ RETURN(PTR_ERR(op_data));
+
+ /* take layout lock mutex to enqueue layout lock exclusively. */
+ cfs_mutex_lock(&lli->lli_layout_mutex);
+
+ /* make sure the old conf goes away */
+ ll_layout_conf(inode, &conf);
+
+ /* enqueue layout lock */
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0,
+ &req, ll_md_blocking_ast, 0);
+ if (rc == 0) {
+ /* we get a new lock, so update the lock data */
+ lockh.cookie = it.d.lustre.it_lock_handle;
+ md_set_lock_data(sbi->ll_md_exp, &lockh.cookie, inode, NULL);
+
+ /* req == NULL means the lock was found in the client cache,
+ * without any request to the server (but the lsm can be
+ * canceled just after a release) */
+ if (req != NULL) {
+ struct ldlm_lock *lock = ldlm_handle2lock(&lockh);
+ struct lustre_md md = { NULL };
+ void *lmm;
+ int lmmsize;
+
+ /* for IT_LAYOUT lock, lmm is returned in lock's lvb
+ * data via completion callback */
+ LASSERT(lock != NULL);
+ lmm = lock->l_lvb_data;
+ lmmsize = lock->l_lvb_len;
+ if (lmm != NULL)
+ rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
+ lmm, lmmsize);
+ if (rc == 0) {
+ if (md.lsm != NULL)
+ *gen = md.lsm->lsm_layout_gen;
+
+ memset(&conf, 0, sizeof conf);
+ conf.coc_inode = inode;
+ conf.u.coc_md = &md;
+ ll_layout_conf(inode, &conf);
+ /* is this racy? */
+ lli->lli_has_smd = md.lsm != NULL;
+ }
+ if (md.lsm != NULL)
+ obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
+
+ LDLM_LOCK_PUT(lock);
+ ptlrpc_req_finished(req);
+ } else { /* hit cached lock */
+ struct lov_stripe_md *lsm;
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm != NULL)
+ *gen = lsm->lsm_layout_gen;
+ ccc_inode_lsm_put(inode, lsm);
+ }
+ ll_intent_drop_lock(&it);
+ }
+ cfs_mutex_unlock(&lli->lli_layout_mutex);
+ ll_finish_md_op_data(op_data);
+
+ RETURN(rc);
+}