* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include "llite_internal.h"
#include <lustre/ll_fiemap.h>
-/* also used by llite/special.c:ll_special_open() */
+#include "cl_object.h"
+
struct ll_file_data *ll_file_data_get(void)
{
struct ll_file_data *fd;
- OBD_SLAB_ALLOC_PTR(fd, ll_file_data_slab);
+ OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
return fd;
}
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
op_data->op_attr_blocks = inode->i_blocks;
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = inode->i_flags;
+ ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
+ ll_inode_to_ext_flags(inode->i_flags);
op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
- memcpy(&op_data->op_handle, fh, sizeof(op_data->op_handle));
+ if (fh)
+ op_data->op_handle = *fh;
op_data->op_capa1 = ll_mdscapa_get(inode);
}
+/**
+ * Closes the IO epoch and packs all the attributes into @op_data for
+ * the CLOSE rpc.
+ */
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle *och)
{
if (!(och->och_flags & FMODE_WRITE))
goto out;
- if (!(ll_i2mdexp(inode)->exp_connect_flags & OBD_CONNECT_SOM) ||
- !S_ISREG(inode->i_mode))
+ if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
else
- ll_epoch_close(inode, op_data, &och, 0);
+ ll_ioepoch_close(inode, op_data, &och, 0);
out:
ll_pack_inode2opdata(inode, op_data, &och->och_fh);
+ ll_prep_md_op_data(op_data, inode, NULL, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
EXIT;
}
struct ptlrpc_request *req = NULL;
struct obd_device *obd = class_exp2obd(exp);
int epoch_close = 1;
- int seq_end = 0, rc;
+ int rc;
ENTRY;
if (obd == NULL) {
GOTO(out, rc = 0);
}
- /*
- * here we check if this is forced umount. If so this is called on
- * canceling "open lock" and we do not call md_close() in this case, as
- * it will not be successful, as import is already deactivated.
- */
- if (obd->obd_force)
- GOTO(out, rc = 0);
-
OBD_ALLOC_PTR(op_data);
if (op_data == NULL)
GOTO(out, rc = -ENOMEM); // XXX We leak openhandle and request here.
ll_prepare_close(inode, op_data, och);
epoch_close = (op_data->op_flags & MF_EPOCH_CLOSE);
rc = md_close(md_exp, op_data, och->och_mod, &req);
- if (rc != -EAGAIN)
- seq_end = 1;
-
if (rc == -EAGAIN) {
/* This close must have the epoch closed. */
- LASSERT(exp->exp_connect_flags & OBD_CONNECT_SOM);
LASSERT(epoch_close);
/* MDS has instructed us to obtain Size-on-MDS attribute from
* OSTs and send setattr to back to MDS. */
- rc = ll_sizeonmds_update(inode, och->och_mod,
- &och->och_fh, op_data->op_ioepoch);
+ rc = ll_som_update(inode, op_data);
if (rc) {
CERROR("inode %lu mdc Size-on-MDS update failed: "
"rc = %d\n", inode->i_ino, rc);
EXIT;
out:
- if ((exp->exp_connect_flags & OBD_CONNECT_SOM) && !epoch_close &&
+ if (exp_connect_som(exp) && !epoch_close &&
S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
ll_queue_done_writing(inode, LLIF_DONE_WRITING);
} else {
- if (seq_end)
- ptlrpc_close_replay_seq(req);
md_clear_open_replay_data(md_exp, och);
/* Free @och if it is not waiting for DONE_WRITING. */
och->och_fh.cookie = DEAD_HANDLE_MAGIC;
och_usecount = &lli->lli_open_fd_read_count;
}
- down(&lli->lli_och_sem);
+ cfs_down(&lli->lli_och_sem);
if (*och_usecount) { /* There are still users of this handle, so
skip freeing it. */
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
RETURN(0);
}
och=*och_p;
*och_p = NULL;
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
if (och) { /* There might be a race and somebody have freed this och
already */
ENTRY;
/* clear group lock, if present */
- if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
- rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP,
- &fd->fd_cwlockh);
- }
+ if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
+ ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
/* Let's see if we have good enough OPEN lock on the file and if
we can skip talking to MDS */
struct inode *inode = file->f_dentry->d_inode;
ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
- down(&lli->lli_och_sem);
+ cfs_down(&lli->lli_och_sem);
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
LASSERT(lli->lli_open_fd_read_count);
lli->lli_open_fd_read_count--;
}
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode,
struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm = lli->lli_smd;
int rc;
-
ENTRY;
+
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
/* The last ref on @file, maybe not the the owner pid of statahead.
* Different processes can open the same dir, "ll_opendir_key" means:
* it is me that should stop the statahead thread. */
- if (lli->lli_opendir_key == fd)
- ll_stop_statahead(inode, fd);
+ if (lli->lli_opendir_key == fd && lli->lli_opendir_pid != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
if (inode->i_sb->s_root == file->f_dentry) {
LUSTRE_FPRIVATE(file) = NULL;
lli->lli_async_rc = 0;
rc = ll_md_close(sbi->ll_md_exp, inode, file);
+
+ if (OBD_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, obd_fail_val))
+ libcfs_debug_dumplog();
+
RETURN(rc);
}
GOTO(out, rc);
}
- if (itp->d.lustre.it_lock_mode)
+ rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL);
+ if (!rc && itp->d.lustre.it_lock_mode)
md_set_lock_data(sbi->ll_md_exp,
&itp->d.lustre.it_lock_handle,
- file->f_dentry->d_inode);
+ file->f_dentry->d_inode, NULL);
- rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL);
out:
ptlrpc_req_finished(itp->d.lustre.it_data);
it_clear_disposition(itp, DISP_ENQ_COMPLETE);
RETURN(rc);
}
+/**
+ * Assign an obtained @ioepoch to client's inode. No lock is needed, MDS does
+ * not believe attributes if a few ioepoch holders exist. Attributes for
+ * previous ioepoch if new one is opened are also skipped by MDS.
+ */
+void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
+{
+ if (ioepoch && lli->lli_ioepoch != ioepoch) {
+ lli->lli_ioepoch = ioepoch;
+ CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID"\n",
+ ioepoch, PFID(&lli->lli_fid));
+ }
+}
+
static int ll_och_fill(struct obd_export *md_exp, struct ll_inode_info *lli,
struct lookup_intent *it, struct obd_client_handle *och)
{
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
och->och_fid = lli->lli_fid;
och->och_flags = it->it_flags;
- lli->lli_ioepoch = body->ioepoch;
+ ll_ioepoch_open(lli, body->ioepoch);
return md_set_open_replay_data(md_exp, och, req);
}
if (fd == NULL)
RETURN(-ENOMEM);
+ fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
- spin_lock(&lli->lli_lock);
- /* "lli->lli_opendir_pid != 0" means someone has set it.
- * "lli->lli_sai != NULL" means the previous statahead has not
- * been cleanup. */
- if (lli->lli_opendir_pid == 0 && lli->lli_sai == NULL) {
- opendir_set = 1;
- lli->lli_opendir_pid = cfs_curproc_pid();
- lli->lli_opendir_key = fd;
- } else if (unlikely(lli->lli_opendir_pid == cfs_curproc_pid())) {
- /* Two cases for this:
- * (1) The same process open such directory many times.
- * (2) The old process opened the directory, and exited
- * before its children processes. Then new process
- * with the same pid opens such directory before the
- * old process's children processes exit.
- * Change the owner to the latest one. */
- opendir_set = 2;
+ cfs_spin_lock(&lli->lli_sa_lock);
+ if (lli->lli_opendir_key == NULL && lli->lli_opendir_pid == 0) {
+ LASSERT(lli->lli_sai == NULL);
lli->lli_opendir_key = fd;
+ lli->lli_opendir_pid = cfs_curproc_pid();
+ opendir_set = 1;
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_sa_lock);
}
if (inode->i_sb->s_root == file->f_dentry) {
* already? XXX - NFS implications? */
oit.it_flags &= ~O_EXCL;
+ /* bug20584, if "it_flags" contains O_CREAT, the file will be
+ * created if necessary, then "IT_CREAT" should be set to keep
+ * consistent with it */
+ if (oit.it_flags & O_CREAT)
+ oit.it_op |= IT_CREAT;
+
it = &oit;
}
och_usecount = &lli->lli_open_fd_read_count;
}
- down(&lli->lli_och_sem);
+ cfs_down(&lli->lli_och_sem);
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
let's close it somehow. This will decref request. */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
ll_file_data_put(fd);
GOTO(out_openerr, rc);
}
rc = ll_local_open(file, it, fd, NULL);
if (rc) {
(*och_usecount)--;
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
ll_file_data_put(fd);
GOTO(out_openerr, rc);
}
could be cancelled, and since blocking ast handler
would attempt to grab och_sem as well, that would
result in a deadlock */
- up(&lli->lli_och_sem);
- it->it_flags |= O_CHECK_STALE;
+ cfs_up(&lli->lli_och_sem);
+ it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
- it->it_flags &= ~O_CHECK_STALE;
+ it->it_create_mode &= ~M_CHECK_STALE;
if (rc) {
ll_file_data_put(fd);
GOTO(out_openerr, rc);
req = it->d.lustre.it_data;
ptlrpc_req_finished(req);
}
- md_set_lock_data(ll_i2sbi(inode)->ll_md_exp,
- &it->d.lustre.it_lock_handle,
- file->f_dentry->d_inode);
goto restart;
}
OBD_ALLOC(*och_p, sizeof (struct obd_client_handle));
GOTO(out_och_free, rc);
}
}
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
/* Must do this outside lli_och_sem lock to prevent deadlock where
different kind of OPEN lock for this same inode gets cancelled
*och_p = NULL; /* OBD_FREE writes some magic there */
(*och_usecount)--;
}
- up(&lli->lli_och_sem);
+ cfs_up(&lli->lli_och_sem);
out_openerr:
- if (opendir_set == 1) {
- lli->lli_opendir_key = NULL;
- lli->lli_opendir_pid = 0;
- } else if (unlikely(opendir_set == 2)) {
- ll_stop_statahead(inode, fd);
- }
+ if (opendir_set != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
}
return rc;
}
-/* Fills the obdo with the attributes for the inode defined by lsm */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo)
+/* Fills the obdo with the attributes for the lsm */
+static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
+ struct obd_capa *capa, struct obdo *obdo,
+ __u64 ioepoch, int sync)
{
struct ptlrpc_request_set *set;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
+ struct obd_info oinfo = { { { 0 } } };
+ int rc;
- struct obd_info oinfo = { { { 0 } } };
- int rc;
ENTRY;
LASSERT(lsm != NULL);
oinfo.oi_md = lsm;
oinfo.oi_oa = obdo;
oinfo.oi_oa->o_id = lsm->lsm_object_id;
- oinfo.oi_oa->o_gr = lsm->lsm_object_gr;
+ oinfo.oi_oa->o_seq = lsm->lsm_object_seq;
oinfo.oi_oa->o_mode = S_IFREG;
+ oinfo.oi_oa->o_ioepoch = ioepoch;
oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLGROUP;
- oinfo.oi_capa = ll_mdscapa_get(inode);
+ OBD_MD_FLGROUP | OBD_MD_FLEPOCH;
+ oinfo.oi_capa = capa;
+ if (sync) {
+ oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
+ oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
+ }
set = ptlrpc_prep_set();
if (set == NULL) {
CERROR("can't allocate ptlrpc set\n");
rc = -ENOMEM;
} else {
- rc = obd_getattr_async(ll_i2dtexp(inode), &oinfo, set);
+ rc = obd_getattr_async(exp, &oinfo, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
}
- capa_put(oinfo.oi_capa);
- if (rc)
- RETURN(rc);
-
- oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE);
-
- obdo_refresh_inode(inode, oinfo.oi_oa, oinfo.oi_oa->o_valid);
- CDEBUG(D_INODE, "objid "LPX64" size %Lu, blocks %llu, blksize %lu\n",
- lli->lli_smd->lsm_object_id, i_size_read(inode),
- (unsigned long long)inode->i_blocks,
- (unsigned long)ll_inode_blksize(inode));
- RETURN(0);
-}
-
-static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct obd_export *exp = ll_i2dtexp(inode);
- struct {
- char name[16];
- struct ldlm_lock *lock;
- } key = { .name = KEY_LOCK_TO_STRIPE, .lock = lock };
- __u32 stripe, vallen = sizeof(stripe);
- struct lov_oinfo *loinfo;
- int rc;
- ENTRY;
-
- if (lsm->lsm_stripe_count == 1)
- GOTO(check, stripe = 0);
-
- /* get our offset in the lov */
- rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe, lsm);
- if (rc != 0) {
- CERROR("obd_get_info: rc = %d\n", rc);
- RETURN(rc);
- }
- LASSERT(stripe < lsm->lsm_stripe_count);
-
-check:
- loinfo = lsm->lsm_oinfo[stripe];
- if (!osc_res_name_eq(loinfo->loi_id, loinfo->loi_gr,
- &lock->l_resource->lr_name)){
- LDLM_ERROR(lock, "resource doesn't match object "LPU64"/"LPU64,
- loinfo->loi_id, loinfo->loi_gr);
- RETURN(-ELDLM_NO_LOCK_DATA);
- }
-
- RETURN(stripe);
-}
-
-/* Get extra page reference to ensure it is not going away */
-void ll_pin_extent_cb(void *data)
-{
- struct page *page = data;
-
- page_cache_get(page);
-
- return;
+ if (rc == 0)
+ oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME | OBD_MD_FLSIZE);
+ RETURN(rc);
}
-/* Flush the page from page cache for an extent as its canceled.
- * Page to remove is delivered as @data.
- *
- * No one can dirty the extent until we've finished our work and they cannot
- * enqueue another lock. The DLM protects us from ll_file_read/write here,
- * but other kernel actors could have pages locked.
- *
- * If @discard is set, there is no need to write the page if it is dirty.
- *
- * Called with the DLM lock held. */
-int ll_page_removal_cb(void *data, int discard)
-{
+/**
+ * Performs the getattr on the inode and updates its fields.
+ * If @sync != 0, perform the getattr under the server-side lock.
+ */
+int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
+ __u64 ioepoch, int sync)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *capa = ll_mdscapa_get(inode);
int rc;
- struct page *page = data;
- struct address_space *mapping;
-
- ENTRY;
-
- /* We have page reference already from ll_pin_page */
- lock_page(page);
-
- /* Already truncated by somebody */
- if (!page->mapping)
- GOTO(out, rc = 0);
- mapping = page->mapping;
-
- ll_teardown_mmaps(mapping,
- (__u64)page->index << PAGE_CACHE_SHIFT,
- ((__u64)page->index<<PAGE_CACHE_SHIFT)|
- ~PAGE_CACHE_MASK);
- LL_CDEBUG_PAGE(D_PAGE, page, "removing page\n");
-
- if (!discard && clear_page_dirty_for_io(page)) {
- LASSERT(page->mapping);
- rc = ll_call_writepage(page->mapping->host, page);
- /* either waiting for io to complete or reacquiring
- * the lock that the failed writepage released */
- lock_page(page);
- wait_on_page_writeback(page);
- if (rc != 0) {
- CERROR("writepage inode %lu(%p) of page %p "
- "failed: %d\n", mapping->host->i_ino,
- mapping->host, page, rc);
- if (rc == -ENOSPC)
- set_bit(AS_ENOSPC, &mapping->flags);
- else
- set_bit(AS_EIO, &mapping->flags);
- }
- set_bit(AS_EIO, &mapping->flags);
- }
- if (page->mapping != NULL) {
- struct ll_async_page *llap = llap_cast_private(page);
- /* checking again to account for writeback's lock_page() */
- LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
- if (llap)
- ll_ra_accounting(llap, page->mapping);
- ll_truncate_complete_page(page);
- }
- EXIT;
-out:
- LASSERT(!PageWriteback(page));
- unlock_page(page);
- page_cache_release(page);
-
- return 0;
-}
-
-int ll_extent_lock_cancel_cb(struct ldlm_lock *lock, struct ldlm_lock_desc *new,
- void *data, int flag)
-{
- struct inode *inode;
- struct ll_inode_info *lli;
- struct lov_stripe_md *lsm;
- int stripe;
- __u64 kms;
-
- ENTRY;
-
- if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
- LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
- LBUG();
- }
-
- inode = ll_inode_from_lock(lock);
- if (inode == NULL)
- RETURN(0);
- lli = ll_i2info(inode);
- if (lli == NULL)
- GOTO(iput, 0);
- if (lli->lli_smd == NULL)
- GOTO(iput, 0);
- lsm = lli->lli_smd;
-
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- GOTO(iput, 0);
-
- lov_stripe_lock(lsm);
- lock_res_and_lock(lock);
- kms = ldlm_extent_shift_kms(lock,
- lsm->lsm_oinfo[stripe]->loi_kms);
-
- if (lsm->lsm_oinfo[stripe]->loi_kms != kms)
- LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
- lsm->lsm_oinfo[stripe]->loi_kms, kms);
- lsm->lsm_oinfo[stripe]->loi_kms = kms;
- unlock_res_and_lock(lock);
- lov_stripe_unlock(lsm);
- ll_queue_done_writing(inode, 0);
- EXIT;
-iput:
- iput(inode);
-
- return 0;
-}
-
-#if 0
-int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
-{
- /* XXX ALLOCATE - 160 bytes */
- struct inode *inode = ll_inode_from_lock(lock);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lustre_handle lockh = { 0 };
- struct ost_lvb *lvb;
- int stripe;
- ENTRY;
-
- if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV)) {
- LBUG(); /* not expecting any blocked async locks yet */
- LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
- "lock, returning");
- ldlm_lock_dump(D_OTHER, lock, 0);
- ldlm_reprocess_all(lock->l_resource);
- RETURN(0);
- }
-
- LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
-
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- goto iput;
-
- if (lock->l_lvb_len) {
- struct lov_stripe_md *lsm = lli->lli_smd;
- __u64 kms;
- lvb = lock->l_lvb_data;
- lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
-
- lock_res_and_lock(lock);
- ll_inode_size_lock(inode, 1);
- kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
- kms = ldlm_extent_shift_kms(NULL, kms);
- if (lsm->lsm_oinfo[stripe].loi_kms != kms)
- LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
- lsm->lsm_oinfo[stripe].loi_kms, kms);
- lsm->lsm_oinfo[stripe].loi_kms = kms;
- ll_inode_size_unlock(inode, 1);
- unlock_res_and_lock(lock);
- }
-
-iput:
- iput(inode);
- wake_up(&lock->l_waitq);
-
- ldlm_lock2handle(lock, &lockh);
- ldlm_lock_decref(&lockh, LCK_PR);
- RETURN(0);
-}
-#endif
-
-static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
-{
- struct ptlrpc_request *req = reqp;
- struct inode *inode = ll_inode_from_lock(lock);
- struct ll_inode_info *lli;
- struct lov_stripe_md *lsm;
- struct ost_lvb *lvb;
- int rc, stripe;
ENTRY;
- if (inode == NULL)
- GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
- lli = ll_i2info(inode);
- if (lli == NULL)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
- lsm = lli->lli_smd;
- if (lsm == NULL)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
-
- /* First, find out which stripe index this lock corresponds to. */
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
-
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc) {
- CERROR("lustre_pack_reply: %d\n", rc);
- GOTO(iput, rc);
+ rc = ll_lsm_getattr(lli->lli_smd, ll_i2dtexp(inode),
+ capa, obdo, ioepoch, sync);
+ capa_put(capa);
+ if (rc == 0) {
+ obdo_refresh_inode(inode, obdo, obdo->o_valid);
+ CDEBUG(D_INODE,
+ "objid "LPX64" size %Lu, blocks %llu, blksize %lu\n",
+ lli->lli_smd->lsm_object_id, i_size_read(inode),
+ (unsigned long long)inode->i_blocks,
+ (unsigned long)ll_inode_blksize(inode));
}
-
- lvb = req_capsule_server_get(&req->rq_pill, &RMF_DLM_LVB);
- lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe]->loi_kms;
- lvb->lvb_mtime = LTIME_S(inode->i_mtime);
- lvb->lvb_atime = LTIME_S(inode->i_atime);
- lvb->lvb_ctime = LTIME_S(inode->i_ctime);
-
- LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64
- " atime "LPU64", mtime "LPU64", ctime "LPU64,
- i_size_read(inode), stripe, lvb->lvb_size, lvb->lvb_mtime,
- lvb->lvb_atime, lvb->lvb_ctime);
- iput:
- iput(inode);
-
- out:
- /* These errors are normal races, so we don't want to fill the console
- * with messages by calling ptlrpc_error() */
- if (rc == -ELDLM_NO_LOCK_DATA)
- lustre_pack_reply(req, 1, NULL, NULL);
-
- req->rq_status = rc;
- return rc;
+ RETURN(rc);
}
-static int ll_merge_lvb(struct inode *inode)
+int ll_merge_lvb(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
ll_inode_size_lock(inode, 1);
inode_init_lvb(inode, &lvb);
+
+        /* merge timestamps the most recently obtained from mds with
+           timestamps obtained from osts */
+ lvb.lvb_atime = lli->lli_lvb.lvb_atime;
+ lvb.lvb_mtime = lli->lli_lvb.lvb_mtime;
+ lvb.lvb_ctime = lli->lli_lvb.lvb_ctime;
rc = obd_merge_lvb(sbi->ll_dt_exp, lli->lli_smd, &lvb, 0);
- i_size_write(inode, lvb.lvb_size);
+ cl_isize_write_nolock(inode, lvb.lvb_size);
+
+ CDEBUG(D_VFSTRACE, DFID" updating i_size "LPU64"\n",
+ PFID(&lli->lli_fid), lvb.lvb_size);
inode->i_blocks = lvb.lvb_blocks;
LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
RETURN(rc);
}
-int ll_local_size(struct inode *inode)
-{
- ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lustre_handle lockh = { 0 };
- int flags = 0;
- int rc;
- ENTRY;
-
- if (lli->lli_smd->lsm_stripe_count == 0)
- RETURN(0);
-
- rc = obd_match(sbi->ll_dt_exp, lli->lli_smd, LDLM_EXTENT,
- &policy, LCK_PR, &flags, inode, &lockh);
- if (rc < 0)
- RETURN(rc);
- else if (rc == 0)
- RETURN(-ENODATA);
-
- rc = ll_merge_lvb(inode);
- obd_cancel(sbi->ll_dt_exp, lli->lli_smd, LCK_PR, &lockh);
- RETURN(rc);
-}
-
int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
lstat_t *st)
{
- struct lustre_handle lockh = { 0 };
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- struct ost_lvb lvb;
+ struct obdo obdo = { 0 };
int rc;
- ENTRY;
-
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = LCK_PR;
- einfo.ei_cb_bl = osc_extent_blocking_cb;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = NULL;
-
- oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
- oinfo.oi_lockh = &lockh;
- oinfo.oi_md = lsm;
- oinfo.oi_flags = LDLM_FL_HAS_INTENT;
-
- rc = obd_enqueue_rqset(sbi->ll_dt_exp, &oinfo, &einfo);
- if (rc == -ENOENT)
- RETURN(rc);
- if (rc != 0) {
- CERROR("obd_enqueue returned rc %d, "
- "returning -EIO\n", rc);
- RETURN(rc > 0 ? -EIO : rc);
- }
-
- lov_stripe_lock(lsm);
- memset(&lvb, 0, sizeof(lvb));
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 0);
- st->st_size = lvb.lvb_size;
- st->st_blocks = lvb.lvb_blocks;
- st->st_mtime = lvb.lvb_mtime;
- st->st_atime = lvb.lvb_atime;
- st->st_ctime = lvb.lvb_ctime;
- lov_stripe_unlock(lsm);
-
- RETURN(rc);
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, &obdo, 0, 0);
+ if (rc == 0) {
+ st->st_size = obdo.o_size;
+ st->st_blocks = obdo.o_blocks;
+ st->st_mtime = obdo.o_mtime;
+ st->st_atime = obdo.o_atime;
+ st->st_ctime = obdo.o_ctime;
+ }
+ return rc;
}
-/* NB: obd_merge_lvb will prefer locally cached writes if they extend the
- * file (because it prefers KMS over RSS when larger) */
-int ll_glimpse_size(struct inode *inode, int ast_flags)
+void ll_io_init(struct cl_io *io, const struct file *file, int write)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lustre_handle lockh = { 0 };
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- int rc;
- ENTRY;
-
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- RETURN(0);
-
- CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
-
- if (!lli->lli_smd) {
- CDEBUG(D_DLMTRACE, "No objects for inode %lu\n", inode->i_ino);
- RETURN(0);
- }
+ struct inode *inode = file->f_dentry->d_inode;
- /* NOTE: this looks like DLM lock request, but it may not be one. Due
- * to LDLM_FL_HAS_INTENT flag, this is glimpse request, that
- * won't revoke any conflicting DLM locks held. Instead,
- * ll_glimpse_callback() will be called on each client
- * holding a DLM lock against this file, and resulting size
- * will be returned for each stripe. DLM lock on [0, EOF] is
- * acquired only if there were no conflicting locks. */
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = LCK_PR;
- einfo.ei_cb_bl = osc_extent_blocking_cb;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = inode;
-
- oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
- oinfo.oi_lockh = &lockh;
- oinfo.oi_md = lli->lli_smd;
- oinfo.oi_flags = ast_flags | LDLM_FL_HAS_INTENT;
-
- rc = obd_enqueue_rqset(sbi->ll_dt_exp, &oinfo, &einfo);
- if (rc == -ENOENT)
- RETURN(rc);
- if (rc != 0) {
- CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
- RETURN(rc > 0 ? -EIO : rc);
+ memset(io, 0, sizeof *io);
+ io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
+ if (write)
+ io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_lockreq = CILR_MAYBE;
+ if (ll_file_nolock(file)) {
+ io->ci_lockreq = CILR_NEVER;
+ io->ci_no_srvlock = 1;
+ } else if (file->f_flags & O_APPEND) {
+ io->ci_lockreq = CILR_MANDATORY;
}
-
- rc = ll_merge_lvb(inode);
-
- CDEBUG(D_DLMTRACE, "glimpse: size: %llu, blocks: %llu\n",
- i_size_read(inode), (unsigned long long)inode->i_blocks);
-
- RETURN(rc);
}
-int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm, int mode,
- ldlm_policy_data_t *policy, struct lustre_handle *lockh,
- int ast_flags)
+static ssize_t ll_file_io_generic(const struct lu_env *env,
+ struct vvp_io_args *args, struct file *file,
+ enum cl_io_type iot, loff_t *ppos, size_t count)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ost_lvb lvb;
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- int rc;
+ struct cl_io *io;
+ ssize_t result;
ENTRY;
- LASSERT(!lustre_handle_is_used(lockh));
- LASSERT(lsm != NULL);
-
- /* don't drop the mmapped file to LRU */
- if (mapping_mapped(inode->i_mapping))
- ast_flags |= LDLM_FL_NO_LRU;
-
- /* XXX phil: can we do this? won't it screw the file size up? */
- if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
- (sbi->ll_flags & LL_SBI_NOLCK))
- RETURN(0);
-
- CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
- inode->i_ino, policy->l_extent.start, policy->l_extent.end);
-
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = mode;
- einfo.ei_cb_bl = osc_extent_blocking_cb;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = inode;
-
- oinfo.oi_policy = *policy;
- oinfo.oi_lockh = lockh;
- oinfo.oi_md = lsm;
- oinfo.oi_flags = ast_flags;
-
- rc = obd_enqueue(sbi->ll_dt_exp, &oinfo, &einfo, NULL);
- *policy = oinfo.oi_policy;
- if (rc > 0)
- rc = -EIO;
-
- ll_inode_size_lock(inode, 1);
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 1);
-
- if (policy->l_extent.start == 0 &&
- policy->l_extent.end == OBD_OBJECT_EOF) {
- /* vmtruncate()->ll_truncate() first sets the i_size and then
- * the kms under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size from the kms before the truncating path has
- * updated the kms. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order. */
- i_size_write(inode, lvb.lvb_size);
- CDEBUG(D_INODE, "inode=%lu, updating i_size %llu\n",
- inode->i_ino, i_size_read(inode));
+ io = &ccc_env_info(env)->cti_io;
+ ll_io_init(io, file, iot == CIT_WRITE);
+
+ if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
+ struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ int write_sem_locked = 0;
+
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ vio->cui_io_subtype = args->via_io_subtype;
+
+ switch (vio->cui_io_subtype) {
+ case IO_NORMAL:
+ cio->cui_iov = args->u.normal.via_iov;
+ cio->cui_nrsegs = args->u.normal.via_nrsegs;
+ cio->cui_tot_nrsegs = cio->cui_nrsegs;
+#ifndef HAVE_FILE_WRITEV
+ cio->cui_iocb = args->u.normal.via_iocb;
+#endif
+ if ((iot == CIT_WRITE) &&
+ !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ if(cfs_down_interruptible(&lli->lli_write_sem))
+ GOTO(out, result = -ERESTARTSYS);
+ write_sem_locked = 1;
+ } else if (iot == CIT_READ) {
+ cfs_down_read(&lli->lli_trunc_sem);
+ }
+ break;
+ case IO_SENDFILE:
+ vio->u.sendfile.cui_actor = args->u.sendfile.via_actor;
+ vio->u.sendfile.cui_target = args->u.sendfile.via_target;
+ break;
+ case IO_SPLICE:
+ vio->u.splice.cui_pipe = args->u.splice.via_pipe;
+ vio->u.splice.cui_flags = args->u.splice.via_flags;
+ break;
+ default:
+ CERROR("Unknow IO type - %u\n", vio->cui_io_subtype);
+ LBUG();
+ }
+ result = cl_io_loop(env, io);
+ if (write_sem_locked)
+ cfs_up(&lli->lli_write_sem);
+ else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
+ cfs_up_read(&lli->lli_trunc_sem);
+ } else {
+ /* cl_io_rw_init() handled IO */
+ result = io->ci_result;
}
- if (rc == 0) {
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
+ if (io->ci_nob > 0) {
+ result = io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos;
}
- ll_inode_size_unlock(inode, 1);
-
- RETURN(rc);
+ GOTO(out, result);
+out:
+ cl_io_fini(env, io);
+ return result;
}
-int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm, int mode,
- struct lustre_handle *lockh)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
- ENTRY;
- /* XXX phil: can we do this? won't it screw the file size up? */
- if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
- (sbi->ll_flags & LL_SBI_NOLCK))
- RETURN(0);
+/*
+ * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
+ */
+static int ll_file_get_iov_count(const struct iovec *iov,
+ unsigned long *nr_segs, size_t *count)
+{
+ size_t cnt = 0;
+ unsigned long seg;
- rc = obd_cancel(sbi->ll_dt_exp, lsm, mode, lockh);
+ for (seg = 0; seg < *nr_segs; seg++) {
+ const struct iovec *iv = &iov[seg];
- RETURN(rc);
+ /*
+ * If any segment has a negative length, or the cumulative
+ * length ever wraps negative then return -EINVAL.
+ */
+ cnt += iv->iov_len;
+ if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
+ return -EINVAL;
+ if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
+ continue;
+ if (seg == 0)
+ return -EFAULT;
+ *nr_segs = seg;
+ cnt -= iv->iov_len; /* This segment is no good */
+ break;
+ }
+ *count = cnt;
+ return 0;
}
-static void ll_set_file_contended(struct inode *inode)
+#ifdef HAVE_FILE_READV
+static ssize_t ll_file_readv(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- cfs_time_t now = cfs_time_current();
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
+ ENTRY;
- spin_lock(&lli->lli_lock);
- lli->lli_contention_time = now;
- lli->lli_flags |= LLIF_CONTENDED;
- spin_unlock(&lli->lli_lock);
-}
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
-void ll_clear_file_contended(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_CONTENDED;
- spin_unlock(&lli->lli_lock);
-}
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
-static int ll_is_file_contended(struct file *file)
-{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- ENTRY;
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
- if (!(sbi->ll_lco.lco_flags & OBD_CONNECT_SRVLOCK)) {
- CDEBUG(D_INFO, "the server does not support SRVLOCK feature,"
- " osc connect flags = 0x"LPX64"\n",
- sbi->ll_lco.lco_flags);
- RETURN(0);
- }
- if (fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK))
- RETURN(1);
- if (lli->lli_flags & LLIF_CONTENDED) {
- cfs_time_t cur_time = cfs_time_current();
- cfs_time_t retry_time;
-
- retry_time = cfs_time_add(
- lli->lli_contention_time,
- cfs_time_seconds(sbi->ll_contention_time));
- if (cfs_time_after(cur_time, retry_time)) {
- ll_clear_file_contended(inode);
- RETURN(0);
- }
- RETURN(1);
- }
- RETURN(0);
+ result = ll_file_io_generic(env, args, file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-static int ll_file_get_tree_lock(struct ll_lock_tree *tree, struct file *file,
- const char *buf, size_t count,
- loff_t start, loff_t end, int rw)
+static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
{
- int append;
- int tree_locked = 0;
- int rc;
- struct inode * inode = file->f_dentry->d_inode;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ ssize_t result;
+ int refcheck;
ENTRY;
- append = (rw == OBD_BRW_WRITE) && (file->f_flags & O_APPEND);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- if (append || !ll_is_file_contended(file)) {
- struct ll_lock_tree_node *node;
- int ast_flags;
-
- ast_flags = append ? 0 : LDLM_FL_DENY_ON_CONTENTION;
- if (file->f_flags & O_NONBLOCK)
- ast_flags |= LDLM_FL_BLOCK_NOWAIT;
- node = ll_node_from_inode(inode, start, end,
- (rw == OBD_BRW_WRITE) ? LCK_PW : LCK_PR);
- if (IS_ERR(node)) {
- rc = PTR_ERR(node);
- GOTO(out, rc);
- }
- tree->lt_fd = LUSTRE_FPRIVATE(file);
- rc = ll_tree_lock(tree, node, buf, count, ast_flags);
- if (rc == 0)
- tree_locked = 1;
- else if (rc == -EUSERS)
- ll_set_file_contended(inode);
- else
- GOTO(out, rc);
- }
- RETURN(tree_locked);
-out:
- return rc;
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ result = ll_file_readv(file, local_iov, 1, ppos);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/**
- * Checks if requested extent lock is compatible with a lock under a page.
- *
- * Checks if the lock under \a page is compatible with a read or write lock
- * (specified by \a rw) for an extent [\a start , \a end].
- *
- * \param page the page under which lock is considered
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param start start of the requested extent
- * \param end end of the requested extent
- * \param cookie transparent parameter for passing locking context
- *
- * \post result == 1, *cookie == context, appropriate lock is referenced or
- * \post result == 0
- *
- * \retval 1 owned lock is reused for the request
- * \retval 0 no lock reused for the request
- *
- * \see ll_release_short_lock
- */
-static int ll_reget_short_lock(struct page *page, int rw,
- obd_off start, obd_off end,
- void **cookie)
-{
- struct ll_async_page *llap;
- struct obd_export *exp;
- struct inode *inode = page->mapping->host;
-
+#else
+static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- exp = ll_i2dtexp(inode);
- if (exp == NULL)
- RETURN(0);
-
- llap = llap_cast_private(page);
- if (llap == NULL)
- RETURN(0);
-
- RETURN(obd_reget_short_lock(exp, ll_i2info(inode)->lli_smd,
- &llap->llap_cookie, rw, start, end,
- cookie));
-}
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
-/**
- * Releases a reference to a lock taken in a "fast" way.
- *
- * Releases a read or a write (specified by \a rw) lock
- * referenced by \a cookie.
- *
- * \param inode inode to which data belong
- * \param end end of the locked extent
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param cookie transparent parameter for passing locking context
- *
- * \post appropriate lock is dereferenced
- *
- * \see ll_reget_short_lock
- */
-static void ll_release_short_lock(struct inode *inode, obd_off end,
- void *cookie, int rw)
-{
- struct obd_export *exp;
- int rc;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- exp = ll_i2dtexp(inode);
- if (exp == NULL)
- return;
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iocb = iocb;
- rc = obd_release_short_lock(exp, ll_i2info(inode)->lli_smd, end,
- cookie, rw);
- if (rc < 0)
- CERROR("unlock failed (%d)\n", rc);
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/**
- * Checks if requested extent lock is compatible
- * with a lock under a page in page cache.
- *
- * Checks if a lock under some \a page is compatible with a read or write lock
- * (specified by \a rw) for an extent [\a start , \a end].
- *
- * \param file the file under which lock is considered
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param ppos start of the requested extent
- * \param end end of the requested extent
- * \param cookie transparent parameter for passing locking context
- * \param buf userspace buffer for the data
- *
- * \post result == 1, *cookie == context, appropriate lock is referenced
- * \post retuls == 0
- *
- * \retval 1 owned lock is reused for the request
- * \retval 0 no lock reused for the request
- *
- * \see ll_file_put_fast_lock
- */
-static inline int ll_file_get_fast_lock(struct file *file,
- obd_off ppos, obd_off end,
- char *buf, void **cookie, int rw)
+static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
{
- int rc = 0;
- struct page *page;
-
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (!ll_region_mapped((unsigned long)buf, end - ppos)) {
- page = find_lock_page(file->f_dentry->d_inode->i_mapping,
- ppos >> CFS_PAGE_SHIFT);
- if (page) {
- if (ll_reget_short_lock(page, rw, ppos, end, cookie))
- rc = 1;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- unlock_page(page);
- page_cache_release(page);
- }
- }
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_left = count;
- RETURN(rc);
-}
+ result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
-/**
- * Releases a reference to a lock taken in a "fast" way.
- *
- * Releases a read or a write (specified by \a rw) lock
- * referenced by \a cookie.
- *
- * \param inode inode to which data belong
- * \param end end of the locked extent
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param cookie transparent parameter for passing locking context
- *
- * \post appropriate lock is dereferenced
- *
- * \see ll_file_get_fast_lock
- */
-static inline void ll_file_put_fast_lock(struct inode *inode, obd_off end,
- void *cookie, int rw)
-{
- ll_release_short_lock(inode, end, cookie, rw);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
+#endif
-enum ll_lock_style {
- LL_LOCK_STYLE_NOLOCK = 0,
- LL_LOCK_STYLE_FASTLOCK = 1,
- LL_LOCK_STYLE_TREELOCK = 2
-};
-
-/**
- * Checks if requested extent lock is compatible with a lock
- * under a page cache page.
- *
- * Checks if the lock under \a page is compatible with a read or write lock
- * (specified by \a rw) for an extent [\a start , \a end].
- *
- * \param file file under which I/O is processed
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param ppos start of the requested extent
- * \param end end of the requested extent
- * \param cookie transparent parameter for passing locking context
- * (only used with LL_LOCK_STYLE_FASTLOCK)
- * \param tree lock tree (only used with LL_LOCK_STYLE_TREELOCK)
- * \param buf userspace buffer for the data
- *
- * \retval LL_LOCK_STYLE_FASTLOCK owned lock is reused through fast lock
- * \retval LL_LOCK_STYLE_TREELOCK got a lock through tree lock
- * \retval LL_LOCK_STYLE_NOLOCK got no lock
- *
- * \see ll_file_put_lock
+/*
+ * Write to a file (through the page cache).
*/
-static inline int ll_file_get_lock(struct file *file, obd_off ppos,
- obd_off end, char *buf, void **cookie,
- struct ll_lock_tree *tree, int rw)
-{
- int rc;
-
+#ifdef HAVE_FILE_WRITEV
+static ssize_t ll_file_writev(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
+{
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (ll_file_get_fast_lock(file, ppos, end, buf, cookie, rw))
- RETURN(LL_LOCK_STYLE_FASTLOCK);
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
- rc = ll_file_get_tree_lock(tree, file, buf, ppos - end, ppos, end, rw);
- /* rc: 1 for tree lock, 0 for no lock, <0 for error */
- switch (rc) {
- case 1:
- RETURN(LL_LOCK_STYLE_TREELOCK);
- case 0:
- RETURN(LL_LOCK_STYLE_NOLOCK);
- }
-
- /* an error happened if we reached this point, rc = -errno here */
- RETURN(rc);
-}
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
-/**
- * Drops the lock taken by ll_file_get_lock.
- *
- * Releases a read or a write (specified by \a rw) lock
- * referenced by \a tree or \a cookie.
- *
- * \param inode inode to which data belong
- * \param end end of the locked extent
- * \param lockstyle facility through which the lock was taken
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param cookie transparent parameter for passing locking context
- * (only used with LL_LOCK_STYLE_FASTLOCK)
- * \param tree lock tree (only used with LL_LOCK_STYLE_TREELOCK)
- *
- * \post appropriate lock is dereferenced
- *
- * \see ll_file_get_lock
- */
-static inline void ll_file_put_lock(struct inode *inode, obd_off end,
- enum ll_lock_style lock_style,
- void *cookie, struct ll_lock_tree *tree,
- int rw)
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
-{
- switch (lock_style) {
- case LL_LOCK_STYLE_TREELOCK:
- ll_tree_unlock(tree);
- break;
- case LL_LOCK_STYLE_FASTLOCK:
- ll_file_put_fast_lock(inode, end, cookie, rw);
- break;
- default:
- CERROR("invalid locking style (%d)\n", lock_style);
- }
+ result = ll_file_io_generic(env, args, file, CIT_WRITE, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
- loff_t *ppos)
+static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
+ loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_lock_tree tree;
- struct ost_lvb lvb;
- struct ll_ra_read bead;
- int ra = 0;
- obd_off end;
- ssize_t retval, chunk, sum = 0;
- int lock_style;
- void *cookie;
-
- __u64 kms;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
- /* "If nbyte is 0, read() will return 0 and have no other results."
- * -- Single Unix Spec */
- if (count == 0)
- RETURN(0);
-
- ll_stats_ops_tally(sbi, LPROC_LL_READ_BYTES, count);
-
- if (!lsm) {
- /* Read on file with no objects should return zero-filled
- * buffers up to file size (we can get non-zero sizes with
- * mknod + truncate, then opening file for read. This is a
- * common pattern in NFS case, it seems). Bug 6243 */
- int notzeroed;
- /* Since there are no objects on OSTs, we have nothing to get
- * lock on and so we are forced to access inode->i_size
- * unguarded */
-
- /* Read beyond end of file */
- if (*ppos >= i_size_read(inode))
- RETURN(0);
-
- if (count > i_size_read(inode) - *ppos)
- count = i_size_read(inode) - *ppos;
- /* Make sure to correctly adjust the file pos pointer for
- * EFAULT case */
- notzeroed = clear_user(buf, count);
- count -= notzeroed;
- *ppos += count;
- if (!count)
- RETURN(-EFAULT);
- RETURN(count);
- }
-repeat:
- if (sbi->ll_max_rw_chunk != 0) {
- /* first, let's know the end of the current stripe */
- end = *ppos;
- obd_extent_calc(sbi->ll_dt_exp, lsm, OBD_CALC_STRIPE_END, &end);
- /* correct, the end is beyond the request */
- if (end > *ppos + count - 1)
- end = *ppos + count - 1;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- /* and chunk shouldn't be too large even if striping is wide */
- if (end - *ppos > sbi->ll_max_rw_chunk)
- end = *ppos + sbi->ll_max_rw_chunk - 1;
- } else {
- end = *ppos + count - 1;
- }
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
- lock_style = ll_file_get_lock(file, (obd_off)(*ppos), end,
- buf, &cookie, &tree, OBD_BRW_READ);
- if (lock_style < 0)
- GOTO(out, retval = lock_style);
+ result = ll_file_writev(file, local_iov, 1, ppos);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
+}
- ll_inode_size_lock(inode, 1);
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being read and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock acquired by
- * ll_tree_lock() above, because to change class, other client has to
- * take DLM lock conflicting with our lock. Also, any updates to
- * ->i_size by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 1);
- kms = lvb.lvb_size;
- if (*ppos + count - 1 > kms) {
- /* A glimpse is necessary to determine whether we return a
- * short read (B) or some zeroes at the end of the buffer (C) */
- ll_inode_size_unlock(inode, 1);
- retval = ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
- if (retval) {
- if (lock_style != LL_LOCK_STYLE_NOLOCK)
- ll_file_put_lock(inode, end, lock_style,
- cookie, &tree, OBD_BRW_READ);
- goto out;
- }
- } else {
- /* region is within kms and, hence, within real file size (A).
- * We need to increase i_size to cover the read region so that
- * generic_file_read() will do its job, but that doesn't mean
- * the kms size is _correct_, it is only the _minimum_ size.
- * If someone does a stat they will get the correct size which
- * will always be >= the kms value here. b=11081 */
- if (i_size_read(inode) < kms)
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode, 1);
- }
-
- chunk = end - *ppos + 1;
- CDEBUG(D_INODE, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
- inode->i_ino, chunk, *ppos, i_size_read(inode));
-
- if (lock_style != LL_LOCK_STYLE_NOLOCK) {
- /* turn off the kernel's read-ahead */
- file->f_ra.ra_pages = 0;
-
- /* initialize read-ahead window once per syscall */
- if (ra == 0) {
- ra = 1;
- bead.lrr_start = *ppos >> CFS_PAGE_SHIFT;
- bead.lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- ll_ra_read_in(file, &bead);
- }
+#else /* AIO stuff */
+static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
+ ENTRY;
- /* BUG: 5972 */
- file_accessed(file);
- retval = generic_file_read(file, buf, chunk, ppos);
- ll_file_put_lock(inode, end, lock_style, cookie, &tree,
- OBD_BRW_READ);
- } else {
- retval = ll_file_lockless_io(file, buf, chunk, ppos, READ);
- }
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
- ll_rw_stats_tally(sbi, current->pid, file, chunk, 0);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- if (retval > 0) {
- buf += retval;
- count -= retval;
- sum += retval;
- if (retval == chunk && count > 0)
- goto repeat;
- }
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iocb = iocb;
- out:
- if (ra != 0)
- ll_ra_read_ex(file, &bead);
- retval = (sum > 0) ? sum : retval;
- RETURN(retval);
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/*
- * Write to a file (through the page cache).
- */
static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- struct ll_lock_tree tree;
- loff_t maxbytes = ll_file_maxbytes(inode);
- loff_t lock_start, lock_end, end;
- ssize_t retval, chunk, sum = 0;
- int tree_locked;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
-
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- /* POSIX, but surprised the VFS doesn't check this already */
- if (count == 0)
- RETURN(0);
-
- /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
- * called on the file, don't fail the below assertion (bug 2388). */
- if (file->f_flags & O_LOV_DELAY_CREATE &&
- ll_i2info(inode)->lli_smd == NULL)
- RETURN(-EBADF);
-
- LASSERT(ll_i2info(inode)->lli_smd != NULL);
-
- down(&ll_i2info(inode)->lli_write_sem);
-
-repeat:
- chunk = 0; /* just to fix gcc's warning */
- end = *ppos + count - 1;
-
- if (file->f_flags & O_APPEND) {
- lock_start = 0;
- lock_end = OBD_OBJECT_EOF;
- } else if (sbi->ll_max_rw_chunk != 0) {
- /* first, let's know the end of the current stripe */
- end = *ppos;
- obd_extent_calc(sbi->ll_dt_exp, lsm, OBD_CALC_STRIPE_END,
- (obd_off *)&end);
-
- /* correct, the end is beyond the request */
- if (end > *ppos + count - 1)
- end = *ppos + count - 1;
-
- /* and chunk shouldn't be too large even if striping is wide */
- if (end - *ppos > sbi->ll_max_rw_chunk)
- end = *ppos + sbi->ll_max_rw_chunk - 1;
- lock_start = *ppos;
- lock_end = end;
- } else {
- lock_start = *ppos;
- lock_end = *ppos + count - 1;
- }
-
- tree_locked = ll_file_get_tree_lock(&tree, file, buf, count,
- lock_start, lock_end, OBD_BRW_WRITE);
- if (tree_locked < 0)
- GOTO(out, retval = tree_locked);
-
- /* This is ok, g_f_w will overwrite this under i_sem if it races
- * with a local truncate, it just makes our maxbyte checking easier.
- * The i_size value gets updated in ll_extent_lock() as a consequence
- * of the [0,EOF] extent lock we requested above. */
- if (file->f_flags & O_APPEND) {
- *ppos = i_size_read(inode);
- end = *ppos + count - 1;
- }
-
- if (*ppos >= maxbytes) {
- send_sig(SIGXFSZ, current, 0);
- GOTO(out_unlock, retval = -EFBIG);
- }
- if (end > maxbytes - 1)
- end = maxbytes - 1;
-
- /* generic_file_write handles O_APPEND after getting i_mutex */
- chunk = end - *ppos + 1;
- CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
- inode->i_ino, chunk, *ppos);
- if (tree_locked)
- retval = generic_file_write(file, buf, chunk, ppos);
- else
- retval = ll_file_lockless_io(file, (char*)buf, chunk,
- ppos, WRITE);
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid, file, chunk, 1);
-
-out_unlock:
- if (tree_locked)
- ll_tree_unlock(&tree);
-
-out:
- if (retval > 0) {
- buf += retval;
- count -= retval;
- sum += retval;
- if (retval == chunk && count > 0)
- goto repeat;
- }
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_left = count;
- up(&ll_i2info(inode)->lli_write_sem);
+ result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
- retval = (sum > 0) ? sum : retval;
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
- retval > 0 ? retval : 0);
- RETURN(retval);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
+#endif
+
+#ifdef HAVE_KERNEL_SENDFILE
/*
* Send file content (through pagecache) somewhere with helper
*/
static ssize_t ll_file_sendfile(struct file *in_file, loff_t *ppos,size_t count,
read_actor_t actor, void *target)
{
- struct inode *inode = in_file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct ll_lock_tree tree;
- struct ll_lock_tree_node *node;
- struct ost_lvb lvb;
- struct ll_ra_read bead;
- int rc;
- ssize_t retval;
- __u64 kms;
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
- /* "If nbyte is 0, read() will return 0 and have no other results."
- * -- Single Unix Spec */
- if (count == 0)
- RETURN(0);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_READ_BYTES, count);
- /* turn off the kernel's read-ahead */
- in_file->f_ra.ra_pages = 0;
+ args = vvp_env_args(env, IO_SENDFILE);
+ args->u.sendfile.via_target = target;
+ args->u.sendfile.via_actor = actor;
- /* File with no objects, nothing to lock */
- if (!lsm)
- RETURN(generic_file_sendfile(in_file, ppos, count, actor, target));
-
- node = ll_node_from_inode(inode, *ppos, *ppos + count - 1, LCK_PR);
- if (IS_ERR(node))
- RETURN(PTR_ERR(node));
-
- tree.lt_fd = LUSTRE_FPRIVATE(in_file);
- rc = ll_tree_lock(&tree, node, NULL, count,
- in_file->f_flags & O_NONBLOCK?LDLM_FL_BLOCK_NOWAIT:0);
- if (rc != 0)
- RETURN(rc);
+ result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
+}
+#endif
- ll_clear_file_contended(inode);
- ll_inode_size_lock(inode, 1);
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being read and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock acquired by
- * ll_tree_lock() above, because to change class, other client has to
- * take DLM lock conflicting with our lock. Also, any updates to
- * ->i_size by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(ll_i2sbi(inode)->ll_dt_exp, lsm, &lvb, 1);
- kms = lvb.lvb_size;
- if (*ppos + count - 1 > kms) {
- /* A glimpse is necessary to determine whether we return a
- * short read (B) or some zeroes at the end of the buffer (C) */
- ll_inode_size_unlock(inode, 1);
- retval = ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
- if (retval)
- goto out;
- } else {
- /* region is within kms and, hence, within real file size (A) */
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode, 1);
- }
+#ifdef HAVE_KERNEL_SPLICE_READ
+/*
+ * Send file content (through pagecache) somewhere with helper
+ */
+static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t count,
+ unsigned int flags)
+{
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ ssize_t result;
+ int refcheck;
+ ENTRY;
- CDEBUG(D_INFO, "Send ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
- inode->i_ino, count, *ppos, i_size_read(inode));
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- bead.lrr_start = *ppos >> CFS_PAGE_SHIFT;
- bead.lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- ll_ra_read_in(in_file, &bead);
- /* BUG: 5972 */
- file_accessed(in_file);
- retval = generic_file_sendfile(in_file, ppos, count, actor, target);
- ll_ra_read_ex(in_file, &bead);
+ args = vvp_env_args(env, IO_SPLICE);
+ args->u.splice.via_pipe = pipe;
+ args->u.splice.via_flags = flags;
- out:
- ll_tree_unlock(&tree);
- RETURN(retval);
+ result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
+#endif
-static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
- unsigned long arg)
+static int ll_lov_recreate(struct inode *inode, obd_id id, obd_seq seq,
+ obd_count ost_idx)
{
- struct ll_inode_info *lli = ll_i2info(inode);
struct obd_export *exp = ll_i2dtexp(inode);
- struct ll_recreate_obj ucreatp;
struct obd_trans_info oti = { 0 };
struct obdo *oa = NULL;
int lsm_size;
struct lov_stripe_md *lsm, *lsm2;
ENTRY;
- if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
-
- rc = copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
- sizeof(struct ll_recreate_obj));
- if (rc) {
- RETURN(-EFAULT);
- }
OBDO_ALLOC(oa);
if (oa == NULL)
RETURN(-ENOMEM);
- down(&lli->lli_size_sem);
- lsm = lli->lli_smd;
+ ll_inode_size_lock(inode, 0);
+ lsm = ll_i2info(inode)->lli_smd;
if (lsm == NULL)
GOTO(out, rc = -ENOENT);
lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
if (lsm2 == NULL)
GOTO(out, rc = -ENOMEM);
- oa->o_id = ucreatp.lrc_id;
- oa->o_gr = ucreatp.lrc_group;
- oa->o_nlink = ucreatp.lrc_ost_idx;
+ oa->o_id = id;
+ oa->o_seq = seq;
+ oa->o_nlink = ost_idx;
oa->o_flags |= OBD_FL_RECREATE_OBJS;
oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
-
+ obdo_from_inode(oa, inode, &ll_i2info(inode)->lli_fid, OBD_MD_FLTYPE |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME);
memcpy(lsm2, lsm, lsm_size);
rc = obd_create(exp, oa, &lsm2, &oti);
OBD_FREE(lsm2, lsm_size);
GOTO(out, rc);
out:
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
OBDO_FREE(oa);
return rc;
}
+static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
+{
+ struct ll_recreate_obj ucreat;
+ ENTRY;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
+
+ if (cfs_copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
+ sizeof(struct ll_recreate_obj)))
+ RETURN(-EFAULT);
+
+ RETURN(ll_lov_recreate(inode, ucreat.lrc_id, 0,
+ ucreat.lrc_ost_idx));
+}
+
+static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
+{
+ struct lu_fid fid;
+ obd_id id;
+ obd_count ost_idx;
+ ENTRY;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ RETURN(-EPERM);
+
+ if (cfs_copy_from_user(&fid, (struct lu_fid *)arg,
+ sizeof(struct lu_fid)))
+ RETURN(-EFAULT);
+
+ id = fid_oid(&fid) | ((fid_seq(&fid) & 0xffff) << 32);
+ ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
+ RETURN(ll_lov_recreate(inode, id, 0, ost_idx));
+}
+
int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
int flags, struct lov_user_md *lum, int lum_size)
{
- struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
int rc = 0;
ENTRY;
- down(&lli->lli_size_sem);
- lsm = lli->lli_smd;
+ ll_inode_size_lock(inode, 0);
+ lsm = ll_i2info(inode)->lli_smd;
if (lsm) {
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
RETURN(-EEXIST);
ll_release_openhandle(file->f_dentry, &oit);
out:
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
ll_intent_release(&oit);
RETURN(rc);
out_req_free:
struct mdt_body *body;
struct lov_mds_md *lmm = NULL;
struct ptlrpc_request *req = NULL;
- struct obd_capa *oc;
+ struct md_op_data *op_data;
int rc, lmmsize;
rc = ll_get_max_mdsize(sbi, &lmmsize);
if (rc)
RETURN(rc);
- oc = ll_mdscapa_get(inode);
- rc = md_getattr_name(sbi->ll_md_exp, ll_inode2fid(inode),
- oc, filename, strlen(filename) + 1,
- OBD_MD_FLEASIZE | OBD_MD_FLDIREA, lmmsize,
- ll_i2suppgid(inode), &req);
- capa_put(oc);
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
+ strlen(filename), lmmsize,
+ LUSTRE_OPC_ANY, NULL);
+ if (op_data == NULL)
+ RETURN(-ENOMEM);
+
+ op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
+ rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
if (rc < 0) {
CDEBUG(D_INFO, "md_getattr_name failed "
"on %s: rc %d\n", filename, rc);
LASSERT(lmm != NULL);
if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3)) &&
- (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_JOIN))) {
+ (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
GOTO(out, rc = -EPROTO);
}
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v3 *)lmm)->lmm_objects,
((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
- } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_JOIN)) {
- lustre_swab_lov_user_md_join((struct lov_user_md_join *)lmm);
}
}
- if (lmm->lmm_magic == LOV_MAGIC_JOIN) {
- struct lov_stripe_md *lsm;
- struct lov_user_md_join *lmj;
- int lmj_size, i, aindex = 0;
-
- rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
- if (rc < 0)
- GOTO(out, rc = -ENOMEM);
- rc = obd_checkmd(sbi->ll_dt_exp, sbi->ll_md_exp, lsm);
- if (rc)
- GOTO(out_free_memmd, rc);
-
- lmj_size = sizeof(struct lov_user_md_join) +
- lsm->lsm_stripe_count *
- sizeof(struct lov_user_ost_data_join);
- OBD_ALLOC(lmj, lmj_size);
- if (!lmj)
- GOTO(out_free_memmd, rc = -ENOMEM);
-
- memcpy(lmj, lmm, sizeof(struct lov_user_md_join));
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- struct lov_extent *lex =
- &lsm->lsm_array->lai_ext_array[aindex];
-
- if (lex->le_loi_idx + lex->le_stripe_count <= i)
- aindex ++;
- CDEBUG(D_INFO, "aindex %d i %d l_extent_start "
- LPU64" len %d\n", aindex, i,
- lex->le_start, (int)lex->le_len);
- lmj->lmm_objects[i].l_extent_start =
- lex->le_start;
-
- if ((int)lex->le_len == -1)
- lmj->lmm_objects[i].l_extent_end = -1;
- else
- lmj->lmm_objects[i].l_extent_end =
- lex->le_start + lex->le_len;
- lmj->lmm_objects[i].l_object_id =
- lsm->lsm_oinfo[i]->loi_id;
- lmj->lmm_objects[i].l_object_gr =
- lsm->lsm_oinfo[i]->loi_gr;
- lmj->lmm_objects[i].l_ost_gen =
- lsm->lsm_oinfo[i]->loi_ost_gen;
- lmj->lmm_objects[i].l_ost_idx =
- lsm->lsm_oinfo[i]->loi_ost_idx;
- }
- lmm = (struct lov_mds_md *)lmj;
- lmmsize = lmj_size;
-out_free_memmd:
- obd_free_memmd(sbi->ll_dt_exp, &lsm);
- }
out:
*lmmp = lmm;
*lmm_size = lmmsize;
if (lump == NULL) {
RETURN(-ENOMEM);
}
- rc = copy_from_user(lump, (struct lov_user_md *)arg, lum_size);
- if (rc) {
+ if (cfs_copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
OBD_FREE(lump, lum_size);
RETURN(-EFAULT);
}
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
- rc = copy_from_user(lumv1, lumv1p, lum_size);
- if (rc)
+ if (cfs_copy_from_user(lumv1, lumv1p, lum_size))
RETURN(-EFAULT);
if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
lum_size = sizeof(struct lov_user_md_v3);
- rc = copy_from_user(&lumv3, lumv3p, lum_size);
- if (rc)
+ if (cfs_copy_from_user(&lumv3, lumv3p, lum_size))
RETURN(-EFAULT);
}
RETURN(rc);
}
-static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
-{
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
-
- if (!lsm)
- RETURN(-ENODATA);
-
- return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0, lsm,
- (void *)arg);
-}
-
-static int ll_get_grouplock(struct inode *inode, struct file *file,
- unsigned long arg)
-{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- ldlm_policy_data_t policy = { .l_extent = { .start = 0,
- .end = OBD_OBJECT_EOF}};
- struct lustre_handle lockh = { 0 };
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int flags = 0, rc;
- ENTRY;
-
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- RETURN(-EINVAL);
- }
-
- policy.l_extent.gid = arg;
- if (file->f_flags & O_NONBLOCK)
- flags = LDLM_FL_BLOCK_NOWAIT;
-
- rc = ll_extent_lock(fd, inode, lsm, LCK_GROUP, &policy, &lockh, flags);
- if (rc)
- RETURN(rc);
+static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
+{
+ struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- fd->fd_flags |= LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK;
- fd->fd_gid = arg;
- memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
+ if (!lsm)
+ RETURN(-ENODATA);
- RETURN(0);
+ return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0, lsm,
+ (void *)arg);
}
-static int ll_put_grouplock(struct inode *inode, struct file *file,
- unsigned long arg)
+int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int rc;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
+ int rc;
ENTRY;
- if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- /* Ugh, it's already unlocked. */
- RETURN(-EINVAL);
- }
+ if (ll_file_nolock(file))
+ RETURN(-EOPNOTSUPP);
- if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
+ cfs_spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ CWARN("group lock already existed with gid %lu\n",
+ fd->fd_grouplock.cg_gid);
+ cfs_spin_unlock(&lli->lli_lock);
RETURN(-EINVAL);
+ }
+ LASSERT(fd->fd_grouplock.cg_lock == NULL);
+ cfs_spin_unlock(&lli->lli_lock);
- fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
-
- rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
+ rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
RETURN(rc);
- fd->fd_gid = 0;
- memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
-
- RETURN(0);
-}
-
-#if LUSTRE_FIX >= 50
-static int join_sanity_check(struct inode *head, struct inode *tail)
-{
- ENTRY;
- if ((ll_i2sbi(head)->ll_flags & LL_SBI_JOIN) == 0) {
- CERROR("server do not support join \n");
- RETURN(-EINVAL);
- }
- if (!S_ISREG(tail->i_mode) || !S_ISREG(head->i_mode)) {
- CERROR("tail ino %lu and ino head %lu must be regular\n",
- head->i_ino, tail->i_ino);
- RETURN(-EINVAL);
- }
- if (head->i_ino == tail->i_ino) {
- CERROR("file %lu can not be joined to itself \n", head->i_ino);
- RETURN(-EINVAL);
- }
- if (i_size_read(head) % JOIN_FILE_ALIGN) {
- CERROR("hsize %llu must be times of 64K\n", i_size_read(head));
+ cfs_spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ cfs_spin_unlock(&lli->lli_lock);
+ CERROR("another thread just won the race\n");
+ cl_put_grouplock(&grouplock);
RETURN(-EINVAL);
}
+
+ fd->fd_flags |= LL_FILE_GROUP_LOCKED;
+ fd->fd_grouplock = grouplock;
+ cfs_spin_unlock(&lli->lli_lock);
+
+ CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
RETURN(0);
}
-static int join_file(struct inode *head_inode, struct file *head_filp,
- struct file *tail_filp)
+int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct dentry *tail_dentry = tail_filp->f_dentry;
- struct lookup_intent oit = {.it_op = IT_OPEN,
- .it_flags = head_filp->f_flags|O_JOIN_FILE};
- struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_CW,
- ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL };
-
- struct lustre_handle lockh;
- struct md_op_data *op_data;
- int rc;
- loff_t data;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
ENTRY;
- tail_dentry = tail_filp->f_dentry;
-
- data = i_size_read(head_inode);
- op_data = ll_prep_md_op_data(NULL, head_inode,
- tail_dentry->d_parent->d_inode,
- tail_dentry->d_name.name,
- tail_dentry->d_name.len, 0,
- LUSTRE_OPC_ANY, &data);
- if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
-
- rc = md_enqueue(ll_i2mdexp(head_inode), &einfo, &oit,
- op_data, &lockh, NULL, 0, NULL, 0);
-
- ll_finish_md_op_data(op_data);
- if (rc < 0)
- GOTO(out, rc);
-
- rc = oit.d.lustre.it_status;
-
- if (rc < 0 || it_open_error(DISP_OPEN_OPEN, &oit)) {
- rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, &oit);
- ptlrpc_req_finished((struct ptlrpc_request *)
- oit.d.lustre.it_data);
- GOTO(out, rc);
+ cfs_spin_lock(&lli->lli_lock);
+ if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ cfs_spin_unlock(&lli->lli_lock);
+ CWARN("no group lock held\n");
+ RETURN(-EINVAL);
}
+ LASSERT(fd->fd_grouplock.cg_lock != NULL);
- if (oit.d.lustre.it_lock_mode) { /* If we got lock - release it right
- * away */
- ldlm_lock_decref(&lockh, oit.d.lustre.it_lock_mode);
- oit.d.lustre.it_lock_mode = 0;
+ if (fd->fd_grouplock.cg_gid != arg) {
+ CWARN("group lock %lu doesn't match current id %lu\n",
+ arg, fd->fd_grouplock.cg_gid);
+ cfs_spin_unlock(&lli->lli_lock);
+ RETURN(-EINVAL);
}
- ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
- it_clear_disposition(&oit, DISP_ENQ_COMPLETE);
- ll_release_openhandle(head_filp->f_dentry, &oit);
-out:
- ll_intent_release(&oit);
- RETURN(rc);
-}
-
-static int ll_file_join(struct inode *head, struct file *filp,
- char *filename_tail)
-{
- struct inode *tail = NULL, *first = NULL, *second = NULL;
- struct dentry *tail_dentry;
- struct file *tail_filp, *first_filp, *second_filp;
- struct ll_lock_tree first_tree, second_tree;
- struct ll_lock_tree_node *first_node, *second_node;
- struct ll_inode_info *hlli = ll_i2info(head), *tlli;
- int rc = 0, cleanup_phase = 0;
- ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:head=%lu/%u(%p) tail %s\n",
- head->i_ino, head->i_generation, head, filename_tail);
-
- tail_filp = filp_open(filename_tail, O_WRONLY, 0644);
- if (IS_ERR(tail_filp)) {
- CERROR("Can not open tail file %s", filename_tail);
- rc = PTR_ERR(tail_filp);
- GOTO(cleanup, rc);
- }
- tail = igrab(tail_filp->f_dentry->d_inode);
-
- tlli = ll_i2info(tail);
- tail_dentry = tail_filp->f_dentry;
- LASSERT(tail_dentry);
- cleanup_phase = 1;
-
- /*reorder the inode for lock sequence*/
- first = head->i_ino > tail->i_ino ? head : tail;
- second = head->i_ino > tail->i_ino ? tail : head;
- first_filp = head->i_ino > tail->i_ino ? filp : tail_filp;
- second_filp = head->i_ino > tail->i_ino ? tail_filp : filp;
-
- CDEBUG(D_INFO, "reorder object from %lu:%lu to %lu:%lu \n",
- head->i_ino, tail->i_ino, first->i_ino, second->i_ino);
- first_node = ll_node_from_inode(first, 0, OBD_OBJECT_EOF, LCK_EX);
- if (IS_ERR(first_node)){
- rc = PTR_ERR(first_node);
- GOTO(cleanup, rc);
- }
- first_tree.lt_fd = first_filp->private_data;
- rc = ll_tree_lock(&first_tree, first_node, NULL, 0, 0);
- if (rc != 0)
- GOTO(cleanup, rc);
- cleanup_phase = 2;
-
- second_node = ll_node_from_inode(second, 0, OBD_OBJECT_EOF, LCK_EX);
- if (IS_ERR(second_node)){
- rc = PTR_ERR(second_node);
- GOTO(cleanup, rc);
- }
- second_tree.lt_fd = second_filp->private_data;
- rc = ll_tree_lock(&second_tree, second_node, NULL, 0, 0);
- if (rc != 0)
- GOTO(cleanup, rc);
- cleanup_phase = 3;
-
- rc = join_sanity_check(head, tail);
- if (rc)
- GOTO(cleanup, rc);
+ grouplock = fd->fd_grouplock;
+ memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
+ fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
+ cfs_spin_unlock(&lli->lli_lock);
- rc = join_file(head, filp, tail_filp);
- if (rc)
- GOTO(cleanup, rc);
-cleanup:
- switch (cleanup_phase) {
- case 3:
- ll_tree_unlock(&second_tree);
- obd_cancel_unused(ll_i2dtexp(second),
- ll_i2info(second)->lli_smd, 0, NULL);
- case 2:
- ll_tree_unlock(&first_tree);
- obd_cancel_unused(ll_i2dtexp(first),
- ll_i2info(first)->lli_smd, 0, NULL);
- case 1:
- filp_close(tail_filp, 0);
- if (tail)
- iput(tail);
- if (head && rc == 0) {
- obd_free_memmd(ll_i2sbi(head)->ll_dt_exp,
- &hlli->lli_smd);
- hlli->lli_smd = NULL;
- }
- case 0:
- break;
- default:
- CERROR("invalid cleanup_phase %d\n", cleanup_phase);
- LBUG();
- }
- RETURN(rc);
+ cl_put_grouplock(&grouplock);
+ CDEBUG(D_INFO, "group lock %lu released\n", arg);
+ RETURN(0);
}
-#endif /* LUSTRE_FIX >= 50 */
/**
* Close inode open handle
* Get size for inode for which FIEMAP mapping is requested.
* Make the FIEMAP get_info call and returns the result.
*/
-int ll_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
int num_bytes)
{
struct obd_export *exp = ll_i2dtexp(inode);
int rc;
ENTRY;
+ /* Checks for fiemap flags */
+ if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
+ fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
+ return -EBADR;
+ }
+
+ /* Check for FIEMAP_FLAG_SYNC */
+ if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
+ rc = filemap_fdatawrite(inode->i_mapping);
+ if (rc)
+ return rc;
+ }
+
/* If the stripe_count > 1 and the application does not understand
* DEVICE_ORDER flag, then it cannot interpret the extents correctly.
*/
return -EOPNOTSUPP;
fm_key.oa.o_id = lsm->lsm_object_id;
- fm_key.oa.o_gr = lsm->lsm_object_gr;
+ fm_key.oa.o_seq = lsm->lsm_object_seq;
fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
- obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLFID | OBD_MD_FLGROUP |
+ obdo_from_inode(&fm_key.oa, inode, &ll_i2info(inode)->lli_fid,
OBD_MD_FLSIZE);
-
/* If filesize is 0, then there would be no objects for mapping */
if (fm_key.oa.o_size == 0) {
fiemap->fm_mapped_extents = 0;
RETURN(rc);
}
+int ll_fid2path(struct obd_export *exp, void *arg)
+{
+ struct getinfo_fid2path *gfout, *gfin;
+ int outsize, rc;
+ ENTRY;
+
+ /* Need to get the buflen */
+ OBD_ALLOC_PTR(gfin);
+ if (gfin == NULL)
+ RETURN(-ENOMEM);
+ if (cfs_copy_from_user(gfin, arg, sizeof(*gfin))) {
+ OBD_FREE_PTR(gfin);
+ RETURN(-EFAULT);
+ }
+
+ outsize = sizeof(*gfout) + gfin->gf_pathlen;
+ OBD_ALLOC(gfout, outsize);
+ if (gfout == NULL) {
+ OBD_FREE_PTR(gfin);
+ RETURN(-ENOMEM);
+ }
+ memcpy(gfout, gfin, sizeof(*gfout));
+ OBD_FREE_PTR(gfin);
+
+ /* Call mdc_iocontrol */
+ rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
+ if (rc)
+ GOTO(gf_free, rc);
+ if (cfs_copy_to_user(arg, gfout, outsize))
+ rc = -EFAULT;
+
+gf_free:
+ OBD_FREE(gfout, outsize);
+ RETURN(rc);
+}
+
+static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
+{
+ struct ll_user_fiemap *fiemap_s;
+ size_t num_bytes, ret_bytes;
+ unsigned int extent_count;
+ int rc = 0;
+
+ /* Get the extent count so we can calculate the size of
+ * required fiemap buffer */
+ if (get_user(extent_count,
+ &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
+ RETURN(-EFAULT);
+ num_bytes = sizeof(*fiemap_s) + (extent_count *
+ sizeof(struct ll_fiemap_extent));
+
+ OBD_VMALLOC(fiemap_s, num_bytes);
+ if (fiemap_s == NULL)
+ RETURN(-ENOMEM);
+
+ /* get the fiemap value */
+ if (copy_from_user(fiemap_s,(struct ll_user_fiemap __user *)arg,
+ sizeof(*fiemap_s)))
+ GOTO(error, rc = -EFAULT);
+
+ /* If fm_extent_count is non-zero, read the first extent since
+ * it is used to calculate end_offset and device from previous
+ * fiemap call. */
+ if (extent_count) {
+ if (copy_from_user(&fiemap_s->fm_extents[0],
+ (char __user *)arg + sizeof(*fiemap_s),
+ sizeof(struct ll_fiemap_extent)))
+ GOTO(error, rc = -EFAULT);
+ }
+
+ rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
+ if (rc)
+ GOTO(error, rc);
+
+ ret_bytes = sizeof(struct ll_user_fiemap);
+
+ if (extent_count != 0)
+ ret_bytes += (fiemap_s->fm_mapped_extents *
+ sizeof(struct ll_fiemap_extent));
+
+ if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+ rc = -EFAULT;
+
+error:
+ OBD_VFREE(fiemap_s, num_bytes);
+ RETURN(rc);
+}
+
int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
case LL_IOC_LOV_GETSTRIPE:
RETURN(ll_lov_getstripe(inode, arg));
case LL_IOC_RECREATE_OBJ:
- RETURN(ll_lov_recreate_obj(inode, file, arg));
- case EXT3_IOC_FIEMAP: {
- struct ll_user_fiemap *fiemap_s;
- size_t num_bytes, ret_bytes;
- unsigned int extent_count;
- int rc = 0;
-
- /* Get the extent count so we can calculate the size of
- * required fiemap buffer */
- if (get_user(extent_count,
- &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
- RETURN(-EFAULT);
- num_bytes = sizeof(*fiemap_s) + (extent_count *
- sizeof(struct ll_fiemap_extent));
- OBD_VMALLOC(fiemap_s, num_bytes);
- if (fiemap_s == NULL)
- RETURN(-ENOMEM);
-
- if (copy_from_user(fiemap_s,(struct ll_user_fiemap __user *)arg,
- sizeof(*fiemap_s)))
- GOTO(error, rc = -EFAULT);
-
- if (fiemap_s->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
- fiemap_s->fm_flags = fiemap_s->fm_flags &
- ~LUSTRE_FIEMAP_FLAGS_COMPAT;
- if (copy_to_user((char *)arg, fiemap_s,
- sizeof(*fiemap_s)))
- GOTO(error, rc = -EFAULT);
-
- GOTO(error, rc = -EBADR);
- }
-
- /* If fm_extent_count is non-zero, read the first extent since
- * it is used to calculate end_offset and device from previous
- * fiemap call. */
- if (extent_count) {
- if (copy_from_user(&fiemap_s->fm_extents[0],
- (char __user *)arg + sizeof(*fiemap_s),
- sizeof(struct ll_fiemap_extent)))
- GOTO(error, rc = -EFAULT);
- }
-
- if (fiemap_s->fm_flags & FIEMAP_FLAG_SYNC) {
- int rc;
-
- rc = filemap_fdatawrite(inode->i_mapping);
- if (rc)
- GOTO(error, rc);
- }
-
- rc = ll_fiemap(inode, fiemap_s, num_bytes);
- if (rc)
- GOTO(error, rc);
-
- ret_bytes = sizeof(struct ll_user_fiemap);
-
- if (extent_count != 0)
- ret_bytes += (fiemap_s->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent));
-
- if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
- rc = -EFAULT;
-
-error:
- OBD_VFREE(fiemap_s, num_bytes);
- RETURN(rc);
- }
- case EXT3_IOC_GETFLAGS:
- case EXT3_IOC_SETFLAGS:
+ RETURN(ll_lov_recreate_obj(inode, arg));
+ case LL_IOC_RECREATE_FID:
+ RETURN(ll_lov_recreate_fid(inode, arg));
+ case FSFILT_IOC_FIEMAP:
+ RETURN(ll_ioctl_fiemap(inode, arg));
+ case FSFILT_IOC_GETFLAGS:
+ case FSFILT_IOC_SETFLAGS:
RETURN(ll_iocontrol(inode, file, cmd, arg));
- case EXT3_IOC_GETVERSION_OLD:
- case EXT3_IOC_GETVERSION:
+ case FSFILT_IOC_GETVERSION_OLD:
+ case FSFILT_IOC_GETVERSION:
RETURN(put_user(inode->i_generation, (int *)arg));
- case LL_IOC_JOIN: {
-#if LUSTRE_FIX >= 50
- /* Allow file join in beta builds to allow debuggging */
- char *ftail;
- int rc;
-
- ftail = getname((const char *)arg);
- if (IS_ERR(ftail))
- RETURN(PTR_ERR(ftail));
- rc = ll_file_join(inode, file, ftail);
- putname(ftail);
- RETURN(rc);
-#else
- CWARN("file join is not supported in this version of Lustre\n");
- RETURN(-ENOTTY);
-#endif
- }
case LL_IOC_GROUP_LOCK:
RETURN(ll_get_grouplock(inode, file, arg));
case LL_IOC_GROUP_UNLOCK:
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
* network encode the arg field.
- case EXT3_IOC_SETVERSION_OLD:
- case EXT3_IOC_SETVERSION:
+ case FSFILT_IOC_SETVERSION_OLD:
+ case FSFILT_IOC_SETVERSION:
*/
case LL_IOC_FLUSHCTX:
RETURN(ll_flush_ctx(inode));
+ case LL_IOC_PATH2FID: {
+ if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
+ RETURN(-EFAULT);
+
+ RETURN(0);
+ }
+ case OBD_IOC_FID2PATH:
+ RETURN(ll_fid2path(ll_i2mdexp(inode), (void *)arg));
+
+ case LL_IOC_GET_MDTIDX: {
+ int mdtidx;
+
+ mdtidx = ll_get_mdt_idx(inode);
+ if (mdtidx < 0)
+ RETURN(mdtidx);
+
+ if (put_user((int)mdtidx, (int*)arg))
+ RETURN(-EFAULT);
+
+ RETURN(0);
+ }
+
default: {
int err;
loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
loff_t retval;
ENTRY;
retval = offset + ((origin == 2) ? i_size_read(inode) :
if (file->f_flags & O_NONBLOCK)
nonblock = LDLM_FL_BLOCK_NOWAIT;
- if (lsm != NULL) {
- rc = ll_glimpse_size(inode, nonblock);
- if (rc != 0)
- RETURN(rc);
- }
+ rc = cl_glimpse_size(inode);
+ if (rc != 0)
+ RETURN(rc);
- ll_inode_size_lock(inode, 0);
offset += i_size_read(inode);
- ll_inode_size_unlock(inode, 0);
} else if (origin == 1) { /* SEEK_CUR */
offset += file->f_pos;
}
if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
if (offset != file->f_pos) {
file->f_pos = offset;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- file->f_reada = 0;
- file->f_version = ++event;
-#endif
}
retval = offset;
}
RETURN(retval);
}
+#ifdef HAVE_FLUSH_OWNER_ID
+int ll_flush(struct file *file, fl_owner_t id)
+#else
+int ll_flush(struct file *file)
+#endif
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ int rc, err;
+
+ /* catch async errors that were recorded back when async writeback
+ * failed for pages in this mapping. */
+ rc = lli->lli_async_rc;
+ lli->lli_async_rc = 0;
+ if (lsm) {
+ err = lov_test_and_clear_async_rc(lsm);
+ if (rc == 0)
+ rc = err;
+ }
+
+ return rc ? -EIO : 0;
+}
+
int ll_fsync(struct file *file, struct dentry *dentry, int data)
{
struct inode *inode = dentry->d_inode;
RETURN(rc ? rc : -ENOMEM);
oa->o_id = lsm->lsm_object_id;
- oa->o_gr = lsm->lsm_object_gr;
+ oa->o_seq = lsm->lsm_object_seq;
oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLGROUP);
+ obdo_from_inode(oa, inode, &ll_i2info(inode)->lli_fid,
+ OBD_MD_FLTYPE | OBD_MD_FLATIME |
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLGROUP);
oc = ll_osscapa_get(inode, CAPA_OPC_OSS_WRITE);
err = obd_sync(ll_i2sbi(inode)->ll_dt_exp, oa, lsm,
break;
default:
CERROR("unknown fcntl lock type: %d\n", file_lock->fl_type);
- LBUG();
+ RETURN (-EINVAL);
}
switch (cmd) {
break;
default:
CERROR("unknown fcntl lock command: %d\n", cmd);
- LBUG();
+ RETURN (-EINVAL);
}
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
RETURN(-ENOSYS);
}
-int ll_have_md_lock(struct inode *inode, __u64 bits)
+int ll_have_md_lock(struct inode *inode, __u64 bits, ldlm_mode_t l_req_mode)
{
struct lustre_handle lockh;
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
+ ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
+ (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
struct lu_fid *fid;
int flags;
ENTRY;
RETURN(0);
fid = &ll_i2info(inode)->lli_fid;
- CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
+ CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
+ ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS, &policy,
- LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh)) {
+ mode, &lockh)) {
RETURN(1);
}
RETURN(0);
return 0;
}
-int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
+int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
+ __u64 ibits)
{
struct inode *inode = dentry->d_inode;
struct ptlrpc_request *req = NULL;
struct ll_sb_info *sbi;
struct obd_export *exp;
- int rc;
+ int rc = 0;
ENTRY;
if (!inode) {
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- oit.it_flags |= O_CHECK_STALE;
+ oit.it_create_mode |= M_CHECK_STALE;
rc = md_intent_lock(exp, op_data, NULL, 0,
/* we are not interested in name
based lookup */
&oit, 0, &req,
ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
- oit.it_flags &= ~O_CHECK_STALE;
+ oit.it_create_mode &= ~M_CHECK_STALE;
if (rc < 0) {
rc = ll_inode_revalidate_fini(inode, rc);
GOTO (out, rc);
here to preserve get_cwd functionality on 2.6.
Bug 10503 */
if (!dentry->d_inode->i_nlink) {
- spin_lock(&ll_lookup_lock);
+ cfs_spin_lock(&ll_lookup_lock);
spin_lock(&dcache_lock);
ll_drop_dentry(dentry);
spin_unlock(&dcache_lock);
- spin_unlock(&ll_lookup_lock);
+ cfs_spin_unlock(&ll_lookup_lock);
}
ll_lookup_finish_locks(&oit, dentry);
- } else if (!ll_have_md_lock(dentry->d_inode, MDS_INODELOCK_UPDATE |
- MDS_INODELOCK_LOOKUP)) {
+ } else if (!ll_have_md_lock(dentry->d_inode, ibits, LCK_MINMODE)) {
struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
obd_valid valid = OBD_MD_FLGETATTR;
- struct obd_capa *oc;
+ struct md_op_data *op_data;
int ealen = 0;
if (S_ISREG(inode->i_mode)) {
RETURN(rc);
valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE;
}
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
+ 0, ealen, LUSTRE_OPC_ANY,
+ NULL);
+ if (op_data == NULL)
+ RETURN(-ENOMEM);
+
+ op_data->op_valid = valid;
/* Once OBD_CONNECT_ATTRFID is not supported, we can't find one
* capa for this inode. Because we only keep capas of dirs
* fresh. */
- oc = ll_mdscapa_get(inode);
- rc = md_getattr(sbi->ll_md_exp, ll_inode2fid(inode), oc, valid,
- ealen, &req);
- capa_put(oc);
+ rc = md_getattr(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
if (rc) {
rc = ll_inode_revalidate_fini(inode, rc);
RETURN(rc);
}
rc = ll_prep_inode(&inode, req, NULL);
- if (rc)
- GOTO(out, rc);
}
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
+{
+ struct inode *inode = dentry->d_inode;
+ int rc;
+ ENTRY;
+
+ rc = __ll_inode_revalidate_it(dentry, it, MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LOOKUP);
/* if object not yet allocated, don't validate size */
- if (ll_i2info(inode)->lli_smd == NULL)
- GOTO(out, rc = 0);
+ if (rc == 0 && ll_i2info(dentry->d_inode)->lli_smd == NULL) {
+ LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
+ LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
+ LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
+ RETURN(0);
+ }
- /* ll_glimpse_size will prefer locally cached writes if they extend
+ /* cl_glimpse_size will prefer locally cached writes if they extend
* the file */
- rc = ll_glimpse_size(inode, 0);
- EXIT;
-out:
- ptlrpc_req_finished(req);
- return rc;
+
+ if (rc == 0)
+ rc = cl_glimpse_size(inode);
+
+ RETURN(rc);
}
int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
struct lookup_intent *it, struct kstat *stat)
{
struct inode *inode = de->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
int res = 0;
res = ll_inode_revalidate_it(de, it);
return res;
stat->dev = inode->i_sb->s_dev;
- stat->ino = inode->i_ino;
+ if (ll_need_32bit_api(ll_i2sbi(inode)))
+ stat->ino = cl_fid_build_ino32(&lli->lli_fid);
+ else
+ stat->ino = inode->i_ino;
+
stat->mode = inode->i_mode;
stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
stat->blksize = 1 << inode->i_blkbits;
#endif
- ll_inode_size_lock(inode, 0);
stat->size = i_size_read(inode);
stat->blocks = inode->i_blocks;
- ll_inode_size_unlock(inode, 0);
return 0;
}
return ll_getattr_it(mnt, de, &it, stat);
}
+#ifdef HAVE_LINUX_FIEMAP_H
+int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ __u64 start, __u64 len)
+{
+ int rc;
+ size_t num_bytes;
+ struct ll_user_fiemap *fiemap;
+ unsigned int extent_count = fieinfo->fi_extents_max;
+
+ num_bytes = sizeof(*fiemap) + (extent_count *
+ sizeof(struct ll_fiemap_extent));
+ OBD_VMALLOC(fiemap, num_bytes);
+
+ if (fiemap == NULL)
+ RETURN(-ENOMEM);
+
+ fiemap->fm_flags = fieinfo->fi_flags;
+ fiemap->fm_extent_count = fieinfo->fi_extents_max;
+ fiemap->fm_start = start;
+ fiemap->fm_length = len;
+ memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
+ sizeof(struct ll_fiemap_extent));
+
+ rc = ll_do_fiemap(inode, fiemap, num_bytes);
+
+ fieinfo->fi_flags = fiemap->fm_flags;
+ fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
+ memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
+ fiemap->fm_mapped_extents * sizeof(struct ll_fiemap_extent));
+
+ OBD_VFREE(fiemap, num_bytes);
+ return rc;
+}
+#endif
+
+
static
int lustre_check_acl(struct inode *inode, int mask)
{
int rc;
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
acl = posix_acl_dup(lli->lli_posix_acl);
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
if (!acl)
RETURN(-EAGAIN);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
+#ifndef HAVE_INODE_PERMISION_2ARGS
int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
+#else
+int ll_inode_permission(struct inode *inode, int mask)
+#endif
{
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), mask %o\n",
- inode->i_ino, inode->i_generation, inode, mask);
+ int rc = 0;
+ ENTRY;
+
+ /* as root inode are NOT getting validated in lookup operation,
+ * need to do it before permission check. */
+
+ if (inode == inode->i_sb->s_root->d_inode) {
+ struct lookup_intent it = { .it_op = IT_LOOKUP };
+
+ rc = __ll_inode_revalidate_it(inode->i_sb->s_root, &it,
+ MDS_INODELOCK_LOOKUP);
+ if (rc)
+ RETURN(rc);
+ }
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
+ inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
+
if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
return lustre_check_remote_perm(inode, mask);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
- return generic_permission(inode, mask, lustre_check_acl);
+ rc = generic_permission(inode, mask, lustre_check_acl);
+
+ RETURN(rc);
}
#else
int ll_inode_permission(struct inode *inode, int mask, struct nameidata *nd)
return -EROFS;
if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
return -EACCES;
- if (current->fsuid == inode->i_uid) {
+ if (cfs_curproc_fsuid() == inode->i_uid) {
mode >>= 6;
} else if (1) {
if (((mode >> 3) & mask & S_IRWXO) != mask)
return rc;
} else {
check_groups:
- if (in_group_p(inode->i_gid))
+ if (cfs_curproc_is_in_groups(inode->i_gid))
mode >>= 3;
}
if ((mode & mask & S_IRWXO) == mask)
}
#endif
+#ifdef HAVE_FILE_READV
+#define READ_METHOD readv
+#define READ_FUNCTION ll_file_readv
+#define WRITE_METHOD writev
+#define WRITE_FUNCTION ll_file_writev
+#else
+#define READ_METHOD aio_read
+#define READ_FUNCTION ll_file_aio_read
+#define WRITE_METHOD aio_write
+#define WRITE_FUNCTION ll_file_aio_write
+#endif
+
/* -o localflock - only provides locally consistent flock locks */
struct file_operations ll_file_operations = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
+#ifdef HAVE_KERNEL_SENDFILE
.sendfile = ll_file_sendfile,
+#endif
+#ifdef HAVE_KERNEL_SPLICE_READ
+ .splice_read = ll_file_splice_read,
+#endif
.fsync = ll_fsync,
+ .flush = ll_flush
};
struct file_operations ll_file_operations_flock = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
+#ifdef HAVE_KERNEL_SENDFILE
.sendfile = ll_file_sendfile,
+#endif
+#ifdef HAVE_KERNEL_SPLICE_READ
+ .splice_read = ll_file_splice_read,
+#endif
.fsync = ll_fsync,
+ .flush = ll_flush,
#ifdef HAVE_F_OP_FLOCK
.flock = ll_file_flock,
#endif
/* These are for -o noflock - to return ENOSYS on flock calls */
struct file_operations ll_file_operations_noflock = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
+#ifdef HAVE_KERNEL_SENDFILE
.sendfile = ll_file_sendfile,
+#endif
+#ifdef HAVE_KERNEL_SPLICE_READ
+ .splice_read = ll_file_splice_read,
+#endif
.fsync = ll_fsync,
+ .flush = ll_flush,
#ifdef HAVE_F_OP_FLOCK
.flock = ll_file_noflock,
#endif
.getxattr = ll_getxattr,
.listxattr = ll_listxattr,
.removexattr = ll_removexattr,
+#ifdef HAVE_LINUX_FIEMAP_H
+ .fiemap = ll_fiemap,
+#endif
};
/* dynamic ioctl number support routins */
static struct llioc_ctl_data {
- struct rw_semaphore ioc_sem;
- struct list_head ioc_head;
+ cfs_rw_semaphore_t ioc_sem;
+ cfs_list_t ioc_head;
} llioc = {
__RWSEM_INITIALIZER(llioc.ioc_sem),
CFS_LIST_HEAD_INIT(llioc.ioc_head)
struct llioc_data {
- struct list_head iocd_list;
+ cfs_list_t iocd_list;
unsigned int iocd_size;
llioc_callback_t iocd_cb;
unsigned int iocd_count;
in_data->iocd_count = count;
memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
- down_write(&llioc.ioc_sem);
- list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- up_write(&llioc.ioc_sem);
+ cfs_down_write(&llioc.ioc_sem);
+ cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
+ cfs_up_write(&llioc.ioc_sem);
RETURN(in_data);
}
if (magic == NULL)
return;
- down_write(&llioc.ioc_sem);
- list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
+ cfs_down_write(&llioc.ioc_sem);
+ cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
if (tmp == magic) {
unsigned int size = tmp->iocd_size;
- list_del(&tmp->iocd_list);
- up_write(&llioc.ioc_sem);
+ cfs_list_del(&tmp->iocd_list);
+ cfs_up_write(&llioc.ioc_sem);
OBD_FREE(tmp, size);
return;
}
}
- up_write(&llioc.ioc_sem);
+ cfs_up_write(&llioc.ioc_sem);
CWARN("didn't find iocontrol register block with magic: %p\n", magic);
}
struct llioc_data *data;
int rc = -EINVAL, i;
- down_read(&llioc.ioc_sem);
- list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
+ cfs_down_read(&llioc.ioc_sem);
+ cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
for (i = 0; i < data->iocd_count; i++) {
if (cmd != data->iocd_cmd[i])
continue;
if (ret == LLIOC_STOP)
break;
}
- up_read(&llioc.ioc_sem);
+ cfs_up_read(&llioc.ioc_sem);
if (rcp)
*rcp = rc;