#include "llite_internal.h"
#include <lustre/ll_fiemap.h>
-/* also used by llite/special.c:ll_special_open() */
+#include "cl_object.h"
+
/*
 * Allocate a per-open-file ll_file_data from its slab cache.
 *
 * Allocates with CFS_ALLOC_IO — presumably to keep the allocator from
 * recursing back into filesystem I/O under memory pressure (TODO confirm
 * against CFS_ALLOC_IO semantics).
 *
 * Returns the new ll_file_data, or NULL on allocation failure.
 */
struct ll_file_data *ll_file_data_get(void)
{
        struct ll_file_data *fd;

        OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
        return fd;
}
struct ptlrpc_request *req = NULL;
struct obd_device *obd = class_exp2obd(exp);
int epoch_close = 1;
- int seq_end = 0, rc;
+ int rc;
ENTRY;
if (obd == NULL) {
ll_prepare_close(inode, op_data, och);
epoch_close = (op_data->op_flags & MF_EPOCH_CLOSE);
rc = md_close(md_exp, op_data, och->och_mod, &req);
- if (rc != -EAGAIN)
- seq_end = 1;
-
if (rc == -EAGAIN) {
/* This close must have the epoch closed. */
LASSERT(exp->exp_connect_flags & OBD_CONNECT_SOM);
LASSERT(epoch_close);
/* MDS has instructed us to obtain Size-on-MDS attribute from
* OSTs and send setattr to back to MDS. */
- rc = ll_sizeonmds_update(inode, och->och_mod,
- &och->och_fh, op_data->op_ioepoch);
+ rc = ll_sizeonmds_update(inode, &och->och_fh,
+ op_data->op_ioepoch);
if (rc) {
CERROR("inode %lu mdc Size-on-MDS update failed: "
"rc = %d\n", inode->i_ino, rc);
S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
ll_queue_done_writing(inode, LLIF_DONE_WRITING);
} else {
- if (seq_end)
- ptlrpc_close_replay_seq(req);
md_clear_open_replay_data(md_exp, och);
/* Free @och if it is not waiting for DONE_WRITING. */
och->och_fh.cookie = DEAD_HANDLE_MAGIC;
/* clear group lock, if present */
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+#if 0 /* XXX */
struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP,
&fd->fd_cwlockh);
+#endif
}
/* Let's see if we have good enough OPEN lock on the file and if
struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm = lli->lli_smd;
int rc;
-
ENTRY;
+
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
/* The last ref on @file, maybe not the the owner pid of statahead.
* Different processes can open the same dir, "ll_opendir_key" means:
* it is me that should stop the statahead thread. */
- if (lli->lli_opendir_key == fd)
- ll_stop_statahead(inode, fd);
+ if (lli->lli_opendir_key == fd && lli->lli_opendir_pid != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
if (inode->i_sb->s_root == file->f_dentry) {
LUSTRE_FPRIVATE(file) = NULL;
it_open_error(DISP_OPEN_OPEN, itp))
GOTO(out, rc);
ll_release_openhandle(file->f_dentry, itp);
- GOTO(out_stale, rc);
+ GOTO(out, rc);
}
if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL);
out:
ptlrpc_req_finished(itp->d.lustre.it_data);
-
-out_stale:
it_clear_disposition(itp, DISP_ENQ_COMPLETE);
ll_intent_drop_lock(itp);
if (fd == NULL)
RETURN(-ENOMEM);
+ fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
+again:
spin_lock(&lli->lli_lock);
- /* "lli->lli_opendir_pid != 0" means someone has set it.
- * "lli->lli_sai != NULL" means the previous statahead has not
- * been cleanup. */
- if (lli->lli_opendir_pid == 0 && lli->lli_sai == NULL) {
- opendir_set = 1;
- lli->lli_opendir_pid = cfs_curproc_pid();
+ if (lli->lli_opendir_key == NULL && lli->lli_opendir_pid == 0) {
+ LASSERT(lli->lli_sai == NULL);
lli->lli_opendir_key = fd;
- } else if (unlikely(lli->lli_opendir_pid == cfs_curproc_pid())) {
+ lli->lli_opendir_pid = cfs_curproc_pid();
+ opendir_set = 1;
+ } else if (unlikely(lli->lli_opendir_pid == cfs_curproc_pid() &&
+ lli->lli_opendir_key != NULL)) {
/* Two cases for this:
* (1) The same process open such directory many times.
* (2) The old process opened the directory, and exited
* before its children processes. Then new process
* with the same pid opens such directory before the
* old process's children processes exit.
- * Change the owner to the latest one. */
- opendir_set = 2;
- lli->lli_opendir_key = fd;
+ * reset stat ahead for such cases. */
+ spin_unlock(&lli->lli_lock);
+ CDEBUG(D_INFO, "Conflict statahead for %.*s "DFID
+ " reset it.\n", file->f_dentry->d_name.len,
+ file->f_dentry->d_name.name,
+ PFID(&lli->lli_fid));
+ ll_stop_statahead(inode, lli->lli_opendir_key);
+ goto again;
}
spin_unlock(&lli->lli_lock);
}
would attempt to grab och_sem as well, that would
result in a deadlock */
up(&lli->lli_och_sem);
- it->it_flags |= O_CHECK_STALE;
+ it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
- it->it_flags &= ~O_CHECK_STALE;
+ it->it_create_mode &= ~M_CHECK_STALE;
if (rc) {
ll_file_data_put(fd);
GOTO(out_openerr, rc);
}
up(&lli->lli_och_sem);
out_openerr:
- if (opendir_set == 1) {
- lli->lli_opendir_key = NULL;
- lli->lli_opendir_pid = 0;
- } else if (unlikely(opendir_set == 2)) {
- ll_stop_statahead(inode, fd);
- }
+ if (opendir_set != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
}
return rc;
}
-/* Fills the obdo with the attributes for the inode defined by lsm */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo)
+/* Fills the obdo with the attributes for the lsm */
+static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
+ struct obd_capa *capa, struct obdo *obdo)
{
struct ptlrpc_request_set *set;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
+ struct obd_info oinfo = { { { 0 } } };
+ int rc;
- struct obd_info oinfo = { { { 0 } } };
- int rc;
ENTRY;
LASSERT(lsm != NULL);
OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
OBD_MD_FLGROUP;
- oinfo.oi_capa = ll_mdscapa_get(inode);
+ oinfo.oi_capa = capa;
set = ptlrpc_prep_set();
if (set == NULL) {
CERROR("can't allocate ptlrpc set\n");
rc = -ENOMEM;
} else {
- rc = obd_getattr_async(ll_i2dtexp(inode), &oinfo, set);
+ rc = obd_getattr_async(exp, &oinfo, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
}
- capa_put(oinfo.oi_capa);
- if (rc)
- RETURN(rc);
-
- oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE);
-
- obdo_refresh_inode(inode, oinfo.oi_oa, oinfo.oi_oa->o_valid);
- CDEBUG(D_INODE, "objid "LPX64" size %Lu, blocks %llu, blksize %lu\n",
- lli->lli_smd->lsm_object_id, i_size_read(inode),
- (unsigned long long)inode->i_blocks,
- (unsigned long)ll_inode_blksize(inode));
- RETURN(0);
-}
-
-static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct obd_export *exp = ll_i2dtexp(inode);
- struct {
- char name[16];
- struct ldlm_lock *lock;
- } key = { .name = KEY_LOCK_TO_STRIPE, .lock = lock };
- __u32 stripe, vallen = sizeof(stripe);
- struct lov_oinfo *loinfo;
- int rc;
- ENTRY;
-
- if (lsm->lsm_stripe_count == 1)
- GOTO(check, stripe = 0);
-
- /* get our offset in the lov */
- rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe, lsm);
- if (rc != 0) {
- CERROR("obd_get_info: rc = %d\n", rc);
- RETURN(rc);
- }
- LASSERT(stripe < lsm->lsm_stripe_count);
-
-check:
- loinfo = lsm->lsm_oinfo[stripe];
- if (!osc_res_name_eq(loinfo->loi_id, loinfo->loi_gr,
- &lock->l_resource->lr_name)){
- LDLM_ERROR(lock, "resource doesn't match object "LPU64"/"LPU64,
- loinfo->loi_id, loinfo->loi_gr);
- RETURN(-ELDLM_NO_LOCK_DATA);
- }
-
- RETURN(stripe);
-}
-
-/* Get extra page reference to ensure it is not going away */
-void ll_pin_extent_cb(void *data)
-{
- struct page *page = data;
-
- page_cache_get(page);
-
- return;
+ if (rc == 0)
+ oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME | OBD_MD_FLSIZE);
+ RETURN(rc);
}
-/* Flush the page from page cache for an extent as its canceled.
- * Page to remove is delivered as @data.
- *
- * No one can dirty the extent until we've finished our work and they cannot
- * enqueue another lock. The DLM protects us from ll_file_read/write here,
- * but other kernel actors could have pages locked.
- *
- * If @discard is set, there is no need to write the page if it is dirty.
- *
- * Called with the DLM lock held. */
-int ll_page_removal_cb(void *data, int discard)
+/* Fills the obdo with the attributes for the inode defined by lsm */
+int ll_inode_getattr(struct inode *inode, struct obdo *obdo)
{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *capa = ll_mdscapa_get(inode);
int rc;
- struct page *page = data;
- struct address_space *mapping;
-
- ENTRY;
-
- /* We have page reference already from ll_pin_page */
- lock_page(page);
-
- /* Already truncated by somebody */
- if (!page->mapping)
- GOTO(out, rc = 0);
- mapping = page->mapping;
-
- ll_teardown_mmaps(mapping,
- (__u64)page->index << PAGE_CACHE_SHIFT,
- ((__u64)page->index<<PAGE_CACHE_SHIFT)|
- ~PAGE_CACHE_MASK);
- LL_CDEBUG_PAGE(D_PAGE, page, "removing page\n");
-
- if (!discard && clear_page_dirty_for_io(page)) {
- LASSERT(page->mapping);
- rc = ll_call_writepage(page->mapping->host, page);
- /* either waiting for io to complete or reacquiring
- * the lock that the failed writepage released */
- lock_page(page);
- wait_on_page_writeback(page);
- if (rc != 0) {
- CERROR("writepage inode %lu(%p) of page %p "
- "failed: %d\n", mapping->host->i_ino,
- mapping->host, page, rc);
- if (rc == -ENOSPC)
- set_bit(AS_ENOSPC, &mapping->flags);
- else
- set_bit(AS_EIO, &mapping->flags);
- }
- set_bit(AS_EIO, &mapping->flags);
- }
- if (page->mapping != NULL) {
- struct ll_async_page *llap = llap_cast_private(page);
- /* checking again to account for writeback's lock_page() */
- LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
- if (llap)
- ll_ra_accounting(llap, page->mapping);
- ll_truncate_complete_page(page);
- }
- EXIT;
-out:
- LASSERT(!PageWriteback(page));
- unlock_page(page);
- page_cache_release(page);
-
- return 0;
-}
-
-int ll_extent_lock_cancel_cb(struct ldlm_lock *lock, struct ldlm_lock_desc *new,
- void *data, int flag)
-{
- struct inode *inode;
- struct ll_inode_info *lli;
- struct lov_stripe_md *lsm;
- int stripe;
- __u64 kms;
-
- ENTRY;
-
- if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
- LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
- LBUG();
- }
-
- inode = ll_inode_from_lock(lock);
- if (inode == NULL)
- RETURN(0);
- lli = ll_i2info(inode);
- if (lli == NULL)
- GOTO(iput, 0);
- if (lli->lli_smd == NULL)
- GOTO(iput, 0);
- lsm = lli->lli_smd;
-
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- GOTO(iput, 0);
-
- lov_stripe_lock(lsm);
- lock_res_and_lock(lock);
- kms = ldlm_extent_shift_kms(lock,
- lsm->lsm_oinfo[stripe]->loi_kms);
-
- if (lsm->lsm_oinfo[stripe]->loi_kms != kms)
- LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
- lsm->lsm_oinfo[stripe]->loi_kms, kms);
- lsm->lsm_oinfo[stripe]->loi_kms = kms;
- unlock_res_and_lock(lock);
- lov_stripe_unlock(lsm);
- ll_queue_done_writing(inode, 0);
- EXIT;
-iput:
- iput(inode);
-
- return 0;
-}
-
-#if 0
-int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
-{
- /* XXX ALLOCATE - 160 bytes */
- struct inode *inode = ll_inode_from_lock(lock);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lustre_handle lockh = { 0 };
- struct ost_lvb *lvb;
- int stripe;
- ENTRY;
-
- if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV)) {
- LBUG(); /* not expecting any blocked async locks yet */
- LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
- "lock, returning");
- ldlm_lock_dump(D_OTHER, lock, 0);
- ldlm_reprocess_all(lock->l_resource);
- RETURN(0);
- }
-
- LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
-
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- goto iput;
-
- if (lock->l_lvb_len) {
- struct lov_stripe_md *lsm = lli->lli_smd;
- __u64 kms;
- lvb = lock->l_lvb_data;
- lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
-
- lock_res_and_lock(lock);
- ll_inode_size_lock(inode, 1);
- kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
- kms = ldlm_extent_shift_kms(NULL, kms);
- if (lsm->lsm_oinfo[stripe].loi_kms != kms)
- LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
- lsm->lsm_oinfo[stripe].loi_kms, kms);
- lsm->lsm_oinfo[stripe].loi_kms = kms;
- ll_inode_size_unlock(inode, 1);
- unlock_res_and_lock(lock);
- }
-
-iput:
- iput(inode);
- wake_up(&lock->l_waitq);
-
- ldlm_lock2handle(lock, &lockh);
- ldlm_lock_decref(&lockh, LCK_PR);
- RETURN(0);
-}
-#endif
-
-static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
-{
- struct ptlrpc_request *req = reqp;
- struct inode *inode = ll_inode_from_lock(lock);
- struct ll_inode_info *lli;
- struct lov_stripe_md *lsm;
- struct ost_lvb *lvb;
- int rc, stripe;
ENTRY;
- if (inode == NULL)
- GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
- lli = ll_i2info(inode);
- if (lli == NULL)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
- lsm = lli->lli_smd;
- if (lsm == NULL)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
-
- /* First, find out which stripe index this lock corresponds to. */
- stripe = ll_lock_to_stripe_offset(inode, lock);
- if (stripe < 0)
- GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
-
- req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- sizeof(*lvb));
- rc = req_capsule_server_pack(&req->rq_pill);
- if (rc) {
- CERROR("lustre_pack_reply: %d\n", rc);
- GOTO(iput, rc);
+ rc = ll_lsm_getattr(lli->lli_smd, ll_i2dtexp(inode), capa, obdo);
+ capa_put(capa);
+ if (rc == 0) {
+ obdo_refresh_inode(inode, obdo, obdo->o_valid);
+ CDEBUG(D_INODE,
+ "objid "LPX64" size %Lu, blocks %llu, blksize %lu\n",
+ lli->lli_smd->lsm_object_id, i_size_read(inode),
+ (unsigned long long)inode->i_blocks,
+ (unsigned long)ll_inode_blksize(inode));
}
-
- lvb = req_capsule_server_get(&req->rq_pill, &RMF_DLM_LVB);
- lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe]->loi_kms;
- lvb->lvb_mtime = LTIME_S(inode->i_mtime);
- lvb->lvb_atime = LTIME_S(inode->i_atime);
- lvb->lvb_ctime = LTIME_S(inode->i_ctime);
-
- LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64
- " atime "LPU64", mtime "LPU64", ctime "LPU64,
- i_size_read(inode), stripe, lvb->lvb_size, lvb->lvb_mtime,
- lvb->lvb_atime, lvb->lvb_ctime);
- iput:
- iput(inode);
-
- out:
- /* These errors are normal races, so we don't want to fill the console
- * with messages by calling ptlrpc_error() */
- if (rc == -ELDLM_NO_LOCK_DATA)
- lustre_pack_reply(req, 1, NULL, NULL);
-
- req->rq_status = rc;
- return rc;
+ RETURN(rc);
}
-static int ll_merge_lvb(struct inode *inode)
+int ll_merge_lvb(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
RETURN(rc);
}
-int ll_local_size(struct inode *inode)
-{
- ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lustre_handle lockh = { 0 };
- int flags = 0;
- int rc;
- ENTRY;
-
- if (lli->lli_smd->lsm_stripe_count == 0)
- RETURN(0);
-
- rc = obd_match(sbi->ll_dt_exp, lli->lli_smd, LDLM_EXTENT,
- &policy, LCK_PR, &flags, inode, &lockh);
- if (rc < 0)
- RETURN(rc);
- else if (rc == 0)
- RETURN(-ENODATA);
-
- rc = ll_merge_lvb(inode);
- obd_cancel(sbi->ll_dt_exp, lli->lli_smd, LCK_PR, &lockh);
- RETURN(rc);
-}
-
/*
 * ioctl helper: fetch size/blocks/times for a bare stripe md into @st.
 *
 * Performs a getattr against the data export for @lsm with no capability
 * (NULL capa) — there is no inode in this path — and copies the returned
 * attributes into the caller-supplied lstat_t.
 *
 * \retval 0 on success, negative errno from ll_lsm_getattr() otherwise.
 */
int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
                     lstat_t *st)
{
        struct obdo obdo = { 0 };
        int rc;

        rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, &obdo);
        if (rc == 0) {
                st->st_size   = obdo.o_size;
                st->st_blocks = obdo.o_blocks;
                st->st_mtime  = obdo.o_mtime;
                st->st_atime  = obdo.o_atime;
                st->st_ctime  = obdo.o_ctime;
        }
        return rc;
}
-/* NB: obd_merge_lvb will prefer locally cached writes if they extend the
- * file (because it prefers KMS over RSS when larger) */
-int ll_glimpse_size(struct inode *inode, int ast_flags)
+void ll_io_init(struct cl_io *io, const struct file *file, int write)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lustre_handle lockh = { 0 };
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- int rc;
- ENTRY;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- RETURN(0);
+ LASSERT(fd != NULL);
+ memset(io, 0, sizeof *io);
+ io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
+ if (write)
+ io->u.ci_wr.wr_append = file->f_flags & O_APPEND;
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_lockreq = CILR_MAYBE;
+ if (fd->fd_flags & LL_FILE_IGNORE_LOCK || sbi->ll_flags & LL_SBI_NOLCK)
+ io->ci_lockreq = CILR_NEVER;
+ else if (file->f_flags & O_APPEND)
+ io->ci_lockreq = CILR_MANDATORY;
+}
+
+static ssize_t ll_file_io_generic(const struct lu_env *env,
+ struct ccc_io_args *args, struct file *file,
+ enum cl_io_type iot, loff_t *ppos, size_t count)
+{
+ struct cl_io *io;
+ ssize_t result;
+ ENTRY;
- CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
+ io = &ccc_env_info(env)->cti_io;
+ ll_io_init(io, file, iot == CIT_WRITE);
- if (!lli->lli_smd) {
- CDEBUG(D_DLMTRACE, "No objects for inode %lu\n", inode->i_ino);
- RETURN(0);
- }
+ if (iot == CIT_READ)
+ io->u.ci_rd.rd_is_sendfile = args->cia_is_sendfile;
- /* NOTE: this looks like DLM lock request, but it may not be one. Due
- * to LDLM_FL_HAS_INTENT flag, this is glimpse request, that
- * won't revoke any conflicting DLM locks held. Instead,
- * ll_glimpse_callback() will be called on each client
- * holding a DLM lock against this file, and resulting size
- * will be returned for each stripe. DLM lock on [0, EOF] is
- * acquired only if there were no conflicting locks. */
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = LCK_PR;
- einfo.ei_cb_bl = osc_extent_blocking_cb;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = inode;
-
- oinfo.oi_policy.l_extent.end = OBD_OBJECT_EOF;
- oinfo.oi_lockh = &lockh;
- oinfo.oi_md = lli->lli_smd;
- oinfo.oi_flags = ast_flags | LDLM_FL_HAS_INTENT;
-
- rc = obd_enqueue_rqset(sbi->ll_dt_exp, &oinfo, &einfo);
- if (rc == -ENOENT)
- RETURN(rc);
- if (rc != 0) {
- CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
- RETURN(rc > 0 ? -EIO : rc);
+ if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
+ if (cl_io_is_sendfile(io)) {
+ vio->u.read.cui_actor = args->cia_actor;
+ vio->u.read.cui_target = args->cia_target;
+ } else {
+ cio->cui_iov = args->cia_iov;
+ cio->cui_nrsegs = args->cia_nrsegs;
+#ifndef HAVE_FILE_WRITEV
+ cio->cui_iocb = args->cia_iocb;
+#endif
+ }
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ result = cl_io_loop(env, io);
+ } else
+ /* cl_io_rw_init() handled IO */
+ result = io->ci_result;
+ if (io->ci_nob > 0) {
+ result = io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos;
}
-
- rc = ll_merge_lvb(inode);
-
- CDEBUG(D_DLMTRACE, "glimpse: size: %llu, blocks: %llu\n",
- i_size_read(inode), (unsigned long long)inode->i_blocks);
-
- RETURN(rc);
+ cl_io_fini(env, io);
+ RETURN(result);
}
-int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm, int mode,
- ldlm_policy_data_t *policy, struct lustre_handle *lockh,
- int ast_flags)
-{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ost_lvb lvb;
- struct ldlm_enqueue_info einfo = { 0 };
- struct obd_info oinfo = { { { 0 } } };
- int rc;
- ENTRY;
-
- LASSERT(!lustre_handle_is_used(lockh));
- LASSERT(lsm != NULL);
-
- /* don't drop the mmapped file to LRU */
- if (mapping_mapped(inode->i_mapping))
- ast_flags |= LDLM_FL_NO_LRU;
-
- /* XXX phil: can we do this? won't it screw the file size up? */
- if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
- (sbi->ll_flags & LL_SBI_NOLCK))
- RETURN(0);
- CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
- inode->i_ino, policy->l_extent.start, policy->l_extent.end);
-
- einfo.ei_type = LDLM_EXTENT;
- einfo.ei_mode = mode;
- einfo.ei_cb_bl = osc_extent_blocking_cb;
- einfo.ei_cb_cp = ldlm_completion_ast;
- einfo.ei_cb_gl = ll_glimpse_callback;
- einfo.ei_cbdata = inode;
-
- oinfo.oi_policy = *policy;
- oinfo.oi_lockh = lockh;
- oinfo.oi_md = lsm;
- oinfo.oi_flags = ast_flags;
-
- rc = obd_enqueue(sbi->ll_dt_exp, &oinfo, &einfo, NULL);
- *policy = oinfo.oi_policy;
- if (rc > 0)
- rc = -EIO;
+/*
+ * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
+ */
+static int ll_file_get_iov_count(const struct iovec *iov,
+ unsigned long *nr_segs, size_t *count)
+{
+ size_t cnt = 0;
+ unsigned long seg;
- ll_inode_size_lock(inode, 1);
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 1);
-
- if (policy->l_extent.start == 0 &&
- policy->l_extent.end == OBD_OBJECT_EOF) {
- /* vmtruncate()->ll_truncate() first sets the i_size and then
- * the kms under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size from the kms before the truncating path has
- * updated the kms. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order. */
- i_size_write(inode, lvb.lvb_size);
- CDEBUG(D_INODE, "inode=%lu, updating i_size %llu\n",
- inode->i_ino, i_size_read(inode));
- }
+ for (seg = 0; seg < *nr_segs; seg++) {
+ const struct iovec *iv = &iov[seg];
- if (rc == 0) {
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
+ /*
+ * If any segment has a negative length, or the cumulative
+ * length ever wraps negative then return -EINVAL.
+ */
+ cnt += iv->iov_len;
+ if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
+ return -EINVAL;
+ if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
+ continue;
+ if (seg == 0)
+ return -EFAULT;
+ *nr_segs = seg;
+ cnt -= iv->iov_len; /* This segment is no good */
+ break;
}
- ll_inode_size_unlock(inode, 1);
-
- RETURN(rc);
+ *count = cnt;
+ return 0;
}
-int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm, int mode,
- struct lustre_handle *lockh)
+#ifdef HAVE_FILE_READV
+static ssize_t ll_file_readv(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc;
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- /* XXX phil: can we do this? won't it screw the file size up? */
- if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
- (sbi->ll_flags & LL_SBI_NOLCK))
- RETURN(0);
-
- rc = obd_cancel(sbi->ll_dt_exp, lsm, mode, lockh);
-
- RETURN(rc);
-}
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
-static void ll_set_file_contended(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- cfs_time_t now = cfs_time_current();
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- spin_lock(&lli->lli_lock);
- lli->lli_contention_time = now;
- lli->lli_flags |= LLIF_CONTENDED;
- spin_unlock(&lli->lli_lock);
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_is_sendfile = 0;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ result = ll_file_io_generic(env, args, file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-void ll_clear_file_contended(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
-
- spin_lock(&lli->lli_lock);
- lli->lli_flags &= ~LLIF_CONTENDED;
- spin_unlock(&lli->lli_lock);
-}
-
-static int ll_is_file_contended(struct file *file)
+static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct lu_env *env;
+ struct iovec *local_iov;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (!(sbi->ll_lco.lco_flags & OBD_CONNECT_SRVLOCK)) {
- CDEBUG(D_INFO, "the server does not support SRVLOCK feature,"
- " osc connect flags = 0x"LPX64"\n",
- sbi->ll_lco.lco_flags);
- RETURN(0);
- }
- if (fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK))
- RETURN(1);
- if (lli->lli_flags & LLIF_CONTENDED) {
- cfs_time_t cur_time = cfs_time_current();
- cfs_time_t retry_time;
-
- retry_time = cfs_time_add(
- lli->lli_contention_time,
- cfs_time_seconds(sbi->ll_contention_time));
- if (cfs_time_after(cur_time, retry_time)) {
- ll_clear_file_contended(inode);
- RETURN(0);
- }
- RETURN(1);
- }
- RETURN(0);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ result = ll_file_readv(file, local_iov, 1, ppos);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-static int ll_file_get_tree_lock(struct ll_lock_tree *tree, struct file *file,
- const char *buf, size_t count,
- loff_t start, loff_t end, int rw)
-{
- int append;
- int tree_locked = 0;
- int rc;
- struct inode * inode = file->f_dentry->d_inode;
+#else
+static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- append = (rw == OBD_BRW_WRITE) && (file->f_flags & O_APPEND);
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
- if (append || !ll_is_file_contended(file)) {
- struct ll_lock_tree_node *node;
- int ast_flags;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- ast_flags = append ? 0 : LDLM_FL_DENY_ON_CONTENTION;
- if (file->f_flags & O_NONBLOCK)
- ast_flags |= LDLM_FL_BLOCK_NOWAIT;
- node = ll_node_from_inode(inode, start, end,
- (rw == OBD_BRW_WRITE) ? LCK_PW : LCK_PR);
- if (IS_ERR(node)) {
- rc = PTR_ERR(node);
- GOTO(out, rc);
- }
- tree->lt_fd = LUSTRE_FPRIVATE(file);
- rc = ll_tree_lock(tree, node, buf, count, ast_flags);
- if (rc == 0)
- tree_locked = 1;
- else if (rc == -EUSERS)
- ll_set_file_contended(inode);
- else
- GOTO(out, rc);
- }
- RETURN(tree_locked);
-out:
- return rc;
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_is_sendfile = 0;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ args->cia_iocb = iocb;
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/**
- * Checks if requested extent lock is compatible with a lock under a page.
- *
- * Checks if the lock under \a page is compatible with a read or write lock
- * (specified by \a rw) for an extent [\a start , \a end].
- *
- * \param page the page under which lock is considered
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param start start of the requested extent
- * \param end end of the requested extent
- * \param cookie transparent parameter for passing locking context
- *
- * \post result == 1, *cookie == context, appropriate lock is referenced or
- * \post result == 0
- *
- * \retval 1 owned lock is reused for the request
- * \retval 0 no lock reused for the request
- *
- * \see ll_release_short_lock
- */
-static int ll_reget_short_lock(struct page *page, int rw,
- obd_off start, obd_off end,
- void **cookie)
+static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
{
- struct ll_async_page *llap;
- struct obd_export *exp;
- struct inode *inode = page->mapping->host;
-
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
ENTRY;
- exp = ll_i2dtexp(inode);
- if (exp == NULL)
- RETURN(0);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- llap = llap_cast_private(page);
- if (llap == NULL)
- RETURN(0);
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_left = count;
- RETURN(obd_reget_short_lock(exp, ll_i2info(inode)->lli_smd,
- &llap->llap_cookie, rw, start, end,
- cookie));
-}
+ result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
-/**
- * Releases a reference to a lock taken in a "fast" way.
- *
- * Releases a read or a write (specified by \a rw) lock
- * referenced by \a cookie.
- *
- * \param inode inode to which data belong
- * \param end end of the locked extent
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param cookie transparent parameter for passing locking context
- *
- * \post appropriate lock is dereferenced
- *
- * \see ll_reget_short_lock
- */
-static void ll_release_short_lock(struct inode *inode, obd_off end,
- void *cookie, int rw)
-{
- struct obd_export *exp;
- int rc;
-
- exp = ll_i2dtexp(inode);
- if (exp == NULL)
- return;
-
- rc = obd_release_short_lock(exp, ll_i2info(inode)->lli_smd, end,
- cookie, rw);
- if (rc < 0)
- CERROR("unlock failed (%d)\n", rc);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
+#endif
-/**
- * Checks if requested extent lock is compatible
- * with a lock under a page in page cache.
- *
- * Checks if a lock under some \a page is compatible with a read or write lock
- * (specified by \a rw) for an extent [\a start , \a end].
- *
- * \param file the file under which lock is considered
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param ppos start of the requested extent
- * \param end end of the requested extent
- * \param cookie transparent parameter for passing locking context
- * \param buf userspace buffer for the data
- *
- * \post result == 1, *cookie == context, appropriate lock is referenced
- * \post retuls == 0
- *
- * \retval 1 owned lock is reused for the request
- * \retval 0 no lock reused for the request
- *
- * \see ll_file_put_fast_lock
+/*
+ * Write to a file (through the page cache).
*/
-static inline int ll_file_get_fast_lock(struct file *file,
- obd_off ppos, obd_off end,
- char *buf, void **cookie, int rw)
-{
- int rc = 0;
- struct page *page;
-
+#ifdef HAVE_FILE_WRITEV
+static ssize_t ll_file_writev(struct file *file, const struct iovec *iov,
+ unsigned long nr_segs, loff_t *ppos)
+{
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (!ll_region_mapped((unsigned long)buf, end - ppos)) {
- page = find_lock_page(file->f_dentry->d_inode->i_mapping,
- ppos >> CFS_PAGE_SHIFT);
- if (page) {
- if (ll_reget_short_lock(page, rw, ppos, end, cookie))
- rc = 1;
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
- unlock_page(page);
- page_cache_release(page);
- }
- }
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- RETURN(rc);
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ result = ll_file_io_generic(env, args, file, CIT_WRITE, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/**
- * Releases a reference to a lock taken in a "fast" way.
- *
- * Releases a read or a write (specified by \a rw) lock
- * referenced by \a cookie.
- *
- * \param inode inode to which data belong
- * \param end end of the locked extent
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param cookie transparent parameter for passing locking context
- *
- * \post appropriate lock is dereferenced
- *
- * \see ll_file_get_fast_lock
- */
-static inline void ll_file_put_fast_lock(struct inode *inode, obd_off end,
- void *cookie, int rw)
-{
- ll_release_short_lock(inode, end, cookie, rw);
-}
-
-enum ll_lock_style {
- LL_LOCK_STYLE_NOLOCK = 0,
- LL_LOCK_STYLE_FASTLOCK = 1,
- LL_LOCK_STYLE_TREELOCK = 2
-};
-
-/**
- * Checks if requested extent lock is compatible with a lock
- * under a page cache page.
- *
- * Checks if the lock under \a page is compatible with a read or write lock
- * (specified by \a rw) for an extent [\a start , \a end].
- *
- * \param file file under which I/O is processed
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param ppos start of the requested extent
- * \param end end of the requested extent
- * \param cookie transparent parameter for passing locking context
- * (only used with LL_LOCK_STYLE_FASTLOCK)
- * \param tree lock tree (only used with LL_LOCK_STYLE_TREELOCK)
- * \param buf userspace buffer for the data
- *
- * \retval LL_LOCK_STYLE_FASTLOCK owned lock is reused through fast lock
- * \retval LL_LOCK_STYLE_TREELOCK got a lock through tree lock
- * \retval LL_LOCK_STYLE_NOLOCK got no lock
- *
- * \see ll_file_put_lock
- */
-static inline int ll_file_get_lock(struct file *file, obd_off ppos,
- obd_off end, char *buf, void **cookie,
- struct ll_lock_tree *tree, int rw)
+static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
+ loff_t *ppos)
{
- int rc;
-
+ struct lu_env *env;
+ struct iovec *local_iov;
+ ssize_t result;
+ int refcheck;
ENTRY;
- if (ll_file_get_fast_lock(file, ppos, end, buf, cookie, rw))
- RETURN(LL_LOCK_STYLE_FASTLOCK);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- rc = ll_file_get_tree_lock(tree, file, buf, ppos - end, ppos, end, rw);
- /* rc: 1 for tree lock, 0 for no lock, <0 for error */
- switch (rc) {
- case 1:
- RETURN(LL_LOCK_STYLE_TREELOCK);
- case 0:
- RETURN(LL_LOCK_STYLE_NOLOCK);
- }
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
- /* an error happened if we reached this point, rc = -errno here */
- RETURN(rc);
+ result = ll_file_writev(file, local_iov, 1, ppos);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/**
- * Drops the lock taken by ll_file_get_lock.
- *
- * Releases a read or a write (specified by \a rw) lock
- * referenced by \a tree or \a cookie.
- *
- * \param inode inode to which data belong
- * \param end end of the locked extent
- * \param lockstyle facility through which the lock was taken
- * \param rw OBD_BRW_READ if requested for reading,
- * OBD_BRW_WRITE if requested for writing
- * \param cookie transparent parameter for passing locking context
- * (only used with LL_LOCK_STYLE_FASTLOCK)
- * \param tree lock tree (only used with LL_LOCK_STYLE_TREELOCK)
- *
- * \post appropriate lock is dereferenced
- *
- * \see ll_file_get_lock
- */
-static inline void ll_file_put_lock(struct inode *inode, obd_off end,
- enum ll_lock_style lock_style,
- void *cookie, struct ll_lock_tree *tree,
- int rw)
-
-{
- switch (lock_style) {
- case LL_LOCK_STYLE_TREELOCK:
- ll_tree_unlock(tree);
- break;
- case LL_LOCK_STYLE_FASTLOCK:
- ll_file_put_fast_lock(inode, end, cookie, rw);
- break;
- default:
- CERROR("invalid locking style (%d)\n", lock_style);
- }
-}
-
-static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
- loff_t *ppos)
+#else /* AIO stuff */
+static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_lock_tree tree;
- struct ost_lvb lvb;
- struct ll_ra_read bead;
- int ra = 0;
- obd_off end;
- ssize_t retval, chunk, sum = 0;
- int lock_style;
- void *cookie;
-
- __u64 kms;
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
- /* "If nbyte is 0, read() will return 0 and have no other results."
- * -- Single Unix Spec */
- if (count == 0)
- RETURN(0);
-
- ll_stats_ops_tally(sbi, LPROC_LL_READ_BYTES, count);
-
- if (!lsm) {
- /* Read on file with no objects should return zero-filled
- * buffers up to file size (we can get non-zero sizes with
- * mknod + truncate, then opening file for read. This is a
- * common pattern in NFS case, it seems). Bug 6243 */
- int notzeroed;
- /* Since there are no objects on OSTs, we have nothing to get
- * lock on and so we are forced to access inode->i_size
- * unguarded */
-
- /* Read beyond end of file */
- if (*ppos >= i_size_read(inode))
- RETURN(0);
-
- if (count > i_size_read(inode) - *ppos)
- count = i_size_read(inode) - *ppos;
- /* Make sure to correctly adjust the file pos pointer for
- * EFAULT case */
- notzeroed = clear_user(buf, count);
- count -= notzeroed;
- *ppos += count;
- if (!count)
- RETURN(-EFAULT);
- RETURN(count);
- }
-repeat:
- if (sbi->ll_max_rw_chunk != 0) {
- /* first, let's know the end of the current stripe */
- end = *ppos;
- obd_extent_calc(sbi->ll_dt_exp, lsm, OBD_CALC_STRIPE_END, &end);
- /* correct, the end is beyond the request */
- if (end > *ppos + count - 1)
- end = *ppos + count - 1;
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ RETURN(result);
- /* and chunk shouldn't be too large even if striping is wide */
- if (end - *ppos > sbi->ll_max_rw_chunk)
- end = *ppos + sbi->ll_max_rw_chunk - 1;
- } else {
- end = *ppos + count - 1;
- }
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- lock_style = ll_file_get_lock(file, (obd_off)(*ppos), end,
- buf, &cookie, &tree, OBD_BRW_READ);
- if (lock_style < 0)
- GOTO(out, retval = lock_style);
-
- ll_inode_size_lock(inode, 1);
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being read and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock acquired by
- * ll_tree_lock() above, because to change class, other client has to
- * take DLM lock conflicting with our lock. Also, any updates to
- * ->i_size by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(sbi->ll_dt_exp, lsm, &lvb, 1);
- kms = lvb.lvb_size;
- if (*ppos + count - 1 > kms) {
- /* A glimpse is necessary to determine whether we return a
- * short read (B) or some zeroes at the end of the buffer (C) */
- ll_inode_size_unlock(inode, 1);
- retval = ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
- if (retval) {
- if (lock_style != LL_LOCK_STYLE_NOLOCK)
- ll_file_put_lock(inode, end, lock_style,
- cookie, &tree, OBD_BRW_READ);
- goto out;
- }
- } else {
- /* region is within kms and, hence, within real file size (A).
- * We need to increase i_size to cover the read region so that
- * generic_file_read() will do its job, but that doesn't mean
- * the kms size is _correct_, it is only the _minimum_ size.
- * If someone does a stat they will get the correct size which
- * will always be >= the kms value here. b=11081 */
- if (i_size_read(inode) < kms)
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode, 1);
- }
-
- chunk = end - *ppos + 1;
- CDEBUG(D_INODE, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
- inode->i_ino, chunk, *ppos, i_size_read(inode));
-
- if (lock_style != LL_LOCK_STYLE_NOLOCK) {
- /* turn off the kernel's read-ahead */
- file->f_ra.ra_pages = 0;
-
- /* initialize read-ahead window once per syscall */
- if (ra == 0) {
- ra = 1;
- bead.lrr_start = *ppos >> CFS_PAGE_SHIFT;
- bead.lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- ll_ra_read_in(file, &bead);
- }
-
- /* BUG: 5972 */
- file_accessed(file);
- retval = generic_file_read(file, buf, chunk, ppos);
- ll_file_put_lock(inode, end, lock_style, cookie, &tree,
- OBD_BRW_READ);
- } else {
- retval = ll_file_lockless_io(file, buf, chunk, ppos, READ);
- }
-
- ll_rw_stats_tally(sbi, current->pid, file, chunk, 0);
-
- if (retval > 0) {
- buf += retval;
- count -= retval;
- sum += retval;
- if (retval == chunk && count > 0)
- goto repeat;
- }
-
- out:
- if (ra != 0)
- ll_ra_read_ex(file, &bead);
- retval = (sum > 0) ? sum : retval;
- RETURN(retval);
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_iov = (struct iovec *)iov;
+ args->cia_nrsegs = nr_segs;
+ args->cia_iocb = iocb;
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
-/*
- * Write to a file (through the page cache).
- */
static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- struct ll_lock_tree tree;
- loff_t maxbytes = ll_file_maxbytes(inode);
- loff_t lock_start, lock_end, end;
- ssize_t retval, chunk, sum = 0;
- int tree_locked;
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
-
- SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
-
- /* POSIX, but surprised the VFS doesn't check this already */
- if (count == 0)
- RETURN(0);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
- * called on the file, don't fail the below assertion (bug 2388). */
- if (file->f_flags & O_LOV_DELAY_CREATE &&
- ll_i2info(inode)->lli_smd == NULL)
- RETURN(-EBADF);
-
- LASSERT(ll_i2info(inode)->lli_smd != NULL);
-
- down(&ll_i2info(inode)->lli_write_sem);
-
-repeat:
- chunk = 0; /* just to fix gcc's warning */
- end = *ppos + count - 1;
-
- if (file->f_flags & O_APPEND) {
- lock_start = 0;
- lock_end = OBD_OBJECT_EOF;
- } else if (sbi->ll_max_rw_chunk != 0) {
- /* first, let's know the end of the current stripe */
- end = *ppos;
- obd_extent_calc(sbi->ll_dt_exp, lsm, OBD_CALC_STRIPE_END,
- (obd_off *)&end);
-
- /* correct, the end is beyond the request */
- if (end > *ppos + count - 1)
- end = *ppos + count - 1;
-
- /* and chunk shouldn't be too large even if striping is wide */
- if (end - *ppos > sbi->ll_max_rw_chunk)
- end = *ppos + sbi->ll_max_rw_chunk - 1;
- lock_start = *ppos;
- lock_end = end;
- } else {
- lock_start = *ppos;
- lock_end = *ppos + count - 1;
- }
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_left = count;
- tree_locked = ll_file_get_tree_lock(&tree, file, buf, count,
- lock_start, lock_end, OBD_BRW_WRITE);
- if (tree_locked < 0)
- GOTO(out, retval = tree_locked);
+ result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
- /* This is ok, g_f_w will overwrite this under i_sem if it races
- * with a local truncate, it just makes our maxbyte checking easier.
- * The i_size value gets updated in ll_extent_lock() as a consequence
- * of the [0,EOF] extent lock we requested above. */
- if (file->f_flags & O_APPEND) {
- *ppos = i_size_read(inode);
- end = *ppos + count - 1;
- }
-
- if (*ppos >= maxbytes) {
- send_sig(SIGXFSZ, current, 0);
- GOTO(out_unlock, retval = -EFBIG);
- }
- if (end > maxbytes - 1)
- end = maxbytes - 1;
-
- /* generic_file_write handles O_APPEND after getting i_mutex */
- chunk = end - *ppos + 1;
- CDEBUG(D_INFO, "Writing inode %lu, "LPSZ" bytes, offset %Lu\n",
- inode->i_ino, chunk, *ppos);
- if (tree_locked)
- retval = generic_file_write(file, buf, chunk, ppos);
- else
- retval = ll_file_lockless_io(file, (char*)buf, chunk,
- ppos, WRITE);
- ll_rw_stats_tally(ll_i2sbi(inode), current->pid, file, chunk, 1);
-
-out_unlock:
- if (tree_locked)
- ll_tree_unlock(&tree);
-
-out:
- if (retval > 0) {
- buf += retval;
- count -= retval;
- sum += retval;
- if (retval == chunk && count > 0)
- goto repeat;
- }
-
- up(&ll_i2info(inode)->lli_write_sem);
-
- retval = (sum > 0) ? sum : retval;
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_WRITE_BYTES,
- retval > 0 ? retval : 0);
- RETURN(retval);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
+#endif
+
/*
* Send file content (through pagecache) somewhere with helper
static ssize_t ll_file_sendfile(struct file *in_file, loff_t *ppos,size_t count,
read_actor_t actor, void *target)
{
- struct inode *inode = in_file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- struct ll_lock_tree tree;
- struct ll_lock_tree_node *node;
- struct ost_lvb lvb;
- struct ll_ra_read bead;
- int rc;
- ssize_t retval;
- __u64 kms;
+ struct lu_env *env;
+ struct ccc_io_args *args;
+ ssize_t result;
+ int refcheck;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
- inode->i_ino, inode->i_generation, inode, count, *ppos);
-
- /* "If nbyte is 0, read() will return 0 and have no other results."
- * -- Single Unix Spec */
- if (count == 0)
- RETURN(0);
-
- ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_READ_BYTES, count);
- /* turn off the kernel's read-ahead */
- in_file->f_ra.ra_pages = 0;
-
- /* File with no objects, nothing to lock */
- if (!lsm)
- RETURN(generic_file_sendfile(in_file, ppos, count, actor, target));
-
- node = ll_node_from_inode(inode, *ppos, *ppos + count - 1, LCK_PR);
- if (IS_ERR(node))
- RETURN(PTR_ERR(node));
- tree.lt_fd = LUSTRE_FPRIVATE(in_file);
- rc = ll_tree_lock(&tree, node, NULL, count,
- in_file->f_flags & O_NONBLOCK?LDLM_FL_BLOCK_NOWAIT:0);
- if (rc != 0)
- RETURN(rc);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
- ll_clear_file_contended(inode);
- ll_inode_size_lock(inode, 1);
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being read and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock acquired by
- * ll_tree_lock() above, because to change class, other client has to
- * take DLM lock conflicting with our lock. Also, any updates to
- * ->i_size by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- inode_init_lvb(inode, &lvb);
- obd_merge_lvb(ll_i2sbi(inode)->ll_dt_exp, lsm, &lvb, 1);
- kms = lvb.lvb_size;
- if (*ppos + count - 1 > kms) {
- /* A glimpse is necessary to determine whether we return a
- * short read (B) or some zeroes at the end of the buffer (C) */
- ll_inode_size_unlock(inode, 1);
- retval = ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
- if (retval)
- goto out;
- } else {
- /* region is within kms and, hence, within real file size (A) */
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode, 1);
- }
-
- CDEBUG(D_INFO, "Send ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
- inode->i_ino, count, *ppos, i_size_read(inode));
-
- bead.lrr_start = *ppos >> CFS_PAGE_SHIFT;
- bead.lrr_count = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- ll_ra_read_in(in_file, &bead);
- /* BUG: 5972 */
- file_accessed(in_file);
- retval = generic_file_sendfile(in_file, ppos, count, actor, target);
- ll_ra_read_ex(in_file, &bead);
-
- out:
- ll_tree_unlock(&tree);
- RETURN(retval);
+ args = &vvp_env_info(env)->vti_args;
+ args->cia_is_sendfile = 1;
+ args->cia_target = target;
+ args->cia_actor = actor;
+ result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ RETURN(result);
}
static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct ll_inode_info *lli = ll_i2info(inode);
struct obd_export *exp = ll_i2dtexp(inode);
struct ll_recreate_obj ucreatp;
struct obd_trans_info oti = { 0 };
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
RETURN(-EPERM);
- rc = copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
- sizeof(struct ll_recreate_obj));
- if (rc) {
+ if (copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
+ sizeof(struct ll_recreate_obj)))
RETURN(-EFAULT);
- }
+
OBDO_ALLOC(oa);
if (oa == NULL)
RETURN(-ENOMEM);
- down(&lli->lli_size_sem);
- lsm = lli->lli_smd;
+ ll_inode_size_lock(inode, 0);
+ lsm = ll_i2info(inode)->lli_smd;
if (lsm == NULL)
GOTO(out, rc = -ENOENT);
lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
OBD_FREE(lsm2, lsm_size);
GOTO(out, rc);
out:
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
OBDO_FREE(oa);
return rc;
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
int flags, struct lov_user_md *lum, int lum_size)
{
- struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
int rc = 0;
ENTRY;
- down(&lli->lli_size_sem);
- lsm = lli->lli_smd;
+ ll_inode_size_lock(inode, 0);
+ lsm = ll_i2info(inode)->lli_smd;
if (lsm) {
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
RETURN(-EEXIST);
ll_release_openhandle(file->f_dentry, &oit);
out:
- up(&lli->lli_size_sem);
+ ll_inode_size_unlock(inode, 0);
ll_intent_release(&oit);
RETURN(rc);
out_req_free:
lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
LASSERT(lmm != NULL);
+ if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
+ (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3)) &&
+ (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_JOIN))) {
+ GOTO(out, rc = -EPROTO);
+ }
+
/*
* This is coming from the MDS, so is probably in
* little endian. We convert it to host endian before
* passing it to userspace.
*/
- if (lmm->lmm_magic == __swab32(LOV_MAGIC)) {
- lustre_swab_lov_user_md((struct lov_user_md *)lmm);
- lustre_swab_lov_user_md_objects((struct lov_user_md *)lmm);
- } else if (lmm->lmm_magic == __swab32(LOV_MAGIC_JOIN)) {
- lustre_swab_lov_user_md_join((struct lov_user_md_join *)lmm);
+ if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC)) {
+ /* if the function was called for a directory, avoid
+ * swabbing non-existent lsm objects */
+ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
+ lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
+ if (S_ISREG(body->mode))
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v1 *)lmm)->lmm_objects,
+ ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
+ } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
+ lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
+ if (S_ISREG(body->mode))
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v3 *)lmm)->lmm_objects,
+ ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
+ } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_JOIN)) {
+ lustre_swab_lov_user_md_join((struct lov_user_md_join *)lmm);
+ }
}
if (lmm->lmm_magic == LOV_MAGIC_JOIN) {
if (lump == NULL) {
RETURN(-ENOMEM);
}
- rc = copy_from_user(lump, (struct lov_user_md *)arg, lum_size);
- if (rc) {
+ if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
OBD_FREE(lump, lum_size);
RETURN(-EFAULT);
}
static int ll_lov_setstripe(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
+ struct lov_user_md_v3 lumv3;
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+ struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
+ struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
+ int lum_size;
int rc;
int flags = FMODE_WRITE;
ENTRY;
- /* Bug 1152: copy properly when this is no longer true */
- LASSERT(sizeof(lum) == sizeof(*lump));
- LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
- rc = copy_from_user(&lum, lump, sizeof(lum));
- if (rc)
+ /* first try with v1 which is smaller than v3 */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(lumv1, lumv1p, lum_size))
RETURN(-EFAULT);
- rc = ll_lov_setstripe_ea_info(inode, file, flags, &lum, sizeof(lum));
+ if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
+ lum_size = sizeof(struct lov_user_md_v3);
+ if (copy_from_user(&lumv3, lumv3p, lum_size))
+ RETURN(-EFAULT);
+ }
+
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size);
if (rc == 0) {
- put_user(0, &lump->lmm_stripe_count);
+ put_user(0, &lumv1p->lmm_stripe_count);
rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
- 0, ll_i2info(inode)->lli_smd, lump);
+ 0, ll_i2info(inode)->lli_smd,
+ (void *)arg);
}
RETURN(rc);
}
static int ll_get_grouplock(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- ldlm_policy_data_t policy = { .l_extent = { .start = 0,
- .end = OBD_OBJECT_EOF}};
- struct lustre_handle lockh = { 0 };
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int flags = 0, rc;
- ENTRY;
-
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- RETURN(-EINVAL);
- }
-
- policy.l_extent.gid = arg;
- if (file->f_flags & O_NONBLOCK)
- flags = LDLM_FL_BLOCK_NOWAIT;
-
- rc = ll_extent_lock(fd, inode, lsm, LCK_GROUP, &policy, &lockh, flags);
- if (rc)
- RETURN(rc);
-
- fd->fd_flags |= LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK;
- fd->fd_gid = arg;
- memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
-
- RETURN(0);
+ /* XXX */
+ return -ENOSYS;
}
static int ll_put_grouplock(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int rc;
- ENTRY;
-
- if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- /* Ugh, it's already unlocked. */
- RETURN(-EINVAL);
- }
-
- if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
- RETURN(-EINVAL);
-
- fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
-
- rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
- if (rc)
- RETURN(rc);
-
- fd->fd_gid = 0;
- memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
-
- RETURN(0);
+ /* XXX */
+ return -ENOSYS;
}
+#if LUSTRE_FIX >= 50
static int join_sanity_check(struct inode *head, struct inode *tail)
{
ENTRY;
{
struct dentry *tail_dentry = tail_filp->f_dentry;
struct lookup_intent oit = {.it_op = IT_OPEN,
- .it_flags = head_filp->f_flags|O_JOIN_FILE};
+ .it_flags = head_filp->f_flags,
+ .it_create_mode = M_JOIN_FILE};
struct ldlm_enqueue_info einfo = { LDLM_IBITS, LCK_CW,
- ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL };
+ ll_md_blocking_ast, ldlm_completion_ast, NULL, NULL, NULL };
struct lustre_handle lockh;
struct md_op_data *op_data;
ldlm_lock_decref(&lockh, oit.d.lustre.it_lock_mode);
oit.d.lustre.it_lock_mode = 0;
}
+ ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
+ it_clear_disposition(&oit, DISP_ENQ_COMPLETE);
ll_release_openhandle(head_filp->f_dentry, &oit);
out:
ll_intent_release(&oit);
struct file *tail_filp, *first_filp, *second_filp;
struct ll_lock_tree first_tree, second_tree;
struct ll_lock_tree_node *first_node, *second_node;
- struct ll_inode_info *hlli = ll_i2info(head), *tlli;
+ struct ll_inode_info *hlli = ll_i2info(head);
int rc = 0, cleanup_phase = 0;
ENTRY;
}
tail = igrab(tail_filp->f_dentry->d_inode);
- tlli = ll_i2info(tail);
tail_dentry = tail_filp->f_dentry;
LASSERT(tail_dentry);
cleanup_phase = 1;
}
RETURN(rc);
}
+#endif /* LUSTRE_FIX >= 50 */
/**
* Close inode open handle
inode, och);
out:
/* this one is in place of ll_file_open */
- ptlrpc_req_finished(it->d.lustre.it_data);
+ if (it_disposition(it, DISP_ENQ_OPEN_REF))
+ ptlrpc_req_finished(it->d.lustre.it_data);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
RETURN(rc);
}
case EXT3_IOC_GETVERSION:
RETURN(put_user(inode->i_generation, (int *)arg));
case LL_IOC_JOIN: {
+#if LUSTRE_FIX >= 50
+ /* Allow file join in beta builds to aid debugging */
char *ftail;
int rc;
rc = ll_file_join(inode, file, ftail);
putname(ftail);
RETURN(rc);
+#else
+ CWARN("file join is not supported in this version of Lustre\n");
+ RETURN(-ENOTTY);
+#endif
}
case LL_IOC_GROUP_LOCK:
RETURN(ll_get_grouplock(inode, file, arg));
*/
case LL_IOC_FLUSHCTX:
RETURN(ll_flush_ctx(inode));
+ case LL_IOC_PATH2FID: {
+ if (copy_to_user((void *)arg, &ll_i2info(inode)->lli_fid,
+ sizeof(struct lu_fid)))
+ RETURN(-EFAULT);
+
+ RETURN(0);
+ }
default: {
int err;
loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file->f_dentry->d_inode;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
loff_t retval;
ENTRY;
retval = offset + ((origin == 2) ? i_size_read(inode) :
if (file->f_flags & O_NONBLOCK)
nonblock = LDLM_FL_BLOCK_NOWAIT;
- if (lsm != NULL) {
- rc = ll_glimpse_size(inode, nonblock);
- if (rc != 0)
- RETURN(rc);
- }
+ rc = cl_glimpse_size(inode);
+ if (rc != 0)
+ RETURN(rc);
ll_inode_size_lock(inode, 0);
offset += i_size_read(inode);
if (offset >= 0 && offset <= ll_file_maxbytes(inode)) {
if (offset != file->f_pos) {
file->f_pos = offset;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- file->f_reada = 0;
- file->f_version = ++event;
-#endif
}
retval = offset;
}
break;
default:
CERROR("unknown fcntl lock type: %d\n", file_lock->fl_type);
- LBUG();
+ RETURN (-EINVAL);
}
switch (cmd) {
break;
default:
CERROR("unknown fcntl lock command: %d\n", cmd);
- LBUG();
+ RETURN (-EINVAL);
}
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
- oit.it_flags |= O_CHECK_STALE;
+ oit.it_create_mode |= M_CHECK_STALE;
rc = md_intent_lock(exp, op_data, NULL, 0,
/* we are not interested in name
based lookup */
&oit, 0, &req,
ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
- oit.it_flags &= ~O_CHECK_STALE;
+ oit.it_create_mode &= ~M_CHECK_STALE;
if (rc < 0) {
rc = ll_inode_revalidate_fini(inode, rc);
GOTO (out, rc);
if (ll_i2info(inode)->lli_smd == NULL)
GOTO(out, rc = 0);
- /* ll_glimpse_size will prefer locally cached writes if they extend
+ /* cl_glimpse_size will prefer locally cached writes if they extend
* the file */
- rc = ll_glimpse_size(inode, 0);
+ rc = cl_glimpse_size(inode);
EXIT;
out:
ptlrpc_req_finished(req);
}
#endif
+#ifdef HAVE_FILE_READV
+#define READ_METHOD readv
+#define READ_FUNCTION ll_file_readv
+#define WRITE_METHOD writev
+#define WRITE_FUNCTION ll_file_writev
+#else
+#define READ_METHOD aio_read
+#define READ_FUNCTION ll_file_aio_read
+#define WRITE_METHOD aio_write
+#define WRITE_FUNCTION ll_file_aio_write
+#endif
+
/* -o localflock - only provides locally consistent flock locks */
struct file_operations ll_file_operations = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
struct file_operations ll_file_operations_flock = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,
/* These are for -o noflock - to return ENOSYS on flock calls */
struct file_operations ll_file_operations_noflock = {
.read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
.write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
.ioctl = ll_file_ioctl,
.open = ll_file_open,
.release = ll_file_release,