#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/lustre_dlm.h>
#include <linux/lustre_lite.h>
-#include <linux/obd_lov.h> /* for lov_mds_md_size() in lov_setstripe() */
-#include <linux/random.h>
#include <linux/pagemap.h>
+#include <linux/file.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/lustre_compat25.h>
#endif
-
#include "llite_internal.h"
+#include <linux/obd_lov.h>
-static int ll_mdc_close(struct lustre_handle *mdc_conn, struct inode *inode,
- struct file *file)
+int ll_mdc_close(struct obd_export *mdc_exp, struct inode *inode,
+ struct file *file)
{
struct ll_file_data *fd = file->private_data;
struct ptlrpc_request *req = NULL;
- unsigned long flags;
- struct obd_import *imp;
+ struct obd_client_handle *och = &fd->fd_mds_och;
+ struct obdo obdo;
int rc;
ENTRY;
- /* Complete the open request and remove it from replay list */
- rc = mdc_close(&ll_i2sbi(inode)->ll_mdc_conn, inode->i_ino,
- inode->i_mode, &fd->fd_mds_och.och_fh, &req);
- if (rc)
- CERROR("inode %lu close failed: rc = %d\n", inode->i_ino, rc);
-
- imp = fd->fd_mds_och.och_req->rq_import;
- LASSERT(imp != NULL);
- spin_lock_irqsave(&imp->imp_lock, flags);
-
- DEBUG_REQ(D_HA, fd->fd_mds_och.och_req, "matched open req %p",
- fd->fd_mds_och.och_req);
-
- /* We held on to the request for replay until we saw a close for that
- * file. Now that we've closed it, it gets replayed on the basis of
- * its transno only. */
- spin_lock (&fd->fd_mds_och.och_req->rq_lock);
- fd->fd_mds_och.och_req->rq_replay = 0;
- spin_unlock (&fd->fd_mds_och.och_req->rq_lock);
-
- if (fd->fd_mds_och.och_req->rq_transno) {
- /* This open created a file, so it needs replay as a
- * normal transaction now. Our reference to it now
- * effectively owned by the imp_replay_list, and it'll
- * be committed just like other transno-having
- * requests from here on out. */
-
- /* We now retain this close request, so that it is
- * replayed if the open is replayed. We duplicate the
- * transno, so that we get freed at the right time,
- * and rely on the difference in xid to keep
- * everything ordered correctly.
- *
- * But! If this close was already given a transno
- * (because it caused real unlinking of an
- * open-unlinked file, f.e.), then we'll be ordered on
- * the basis of that and we don't need to do anything
- * magical here. */
- if (!req->rq_transno) {
- req->rq_transno = fd->fd_mds_och.och_req->rq_transno;
- ptlrpc_retain_replayable_request(req, imp);
- }
- spin_unlock_irqrestore(&imp->imp_lock, flags);
+ /* clear group lock, if present */
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
+ fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
+ rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP,
+ &fd->fd_cwlockh);
+ }
- /* Should we free_committed now? we always free before
- * replay, so it's probably a wash. We could check to
- * see if the fd_req should already be committed, in
- * which case we can avoid the whole retain_replayable
- * dance. */
- } else {
- /* No transno means that we can just drop our ref. */
- spin_unlock_irqrestore(&imp->imp_lock, flags);
+ obdo.o_id = inode->i_ino;
+ obdo.o_valid = OBD_MD_FLID;
+ obdo_from_inode(&obdo, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
+ OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME);
+ if (0 /* ll_is_inode_dirty(inode) */) {
+ obdo.o_flags = MDS_BFLAG_UNCOMMITTED_WRITES;
+ obdo.o_valid |= OBD_MD_FLFLAGS;
+ }
+ obdo.o_mds = ll_i2info(inode)->lli_mds;
+ rc = md_close(mdc_exp, &obdo, och, &req);
+
+ if (rc == EAGAIN) {
+ /* We are the last writer, so the MDS has instructed us to get
+ * the file size and any write cookies, then close again. */
+ //ll_queue_done_writing(inode);
+ rc = 0;
+ } else if (rc) {
+ CERROR("inode %lu mdc close failed: rc = %d\n",
+ inode->i_ino, rc);
+ }
+ if (rc == 0) {
+ rc = ll_objects_destroy(req, file->f_dentry->d_inode, 1);
+ if (rc)
+ CERROR("inode %lu ll_objects destroy: rc = %d\n",
+ inode->i_ino, rc);
}
- ptlrpc_req_finished(fd->fd_mds_och.och_req);
- /* Do this after the fd_req->rq_transno check, because we don't want
- * to bounce off zero references. */
+ mdc_clear_open_replay_data(mdc_exp, och);
ptlrpc_req_finished(req);
- fd->fd_mds_och.och_fh.cookie = DEAD_HANDLE_MAGIC;
+ och->och_fh.cookie = DEAD_HANDLE_MAGIC;
file->private_data = NULL;
OBD_SLAB_FREE(fd, ll_file_data_slab, sizeof *fd);
- RETURN(-abs(rc));
+ RETURN(rc);
}
/* While this returns an error code, fput() the caller does not, so we need
int ll_file_release(struct inode *inode, struct file *file)
{
struct ll_file_data *fd;
- struct obdo oa;
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_stripe_md *lsm = lli->lli_smd;
- int rc = 0, rc2;
+ int rc;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%u/%lu/%u(%p)\n",
+ ll_i2info(inode)->lli_mds, inode->i_ino,
inode->i_generation, inode);
/* don't do anything for / */
if (inode->i_sb->s_root == file->f_dentry)
RETURN(0);
- lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_RELEASE);
+ lprocfs_counter_incr(sbi->ll_stats, LPROC_LL_RELEASE);
fd = (struct ll_file_data *)file->private_data;
- if (!fd) /* no process opened the file after an mcreate */
- RETURN(0);
+ LASSERT(fd != NULL);
+
+ rc = ll_mdc_close(sbi->ll_mdc_exp, inode, file);
+ RETURN(rc);
+}
- /* we might not be able to get a valid handle on this file
- * again so we really want to flush our write cache.. */
- if (S_ISREG(inode->i_mode) && lsm) {
- write_inode_now(inode, 0);
- obdo_from_inode(&oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
- memcpy(obdo_handle(&oa), &fd->fd_ost_och, FD_OSTDATA_SIZE);
- oa.o_valid |= OBD_MD_FLHANDLE;
+static int ll_intent_file_open(struct file *file, void *lmm,
+ int lmmsize, struct lookup_intent *itp)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(file->f_dentry->d_inode);
+ struct lustre_handle lockh;
+ struct mdc_op_data data;
+ struct dentry *parent = file->f_dentry->d_parent;
+ const char *name = file->f_dentry->d_name.name;
+ const int len = file->f_dentry->d_name.len;
+ int rc;
- rc = obd_close(&sbi->ll_osc_conn, &oa, lsm, NULL);
- if (rc)
- CERROR("inode %lu object close failed: rc %d\n",
- inode->i_ino, rc);
- }
+ if (!parent)
+ RETURN(-ENOENT);
- rc2 = ll_mdc_close(&sbi->ll_mdc_conn, inode, file);
- if (rc2 && !rc)
- rc = rc2;
+ ll_prepare_mdc_op_data(&data, parent->d_inode, NULL, name, len, O_RDWR);
+ rc = md_enqueue(sbi->ll_mdc_exp, LDLM_IBITS, itp, LCK_PR, &data,
+ &lockh, lmm, lmmsize, ldlm_completion_ast,
+ ll_mdc_blocking_ast, NULL);
+ if (rc == 0) {
+ if (itp->d.lustre.it_lock_mode)
+ memcpy(&itp->d.lustre.it_lock_handle,
+ &lockh, sizeof(lockh));
+ } else if (rc < 0) {
+ CERROR("lock enqueue: err: %d\n", rc);
+ }
+
RETURN(rc);
}
-static int ll_local_open(struct file *file, struct lookup_intent *it)
+int ll_local_open(struct file *file, struct lookup_intent *it)
{
- struct ptlrpc_request *req = it->it_data;
+ struct ptlrpc_request *req = it->d.lustre.it_data;
+ struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct obd_export *mdc_exp = ll_i2mdcexp(file->f_dentry->d_inode);
struct ll_file_data *fd;
struct mds_body *body;
ENTRY;
* ll_mdc_close, so don't even try right now. */
LASSERT(fd != NULL);
- memset(fd, 0, sizeof(*fd));
-
memcpy(&fd->fd_mds_och.och_fh, &body->handle, sizeof(body->handle));
- fd->fd_mds_och.och_req = it->it_data;
+ fd->fd_mds_och.och_magic = OBD_CLIENT_HANDLE_MAGIC;
file->private_data = fd;
+ ll_readahead_init(file->f_dentry->d_inode, &fd->fd_ras);
- RETURN(0);
-}
-
-static int ll_osc_open(struct lustre_handle *conn, struct inode *inode,
- struct file *file, struct lov_stripe_md *lsm)
-{
- struct ll_file_data *fd = file->private_data;
- struct obdo *oa;
- int rc;
- ENTRY;
-
- oa = obdo_alloc();
- if (!oa)
- RETURN(-ENOMEM);
- oa->o_id = lsm->lsm_object_id;
- oa->o_mode = S_IFREG;
- oa->o_valid = OBD_MD_FLID;
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE);
- rc = obd_open(conn, oa, lsm, NULL, &fd->fd_ost_och);
- if (rc)
- GOTO(out, rc);
-
- file->f_flags &= ~O_LOV_DELAY_CREATE;
- obdo_refresh_inode(inode, oa, OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLATIME | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME);
- EXIT;
-out:
- obdo_free(oa);
- return rc;
-}
-
-/* Caller must hold lli_open_sem to protect lli->lli_smd from changing and
- * duplicate objects from being created. We only install lsm to lli_smd if
- * the mdc open was successful (hence stored stripe MD on MDS), otherwise
- * other nodes could try to create different objects for the same file.
- */
-static int ll_create_obj(struct lustre_handle *conn, struct inode *inode,
- struct file *file, struct lov_stripe_md *lsm)
-{
- struct ptlrpc_request *req = NULL;
- struct ll_inode_info *lli = ll_i2info(inode);
- struct lov_mds_md *lmm = NULL;
- struct obdo *oa;
- struct iattr iattr;
- struct mdc_op_data op_data;
- struct obd_trans_info oti = { 0 };
- int rc, err, lmm_size = 0;
- ENTRY;
-
- oa = obdo_alloc();
- if (!oa)
- RETURN(-ENOMEM);
-
- LASSERT(S_ISREG(inode->i_mode));
- oa->o_mode = S_IFREG | 0600;
- oa->o_id = inode->i_ino;
- oa->o_generation = inode->i_generation;
- /* Keep these 0 for now, because chown/chgrp does not change the
- * ownership on the OST, and we don't want to allow BA OST NFS
- * users to access these objects by mistake. */
- oa->o_uid = 0;
- oa->o_gid = 0;
- oa->o_valid = OBD_MD_FLID | OBD_MD_FLGENER | OBD_MD_FLTYPE |
- OBD_MD_FLMODE | OBD_MD_FLUID | OBD_MD_FLGID;
-#ifdef ENABLE_ORPHANS
- oa->o_valid |= OBD_MD_FLCOOKIE;
-#endif
-
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE|OBD_MD_FLATIME|OBD_MD_FLMTIME|
- OBD_MD_FLCTIME | (inode->i_size ? OBD_MD_FLSIZE : 0));
-
- rc = obd_create(conn, oa, &lsm, &oti);
- if (rc) {
- CERROR("error creating objects for inode %lu: rc = %d\n",
- inode->i_ino, rc);
- if (rc > 0) {
- CERROR("obd_create returned invalid rc %d\n", rc);
- rc = -EIO;
- }
- GOTO(out_oa, rc);
- }
- obdo_refresh_inode(inode, oa, OBD_MD_FLBLKSZ);
-
- LASSERT(lsm && lsm->lsm_object_id);
- rc = obd_packmd(conn, &lmm, lsm);
- if (rc < 0)
- GOTO(out_destroy, rc);
-
- lmm_size = rc;
-
- /* Save the stripe MD with this file on the MDS */
- memset(&iattr, 0, sizeof(iattr));
- iattr.ia_valid = ATTR_FROM_OPEN;
-
- ll_prepare_mdc_op_data(&op_data, inode, NULL, NULL, 0, 0);
-
-#if 0
-#warning FIXME: next line is for debugging purposes only
- obd_log_cancel(&ll_i2sbi(inode)->ll_osc_conn, lsm, oti.oti_numcookies,
- oti.oti_logcookies, OBD_LLOG_FL_SENDNOW);
-#endif
-
- rc = mdc_setattr(&ll_i2sbi(inode)->ll_mdc_conn, &op_data, &iattr,
- lmm, lmm_size, oti.oti_logcookies,
- oti.oti_numcookies * sizeof(oti.oti_onecookie), &req);
- ptlrpc_req_finished(req);
+ lli->lli_io_epoch = body->io_epoch;
- obd_free_diskmd(conn, &lmm);
+ mdc_set_open_replay_data(mdc_exp, &fd->fd_mds_och, it->d.lustre.it_data);
- /* If we couldn't complete mdc_open() and store the stripe MD on the
- * MDS, we need to destroy the objects now or they will be leaked.
- */
- if (rc) {
- CERROR("error: storing stripe MD for %lu: rc %d\n",
- inode->i_ino, rc);
- GOTO(out_destroy, rc);
- }
- lli->lli_smd = lsm;
- lli->lli_maxbytes = lsm->lsm_maxbytes;
-
- EXIT;
-out_oa:
- oti_free_cookies(&oti);
- obdo_free(oa);
- return rc;
-
-out_destroy:
- oa->o_id = lsm->lsm_object_id;
- oa->o_valid = OBD_MD_FLID;
- obdo_from_inode(oa, inode, OBD_MD_FLTYPE);
-#if 0
- err = obd_log_cancel(conn, lsm, oti.oti_numcookies, oti.oti_logcookies,
- OBD_LLOG_FL_SENDNOW);
- if (err)
- CERROR("error cancelling inode %lu log cookies: rc %d\n",
- inode->i_ino, err);
-#endif
- err = obd_destroy(conn, oa, lsm, NULL);
- obd_free_memmd(conn, &lsm);
- if (err)
- CERROR("error uncreating inode %lu objects: rc %d\n",
- inode->i_ino, err);
- goto out_oa;
+ RETURN(0);
}
/* Open a file, and (for the very first open) create objects on the OSTs at
*/
int ll_file_open(struct inode *inode, struct file *file)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
- struct lustre_handle *conn = ll_i2obdconn(inode);
- struct lookup_intent *it;
+ struct lookup_intent *it, oit = { .it_op = IT_OPEN,
+ .it_flags = file->f_flags };
struct lov_stripe_md *lsm;
+ struct ptlrpc_request *req;
int rc = 0;
ENTRY;
RETURN(0);
it = file->f_it;
- lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_OPEN);
- rc = ll_it_open_error(DISP_OPEN_OPEN, it);
+ if (!it || !it->d.lustre.it_disposition) {
+ it = &oit;
+ rc = ll_intent_file_open(file, NULL, 0, it);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_OPEN);
+ rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc)
- RETURN(rc);
+ GOTO(out, rc);
rc = ll_local_open(file, it);
if (rc)
LBUG();
- mdc_set_open_replay_data(&((struct ll_file_data *)
- file->private_data)->fd_mds_och);
if (!S_ISREG(inode->i_mode))
- RETURN(0);
+ GOTO(out, rc);
lsm = lli->lli_smd;
if (lsm == NULL) {
if (file->f_flags & O_LOV_DELAY_CREATE ||
!(file->f_mode & FMODE_WRITE)) {
- CDEBUG(D_INODE, "delaying object creation\n");
- RETURN(0);
- }
- down(&lli->lli_open_sem);
- if (!lli->lli_smd) {
- rc = ll_create_obj(conn, inode, file, NULL);
- up(&lli->lli_open_sem);
- if (rc)
- GOTO(out_close, rc);
- } else {
- CERROR("warning: stripe already set on ino %lu\n",
- inode->i_ino);
- up(&lli->lli_open_sem);
+ CDEBUG(D_INODE, "object creation was delayed\n");
+ GOTO(out, rc);
}
- lsm = lli->lli_smd;
}
-
- rc = ll_osc_open(conn, inode, file, lsm);
- if (rc)
- GOTO(out_close, rc);
- RETURN(0);
-
- out_close:
- ll_mdc_close(&sbi->ll_mdc_conn, inode, file);
+ file->f_flags &= ~O_LOV_DELAY_CREATE;
+ GOTO(out, rc);
+ out:
+ req = it->d.lustre.it_data;
+ ptlrpc_req_finished(req);
+ if (rc == 0)
+ ll_open_complete(inode);
return rc;
}
-/*
- * really does the getattr on the inode and updates its fields
- */
-int ll_inode_getattr(struct inode *inode, struct lov_stripe_md *lsm,
- void *ostdata)
+/* Fills the obdo with the attributes for the inode defined by lsm */
+int ll_lsm_getattr(struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct obdo *oa)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
struct ptlrpc_request_set *set;
- struct obdo oa;
- int bef, aft;
- unsigned long before, after;
int rc;
ENTRY;
- LASSERT(lsm);
- LASSERT(sbi);
- LASSERT(lli);
+ LASSERT(lsm != NULL);
- memset(&oa, 0, sizeof oa);
- oa.o_id = lsm->lsm_object_id;
- oa.o_mode = S_IFREG;
- oa.o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE |
+ memset(oa, 0, sizeof *oa);
+ oa->o_id = lsm->lsm_object_id;
+ oa->o_gr = lsm->lsm_object_gr;
+ oa->o_mode = S_IFREG;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE |
OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME;
-
- if (ostdata != NULL) {
- memcpy(obdo_handle(&oa), ostdata, FD_OSTDATA_SIZE);
- oa.o_valid |= OBD_MD_FLHANDLE;
- }
-
- /* getattr can race with writeback. we don't want to trust a getattr
- * that doesn't include the writeback of our farthest cached pages
- * that it raced with. */
- /* Now that the OSC knows the cached-page status, it can and should be
- * adjusting its getattr results to include the maximum cached offset
- * for its stripe(s). */
- do {
- bef = obd_last_dirty_offset(ll_i2obdconn(inode), lli->lli_smd,
- &before);
-#if 0
- rc = obd_getattr(&sbi->ll_osc_conn, &oa, lsm);
-#else
- set = ptlrpc_prep_set ();
- if (set == NULL) {
- CERROR ("ENOMEM allocing request set\n");
- rc = -ENOMEM;
- } else {
- rc = obd_getattr_async(&sbi->ll_osc_conn, &oa, lsm, set);
- if (rc == 0)
- rc = ptlrpc_set_wait (set);
- ptlrpc_set_destroy (set);
- }
-#endif
- if (rc)
- RETURN(rc);
+ OBD_MD_FLCTIME | OBD_MD_FLGROUP;
- aft = obd_last_dirty_offset(ll_i2obdconn(inode), lli->lli_smd,
- &after);
- CDEBUG(D_INODE, " %d,%lu -> %d,%lu\n", bef, before, aft, after);
- } while (bef == 0 &&
- (aft != 0 || after < before) &&
- oa.o_size < ((u64)before + 1) << PAGE_CACHE_SHIFT);
-
- obdo_refresh_inode(inode, &oa, OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
- if (inode->i_blksize < PAGE_CACHE_SIZE)
- inode->i_blksize = PAGE_CACHE_SIZE;
-
- /* make sure getattr doesn't return a size that causes writeback
- * to forget about cached writes */
- if ((aft == 0) && oa.o_size < ((u64)after + 1) << PAGE_CACHE_SHIFT) {
- CDEBUG(D_INODE, "cached at %lu, keeping %llu i_size instead "
- "of oa "LPU64"\n", after, inode->i_size,
- oa.o_size);
- RETURN(0);
+ set = ptlrpc_prep_set();
+ if (set == NULL) {
+ CERROR ("ENOMEM allocing request set\n");
+ rc = -ENOMEM;
+ } else {
+ rc = obd_getattr_async(exp, oa, lsm, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
}
+ if (rc)
+ RETURN(rc);
- obdo_to_inode(inode, &oa, OBD_MD_FLSIZE);
-
- CDEBUG(D_INODE, "objid "LPX64" size %Lu/%Lu blksize %lu\n",
- lsm->lsm_object_id, inode->i_size, inode->i_size,
- inode->i_blksize);
+ oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME | OBD_MD_FLSIZE);
RETURN(0);
}
}
}
-#if 0
-static void ll_update_atime(struct inode *inode)
+static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
{
- if (IS_RDONLY(inode)) return;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ struct obd_export *exp = ll_i2obdexp(inode);
+ struct {
+ char name[16];
+ struct ldlm_lock *lock;
+ struct lov_stripe_md *lsm;
+ } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
+ __u32 stripe, vallen = sizeof(stripe);
+ int rc;
+ ENTRY;
- /* update atime, but don't explicitly write it out just this change */
- inode->i_atime = CURRENT_TIME;
+ if (lsm->lsm_stripe_count == 1)
+ GOTO(check, stripe = 0);
+
+ /* get our offset in the lov */
+ rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe);
+ if (rc != 0) {
+ CERROR("obd_get_info: rc = %d\n", rc);
+ RETURN(rc);
+ }
+ LASSERT(stripe < lsm->lsm_stripe_count);
+
+check:
+ if (lsm->lsm_oinfo[stripe].loi_id != lock->l_resource->lr_name.name[0]||
+ lsm->lsm_oinfo[stripe].loi_gr != lock->l_resource->lr_name.name[2]){
+ LDLM_ERROR(lock, "resource doesn't match object "LPU64"/"LPU64
+ " inode=%lu/%u (%p)\n",
+ lsm->lsm_oinfo[stripe].loi_id,
+ lsm->lsm_oinfo[stripe].loi_gr,
+ inode->i_ino, inode->i_generation, inode);
+ RETURN(-ELDLM_NO_LOCK_DATA);
+ }
+
+ RETURN(stripe);
}
-#endif
-/*
- * flush the page cache for an extent as its canceled. when we're on an
- * lov we get a lock cancelation for each of the obd locks under the lov
- * so we have to map the obd's region back onto the stripes in the file
- * that it held.
+/* Flush the page cache for an extent as its canceled. When we're on an LOV,
+ * we get a lock cancellation for each stripe, so we have to map the obd's
+ * region back onto the stripes in the file that it held.
*
- * no one can dirty the extent until we've finished our work and they
- * can enqueue another lock.
+ * No one can dirty the extent until we've finished our work and they can
+ * enqueue another lock. The DLM protects us from ll_file_read/write here,
+ * but other kernel actors could have pages locked.
*
- * XXX this could be asking the inode's dirty tree for info
- */
+ * Called with the DLM lock held. */
void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
- struct ldlm_lock *lock)
+ struct ldlm_lock *lock, __u32 stripe)
{
- struct ldlm_extent *extent = &lock->l_extent;
+ ldlm_policy_data_t tmpex;
unsigned long start, end, count, skip, i, j;
struct page *page;
- int ret;
+ int rc, rc2, discard = lock->l_flags & LDLM_FL_DISCARD_DATA;
+ struct lustre_handle lockh;
ENTRY;
- CDEBUG(D_INODE, "obdo %lu inode %p ["LPU64"->"LPU64"] size: %llu\n",
- inode->i_ino, inode, extent->start, extent->end, inode->i_size);
+ memcpy(&tmpex, &lock->l_policy_data, sizeof(tmpex));
+ CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
+ inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
+ inode->i_size);
+
+ /* our locks are page granular thanks to osc_enqueue, we invalidate the
+ * whole page. */
+ LASSERT((tmpex.l_extent.start & ~PAGE_CACHE_MASK) == 0);
+ LASSERT(((tmpex.l_extent.end + 1) & ~PAGE_CACHE_MASK) == 0);
- start = extent->start >> PAGE_CACHE_SHIFT;
count = ~0;
skip = 0;
- end = (extent->end >> PAGE_CACHE_SHIFT) + 1;
- if ((end << PAGE_CACHE_SHIFT) < extent->end)
- end = ~0;
+ start = tmpex.l_extent.start >> PAGE_CACHE_SHIFT;
+ end = tmpex.l_extent.end >> PAGE_CACHE_SHIFT;
if (lsm->lsm_stripe_count > 1) {
- struct {
- char name[16];
- struct ldlm_lock *lock;
- struct lov_stripe_md *lsm;
- } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
- __u32 stripe;
- __u32 vallen = sizeof(stripe);
- int rc;
-
- /* get our offset in the lov */
- rc = obd_get_info(ll_i2obdconn(inode), sizeof(key),
- &key, &vallen, &stripe);
- if (rc != 0) {
- CERROR("obd_get_info: rc = %d\n", rc);
- LBUG();
- }
- LASSERT(stripe < lsm->lsm_stripe_count);
-
count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
skip = (lsm->lsm_stripe_count - 1) * count;
- start += (start/count * skip) + (stripe * count);
+ start += start/count * skip + stripe * count;
if (end != ~0)
- end += (end/count * skip) + (stripe * count);
+ end += end/count * skip + stripe * count;
}
+ if (end < tmpex.l_extent.end >> PAGE_CACHE_SHIFT)
+ end = ~0;
i = (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
- if (end >= i)
- clear_bit(LLI_F_HAVE_SIZE_LOCK, &(ll_i2info(inode)->lli_flags));
if (i < end)
end = i;
- CDEBUG(D_INODE, "start: %lu j: %lu count: %lu skip: %lu end: %lu\n",
- start, start % count, count, skip, end);
-
- /* start writeback on dirty pages in the extent when its PW */
- for (i = start, j = start % count;
- lock->l_granted_mode == LCK_PW && i < end; j++, i++) {
+ CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
+ "count: %lu skip: %lu end: %lu%s\n", start, start % count,
+ count, skip, end, discard ? " (DISCARDING)" : "");
+
+ /* this is the simplistic implementation of page eviction at
+ * cancelation. It is careful to get races with other page
+ * lockers handled correctly. fixes from bug 20 will make it
+ * more efficient by associating locks with pages and with
+ * batching writeback under the lock explicitly. */
+ for (i = start, j = start % count; i <= end;
+ j++, i++, tmpex.l_extent.start += PAGE_CACHE_SIZE) {
if (j == count) {
+ CDEBUG(D_PAGE, "skip index %lu to %lu\n", i, i + skip);
i += skip;
j = 0;
+ if (i > end)
+ break;
}
- /* its unlikely, but give us a chance to bail when we're out */
- ll_pgcache_lock(inode->i_mapping);
- if (list_empty(&inode->i_mapping->dirty_pages)) {
- CDEBUG(D_INODE, "dirty list empty\n");
- ll_pgcache_unlock(inode->i_mapping);
+ LASSERTF(tmpex.l_extent.start< lock->l_policy_data.l_extent.end,
+ LPU64" >= "LPU64" start %lu i %lu end %lu\n",
+ tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
+ start, i, end);
+
+ if (!mapping_has_pages(inode->i_mapping)) {
+ CDEBUG(D_INODE|D_PAGE, "nothing left\n");
break;
}
- ll_pgcache_unlock(inode->i_mapping);
- if (need_resched())
- schedule();
+ cond_resched();
page = find_get_page(inode->i_mapping, i);
if (page == NULL)
continue;
- if (!PageDirty(page) || TryLockPage(page)) {
- page_cache_release(page);
- continue;
- }
- if (PageDirty(page)) {
- CDEBUG(D_INODE, "writing page %p\n", page);
- ll_pgcache_lock(inode->i_mapping);
- list_del(&page->list);
- list_add(&page->list, &inode->i_mapping->locked_pages);
- ll_pgcache_unlock(inode->i_mapping);
-
- /* this writepage might write out pages outside
- * this extent, but that's ok, the pages are only
- * still dirty because a lock still covers them */
- ClearPageDirty(page);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- ret = inode->i_mapping->a_ops->writepage(page);
-#else
- ret = inode->i_mapping->a_ops->writepage(page, NULL);
-#endif
- if (ret != 0)
- unlock_page(page);
- } else {
- unlock_page(page);
- }
- page_cache_release(page);
-
- }
+ LL_CDEBUG_PAGE(D_PAGE, page, "lock page idx %lu ext "LPU64"\n",
+ i, tmpex.l_extent.start);
+ lock_page(page);
- /* our locks are page granular thanks to osc_enqueue, we invalidate the
- * whole page. */
- LASSERT((extent->start & ~PAGE_CACHE_MASK) == 0);
- LASSERT(((extent->end+1) & ~PAGE_CACHE_MASK) == 0);
- for (i = start, j = start % count ; i < end ; j++, i++) {
- if (j == count) {
- i += skip;
- j = 0;
+ /* page->mapping to check with racing against teardown */
+ if (!discard && clear_page_dirty_for_io(page)) {
+ rc = ll_call_writepage(inode, page);
+ if (rc != 0)
+ CERROR("writepage of page %p failed: %d\n",
+ page, rc);
+ /* either waiting for io to complete or reacquiring
+ * the lock that the failed writepage released */
+ lock_page(page);
}
- ll_pgcache_lock(inode->i_mapping);
- if (list_empty(&inode->i_mapping->dirty_pages) &&
- list_empty(&inode->i_mapping->clean_pages) &&
- list_empty(&inode->i_mapping->locked_pages)) {
- CDEBUG(D_INODE, "nothing left\n");
- ll_pgcache_unlock(inode->i_mapping);
- break;
+
+ tmpex.l_extent.end = tmpex.l_extent.start + PAGE_CACHE_SIZE - 1;
+ /* check to see if another DLM lock covers this page */
+ rc2 = ldlm_lock_match(lock->l_resource->lr_namespace,
+ LDLM_FL_BLOCK_GRANTED|LDLM_FL_CBPENDING |
+ LDLM_FL_TEST_LOCK,
+ &lock->l_resource->lr_name, LDLM_EXTENT,
+ &tmpex, LCK_PR | LCK_PW, &lockh);
+ if (rc2 == 0 && page->mapping != NULL) {
+ // checking again to account for writeback's lock_page()
+ LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
+ ll_truncate_complete_page(page);
}
- ll_pgcache_unlock(inode->i_mapping);
- if (need_resched())
- schedule();
- page = find_get_page(inode->i_mapping, i);
- if (page == NULL)
- continue;
- CDEBUG(D_INODE, "dropping page %p at %lu\n", page, page->index);
- lock_page(page);
- if (page->mapping) /* might have raced */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- truncate_complete_page(page);
-#else
- truncate_complete_page(page->mapping, page);
-#endif
unlock_page(page);
page_cache_release(page);
}
+ LASSERTF(tmpex.l_extent.start <=
+ (lock->l_policy_data.l_extent.end == ~0ULL ? ~0ULL :
+ lock->l_policy_data.l_extent.end + 1),
+ "loop too long "LPU64" > "LPU64" start %lu i %lu end %lu\n",
+ tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
+ start, i, end);
EXIT;
}
struct ldlm_lock_desc *new, void *data,
int flag)
{
- struct inode *inode = data;
- struct ll_inode_info *lli = ll_i2info(inode);
struct lustre_handle lockh = { 0 };
int rc;
ENTRY;
- if ((unsigned long)inode < 0x1000) {
+ if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
LBUG();
}
if (rc != ELDLM_OK)
CERROR("ldlm_cli_cancel failed: %d\n", rc);
break;
- case LDLM_CB_CANCELING:
- /* FIXME: we could be given 'canceling intents' so that we
- * could know to write-back or simply throw away the pages
- * based on if the cancel comes from a desire to, say,
- * read or truncate.. */
- if ((unsigned long)lli->lli_smd < 0x1000) {
- /* note that lli is part of the inode itself, so it
- * is valid if as checked the inode pointer above. */
- CERROR("inode %lu, sb %p, lli %p, lli_smd %p\n",
- inode->i_ino, inode->i_sb, lli, lli->lli_smd);
- LDLM_ERROR(lock, "cancel lock on bad inode %p", inode);
- LBUG();
- }
+ case LDLM_CB_CANCELING: {
+ struct inode *inode;
+ struct ll_inode_info *lli;
+ struct lov_stripe_md *lsm;
+ __u32 stripe;
+ __u64 kms;
- ll_pgcache_remove_extent(inode, lli->lli_smd, lock);
+ /* This lock wasn't granted, don't try to evict pages */
+ if (lock->l_req_mode != lock->l_granted_mode)
+ RETURN(0);
+
+ inode = ll_inode_from_lock(lock);
+ if (inode == NULL)
+ RETURN(0);
+ lli = ll_i2info(inode);
+ if (lli == NULL)
+ goto iput;
+ if (lli->lli_smd == NULL)
+ goto iput;
+ lsm = lli->lli_smd;
+
+ stripe = ll_lock_to_stripe_offset(inode, lock);
+ if (stripe < 0)
+ goto iput;
+ ll_pgcache_remove_extent(inode, lsm, lock, stripe);
+
+ down(&inode->i_sem);
+ kms = ldlm_extent_shift_kms(lock,
+ lsm->lsm_oinfo[stripe].loi_kms);
+
+ if (lsm->lsm_oinfo[stripe].loi_kms != kms)
+ LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
+ lsm->lsm_oinfo[stripe].loi_kms, kms);
+ lsm->lsm_oinfo[stripe].loi_kms = kms;
+ up(&inode->i_sem);
+ //ll_try_done_writing(inode);
+ iput:
+ iput(inode);
break;
+ }
default:
LBUG();
}
RETURN(0);
}
-/*
- * some callers, notably truncate, really don't want i_size set based
- * on the the size returned by the getattr, or lock acquisition in
- * the future.
- */
-int ll_extent_lock_no_validate(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm,
- int mode, struct ldlm_extent *extent,
- struct lustre_handle *lockh)
+#if 0
+int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+{
+ /* XXX ALLOCATE - 160 bytes */
+ struct inode *inode = ll_inode_from_lock(lock);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lustre_handle lockh = { 0 };
+ struct ost_lvb *lvb;
+ __u32 stripe;
+ ENTRY;
+
+ if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV)) {
+ LBUG(); /* not expecting any blocked async locks yet */
+ LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
+ "lock, returning");
+ ldlm_lock_dump(D_OTHER, lock, 0);
+ ldlm_reprocess_all(lock->l_resource);
+ RETURN(0);
+ }
+
+ LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
+
+ stripe = ll_lock_to_stripe_offset(inode, lock);
+ if (stripe < 0)
+ goto iput;
+
+ if (lock->l_lvb_len) {
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ __u64 kms;
+ lvb = lock->l_lvb_data;
+ lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
+
+ down(&inode->i_sem);
+ kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
+ kms = ldlm_extent_shift_kms(NULL, kms);
+ if (lsm->lsm_oinfo[stripe].loi_kms != kms)
+ LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
+ lsm->lsm_oinfo[stripe].loi_kms, kms);
+ lsm->lsm_oinfo[stripe].loi_kms = kms;
+ up(&inode->i_sem);
+ }
+
+iput:
+ iput(inode);
+ wake_up(&lock->l_waitq);
+
+ ldlm_lock2handle(lock, &lockh);
+ ldlm_lock_decref(&lockh, LCK_PR);
+ RETURN(0);
+}
+#endif
+
+static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
+{
+ struct ptlrpc_request *req = reqp;
+ struct inode *inode = ll_inode_from_lock(lock);
+ struct ll_inode_info *lli;
+ struct ost_lvb *lvb;
+ int rc, size = sizeof(*lvb), stripe;
+ ENTRY;
+
+ if (inode == NULL)
+ GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
+ lli = ll_i2info(inode);
+ if (lli == NULL)
+ GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+ if (lli->lli_smd == NULL)
+ GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+
+ /* First, find out which stripe index this lock corresponds to. */
+ stripe = ll_lock_to_stripe_offset(inode, lock);
+ if (stripe < 0)
+ GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+
+ rc = lustre_pack_reply(req, 1, &size, NULL);
+ if (rc) {
+ CERROR("lustre_pack_reply: %d\n", rc);
+ GOTO(iput, rc);
+ }
+
+ lvb = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*lvb));
+ lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe].loi_kms;
+
+ LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64,
+ inode->i_size, stripe, lvb->lvb_size);
+ GOTO(iput, 0);
+ iput:
+ iput(inode);
+
+ out:
+ /* These errors are normal races, so we don't want to fill the console
+ * with messages by calling ptlrpc_error() */
+ if (rc == -ELDLM_NO_LOCK_DATA)
+ lustre_pack_reply(req, 0, NULL, NULL);
+
+ req->rq_status = rc;
+ return rc;
+}
+
+__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
+__u64 lov_merge_blocks(struct lov_stripe_md *lsm);
+__u64 lov_merge_mtime(struct lov_stripe_md *lsm, __u64 current_time);
+
+/* NB: lov_merge_size will prefer locally cached writes if they extend the
+ * file (because it prefers KMS over RSS when larger) */
+int ll_glimpse_size(struct inode *inode, struct ost_lvb *lvb)
{
+ struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- int rc, flags = 0;
+ ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
+ struct lustre_handle lockh = { 0 };
+ int rc, flags = LDLM_FL_HAS_INTENT;
ENTRY;
- LASSERT(lockh->cookie == 0);
+ CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
- /* XXX phil: can we do this? won't it screw the file size up? */
- if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
- (sbi->ll_flags & LL_SBI_NOLCK))
- RETURN(0);
+ rc = obd_enqueue(sbi->ll_osc_exp, lli->lli_smd, LDLM_EXTENT, &policy,
+ LCK_PR, &flags, ll_extent_lock_callback,
+ ldlm_completion_ast, ll_glimpse_callback, inode,
+ sizeof(*lvb), lustre_swab_ost_lvb, &lockh);
+ if (rc != 0) {
+ CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
+ RETURN(rc > 0 ? -EIO : rc);
+ }
- CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
- inode->i_ino, extent->start, extent->end);
+ lvb->lvb_size = lov_merge_size(lli->lli_smd, 0);
+ inode->i_blocks = lvb->lvb_blocks = lov_merge_blocks(lli->lli_smd);
+ //inode->i_mtime = lov_merge_mtime(lli->lli_smd, inode->i_mtime);
+
+ CDEBUG(D_DLMTRACE, "glimpse: size: "LPU64", blocks: "LPU64"\n",
+ lvb->lvb_size, lvb->lvb_blocks);
- rc = obd_enqueue(&sbi->ll_osc_conn, lsm, NULL, LDLM_EXTENT, extent,
- sizeof(extent), mode, &flags, ll_extent_lock_callback,
- inode, lockh);
+ obd_cancel(sbi->ll_osc_exp, lli->lli_smd, LCK_PR, &lockh);
RETURN(rc);
}
-/*
- * this grabs a lock and manually implements behaviour that makes it look like
- * the OST is returning the file size with each lock acquisition.
- */
int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
struct lov_stripe_md *lsm, int mode,
- struct ldlm_extent *extent, struct lustre_handle *lockh)
+ ldlm_policy_data_t *policy, struct lustre_handle *lockh,
+ int ast_flags)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ldlm_extent size_lock;
- struct lustre_handle match_lockh = {0};
- int flags, rc, matched;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int rc;
ENTRY;
- rc = ll_extent_lock_no_validate(fd, inode, lsm, mode, extent, lockh);
- if (rc != ELDLM_OK)
- RETURN(rc);
+ LASSERT(lockh->cookie == 0);
- if (test_bit(LLI_F_HAVE_SIZE_LOCK, &lli->lli_flags))
+ /* XXX phil: can we do this? won't it screw the file size up? */
+ if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
+ (sbi->ll_flags & LL_SBI_NOLCK))
RETURN(0);
- rc = ll_inode_getattr(inode, lsm, fd ? &fd->fd_ost_och : NULL);
- if (rc) {
- ll_extent_unlock(fd, inode, lsm, mode, lockh);
- RETURN(rc);
- }
+ CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
+ inode->i_ino, policy->l_extent.start, policy->l_extent.end);
- size_lock.start = inode->i_size;
- size_lock.end = OBD_OBJECT_EOF;
-
- /* XXX I bet we should be checking the lock ignore flags.. */
- flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED | LDLM_FL_MATCH_DATA;
- matched = obd_match(&ll_i2sbi(inode)->ll_osc_conn, lsm, LDLM_EXTENT,
- &size_lock, sizeof(size_lock), LCK_PR, &flags,
- inode, &match_lockh);
-
- /* hey, alright, we hold a size lock that covers the size we
- * just found, its not going to change for a while.. */
- if (matched == 1) {
- set_bit(LLI_F_HAVE_SIZE_LOCK, &lli->lli_flags);
- obd_cancel(&ll_i2sbi(inode)->ll_osc_conn, lsm, LCK_PR,
- &match_lockh);
- }
+ rc = obd_enqueue(sbi->ll_osc_exp, lsm, LDLM_EXTENT, policy, mode,
+ &ast_flags, ll_extent_lock_callback,
+ ldlm_completion_ast, ll_glimpse_callback, inode,
+ sizeof(struct ost_lvb), lustre_swab_ost_lvb, lockh);
+ if (rc > 0)
+ rc = -EIO;
- RETURN(0);
+ if (policy->l_extent.start == 0 &&
+ policy->l_extent.end == OBD_OBJECT_EOF)
+ inode->i_size = lov_merge_size(lsm, 1);
+
+ //inode->i_mtime = lov_merge_mtime(lsm, inode->i_mtime);
+
+ RETURN(rc);
}
int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm, int mode,
- struct lustre_handle *lockh)
+ struct lov_stripe_md *lsm, int mode,
+ struct lustre_handle *lockh)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
int rc;
(sbi->ll_flags & LL_SBI_NOLCK))
RETURN(0);
- rc = obd_cancel(&sbi->ll_osc_conn, lsm, mode, lockh);
+ rc = obd_cancel(sbi->ll_osc_exp, lsm, mode, lockh);
RETURN(rc);
}
struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm = lli->lli_smd;
struct lustre_handle lockh = { 0 };
- struct ll_read_extent rextent;
- ldlm_error_t err;
+ ldlm_policy_data_t policy;
+ int rc;
ssize_t retval;
+ __u64 kms;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
inode->i_ino, inode->i_generation, inode, count, *ppos);
if (!lsm)
RETURN(0);
- /* grab a -> eof extent to push extending writes out of node's caches
- * so we can see them at the getattr after lock acquisition. this will
- * turn into a seperate [*ppos + count, EOF] 'size intent' lock attempt
- * in the future. */
- rextent.re_extent.start = *ppos;
- rextent.re_extent.end = OBD_OBJECT_EOF;
+ policy.l_extent.start = *ppos;
+ policy.l_extent.end = *ppos + count - 1;
+
+ rc = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
+ (filp->f_flags & O_NONBLOCK) ?
+ LDLM_FL_BLOCK_NOWAIT: 0);
+ if (rc != 0)
+ RETURN(rc);
- err = ll_extent_lock(fd, inode, lsm, LCK_PR, &rextent.re_extent,&lockh);
- if (err != ELDLM_OK)
- RETURN(-ENOLCK);
+ kms = lov_merge_size(lsm, 1);
+ if (*ppos + count - 1 > kms) {
+ /* A glimpse is necessary to determine whether we return a short
+ * read or some zeroes at the end of the buffer */
+ struct ost_lvb lvb;
+ retval = ll_glimpse_size(inode, &lvb);
+ if (retval)
+ goto out;
+ inode->i_size = lvb.lvb_size;
+ } else {
+ inode->i_size = kms;
+ }
- /* XXX tell ll_readpage what pages have a PR lock.. */
- rextent.re_task = current;
- spin_lock(&lli->lli_read_extent_lock);
- list_add(&rextent.re_lli_item, &lli->lli_read_extents);
- spin_unlock(&lli->lli_read_extent_lock);
+ CDEBUG(D_INFO, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
+ inode->i_ino, count, *ppos, inode->i_size);
- CDEBUG(D_INFO, "Reading inode %lu, "LPSZ" bytes, offset %Ld\n",
- inode->i_ino, count, *ppos);
+ /* turn off the kernel's read-ahead */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+ filp->f_ramax = 0;
+#else
+ filp->f_ra.ra_pages = 0;
+#endif
retval = generic_file_read(filp, buf, count, ppos);
- spin_lock(&lli->lli_read_extent_lock);
- list_del(&rextent.re_lli_item);
- spin_unlock(&lli->lli_read_extent_lock);
-
- /* XXX errors? */
+ out:
ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
RETURN(retval);
}
struct inode *inode = file->f_dentry->d_inode;
struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
struct lustre_handle lockh = { 0 };
- struct ldlm_extent extent;
+ ldlm_policy_data_t policy;
loff_t maxbytes = ll_file_maxbytes(inode);
- ldlm_error_t err;
ssize_t retval;
- char should_validate = 1;
+ int nonblock = 0, rc;
ENTRY;
+ if (file->f_flags & O_NONBLOCK)
+ nonblock = LDLM_FL_BLOCK_NOWAIT;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
inode->i_ino, inode->i_generation, inode, count, *ppos);
SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
- /*
- * sleep doing some writeback work of this mount's dirty data
- * if the VM thinks we're low on memory.. other dirtying code
- * paths should think about doing this, too, but they should be
- * careful not to hold locked pages while they do so. like
- * ll_prepare_write. *cough*
- */
- ll_check_dirty(inode->i_sb);
/* POSIX, but surprised the VFS doesn't check this already */
if (count == 0)
RETURN(0);
+ /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
+ * called on the file, don't fail the below assertion (bug 2388). */
+ if (file->f_flags & O_LOV_DELAY_CREATE && lsm == NULL)
+ RETURN(-EBADF);
+
LASSERT(lsm);
if (file->f_flags & O_APPEND) {
- extent.start = 0;
- extent.end = OBD_OBJECT_EOF;
+ policy.l_extent.start = 0;
+ policy.l_extent.end = OBD_OBJECT_EOF;
} else {
- extent.start = *ppos;
- extent.end = *ppos + count - 1;
- /* we really don't care what i_size is if we're doing
- * fully page aligned writes */
- if ((*ppos & ~PAGE_CACHE_MASK) == 0 &&
- (count & ~PAGE_CACHE_MASK) == 0)
- should_validate = 0;
+ policy.l_extent.start = *ppos;
+ policy.l_extent.end = *ppos + count - 1;
}
- if (should_validate)
- err = ll_extent_lock(fd, inode, lsm, LCK_PW, &extent, &lockh);
- else
- err = ll_extent_lock_no_validate(fd, inode, lsm, LCK_PW,
- &extent, &lockh);
- if (err != ELDLM_OK)
- RETURN(-ENOLCK);
+ rc = ll_extent_lock(fd, inode, lsm, LCK_PW, &policy, &lockh, nonblock);
+ if (rc != 0)
+ RETURN(rc);
/* this is ok, g_f_w will overwrite this under i_sem if it races
* with a local truncate, it just makes our maxbyte checking easier */
retval = generic_file_write(file, buf, count, ppos);
out:
- /* XXX errors? */
- lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_WRITE_BYTES,
- retval);
ll_extent_unlock(fd, inode, lsm, LCK_PW, &lockh);
+ lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_WRITE_BYTES,
+ retval > 0 ? retval : 0);
RETURN(retval);
}
-static int ll_lov_setstripe(struct inode *inode, struct file *file,
- unsigned long arg)
+static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
+ unsigned long arg)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_export *exp = ll_i2obdexp(inode);
+ struct ll_recreate_obj ucreatp;
+ struct obd_trans_info oti = { 0 };
+ struct obdo *oa = NULL;
+ int lsm_size;
+ int rc = 0;
+ struct lov_stripe_md *lsm, *lsm2;
+ ENTRY;
+
+ if (!capable (CAP_SYS_ADMIN))
+ RETURN(-EPERM);
+
+ rc = copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
+ sizeof(struct ll_recreate_obj));
+ if (rc) {
+ RETURN(-EFAULT);
+ }
+ oa = obdo_alloc();
+ if (oa == NULL) {
+ RETURN(-ENOMEM);
+ }
+
+ down(&lli->lli_open_sem);
+ lsm = lli->lli_smd;
+ if (lsm == NULL) {
+ up(&lli->lli_open_sem);
+ obdo_free(oa);
+ RETURN (-ENOENT);
+ }
+ lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
+ (lsm->lsm_stripe_count));
+
+ OBD_ALLOC(lsm2, lsm_size);
+ if (lsm2 == NULL) {
+ up(&lli->lli_open_sem);
+ obdo_free(oa);
+ RETURN(-ENOMEM);
+ }
+
+ oa->o_id = ucreatp.lrc_id;
+ oa->o_nlink = ucreatp.lrc_ost_idx;
+ oa->o_gr = ucreatp.lrc_group;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
+ oa->o_flags |= OBD_FL_RECREATE_OBJS;
+ obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+
+ oti.oti_objid = NULL;
+ memcpy(lsm2, lsm, lsm_size);
+ rc = obd_create(exp, oa, &lsm2, &oti);
+
+ up(&lli->lli_open_sem);
+ OBD_FREE(lsm2, lsm_size);
+ obdo_free(oa);
+ RETURN (rc);
+}
+
+static int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
+ int flags, struct lov_user_md *lum,
+ int lum_size)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct lustre_handle *conn = ll_i2obdconn(inode);
+ struct file *f;
+ struct obd_export *exp = ll_i2obdexp(inode);
struct lov_stripe_md *lsm;
- int rc;
+ struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
+ struct ptlrpc_request *req = NULL;
+ int rc = 0;
+ struct lustre_md md;
ENTRY;
down(&lli->lli_open_sem);
up(&lli->lli_open_sem);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
- /* If we haven't already done the open, do so now */
- if (file->f_flags & O_LOV_DELAY_CREATE) {
- int rc2 = ll_osc_open(conn, inode, file, lsm);
- if (rc2)
- RETURN(rc2);
- }
-
RETURN(-EEXIST);
}
- rc = obd_iocontrol(LL_IOC_LOV_SETSTRIPE, conn, 0, &lsm, (void *)arg);
- if (rc) {
- up(&lli->lli_open_sem);
- RETURN(rc);
- }
- rc = ll_create_obj(conn, inode, file, lsm);
+ f = get_empty_filp();
+ if (!f)
+ GOTO(out, rc = -ENOMEM);
+
+ f->f_dentry = file->f_dentry;
+ f->f_vfsmnt = file->f_vfsmnt;
+
+ rc = ll_intent_file_open(f, lum, lum_size, &oit);
+ if (rc)
+ GOTO(out, rc);
+ if (it_disposition(&oit, DISP_LOOKUP_NEG))
+ GOTO(out, rc = -ENOENT);
+ req = oit.d.lustre.it_data;
+ rc = oit.d.lustre.it_status;
+
+ if (rc < 0)
+ GOTO(out, rc);
+
+ rc = mdc_req2lustre_md(ll_i2mdcexp(inode), req, 1, exp, &md);
+ if (rc)
+ GOTO(out, rc);
+ ll_update_inode(f->f_dentry->d_inode, &md);
+
+ rc = ll_local_open(f, &oit);
+ if (rc)
+ GOTO(out, rc);
+ ll_intent_release(&oit);
+
+ rc = ll_file_release(f->f_dentry->d_inode, f);
+
+ out:
+ if (f)
+ put_filp(f);
up(&lli->lli_open_sem);
+ if (req != NULL)
+ ptlrpc_req_finished(req);
+ RETURN(rc);
+}
+
+static int ll_lov_setea(struct inode *inode, struct file *file,
+ unsigned long arg)
+{
+ int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
+ struct lov_user_md *lump;
+ int lum_size = sizeof(struct lov_user_md) +
+ sizeof(struct lov_user_ost_data);
+ int rc;
+ ENTRY;
+
+ if (!capable (CAP_SYS_ADMIN))
+ RETURN(-EPERM);
+ OBD_ALLOC(lump, lum_size);
+ if (lump == NULL) {
+ RETURN(-ENOMEM);
+ }
+ rc = copy_from_user(lump, (struct lov_user_md *)arg, lum_size);
if (rc) {
- obd_free_memmd(conn, &lsm);
- RETURN(rc);
+ OBD_FREE(lump, lum_size);
+ RETURN(-EFAULT);
}
- rc = ll_osc_open(conn, inode, file, lli->lli_smd);
+
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
+
+ OBD_FREE(lump, lum_size);
+ RETURN(rc);
+}
+
+static int ll_lov_setstripe(struct inode *inode, struct file *file,
+ unsigned long arg)
+{
+ struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
+ int rc;
+ int flags = FMODE_WRITE;
+ ENTRY;
+
+ /* Bug 1152: copy properly when this is no longer true */
+ LASSERT(sizeof(lum) == sizeof(*lump));
+ LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
+ rc = copy_from_user(&lum, lump, sizeof(lum));
+ if (rc)
+ RETURN(-EFAULT);
+
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, &lum, sizeof(lum));
RETURN(rc);
}
static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
{
struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
- struct lustre_handle *conn = ll_i2obdconn(inode);
if (!lsm)
RETURN(-ENODATA);
- return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, conn, 0, lsm, (void *)arg);
+ return obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2obdexp(inode), 0, lsm,
+ (void *)arg);
+}
+
+static int ll_get_grouplock(struct inode *inode, struct file *file,
+ unsigned long arg)
+{
+ struct ll_file_data *fd = file->private_data;
+ ldlm_policy_data_t policy = { .l_extent = { .start = 0,
+ .end = OBD_OBJECT_EOF}};
+ struct lustre_handle lockh = { 0 };
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ int flags = 0, rc;
+ ENTRY;
+
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ RETURN(-EINVAL);
+ }
+
+ policy.l_extent.gid = arg;
+ if (file->f_flags & O_NONBLOCK)
+ flags = LDLM_FL_BLOCK_NOWAIT;
+
+ rc = ll_extent_lock(fd, inode, lsm, LCK_GROUP, &policy, &lockh, flags);
+ if (rc != 0)
+ RETURN(rc);
+
+ fd->fd_flags |= LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK;
+ fd->fd_gid = arg;
+ memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
+
+ RETURN(0);
+}
+
+static int ll_put_grouplock(struct inode *inode, struct file *file,
+ unsigned long arg)
+{
+ struct ll_file_data *fd = file->private_data;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lov_stripe_md *lsm = lli->lli_smd;
+ int rc;
+ ENTRY;
+
+ if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ /* Ugh, it's already unlocked. */
+ RETURN(-EINVAL);
+ }
+
+ if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
+ RETURN(-EINVAL);
+
+ fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
+
+ rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
+ if (rc)
+ RETURN(rc);
+
+ fd->fd_gid = 0;
+ memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
+
+ RETURN(0);
}
int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
struct ll_file_data *fd = file->private_data;
- struct lustre_handle *conn;
int flags;
+ ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%u\n", inode->i_ino,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
inode->i_generation, inode, cmd);
if (_IOC_TYPE(cmd) == 'T') /* tty ioctls */
- return -ENOTTY;
+ RETURN(-ENOTTY);
lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_IOCTL);
switch(cmd) {
* not abused, and to handle any flag side effects.
*/
if (get_user(flags, (int *) arg))
- return -EFAULT;
+ RETURN(-EFAULT);
if (cmd == LL_IOC_SETFLAGS)
fd->fd_flags |= flags;
else
fd->fd_flags &= ~flags;
- return 0;
+ RETURN(0);
case LL_IOC_LOV_SETSTRIPE:
- return ll_lov_setstripe(inode, file, arg);
+ RETURN(ll_lov_setstripe(inode, file, arg));
+ case LL_IOC_LOV_SETEA:
+ RETURN(ll_lov_setea(inode, file, arg));
case LL_IOC_LOV_GETSTRIPE:
- return ll_lov_getstripe(inode, arg);
-
+ RETURN(ll_lov_getstripe(inode, arg));
+ case LL_IOC_RECREATE_OBJ:
+ RETURN(ll_lov_recreate_obj(inode, file, arg));
+ case EXT3_IOC_GETFLAGS:
+ case EXT3_IOC_SETFLAGS:
+ RETURN( ll_iocontrol(inode, file, cmd, arg) );
+ case LL_IOC_GROUP_LOCK:
+ RETURN(ll_get_grouplock(inode, file, arg));
+ case LL_IOC_GROUP_UNLOCK:
+ RETURN(ll_put_grouplock(inode, file, arg));
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
* network encode the arg field.
- case EXT2_IOC_GETFLAGS:
- case EXT2_IOC_SETFLAGS:
case EXT2_IOC_GETVERSION_OLD:
case EXT2_IOC_GETVERSION_NEW:
case EXT2_IOC_SETVERSION_OLD:
case EXT2_IOC_SETVERSION_NEW:
*/
default:
- conn = ll_i2obdconn(inode);
- return obd_iocontrol(cmd, conn, 0, NULL, (void *)arg);
+ RETURN( obd_iocontrol(cmd, ll_i2obdexp(inode), 0, NULL,
+ (void *)arg) );
}
}
lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_LLSEEK);
if (origin == 2) { /* SEEK_END */
- ldlm_error_t err;
- struct ldlm_extent extent = {0, OBD_OBJECT_EOF};
- err = ll_extent_lock(fd, inode, lsm, LCK_PR, &extent, &lockh);
- if (err != ELDLM_OK)
- RETURN(-ENOLCK);
+ int nonblock = 0, rc;
+ ldlm_policy_data_t policy = { .l_extent = {0, OBD_OBJECT_EOF }};
+
+ if (file->f_flags & O_NONBLOCK)
+ nonblock = LDLM_FL_BLOCK_NOWAIT;
+
+ rc = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
+ nonblock);
+ if (rc != 0)
+ RETURN(rc);
offset += inode->i_size;
} else if (origin == 1) { /* SEEK_CUR */
int ll_fsync(struct file *file, struct dentry *dentry, int data)
{
struct inode *inode = dentry->d_inode;
- int rc;
+ struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
+ struct ll_fid fid;
+ struct ptlrpc_request *req;
+ int rc, err;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_FSYNC);
- /*
- * filemap_fdata{sync,wait} are also called at PW lock cancelation so
- * we know that they can only find data to writeback here if we are
- * still holding the PW lock that covered the dirty pages. XXX we
- * should probably get a reference on it, though, just to be clear.
- */
- rc = filemap_fdatasync(inode->i_mapping);
- if (rc == 0)
- rc = filemap_fdatawait(inode->i_mapping);
+ /* fsync's caller has already called _fdata{sync,write}, we want
+ * that IO to finish before calling the osc and mdc sync methods */
+ rc = filemap_fdatawait(inode->i_mapping);
+
+ ll_inode2fid(&fid, inode);
+ err = md_sync(ll_i2sbi(inode)->ll_mdc_exp, &fid, &req);
+ if (!rc)
+ rc = err;
+ if (!err)
+ ptlrpc_req_finished(req);
+
+ if (data && lsm) {
+ struct obdo *oa = obdo_alloc();
+
+ if (!oa)
+ RETURN(rc ? rc : -ENOMEM);
+
+ oa->o_id = lsm->lsm_object_id;
+ oa->o_gr = lsm->lsm_object_gr;
+ oa->o_valid = OBD_MD_FLID;
+ obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLGROUP);
+
+ err = obd_sync(ll_i2sbi(inode)->ll_osc_exp, oa, lsm,
+ 0, OBD_OBJECT_EOF);
+ if (!rc)
+ rc = err;
+ obdo_free(oa);
+ }
+
+ RETURN(rc);
+}
+
+int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obd_device *obddev;
+ struct ldlm_res_id res_id =
+ { .name = {inode->i_ino, inode->i_generation, LDLM_FLOCK} };
+ struct lustre_handle lockh = {0};
+ ldlm_policy_data_t flock;
+ ldlm_mode_t mode = 0;
+ int flags = 0;
+ int rc;
+ ENTRY;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
+ inode->i_ino, file_lock);
+
+ flock.l_flock.pid = file_lock->fl_pid;
+ flock.l_flock.start = file_lock->fl_start;
+ flock.l_flock.end = file_lock->fl_end;
+
+ switch (file_lock->fl_type) {
+ case F_RDLCK:
+ mode = LCK_PR;
+ break;
+ case F_UNLCK:
+ /* An unlock request may or may not have any relation to
+ * existing locks so we may not be able to pass a lock handle
+ * via a normal ldlm_lock_cancel() request. The request may even
+ * unlock a byte range in the middle of an existing lock. In
+ * order to process an unlock request we need all of the same
+ * information that is given with a normal read or write record
+ * lock request. To avoid creating another ldlm unlock (cancel)
+ * message we'll treat a LCK_NL flock request as an unlock. */
+ mode = LCK_NL;
+ break;
+ case F_WRLCK:
+ mode = LCK_PW;
+ break;
+ default:
+ CERROR("unknown fcntl lock type: %d\n", file_lock->fl_type);
+ LBUG();
+ }
+
+ switch (cmd) {
+ case F_SETLKW:
+#ifdef F_SETLKW64
+ case F_SETLKW64:
+#endif
+ flags = 0;
+ break;
+ case F_SETLK:
+#ifdef F_SETLK64
+ case F_SETLK64:
+#endif
+ flags = LDLM_FL_BLOCK_NOWAIT;
+ break;
+ case F_GETLK:
+#ifdef F_GETLK64
+ case F_GETLK64:
+#endif
+ flags = LDLM_FL_TEST_LOCK;
+ /* Save the old mode so that if the mode in the lock changes we
+ * can decrement the appropriate reader or writer refcount. */
+ file_lock->fl_type = mode;
+ break;
+ default:
+ CERROR("unknown fcntl lock command: %d\n", cmd);
+ LBUG();
+ }
+
+ CDEBUG(D_DLMTRACE, "inode=%lu, pid="LPU64", flags=%#x, mode=%u, "
+ "start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
+ flags, mode, flock.l_flock.start, flock.l_flock.end);
+
+ obddev = md_get_real_obd(sbi->ll_mdc_exp, NULL, 0);
+ rc = ldlm_cli_enqueue(obddev->obd_self_export, NULL,
+ obddev->obd_namespace,
+ res_id, LDLM_FLOCK, &flock, mode, &flags,
+ NULL, ldlm_flock_completion_ast, NULL, file_lock,
+ NULL, 0, NULL, &lockh);
RETURN(rc);
}
int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
{
struct inode *inode = dentry->d_inode;
+ struct ll_inode_info *lli;
struct lov_stripe_md *lsm;
+ struct ll_fid fid;
+ int rc;
ENTRY;
if (!inode) {
CERROR("REPORT THIS LINE TO PETER\n");
RETURN(0);
}
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n",
- inode->i_ino, inode->i_generation, inode, dentry->d_name.name);
+ ll_inode2fid(&fid, inode);
+ lli = ll_i2info(inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s,intent=%s\n",
+ inode->i_ino, inode->i_generation, inode, dentry->d_name.name,
+ LL_IT2STR(it));
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_REVALIDATE);
#endif
- /* this is very tricky. it is unsafe to call ll_have_md_lock
- when we have a referenced lock: because it may cause an RPC
- below when the lock is marked CB_PENDING. That RPC may not
- go out because someone else may be in another RPC waiting for
- that lock*/
- if (!(it && it->it_lock_mode) && !ll_have_md_lock(dentry)) {
- struct lustre_md md;
+ if (!md_valid_attrs(ll_i2mdcexp(inode), &fid)) {
struct ptlrpc_request *req = NULL;
struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
struct ll_fid fid;
unsigned long valid = 0;
- int rc;
int ealen = 0;
if (S_ISREG(inode->i_mode)) {
- ealen = obd_size_diskmd(&sbi->ll_osc_conn, NULL);
+ ealen = obd_size_diskmd(sbi->ll_osc_exp, NULL);
valid |= OBD_MD_FLEASIZE;
}
ll_inode2fid(&fid, inode);
- rc = mdc_getattr(&sbi->ll_mdc_conn, &fid, valid, ealen, &req);
+ rc = md_getattr(sbi->ll_mdc_exp, &fid, valid, ealen, &req);
if (rc) {
CERROR("failure %d inode %lu\n", rc, inode->i_ino);
RETURN(-abs(rc));
}
- rc = mdc_req2lustre_md(req, 0, &sbi->ll_osc_conn, &md);
-
- /* XXX Too paranoid? */
- if ((md.body->valid ^ valid) & OBD_MD_FLEASIZE)
- CERROR("Asked for %s eadata but got %s\n",
- (valid & OBD_MD_FLEASIZE) ? "some" : "no",
- (md.body->valid & OBD_MD_FLEASIZE) ? "some":
- "none");
+ rc = ll_prep_inode(sbi->ll_osc_exp, sbi->ll_mdc_exp,
+ &inode, req, 0, NULL);
if (rc) {
ptlrpc_req_finished(req);
RETURN(rc);
}
-
- ll_update_inode(inode, md.body, md.lsm);
- if (md.lsm != NULL && ll_i2info(inode)->lli_smd != md.lsm)
- obd_free_memmd(&sbi->ll_osc_conn, &md.lsm);
-
ptlrpc_req_finished(req);
}
- lsm = ll_i2info(inode)->lli_smd;
- if (!lsm) /* object not yet allocated, don't validate size */
+ lsm = lli->lli_smd;
+ if (lsm == NULL) /* object not yet allocated, don't validate size */
RETURN(0);
- /*
- * unfortunately stat comes in through revalidate and we don't
- * differentiate this use from initial instantiation. we're
- * also being wildly conservative and flushing write caches
- * so that stat really returns the proper size.
- */
+ /* ll_glimpse_size will prefer locally cached writes if they extend
+ * the file */
{
- struct ldlm_extent extent = {0, OBD_OBJECT_EOF};
- struct lustre_handle lockh = {0};
- ldlm_error_t err;
-
- err = ll_extent_lock(NULL, inode, lsm, LCK_PR, &extent, &lockh);
- if (err != ELDLM_OK)
- RETURN(err);
+ struct ost_lvb lvb;
- ll_extent_unlock(NULL, inode, lsm, LCK_PR, &lockh);
+ rc = ll_glimpse_size(inode, &lvb);
+ inode->i_size = lvb.lvb_size;
}
- RETURN(0);
+ RETURN(rc);
}
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
int ll_getattr(struct vfsmount *mnt, struct dentry *de,
- struct lookup_intent *it,
- struct kstat *stat)
+ struct lookup_intent *it, struct kstat *stat)
{
int res = 0;
struct inode *inode = de->d_inode;
#endif
struct file_operations ll_file_operations = {
- read: ll_file_read,
- write: ll_file_write,
- ioctl: ll_file_ioctl,
- open: ll_file_open,
- release: ll_file_release,
- mmap: generic_file_mmap,
- llseek: ll_file_seek,
- fsync: ll_fsync,
+ .read = ll_file_read,
+ .write = ll_file_write,
+ .ioctl = ll_file_ioctl,
+ .open = ll_file_open,
+ .release = ll_file_release,
+ .mmap = generic_file_mmap,
+ .llseek = ll_file_seek,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+ .sendfile = generic_file_sendfile,
+#endif
+ .fsync = ll_fsync,
+ .lock = ll_file_flock
};
struct inode_operations ll_file_inode_operations = {
- setattr_raw: ll_setattr_raw,
- setattr: ll_setattr,
- truncate: ll_truncate,
+ .setattr_raw = ll_setattr_raw,
+ .setattr = ll_setattr,
+ .truncate = ll_truncate,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
- getattr_it: ll_getattr,
+ .getattr_it = ll_getattr,
#else
- revalidate_it: ll_inode_revalidate_it,
+ .revalidate_it = ll_inode_revalidate_it,
#endif
};
-struct inode_operations ll_special_inode_operations = {
- setattr_raw: ll_setattr_raw,
- setattr: ll_setattr,
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
- getattr_it: ll_getattr,
-#else
- revalidate_it: ll_inode_revalidate_it,
-#endif
-};