#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/lustre_dlm.h>
#include <linux/lustre_lite.h>
-#include <linux/obd_lov.h> /* for lov_mds_md_size() in lov_setstripe() */
-#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/lustre_compat25.h>
#endif
#include "llite_internal.h"
+#include <linux/obd_lov.h>
int ll_mdc_close(struct obd_export *mdc_exp, struct inode *inode,
- struct file *file)
+ struct file *file)
{
struct ll_file_data *fd = file->private_data;
struct ptlrpc_request *req = NULL;
struct obd_client_handle *och = &fd->fd_mds_och;
- struct ll_inode_info *lli = ll_i2info(inode);
struct obdo obdo;
- int rc, valid;
+ int rc;
ENTRY;
- valid = OBD_MD_FLID;
- if (test_bit(LLI_F_HAVE_OST_SIZE_LOCK, &lli->lli_flags))
- valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+ /* clear group lock, if present */
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
+ fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
+ rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP,
+ &fd->fd_cwlockh);
+ }
- memset(&obdo, 0, sizeof(obdo));
obdo.o_id = inode->i_ino;
- obdo.o_mode = inode->i_mode;
- obdo.o_size = inode->i_size;
- obdo.o_blocks = inode->i_blocks;
+ obdo.o_valid = OBD_MD_FLID;
+ obdo_from_inode(&obdo, inode, OBD_MD_FLTYPE | OBD_MD_FLMODE |
+ OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME);
if (0 /* ll_is_inode_dirty(inode) */) {
obdo.o_flags = MDS_BFLAG_UNCOMMITTED_WRITES;
- valid |= OBD_MD_FLFLAGS;
+ obdo.o_valid |= OBD_MD_FLFLAGS;
}
- obdo.o_valid = valid;
- rc = mdc_close(mdc_exp, &obdo, och, &req);
+ obdo.o_mds = ll_i2info(inode)->lli_mds;
+ rc = md_close(mdc_exp, &obdo, och, &req);
+
if (rc == EAGAIN) {
/* We are the last writer, so the MDS has instructed us to get
* the file size and any write cookies, then close again. */
inode->i_ino, rc);
}
if (rc == 0) {
- rc = ll_objects_destroy(req, file->f_dentry->d_inode);
+ rc = ll_objects_destroy(req, file->f_dentry->d_inode, 1);
if (rc)
CERROR("inode %lu ll_objects destroy: rc = %d\n",
inode->i_ino, rc);
}
- mdc_clear_open_replay_data(och);
+ mdc_clear_open_replay_data(mdc_exp, och);
ptlrpc_req_finished(req);
och->och_fh.cookie = DEAD_HANDLE_MAGIC;
file->private_data = NULL;
int rc;
ENTRY;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%u/%lu/%u(%p)\n",
+ ll_i2info(inode)->lli_mds, inode->i_ino,
inode->i_generation, inode);
/* don't do anything for / */
ll_prepare_mdc_op_data(&data, parent->d_inode, NULL, name, len, O_RDWR);
- rc = mdc_enqueue(sbi->ll_mdc_exp, LDLM_PLAIN, itp, LCK_PR, &data,
- &lockh, lmm, lmmsize, ldlm_completion_ast,
- ll_mdc_blocking_ast, parent->d_inode);
- if (rc < 0)
+ rc = md_enqueue(sbi->ll_mdc_exp, LDLM_IBITS, itp, LCK_PR, &data,
+ &lockh, lmm, lmmsize, ldlm_completion_ast,
+ ll_mdc_blocking_ast, NULL);
+ if (rc == 0) {
+ if (itp->d.lustre.it_lock_mode)
+ memcpy(&itp->d.lustre.it_lock_handle,
+ &lockh, sizeof(lockh));
+ } else if (rc < 0) {
CERROR("lock enqueue: err: %d\n", rc);
+ }
+
RETURN(rc);
}
{
struct ptlrpc_request *req = it->d.lustre.it_data;
struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct obd_export *mdc_exp = ll_i2mdcexp(file->f_dentry->d_inode);
struct ll_file_data *fd;
struct mds_body *body;
ENTRY;
memcpy(&fd->fd_mds_och.och_fh, &body->handle, sizeof(body->handle));
fd->fd_mds_och.och_magic = OBD_CLIENT_HANDLE_MAGIC;
file->private_data = fd;
+ ll_readahead_init(file->f_dentry->d_inode, &fd->fd_ras);
lli->lli_io_epoch = body->io_epoch;
- mdc_set_open_replay_data(&fd->fd_mds_och, it->d.lustre.it_data);
+ mdc_set_open_replay_data(mdc_exp, &fd->fd_mds_och, it->d.lustre.it_data);
RETURN(0);
}
int ll_file_open(struct inode *inode, struct file *file)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct lookup_intent *it;
+ struct lookup_intent *it, oit = { .it_op = IT_OPEN,
+ .it_flags = file->f_flags };
struct lov_stripe_md *lsm;
struct ptlrpc_request *req;
int rc = 0;
it = file->f_it;
- if (!it->d.lustre.it_disposition) {
- struct lookup_intent oit = { .it_op = IT_OPEN,
- .it_flags = file->f_flags };
+ if (!it || !it->d.lustre.it_disposition) {
it = &oit;
rc = ll_intent_file_open(file, NULL, 0, it);
if (rc)
memset(oa, 0, sizeof *oa);
oa->o_id = lsm->lsm_object_id;
+ oa->o_gr = lsm->lsm_object_gr;
oa->o_mode = S_IFREG;
oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLSIZE |
OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME;
+ OBD_MD_FLCTIME | OBD_MD_FLGROUP;
set = ptlrpc_prep_set();
if (set == NULL) {
if (rc)
RETURN(rc);
- oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
+ oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
OBD_MD_FLCTIME | OBD_MD_FLSIZE);
RETURN(0);
}
}
}
-/* Flush the page cache for an extent as its canceled. No one can dirty the
- * extent until we've finished our work and they can enqueue another lock.
- * The DLM protects us from ll_file_read/write here, but other kernel actors
- * could have pages locked */
-void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
- struct ldlm_lock *lock)
+/* Map a DLM extent lock on a single OSC object back to the index of the
+ * stripe that object occupies in this file's LOV stripe metadata.
+ *
+ * Returns the stripe index (>= 0) on success, or a negative error code
+ * (-ELDLM_NO_LOCK_DATA if the lock's resource name does not match the
+ * object id/group recorded for the computed stripe). */
+static int ll_lock_to_stripe_offset(struct inode *inode, struct ldlm_lock *lock)
{
-        struct ldlm_extent *extent = &lock->l_policy_data.l_extent;
-        struct obd_export *exp = ll_i2obdexp(inode);
        struct ll_inode_info *lli = ll_i2info(inode);
-        unsigned long start, end, i;
-        struct page *page;
-        int rc, discard = lock->l_flags & LDLM_FL_DISCARD_DATA;
+        struct lov_stripe_md *lsm = lli->lli_smd;
+        struct obd_export *exp = ll_i2obdexp(inode);
+        /* ad-hoc key understood by the LOV's obd_get_info() handler */
+        struct {
+                char name[16];
+                struct ldlm_lock *lock;
+                struct lov_stripe_md *lsm;
+        } key = { .name = "lock_to_stripe", .lock = lock, .lsm = lsm };
+        __u32 stripe, vallen = sizeof(stripe);
+        int rc;
        ENTRY;
-        CDEBUG(D_INODE, "obdo %lu inode %p ["LPU64"->"LPU64"] size: %llu\n",
-               inode->i_ino, inode, extent->start, extent->end, inode->i_size);
+        /* a single-stripe file trivially maps to stripe 0 */
+        if (lsm->lsm_stripe_count == 1)
+                GOTO(check, stripe = 0);
+
+        /* get our offset in the lov */
+        rc = obd_get_info(exp, sizeof(key), &key, &vallen, &stripe);
+        if (rc != 0) {
+                CERROR("obd_get_info: rc = %d\n", rc);
+                RETURN(rc);
+        }
+        LASSERT(stripe < lsm->lsm_stripe_count);
+
+check:
+        /* sanity: the lock's resource must carry the object id (name[0])
+         * and group (name[2]) recorded for this stripe */
+        if (lsm->lsm_oinfo[stripe].loi_id != lock->l_resource->lr_name.name[0]||
+            lsm->lsm_oinfo[stripe].loi_gr != lock->l_resource->lr_name.name[2]){
+                LDLM_ERROR(lock, "resource doesn't match object "LPU64"/"LPU64
+                           " inode=%lu/%u (%p)\n",
+                           lsm->lsm_oinfo[stripe].loi_id,
+                           lsm->lsm_oinfo[stripe].loi_gr,
+                           inode->i_ino, inode->i_generation, inode);
+                RETURN(-ELDLM_NO_LOCK_DATA);
+        }
+
+        RETURN(stripe);
+}
- start = extent->start >> PAGE_CACHE_SHIFT;
- end = (extent->end >> PAGE_CACHE_SHIFT) + 1;
- if ((end << PAGE_CACHE_SHIFT) < extent->end)
+/* Flush the page cache for an extent as its canceled. When we're on an LOV,
+ * we get a lock cancellation for each stripe, so we have to map the obd's
+ * region back onto the stripes in the file that it held.
+ *
+ * No one can dirty the extent until we've finished our work and they can
+ * enqueue another lock. The DLM protects us from ll_file_read/write here,
+ * but other kernel actors could have pages locked.
+ *
+ * Called with the DLM lock held. */
+void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
+ struct ldlm_lock *lock, __u32 stripe)
+{
+ ldlm_policy_data_t tmpex;
+ unsigned long start, end, count, skip, i, j;
+ struct page *page;
+ int rc, rc2, discard = lock->l_flags & LDLM_FL_DISCARD_DATA;
+ struct lustre_handle lockh;
+ ENTRY;
+
+ memcpy(&tmpex, &lock->l_policy_data, sizeof(tmpex));
+ CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
+ inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
+ inode->i_size);
+
+ /* our locks are page granular thanks to osc_enqueue, we invalidate the
+ * whole page. */
+ LASSERT((tmpex.l_extent.start & ~PAGE_CACHE_MASK) == 0);
+ LASSERT(((tmpex.l_extent.end + 1) & ~PAGE_CACHE_MASK) == 0);
+
+ count = ~0;
+ skip = 0;
+ start = tmpex.l_extent.start >> PAGE_CACHE_SHIFT;
+ end = tmpex.l_extent.end >> PAGE_CACHE_SHIFT;
+ if (lsm->lsm_stripe_count > 1) {
+ count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
+ skip = (lsm->lsm_stripe_count - 1) * count;
+ start += start/count * skip + stripe * count;
+ if (end != ~0)
+ end += end/count * skip + stripe * count;
+ }
+ if (end < tmpex.l_extent.end >> PAGE_CACHE_SHIFT)
end = ~0;
i = (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
- if (end >= i)
- clear_bit(LLI_F_HAVE_OST_SIZE_LOCK,
- &(ll_i2info(inode)->lli_flags));
if (i < end)
end = i;
- CDEBUG(D_INODE, "walking page indices start: %lu end: %lu\n", start,
- end);
+ CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
+ "count: %lu skip: %lu end: %lu%s\n", start, start % count,
+ count, skip, end, discard ? " (DISCARDING)" : "");
+
+ /* this is the simplistic implementation of page eviction at
+ * cancelation. It is careful to get races with other page
+ * lockers handled correctly. fixes from bug 20 will make it
+ * more efficient by associating locks with pages and with
+ * batching writeback under the lock explicitly. */
+ for (i = start, j = start % count; i <= end;
+ j++, i++, tmpex.l_extent.start += PAGE_CACHE_SIZE) {
+ if (j == count) {
+ CDEBUG(D_PAGE, "skip index %lu to %lu\n", i, i + skip);
+ i += skip;
+ j = 0;
+ if (i > end)
+ break;
+ }
+ LASSERTF(tmpex.l_extent.start< lock->l_policy_data.l_extent.end,
+ LPU64" >= "LPU64" start %lu i %lu end %lu\n",
+ tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
+ start, i, end);
- for (i = start; i < end; i++) {
- ll_pgcache_lock(inode->i_mapping);
- if (list_empty(&inode->i_mapping->dirty_pages) &&
- list_empty(&inode->i_mapping->clean_pages) &&
- list_empty(&inode->i_mapping->locked_pages)) {
- CDEBUG(D_INODE, "nothing left\n");
- ll_pgcache_unlock(inode->i_mapping);
+ if (!mapping_has_pages(inode->i_mapping)) {
+ CDEBUG(D_INODE|D_PAGE, "nothing left\n");
break;
}
- ll_pgcache_unlock(inode->i_mapping);
- conditional_schedule();
+ cond_resched();
page = find_get_page(inode->i_mapping, i);
if (page == NULL)
continue;
-
- LL_CDEBUG_PAGE(page, "locking\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "lock page idx %lu ext "LPU64"\n",
+ i, tmpex.l_extent.start);
lock_page(page);
/* page->mapping to check with racing against teardown */
- if (page->mapping && PageDirty(page) && !discard) {
- ClearPageDirty(page);
- LL_CDEBUG_PAGE(page, "found dirty\n");
- ll_pgcache_lock(inode->i_mapping);
- list_del(&page->list);
- list_add(&page->list, &inode->i_mapping->locked_pages);
- ll_pgcache_unlock(inode->i_mapping);
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- rc = inode->i_mapping->a_ops->writepage(page);
-#else
- rc = inode->i_mapping->a_ops->writepage(page, NULL);
-#endif
- if (rc != 0) {
+ if (!discard && clear_page_dirty_for_io(page)) {
+ rc = ll_call_writepage(inode, page);
+ if (rc != 0)
CERROR("writepage of page %p failed: %d\n",
page, rc);
- } else {
- lock_page(page); /* wait for io to complete */
- }
+ /* either waiting for io to complete or reacquiring
+ * the lock that the failed writepage released */
+ lock_page(page);
}
- /* checking again to account for writeback's lock_page() */
- if (page->mapping != NULL) {
- LL_CDEBUG_PAGE(page, "truncating\n");
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- truncate_complete_page(page);
-#else
- truncate_complete_page(page->mapping, page);
-#endif
+ tmpex.l_extent.end = tmpex.l_extent.start + PAGE_CACHE_SIZE - 1;
+ /* check to see if another DLM lock covers this page */
+ rc2 = ldlm_lock_match(lock->l_resource->lr_namespace,
+ LDLM_FL_BLOCK_GRANTED|LDLM_FL_CBPENDING |
+ LDLM_FL_TEST_LOCK,
+ &lock->l_resource->lr_name, LDLM_EXTENT,
+ &tmpex, LCK_PR | LCK_PW, &lockh);
+ if (rc2 == 0 && page->mapping != NULL) {
+ // checking again to account for writeback's lock_page()
+ LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
+ ll_truncate_complete_page(page);
}
unlock_page(page);
page_cache_release(page);
}
-
- if (test_bit(LLI_F_PREFER_EXTENDED_SIZE, &lli->lli_flags)) {
- rc = obd_lock_contains(exp, lsm, lock, inode->i_size - 1);
- if (rc != 0) {
- if (rc < 0)
- CERROR("obd_lock_contains: rc = %d\n", rc);
- clear_bit(LLI_F_PREFER_EXTENDED_SIZE, &lli->lli_flags);
- }
- }
-
+ LASSERTF(tmpex.l_extent.start <=
+ (lock->l_policy_data.l_extent.end == ~0ULL ? ~0ULL :
+ lock->l_policy_data.l_extent.end + 1),
+ "loop too long "LPU64" > "LPU64" start %lu i %lu end %lu\n",
+ tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
+ start, i, end);
EXIT;
}
int rc;
ENTRY;
-
if ((unsigned long)data > 0 && (unsigned long)data < 0x1000) {
LDLM_ERROR(lock, "cancelling lock with bad data %p", data);
LBUG();
CERROR("ldlm_cli_cancel failed: %d\n", rc);
break;
case LDLM_CB_CANCELING: {
- struct inode *inode = ll_inode_from_lock(lock);
+ struct inode *inode;
struct ll_inode_info *lli;
+ struct lov_stripe_md *lsm;
+ __u32 stripe;
+ __u64 kms;
- if (!inode)
- RETURN(0);
- lli= ll_i2info(inode);
- if (!lli)
- RETURN(0);
- if (!lli->lli_smd)
+ /* This lock wasn't granted, don't try to evict pages */
+ if (lock->l_req_mode != lock->l_granted_mode)
RETURN(0);
- ll_pgcache_remove_extent(inode, lli->lli_smd, lock);
+ inode = ll_inode_from_lock(lock);
+ if (inode == NULL)
+ RETURN(0);
+ lli = ll_i2info(inode);
+ if (lli == NULL)
+ goto iput;
+ if (lli->lli_smd == NULL)
+ goto iput;
+ lsm = lli->lli_smd;
+
+ stripe = ll_lock_to_stripe_offset(inode, lock);
+ if (stripe < 0)
+ goto iput;
+ ll_pgcache_remove_extent(inode, lsm, lock, stripe);
+
+ down(&inode->i_sem);
+ kms = ldlm_extent_shift_kms(lock,
+ lsm->lsm_oinfo[stripe].loi_kms);
+
+ if (lsm->lsm_oinfo[stripe].loi_kms != kms)
+ LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
+ lsm->lsm_oinfo[stripe].loi_kms, kms);
+ lsm->lsm_oinfo[stripe].loi_kms = kms;
+ up(&inode->i_sem);
//ll_try_done_writing(inode);
+ iput:
iput(inode);
break;
}
RETURN(0);
}
-/*
- * some callers, notably truncate, really don't want i_size set based
- * on the the size returned by the getattr, or lock acquisition in
- * the future.
- */
-int ll_extent_lock_no_validate(struct ll_file_data *fd, struct inode *inode,
- struct lov_stripe_md *lsm,
- int mode, struct ldlm_extent *extent,
- struct lustre_handle *lockh, int ast_flags)
+#if 0
+/* Completion callback for a client-side async/glimpse enqueue: fold the
+ * returned LVB size into the stripe's known minimum size (KMS), then drop
+ * the lock reference. Currently compiled out. */
+int ll_async_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+{
+        /* XXX ALLOCATE - 160 bytes */
+        struct inode *inode = ll_inode_from_lock(lock);
+        struct ll_inode_info *lli = ll_i2info(inode);
+        struct lustre_handle lockh = { 0 };
+        struct ost_lvb *lvb;
+        /* must be signed: ll_lock_to_stripe_offset() returns a negative
+         * error code, which a __u32 stripe could never detect below
+         * (matches the int stripe used in ll_glimpse_callback()) */
+        int stripe;
+        ENTRY;
+
+        if (flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+                     LDLM_FL_BLOCK_CONV)) {
+                LBUG(); /* not expecting any blocked async locks yet */
+                LDLM_DEBUG(lock, "client-side async enqueue returned a blocked "
+                           "lock, returning");
+                ldlm_lock_dump(D_OTHER, lock, 0);
+                ldlm_reprocess_all(lock->l_resource);
+                RETURN(0);
+        }
+
+        LDLM_DEBUG(lock, "client-side async enqueue: granted/glimpsed");
+
+        stripe = ll_lock_to_stripe_offset(inode, lock);
+        if (stripe < 0)
+                goto iput;
+
+        if (lock->l_lvb_len) {
+                struct lov_stripe_md *lsm = lli->lli_smd;
+                __u64 kms;
+                lvb = lock->l_lvb_data;
+                lsm->lsm_oinfo[stripe].loi_rss = lvb->lvb_size;
+
+                /* i_sem serializes KMS updates for this inode */
+                down(&inode->i_sem);
+                kms = MAX(lsm->lsm_oinfo[stripe].loi_kms, lvb->lvb_size);
+                kms = ldlm_extent_shift_kms(NULL, kms);
+                if (lsm->lsm_oinfo[stripe].loi_kms != kms)
+                        LDLM_DEBUG(lock, "updating kms from "LPU64" to "LPU64,
+                                   lsm->lsm_oinfo[stripe].loi_kms, kms);
+                lsm->lsm_oinfo[stripe].loi_kms = kms;
+                up(&inode->i_sem);
+        }
+
+iput:
+        iput(inode);
+        wake_up(&lock->l_waitq);
+
+        ldlm_lock2handle(lock, &lockh);
+        ldlm_lock_decref(&lockh, LCK_PR);
+        RETURN(0);
+}
+#endif
+
+/* Server-initiated glimpse callback: reply with the cached known minimum
+ * size (KMS) for the stripe this lock covers, so the server can report an
+ * up-to-date object size without cancelling our lock. */
+static int ll_glimpse_callback(struct ldlm_lock *lock, void *reqp)
{
+        struct ptlrpc_request *req = reqp;
+        struct inode *inode = ll_inode_from_lock(lock);
+        struct ll_inode_info *lli;
+        struct ost_lvb *lvb;
+        int rc, size = sizeof(*lvb), stripe;
+        ENTRY;
+
+        if (inode == NULL)
+                GOTO(out, rc = -ELDLM_NO_LOCK_DATA);
+        lli = ll_i2info(inode);
+        if (lli == NULL)
+                GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+        if (lli->lli_smd == NULL)
+                GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+
+        /* First, find out which stripe index this lock corresponds to. */
+        stripe = ll_lock_to_stripe_offset(inode, lock);
+        if (stripe < 0)
+                GOTO(iput, rc = -ELDLM_NO_LOCK_DATA);
+
+        rc = lustre_pack_reply(req, 1, &size, NULL);
+        if (rc) {
+                CERROR("lustre_pack_reply: %d\n", rc);
+                GOTO(iput, rc);
+        }
+
+        lvb = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*lvb));
+        /* the reply LVB carries our cached KMS for this stripe */
+        lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe].loi_kms;
+
+        LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64,
+                   inode->i_size, stripe, lvb->lvb_size);
+        GOTO(iput, 0);
+ iput:
+        iput(inode);
+
+ out:
+        /* These errors are normal races, so we don't want to fill the console
+         * with messages by calling ptlrpc_error() */
+        if (rc == -ELDLM_NO_LOCK_DATA)
+                lustre_pack_reply(req, 0, NULL, NULL);
+
+        req->rq_status = rc;
+        return rc;
+}
+
+__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
+__u64 lov_merge_blocks(struct lov_stripe_md *lsm);
+__u64 lov_merge_mtime(struct lov_stripe_md *lsm, __u64 current_time);
+
+/* NB: lov_merge_size will prefer locally cached writes if they extend the
+ * file (because it prefers KMS over RSS when larger) */
+/* Take (and immediately drop) a whole-file PR intent lock so every OST
+ * returns its size LVB, then merge the per-stripe sizes/blocks into the
+ * caller's lvb and the inode. Returns 0 or a negative error. */
+int ll_glimpse_size(struct inode *inode, struct ost_lvb *lvb)
+{
+        struct ll_inode_info *lli = ll_i2info(inode);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
-        int rc;
+        ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
+        struct lustre_handle lockh = { 0 };
+        int rc, flags = LDLM_FL_HAS_INTENT;
        ENTRY;
-        LASSERT(lockh->cookie == 0);
+        CDEBUG(D_DLMTRACE, "Glimpsing inode %lu\n", inode->i_ino);
-        /* XXX phil: can we do this? won't it screw the file size up? */
-        if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
-            (sbi->ll_flags & LL_SBI_NOLCK))
-                RETURN(0);
+        /* PR enqueue with LDLM_FL_HAS_INTENT; the returned LVB is swabbed
+         * by lustre_swab_ost_lvb */
+        rc = obd_enqueue(sbi->ll_osc_exp, lli->lli_smd, LDLM_EXTENT, &policy,
+                         LCK_PR, &flags, ll_extent_lock_callback,
+                         ldlm_completion_ast, ll_glimpse_callback, inode,
+                         sizeof(*lvb), lustre_swab_ost_lvb, &lockh);
+        if (rc != 0) {
+                CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
+                RETURN(rc > 0 ? -EIO : rc);
+        }
-        CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
-               inode->i_ino, extent->start, extent->end);
+        lvb->lvb_size = lov_merge_size(lli->lli_smd, 0);
+        inode->i_blocks = lov_merge_blocks(lli->lli_smd);
+        //inode->i_mtime = lov_merge_mtime(lli->lli_smd, inode->i_mtime);
+
+        CDEBUG(D_DLMTRACE, "glimpse: size: "LPU64", blocks: "LPU64"\n",
+               lvb->lvb_size, lvb->lvb_blocks);
+
+        /* drop the PR lock we took only for the glimpse */
+        obd_cancel(sbi->ll_osc_exp, lli->lli_smd, LCK_PR, &lockh);
-        rc = obd_enqueue(sbi->ll_osc_exp, lsm, NULL, LDLM_EXTENT, extent,
-                         sizeof(extent), mode, &ast_flags,
-                         ll_extent_lock_callback, inode, lockh);
-        if (rc > 0)
-                rc = -EIO;
        RETURN(rc);
 }
-/*
- * this grabs a lock and manually implements behaviour that makes it look like
- * the OST is returning the file size with each lock acquisition.
- */
int ll_extent_lock(struct ll_file_data *fd, struct inode *inode,
struct lov_stripe_md *lsm, int mode,
- struct ldlm_extent *extent, struct lustre_handle *lockh)
+ ldlm_policy_data_t *policy, struct lustre_handle *lockh,
+ int ast_flags)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_export *exp = ll_i2obdexp(inode);
- struct ldlm_extent size_lock;
- struct lustre_handle match_lockh = {0};
- struct obdo oa;
- obd_flag refresh_valid;
- int flags, rc, matched;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int rc;
ENTRY;
- rc = ll_extent_lock_no_validate(fd, inode, lsm, mode, extent, lockh, 0);
- if (rc != ELDLM_OK)
- RETURN(rc);
+ LASSERT(lockh->cookie == 0);
- if (test_bit(LLI_F_HAVE_OST_SIZE_LOCK, &lli->lli_flags))
+ /* XXX phil: can we do this? won't it screw the file size up? */
+ if ((fd && (fd->fd_flags & LL_FILE_IGNORE_LOCK)) ||
+ (sbi->ll_flags & LL_SBI_NOLCK))
RETURN(0);
- rc = ll_lsm_getattr(exp, lsm, &oa);
- if (rc) {
- ll_extent_unlock(fd, inode, lsm, mode, lockh);
- RETURN(rc);
- }
+ CDEBUG(D_DLMTRACE, "Locking inode %lu, start "LPU64" end "LPU64"\n",
+ inode->i_ino, policy->l_extent.start, policy->l_extent.end);
- /* We set this flag in commit write as we extend the file size. When
- * the bit is set and the lock is canceled that covers the file size,
- * we clear the bit. This is enough to protect the window where our
- * local size extension is needed for writeback. However, it relies on
- * behaviour that won't be true in the near future. This assumes that
- * all getattr callers get extent locks, which they currnetly do. It
- * also assumes that we only send discarding asts for {0,eof} truncates
- * as is currently the case. This will have to be replaced by the
- * proper eoc communication between clients and the ost, which is on
- * its way. */
- refresh_valid = (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ | OBD_MD_FLMTIME |
- OBD_MD_FLCTIME | OBD_MD_FLSIZE);
- if (test_bit(LLI_F_PREFER_EXTENDED_SIZE, &lli->lli_flags)) {
- if (oa.o_size < inode->i_size)
- refresh_valid &= ~OBD_MD_FLSIZE;
- else
- clear_bit(LLI_F_PREFER_EXTENDED_SIZE, &lli->lli_flags);
- }
- obdo_refresh_inode(inode, &oa, refresh_valid);
-
- CDEBUG(D_INODE, "objid "LPX64" size %Lu, blocks %lu, blksize %lu\n",
- lsm->lsm_object_id, inode->i_size, inode->i_blocks,
- inode->i_blksize);
-
- size_lock.start = inode->i_size;
- size_lock.end = OBD_OBJECT_EOF;
-
- /* XXX I bet we should be checking the lock ignore flags.. */
- flags = LDLM_FL_CBPENDING | LDLM_FL_BLOCK_GRANTED;
- matched = obd_match(exp, lsm, LDLM_EXTENT, &size_lock,
- sizeof(size_lock), LCK_PR, &flags, inode,
- &match_lockh);
- if (matched < 0)
- RETURN(matched);
-
- /* hey, alright, we hold a size lock that covers the size we
- * just found, its not going to change for a while.. */
- if (matched == 1) {
- set_bit(LLI_F_HAVE_OST_SIZE_LOCK, &lli->lli_flags);
- obd_cancel(exp, lsm, LCK_PR, &match_lockh);
- }
+ rc = obd_enqueue(sbi->ll_osc_exp, lsm, LDLM_EXTENT, policy, mode,
+ &ast_flags, ll_extent_lock_callback,
+ ldlm_completion_ast, ll_glimpse_callback, inode,
+ sizeof(struct ost_lvb), lustre_swab_ost_lvb, lockh);
+ if (rc > 0)
+ rc = -EIO;
- RETURN(0);
+ if (policy->l_extent.start == 0 &&
+ policy->l_extent.end == OBD_OBJECT_EOF)
+ inode->i_size = lov_merge_size(lsm, 1);
+
+ //inode->i_mtime = lov_merge_mtime(lsm, inode->i_mtime);
+
+ RETURN(rc);
}
int ll_extent_unlock(struct ll_file_data *fd, struct inode *inode,
struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm = lli->lli_smd;
struct lustre_handle lockh = { 0 };
- struct ldlm_extent extent;
- ldlm_error_t err;
+ ldlm_policy_data_t policy;
+ int rc;
ssize_t retval;
+ __u64 kms;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
inode->i_ino, inode->i_generation, inode, count, *ppos);
if (!lsm)
RETURN(0);
- /* grab a -> eof extent to push extending writes out of node's caches
- * so we can see them at the getattr after lock acquisition. this will
- * turn into a seperate [*ppos + count, EOF] 'size intent' lock attempt
- * in the future. */
- extent.start = *ppos;
- extent.end = OBD_OBJECT_EOF;
+ policy.l_extent.start = *ppos;
+ policy.l_extent.end = *ppos + count - 1;
- err = ll_extent_lock(fd, inode, lsm, LCK_PR, &extent, &lockh);
- if (err != ELDLM_OK)
- RETURN(err);
+ rc = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
+ (filp->f_flags & O_NONBLOCK) ?
+ LDLM_FL_BLOCK_NOWAIT: 0);
+ if (rc != 0)
+ RETURN(rc);
- CDEBUG(D_INFO, "Reading inode %lu, "LPSZ" bytes, offset %Ld\n",
- inode->i_ino, count, *ppos);
+ kms = lov_merge_size(lsm, 1);
+ if (*ppos + count - 1 > kms) {
+ /* A glimpse is necessary to determine whether we return a short
+ * read or some zeroes at the end of the buffer */
+ struct ost_lvb lvb;
+ retval = ll_glimpse_size(inode, &lvb);
+ if (retval)
+ goto out;
+ inode->i_size = lvb.lvb_size;
+ } else {
+ inode->i_size = kms;
+ }
+
+ CDEBUG(D_INFO, "Read ino %lu, "LPSZ" bytes, offset %lld, i_size %llu\n",
+ inode->i_ino, count, *ppos, inode->i_size);
+
+ /* turn off the kernel's read-ahead */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- filp->f_ramax = 0; /* turn off generic_file_readahead() */
+ filp->f_ramax = 0;
#else
filp->f_ra.ra_pages = 0;
#endif
retval = generic_file_read(filp, buf, count, ppos);
- /* XXX errors? */
+ out:
ll_extent_unlock(fd, inode, lsm, LCK_PR, &lockh);
RETURN(retval);
}
struct inode *inode = file->f_dentry->d_inode;
struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
struct lustre_handle lockh = { 0 };
- struct ldlm_extent extent;
+ ldlm_policy_data_t policy;
loff_t maxbytes = ll_file_maxbytes(inode);
- ldlm_error_t err;
ssize_t retval;
- char should_validate = 1;
+ int nonblock = 0, rc;
ENTRY;
+ if (file->f_flags & O_NONBLOCK)
+ nonblock = LDLM_FL_BLOCK_NOWAIT;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),size="LPSZ",offset=%Ld\n",
inode->i_ino, inode->i_generation, inode, count, *ppos);
if (count == 0)
RETURN(0);
+ /* If file was opened for LL_IOC_LOV_SETSTRIPE but the ioctl wasn't
+ * called on the file, don't fail the below assertion (bug 2388). */
+ if (file->f_flags & O_LOV_DELAY_CREATE && lsm == NULL)
+ RETURN(-EBADF);
+
LASSERT(lsm);
if (file->f_flags & O_APPEND) {
- extent.start = 0;
- extent.end = OBD_OBJECT_EOF;
+ policy.l_extent.start = 0;
+ policy.l_extent.end = OBD_OBJECT_EOF;
} else {
- extent.start = *ppos;
- extent.end = *ppos + count - 1;
- /* we really don't care what i_size is if we're doing
- * fully page aligned writes */
- if ((*ppos & ~PAGE_CACHE_MASK) == 0 &&
- (count & ~PAGE_CACHE_MASK) == 0)
- should_validate = 0;
+ policy.l_extent.start = *ppos;
+ policy.l_extent.end = *ppos + count - 1;
}
- if (should_validate)
- err = ll_extent_lock(fd, inode, lsm, LCK_PW, &extent, &lockh);
- else
- err = ll_extent_lock_no_validate(fd, inode, lsm, LCK_PW,
- &extent, &lockh, 0);
- if (err != ELDLM_OK)
- RETURN(err);
+ rc = ll_extent_lock(fd, inode, lsm, LCK_PW, &policy, &lockh, nonblock);
+ if (rc != 0)
+ RETURN(rc);
/* this is ok, g_f_w will overwrite this under i_sem if it races
* with a local truncate, it just makes our maxbyte checking easier */
retval = generic_file_write(file, buf, count, ppos);
out:
- /* XXX errors? */
- lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_WRITE_BYTES,
- retval);
ll_extent_unlock(fd, inode, lsm, LCK_PW, &lockh);
+ lprocfs_counter_add(ll_i2sbi(inode)->ll_stats, LPROC_LL_WRITE_BYTES,
+ retval > 0 ? retval : 0);
RETURN(retval);
}
-static int ll_lov_setstripe(struct inode *inode, struct file *file,
- unsigned long arg)
+/* LL_IOC_RECREATE_OBJ: re-create an OST object for this file using the
+ * object id / OST index / group supplied by userspace. Root only.
+ * Returns 0 or a negative error code. */
+static int ll_lov_recreate_obj(struct inode *inode, struct file *file,
+                               unsigned long arg)
+{
+        struct ll_inode_info *lli = ll_i2info(inode);
+        struct obd_export *exp = ll_i2obdexp(inode);
+        struct ll_recreate_obj ucreatp;
+        struct obd_trans_info oti = { 0 };
+        struct obdo *oa = NULL;
+        int lsm_size;
+        int rc = 0;
+        struct lov_stripe_md *lsm, *lsm2 = NULL;
+        ENTRY;
+
+        if (!capable (CAP_SYS_ADMIN))
+                RETURN(-EPERM);
+
+        rc = copy_from_user(&ucreatp, (struct ll_recreate_obj *)arg,
+                            sizeof(struct ll_recreate_obj));
+        if (rc)
+                RETURN(-EFAULT);
+
+        oa = obdo_alloc();
+        if (oa == NULL)
+                RETURN(-ENOMEM);
+
+        /* lli_open_sem serializes against open/setstripe swapping lli_smd */
+        down(&lli->lli_open_sem);
+        lsm = lli->lli_smd;
+        if (lsm == NULL)
+                GOTO(out_unlock, rc = -ENOENT);
+        lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
+                   (lsm->lsm_stripe_count));
+
+        /* work on a scratch copy of the stripe md for obd_create() */
+        OBD_ALLOC(lsm2, lsm_size);
+        if (lsm2 == NULL)
+                GOTO(out_unlock, rc = -ENOMEM);
+
+        oa->o_id = ucreatp.lrc_id;
+        oa->o_nlink = ucreatp.lrc_ost_idx;
+        oa->o_gr = ucreatp.lrc_group;
+        oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
+        oa->o_flags |= OBD_FL_RECREATE_OBJS;
+        obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
+                        OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+
+        oti.oti_objid = NULL;
+        memcpy(lsm2, lsm, lsm_size);
+        rc = obd_create(exp, oa, &lsm2, &oti);
+
+        OBD_FREE(lsm2, lsm_size);
+        /* single exit: previously the up()/obdo_free() pair was duplicated
+         * on every error path */
+ out_unlock:
+        up(&lli->lli_open_sem);
+        obdo_free(oa);
+        RETURN (rc);
+}
+
+static int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
+ int flags, struct lov_user_md *lum,
+ int lum_size)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct file *f;
struct obd_export *exp = ll_i2obdexp(inode);
struct lov_stripe_md *lsm;
- struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = FMODE_WRITE};
- struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
+ struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
struct ptlrpc_request *req = NULL;
+ int rc = 0;
struct lustre_md md;
- int rc;
ENTRY;
- /* Bug 1152: copy properly when this is no longer true */
- LASSERT(sizeof(lum) == sizeof(*lump));
- LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
- rc = copy_from_user(&lum, lump, sizeof(lum));
- if (rc)
- RETURN(-EFAULT);
-
down(&lli->lli_open_sem);
lsm = lli->lli_smd;
if (lsm) {
f->f_dentry = file->f_dentry;
f->f_vfsmnt = file->f_vfsmnt;
- rc = ll_intent_file_open(f, &lum, sizeof(lum), &oit);
+ rc = ll_intent_file_open(f, lum, lum_size, &oit);
if (rc)
GOTO(out, rc);
if (it_disposition(&oit, DISP_LOOKUP_NEG))
if (rc < 0)
GOTO(out, rc);
- rc = mdc_req2lustre_md(req, 1, exp, &md);
+ rc = mdc_req2lustre_md(ll_i2mdcexp(inode), req, 1, exp, &md);
if (rc)
GOTO(out, rc);
- ll_update_inode(f->f_dentry->d_inode, md.body, md.lsm);
+ ll_update_inode(f->f_dentry->d_inode, &md);
rc = ll_local_open(f, &oit);
if (rc)
RETURN(rc);
}
+/* LL_IOC_LOV_SETEA: copy a lov_user_md (with one trailing
+ * lov_user_ost_data entry) from userspace and apply it via
+ * ll_lov_setstripe_ea_info() with MDS_OPEN_HAS_OBJS set. Root only. */
+static int ll_lov_setea(struct inode *inode, struct file *file,
+                            unsigned long arg)
+{
+        int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
+        struct lov_user_md *lump;
+        int lum_size = sizeof(struct lov_user_md) +
+                       sizeof(struct lov_user_ost_data);
+        int rc;
+        ENTRY;
+
+        if (!capable (CAP_SYS_ADMIN))
+                RETURN(-EPERM);
+
+        OBD_ALLOC(lump, lum_size);
+        if (lump == NULL) {
+                RETURN(-ENOMEM);
+        }
+        rc = copy_from_user(lump, (struct lov_user_md *)arg, lum_size);
+        if (rc) {
+                OBD_FREE(lump, lum_size);
+                RETURN(-EFAULT);
+        }
+
+        rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
+
+        OBD_FREE(lump, lum_size);
+        RETURN(rc);
+}
+
+/* LL_IOC_LOV_SETSTRIPE: copy the striping request from userspace and hand
+ * it to ll_lov_setstripe_ea_info() (plain FMODE_WRITE open intent). */
+static int ll_lov_setstripe(struct inode *inode, struct file *file,
+                            unsigned long arg)
+{
+        struct lov_user_md lum, *lump = (struct lov_user_md *)arg;
+        int rc;
+        int flags = FMODE_WRITE;
+        ENTRY;
+
+        /* Bug 1152: copy properly when this is no longer true */
+        LASSERT(sizeof(lum) == sizeof(*lump));
+        LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
+        rc = copy_from_user(&lum, lump, sizeof(lum));
+        if (rc)
+                RETURN(-EFAULT);
+
+        rc = ll_lov_setstripe_ea_info(inode, file, flags, &lum, sizeof(lum));
+        RETURN(rc);
+}
+
static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
{
struct lov_stripe_md *lsm = ll_i2info(inode)->lli_smd;
(void *)arg);
}
+/* LL_IOC_GROUP_LOCK: take a whole-file GROUP-mode extent lock with group
+ * id 'arg' and remember it in the file descriptor; i/o on this fd then
+ * skips per-call extent locking (LL_FILE_IGNORE_LOCK). */
+static int ll_get_grouplock(struct inode *inode, struct file *file,
+                         unsigned long arg)
+{
+        struct ll_file_data *fd = file->private_data;
+        ldlm_policy_data_t policy = { .l_extent = { .start = 0,
+                                                    .end = OBD_OBJECT_EOF}};
+        struct lustre_handle lockh = { 0 };
+        struct ll_inode_info *lli = ll_i2info(inode);
+        struct lov_stripe_md *lsm = lli->lli_smd;
+        int flags = 0, rc;
+        ENTRY;
+
+        /* only one group lock per file descriptor */
+        if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+                RETURN(-EINVAL);
+        }
+
+        policy.l_extent.gid = arg;
+        if (file->f_flags & O_NONBLOCK)
+                flags = LDLM_FL_BLOCK_NOWAIT;
+
+        rc = ll_extent_lock(fd, inode, lsm, LCK_GROUP, &policy, &lockh, flags);
+        if (rc != 0)
+                RETURN(rc);
+
+        fd->fd_flags |= LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK;
+        fd->fd_gid = arg;
+        memcpy(&fd->fd_cwlockh, &lockh, sizeof(lockh));
+
+        RETURN(0);
+}
+
+/* LL_IOC_GROUP_UNLOCK: drop the group lock taken by ll_get_grouplock().
+ * 'arg' must match the gid the lock was taken with. */
+static int ll_put_grouplock(struct inode *inode, struct file *file,
+                         unsigned long arg)
+{
+        struct ll_file_data *fd = file->private_data;
+        struct ll_inode_info *lli = ll_i2info(inode);
+        struct lov_stripe_md *lsm = lli->lli_smd;
+        int rc;
+        ENTRY;
+
+        if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+                /* Ugh, it's already unlocked. */
+                RETURN(-EINVAL);
+        }
+
+        if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
+                RETURN(-EINVAL);
+
+        /* NOTE(review): flags are cleared before the unlock; if
+         * ll_extent_unlock() fails below, fd_gid stays set while
+         * GROUP_LOCKED is already gone -- confirm this is intentional */
+        fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
+
+        rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
+        if (rc)
+                RETURN(rc);
+
+        fd->fd_gid = 0;
+        memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
+
+        RETURN(0);
+}
+
int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
RETURN(0);
case LL_IOC_LOV_SETSTRIPE:
RETURN(ll_lov_setstripe(inode, file, arg));
+ case LL_IOC_LOV_SETEA:
+ RETURN(ll_lov_setea(inode, file, arg));
case LL_IOC_LOV_GETSTRIPE:
RETURN(ll_lov_getstripe(inode, arg));
+ case LL_IOC_RECREATE_OBJ:
+ RETURN(ll_lov_recreate_obj(inode, file, arg));
case EXT3_IOC_GETFLAGS:
case EXT3_IOC_SETFLAGS:
RETURN( ll_iocontrol(inode, file, cmd, arg) );
+ case LL_IOC_GROUP_LOCK:
+ RETURN(ll_get_grouplock(inode, file, arg));
+ case LL_IOC_GROUP_UNLOCK:
+ RETURN(ll_put_grouplock(inode, file, arg));
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
* network encode the arg field.
lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_LLSEEK);
if (origin == 2) { /* SEEK_END */
- ldlm_error_t err;
- struct ldlm_extent extent = {0, OBD_OBJECT_EOF};
- err = ll_extent_lock(fd, inode, lsm, LCK_PR, &extent, &lockh);
- if (err != ELDLM_OK)
- RETURN(err);
+ int nonblock = 0, rc;
+ ldlm_policy_data_t policy = { .l_extent = {0, OBD_OBJECT_EOF }};
+
+ if (file->f_flags & O_NONBLOCK)
+ nonblock = LDLM_FL_BLOCK_NOWAIT;
+
+ rc = ll_extent_lock(fd, inode, lsm, LCK_PR, &policy, &lockh,
+ nonblock);
+ if (rc != 0)
+ RETURN(rc);
offset += inode->i_size;
} else if (origin == 1) { /* SEEK_CUR */
rc = filemap_fdatawait(inode->i_mapping);
ll_inode2fid(&fid, inode);
- err = mdc_sync(ll_i2sbi(inode)->ll_mdc_exp, &fid, &req);
+ err = md_sync(ll_i2sbi(inode)->ll_mdc_exp, &fid, &req);
if (!rc)
rc = err;
if (!err)
RETURN(rc ? rc : -ENOMEM);
oa->o_id = lsm->lsm_object_id;
+ oa->o_gr = lsm->lsm_object_gr;
oa->o_valid = OBD_MD_FLID;
obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
- OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLGROUP);
err = obd_sync(ll_i2sbi(inode)->ll_osc_exp, oa, lsm,
0, OBD_OBJECT_EOF);
struct ldlm_res_id res_id =
{ .name = {inode->i_ino, inode->i_generation, LDLM_FLOCK} };
struct lustre_handle lockh = {0};
- struct ldlm_flock flock;
+ ldlm_policy_data_t flock;
ldlm_mode_t mode = 0;
int flags = 0;
int rc;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
inode->i_ino, file_lock);
- flock.pid = file_lock->fl_pid;
- flock.start = file_lock->fl_start;
- flock.end = file_lock->fl_end;
+ flock.l_flock.pid = file_lock->fl_pid;
+ flock.l_flock.start = file_lock->fl_start;
+ flock.l_flock.end = file_lock->fl_end;
switch (file_lock->fl_type) {
case F_RDLCK:
switch (cmd) {
case F_SETLKW:
+#ifdef F_SETLKW64
+ case F_SETLKW64:
+#endif
flags = 0;
break;
case F_SETLK:
+#ifdef F_SETLK64
+ case F_SETLK64:
+#endif
flags = LDLM_FL_BLOCK_NOWAIT;
break;
case F_GETLK:
+#ifdef F_GETLK64
+ case F_GETLK64:
+#endif
flags = LDLM_FL_TEST_LOCK;
/* Save the old mode so that if the mode in the lock changes we
* can decrement the appropriate reader or writer refcount. */
LBUG();
}
- CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#x, mode=%u, "
- "start="LPU64", end="LPU64"\n", inode->i_ino, flock.pid,
- flags, mode, flock.start, flock.end);
+ CDEBUG(D_DLMTRACE, "inode=%lu, pid="LPU64", flags=%#x, mode=%u, "
+ "start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
+ flags, mode, flock.l_flock.start, flock.l_flock.end);
- obddev = sbi->ll_mdc_exp->exp_obd;
- rc = ldlm_cli_enqueue(sbi->ll_mdc_exp, NULL, obddev->obd_namespace,
- NULL, res_id, LDLM_FLOCK, &flock, sizeof(flock),
- mode, &flags, ldlm_flock_completion_ast, NULL,
- file_lock, &lockh);
+ obddev = md_get_real_obd(sbi->ll_mdc_exp, NULL, 0);
+ rc = ldlm_cli_enqueue(obddev->obd_self_export, NULL,
+ obddev->obd_namespace,
+ res_id, LDLM_FLOCK, &flock, mode, &flags,
+ NULL, ldlm_flock_completion_ast, NULL, file_lock,
+ NULL, 0, NULL, &lockh);
RETURN(rc);
}
-static int ll_have_md_lock(struct dentry *de)
-{
- struct ll_sb_info *sbi = ll_s2sbi(de->d_sb);
- struct lustre_handle lockh;
- struct ldlm_res_id res_id = { .name = {0} };
- struct obd_device *obddev;
- int flags;
- ENTRY;
-
- if (!de->d_inode)
- RETURN(0);
-
- obddev = sbi->ll_mdc_exp->exp_obd;
- res_id.name[0] = de->d_inode->i_ino;
- res_id.name[1] = de->d_inode->i_generation;
-
- CDEBUG(D_INFO, "trying to match res "LPU64"\n", res_id.name[0]);
-
- flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
- if (ldlm_lock_match(obddev->obd_namespace, flags, &res_id, LDLM_PLAIN,
-                     NULL, 0, LCK_PR, &lockh)) {
- ldlm_lock_decref(&lockh, LCK_PR);
- RETURN(1);
- }
-
- if (ldlm_lock_match(obddev->obd_namespace, flags, &res_id, LDLM_PLAIN,
-                     NULL, 0, LCK_PW, &lockh)) {
- ldlm_lock_decref(&lockh, LCK_PW);
- RETURN(1);
- }
- RETURN(0);
-}
+/* ll_have_md_lock() was removed: ll_inode_revalidate_it() now asks the
+ * metadata layer directly via md_valid_attrs() instead of matching
+ * LDLM_PLAIN locks locally. */
-
int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it)
{
struct inode *inode = dentry->d_inode;
+ struct ll_inode_info *lli;
struct lov_stripe_md *lsm;
+ struct ll_fid fid;
+ int rc;
ENTRY;
if (!inode) {
CERROR("REPORT THIS LINE TO PETER\n");
RETURN(0);
}
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n",
- inode->i_ino, inode->i_generation, inode, dentry->d_name.name);
+ ll_inode2fid(&fid, inode);
+ lli = ll_i2info(inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s,intent=%s\n",
+ inode->i_ino, inode->i_generation, inode, dentry->d_name.name,
+ LL_IT2STR(it));
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0))
lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats, LPROC_LL_REVALIDATE);
#endif
- if (!ll_have_md_lock(dentry)) {
+ if (!md_valid_attrs(ll_i2mdcexp(inode), &fid)) {
struct ptlrpc_request *req = NULL;
struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
struct ll_fid fid;
unsigned long valid = 0;
- int rc, ealen = 0;
+ int ealen = 0;
if (S_ISREG(inode->i_mode)) {
ealen = obd_size_diskmd(sbi->ll_osc_exp, NULL);
valid |= OBD_MD_FLEASIZE;
}
ll_inode2fid(&fid, inode);
- rc = mdc_getattr(sbi->ll_mdc_exp, &fid, valid, ealen, &req);
+ rc = md_getattr(sbi->ll_mdc_exp, &fid, valid, ealen, &req);
if (rc) {
CERROR("failure %d inode %lu\n", rc, inode->i_ino);
RETURN(-abs(rc));
}
- rc = ll_prep_inode(sbi->ll_osc_exp, &inode, req, 0, NULL);
+ rc = ll_prep_inode(sbi->ll_osc_exp, sbi->ll_mdc_exp,
+ &inode, req, 0, NULL);
if (rc) {
ptlrpc_req_finished(req);
RETURN(rc);
ptlrpc_req_finished(req);
}
-#if 0
- if (ll_have_md_lock(dentry) &&
- test_bit(LLI_F_HAVE_MDS_SIZE_LOCK, &ll_i2info(inode)->lli_flags))
- RETURN(0);
-#endif
-
- lsm = ll_i2info(inode)->lli_smd;
- if (!lsm) /* object not yet allocated, don't validate size */
+ lsm = lli->lli_smd;
+ if (lsm == NULL) /* object not yet allocated, don't validate size */
RETURN(0);
- /* unfortunately stat comes in through revalidate and we don't
- * differentiate this use from initial instantiation. we're
- * also being wildly conservative and flushing write caches
- * so that stat really returns the proper size. */
+ /* ll_glimpse_size will prefer locally cached writes if they extend
+ * the file */
{
- struct ldlm_extent extent = {0, OBD_OBJECT_EOF};
- struct lustre_handle lockh = {0};
- ldlm_error_t err;
-
- err = ll_extent_lock(NULL, inode, lsm, LCK_PR, &extent, &lockh);
- if (err != ELDLM_OK)
- RETURN(err);
+ struct ost_lvb lvb;
- ll_extent_unlock(NULL, inode, lsm, LCK_PR, &lockh);
+ rc = ll_glimpse_size(inode, &lvb);
+ inode->i_size = lvb.lvb_size;
}
- RETURN(0);
+ RETURN(rc);
}
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
}
#endif
-
struct file_operations ll_file_operations = {
- read: ll_file_read,
- write: ll_file_write,
- ioctl: ll_file_ioctl,
- open: ll_file_open,
- release: ll_file_release,
- mmap: generic_file_mmap,
- llseek: ll_file_seek,
- fsync: ll_fsync,
- //lock: ll_file_flock
+ /* switched from the GNU "label:" initializer extension to C99
+ * designated initializers; .sendfile is new (2.5+ kernels only)
+ * and the previously commented-out .lock hook is now enabled,
+ * wiring POSIX file locking to ll_file_flock */
+ .read = ll_file_read,
+ .write = ll_file_write,
+ .ioctl = ll_file_ioctl,
+ .open = ll_file_open,
+ .release = ll_file_release,
+ .mmap = generic_file_mmap,
+ .llseek = ll_file_seek,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+ .sendfile = generic_file_sendfile,
+#endif
+ .fsync = ll_fsync,
+ .lock = ll_file_flock
};
struct inode_operations ll_file_inode_operations = {
- setattr_raw: ll_setattr_raw,
- setattr: ll_setattr,
- truncate: ll_truncate,
+ /* converted to C99 designated initializers; the member set and the
+ * handlers are unchanged */
+ .setattr_raw = ll_setattr_raw,
+ .setattr = ll_setattr,
+ .truncate = ll_truncate,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
- getattr_it: ll_getattr,
+ .getattr_it = ll_getattr,
#else
- revalidate_it: ll_inode_revalidate_it,
+ .revalidate_it = ll_inode_revalidate_it,
#endif
};