SUBDIRS += $(LIBLUSTRE_SUBDIRS)
endif
-DIST_SUBDIRS := $(ALWAYS_SUBDIRS) $(SERVER_SUBDIRS) $(CLIENT_SUBDIRS) $(LIBLUSTRE_SUBDIRS)
+DIST_SUBDIRS := $(ALWAYS_SUBDIRS) $(SERVER_SUBDIRS) $(CLIENT_SUBDIRS) \
+ $(LIBLUSTRE_SUBDIRS)
EXTRA_DIST = BUGS FDL kernel_patches
# include/lustre/lustre_user.h
# See note there re: __ASM_X86_64_PROCESSOR_H
+AC_CHECK_HEADERS([linux/quota.h])
AC_CHECK_TYPES([struct if_dqinfo],[],[],[
#define __ASM_X86_64_PROCESSOR_H
# See the file COPYING in this distribution
SUBDIRS = linux lustre
-EXTRA_DIST = ioctl.h liblustre.h
+EXTRA_DIST = ioctl.h liblustre.h types.h
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
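+/* Pick up __u64/__u32 from <asm/types.h> where the configure check found
+ * it (HAVE_ASM_TYPES_H); otherwise fall back to the bundled types.h that
+ * this change adds to EXTRA_DIST. */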
+#ifdef HAVE_ASM_TYPES_H
+#include <asm/types.h>
+#else
+#include "types.h"
+#endif
+
#ifdef __KERNEL__
-# include <asm/types.h>
# include <linux/types.h>
-# include <linux/fs.h> /* to check for FMODE_EXEC, lest we redefine */
+# include <linux/fs.h> /* to check for FMODE_EXEC, dev_t, lest we redefine */
#else
#ifdef __CYGWIN__
# include <sys/types.h>
#else
-# include <asm/types.h>
# include <stdint.h>
#endif
#endif
#endif
#endif
-#ifndef LPU64
-/* x86_64 defines __u64 as "long" in userspace, but "long long" in the kernel */
-#if defined(__x86_64__) && defined(__KERNEL__)
-# define LPU64 "%Lu"
-# define LPD64 "%Ld"
-# define LPX64 "%#Lx"
-# define LPSZ "%lu"
-# define LPSSZ "%ld"
-#elif (BITS_PER_LONG == 32 || __WORDSIZE == 32)
-# define LPU64 "%Lu"
-# define LPD64 "%Ld"
-# define LPX64 "%#Lx"
-# define LPSZ "%u"
-# define LPSSZ "%d"
-#elif (BITS_PER_LONG == 64 || __WORDSIZE == 64)
-# define LPU64 "%lu"
-# define LPD64 "%ld"
-# define LPX64 "%#lx"
-# define LPSZ "%lu"
-# define LPSSZ "%ld"
-#endif
-#ifndef LPU64
-# error "No word size defined"
-#endif
-#endif
-
/* target.c */
struct ptlrpc_request;
struct recovd_data;
#define LLAP_FROM_COOKIE(c) \
(LASSERT(((struct ll_async_page *)(c))->llap_magic == LLAP_MAGIC), \
(struct ll_async_page *)(c))
+
#define LL_MAX_BLKSIZE (4UL * 1024 * 1024)
#include <lustre/lustre_user.h>
#define ur_cap ur_uc.luc_cap
#define ur_suppgid1 ur_uc.luc_suppgid1
#define ur_suppgid2 ur_uc.luc_suppgid2
-#define ur_umask ur_uc.luc_umask
#define MDS_LR_SERVER_SIZE 512
struct list_head srv_history_rqbds; /* request buffer history */
int srv_n_history_rqbds; /* # request buffers in history */
int srv_max_history_rqbds; /* max # request buffers in history */
-
+
atomic_t srv_outstanding_replies;
struct list_head srv_reply_queue; /* replies waiting for service */
#if defined __KERNEL__
#include <linux/lvfs_linux.h>
-#endif
+#endif
#ifdef LIBLUSTRE
#include <lvfs_user_fs.h>
int obd_minor;
unsigned int obd_attached:1, obd_set_up:1, obd_recovering:1,
obd_abort_recovery:1, obd_replayable:1, obd_no_transno:1,
- obd_no_recov:1, obd_stopping:1, obd_starting:1,
+ obd_no_recov:1, obd_stopping:1, obd_starting:1,
obd_force:1, obd_fail:1;
atomic_t obd_refcount;
wait_queue_head_t obd_refcount_waitq;
#ifndef _LUSTRE_USER_H
#define _LUSTRE_USER_H
+
+#ifdef HAVE_ASM_TYPES_H
#include <asm/types.h>
+#else
+#include "types.h"
+#endif
+
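+/* Only include <linux/quota.h> when the AC_CHECK_HEADERS([linux/quota.h])
+ * probe added to configure found it; the unconditional include further
+ * down is dropped. */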
+#ifdef HAVE_LINUX_QUOTA_H
+#include <linux/quota.h>
+#endif
/*
* asm-x86_64/processor.h on some SLES 9 distros seems to use
*/
#define __ASM_X86_64_PROCESSOR_H
-#include <linux/quota.h>
#ifdef __KERNEL__
#include <linux/string.h>
#else
struct obd_uuid obd_uuid;
};
+#ifndef LPU64
+/* x86_64 defines __u64 as "long" in userspace, but "long long" in the kernel */
+#if defined(__x86_64__) && defined(__KERNEL__)
+# define LPU64 "%Lu"
+# define LPD64 "%Ld"
+# define LPX64 "%#Lx"
+# define LPSZ "%lu"
+# define LPSSZ "%ld"
+#elif (BITS_PER_LONG == 32 || __WORDSIZE == 32)
+# define LPU64 "%Lu"
+# define LPD64 "%Ld"
+# define LPX64 "%#Lx"
+# define LPSZ "%u"
+# define LPSSZ "%d"
+#elif (BITS_PER_LONG == 64 || __WORDSIZE == 64)
+# define LPU64 "%lu"
+# define LPD64 "%ld"
+# define LPX64 "%#lx"
+# define LPSZ "%lu"
+# define LPSSZ "%ld"
+#endif
+#endif /* !LPU64 */
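+/* These macros are string-pasted into format strings so that __u64
+ * values print consistently on 32- and 64-bit builds, e.g. (variable
+ * names illustrative):
+ *     CDEBUG(D_DLMTRACE, "start "LPU64" end "LPX64"\n", start, end);
+ */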
+
#endif /* _LUSTRE_USER_H */
/* Determine if the lock is compatible with all locks on the queue.
* We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
+ *
* 0 if the lock is not compatible
* 1 if the lock is compatible
* 2 if this group lock is compatible and requires no further checking
/* locks are compatible, overlap doesn't matter */
if (lockmode_compat(lock->l_req_mode, req_mode)) {
- /* non-group locks are compatible, overlap doesn't
- matter */
+ /* non-group locks are compatible, overlap doesn't
+ matter */
if (likely(req_mode != LCK_GROUP))
continue;
-
+
/* If we are trying to get a GROUP lock and there is
another one of this kind, we need to compare gid */
if (req->l_policy_data.l_extent.gid ==
imp->imp_dlm_handle = *dlm_handle;
rc = ptlrpc_init_import(imp);
- if (rc != 0)
+ if (rc != 0)
GOTO(out_ldlm, rc);
if (data)
LBUG();
memcpy(saved_req, req, sizeof *saved_req);
memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
-
+
/* Don't race cleanup */
spin_lock_bh(&obd->obd_processing_task_lock);
if (obd->obd_stopping) {
if (llu_is_root_inode(inode))
RETURN(0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%llu\n", st->st_ino);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%llu\n", (long long)st->st_ino);
LL_GET_INTENT(inode, it);
if (!it->d.lustre.it_disposition) {
lvb->lvb_size = lli->lli_smd->lsm_oinfo[stripe].loi_kms;
LDLM_DEBUG(lock, "i_size: %llu -> stripe number %u -> kms "LPU64,
- llu_i2stat(inode)->st_size, stripe, lvb->lvb_size);
+ (long long)llu_i2stat(inode)->st_size, stripe, lvb->lvb_size);
iput:
I_RELE(inode);
out:
int rc, flags = LDLM_FL_HAS_INTENT;
ENTRY;
- CDEBUG(D_DLMTRACE, "Glimpsing inode %llu\n", st->st_ino);
+ CDEBUG(D_DLMTRACE, "Glimpsing inode %llu\n", (long long)st->st_ino);
rc = obd_enqueue(sbi->ll_osc_exp, lli->lli_smd, LDLM_EXTENT, &policy,
LCK_PR, &flags, llu_extent_lock_callback,
st->st_size = lov_merge_size(lli->lli_smd, 0);
st->st_blocks = lov_merge_blocks(lli->lli_smd);
- //lli->lli_st_mtime = lov_merge_mtime(lli->lli_smd, inode->i_mtime);
+ st->st_mtime = lov_merge_mtime(lli->lli_smd, st->st_mtime);
CDEBUG(D_DLMTRACE, "glimpse: size: %llu, blocks: %llu\n",
- st->st_size, st->st_blocks);
+ (long long)st->st_size, (long long)st->st_blocks);
obd_cancel(sbi->ll_osc_exp, lli->lli_smd, LCK_PR, &lockh);
RETURN(0);
CDEBUG(D_DLMTRACE, "Locking inode %llu, start "LPU64" end "LPU64"\n",
- st->st_ino, policy->l_extent.start, policy->l_extent.end);
+ (long long)st->st_ino, policy->l_extent.start,
+ policy->l_extent.end);
rc = obd_enqueue(sbi->ll_osc_exp, lsm, LDLM_EXTENT, policy, mode,
&ast_flags, llu_extent_lock_callback,
policy->l_extent.end == OBD_OBJECT_EOF)
st->st_size = lov_merge_size(lsm, 1);
- //inode->i_mtime = lov_merge_mtime(lsm, inode->i_mtime);
+ if (rc == 0)
+ st->st_mtime = lov_merge_mtime(lsm, st->st_mtime);
RETURN(rc);
}
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
RETURN(0);
- types = ext2_filetype_table;
-
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
ext2_dirent *de;
static int ll_get_grouplock(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct ll_file_data *fd = file->private_data;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
ldlm_policy_data_t policy = { .l_extent = { .start = 0,
.end = OBD_OBJECT_EOF}};
struct lustre_handle lockh = { 0 };
static int ll_put_grouplock(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct ll_file_data *fd = file->private_data;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ll_inode_info *lli = ll_i2info(inode);
struct lov_stripe_md *lsm = lli->lli_smd;
int rc;
if (fd->fd_gid != arg) /* Ugh? Unlocking with different gid? */
RETURN(-EINVAL);
-
+
fd->fd_flags &= ~(LL_FILE_GROUP_LOCKED|LL_FILE_IGNORE_LOCK);
rc = ll_extent_unlock(fd, inode, lsm, LCK_GROUP, &fd->fd_cwlockh);
memset(&fd->fd_cwlockh, 0, sizeof(fd->fd_cwlockh));
RETURN(0);
-}
+}
int ll_file_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{ .name = {inode->i_ino, inode->i_generation} };
struct obd_device *obddev = class_conn2obd(conn);
ENTRY;
-
+
RETURN(ldlm_cli_cancel_unused(obddev->obd_namespace, &res_id, flags,
opaque));
}
}
static int lov_llog_origin_connect(struct llog_ctxt *ctxt, int count,
- struct llog_logid *logid,
+ struct llog_logid *logid,
struct llog_gen *gen,
struct obd_uuid *uuid)
{
loi = lsm->lsm_oinfo;
lov = &obd->u.lov;
for (i = 0; i < count; i++, cookies++, loi++) {
- struct obd_device *child = lov->tgts[loi->loi_ost_idx].ltd_exp->exp_obd;
+ struct obd_device *child = lov->tgts[loi->loi_ost_idx].ltd_exp->exp_obd;
struct llog_ctxt *cctxt = llog_get_context(child, ctxt->loc_idx);
int err;
rc = lustre_pack_reply(req, 1, &size, NULL);
if (rc) {
- CERROR("mds: out of memory while packing readpage reply\n");
- RETURN(-ENOMEM);
+ CERROR("error packing readpage reply: rc %d\n", rc);
+ GOTO(out, rc);
}
body = lustre_swab_reqbuf(req, 0, sizeof(*body), lustre_swab_mds_body);
int recovering, abort_recovery;
if (req->rq_export == NULL) {
- CERROR("lustre_mds: operation %d on unconnected MDS\n",
- req->rq_reqmsg->opc);
+ CERROR("operation %d on unconnected MDS from %s\n",
+ req->rq_reqmsg->opc,
+ req->rq_peerstr);
req->rq_status = -ENOTCONN;
GOTO(out, rc = -ENOTCONN);
}
cfg.cfg_instance = NULL;
cfg.cfg_uuid = mds->mds_lov_uuid;
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- rc = class_config_parse_llog(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT),
+ rc = class_config_parse_llog(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT),
mds->mds_profile, &cfg);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
switch (rc) {
cfg.cfg_uuid = mds->mds_lov_uuid;
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- class_config_parse_llog(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT),
+ class_config_parse_llog(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT),
cln_prof, &cfg);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
LBUG();
}
+
+ /* Make sure the server's last_transno is up to date. Do this
+ * after the client is freed so we know all the client's
+ * transactions have been committed. */
+ mds_update_server_data(exp->exp_obd, 0);
+
EXIT;
-free:
+ free:
OBD_FREE(med->med_mcd, sizeof(*med->med_mcd));
med->med_mcd = NULL;
void *handle;
int err, namelen, rc = 0;
ENTRY;
-
+
memset(&ucred, 0, sizeof(ucred));
ucred.luc_cap = current->cap_effective | CAP_SYS_RESOURCE;
push_ctxt(&saved, &obd->obd_lvfs_ctxt, &ucred);
};
static struct llog_operations mds_size_repl_logops = {
- lop_cancel: mds_llog_repl_cancel
+ lop_cancel: mds_llog_repl_cancel,
};
int mds_llog_init(struct obd_device *obd, struct obd_device *tgt,
RETURN(0);
}
-
if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_ALLOC_OBDO))
GOTO(out_ids, rc = -ENOMEM);
}
EXPORT_SYMBOL(oig_release);
-void oig_add_one(struct obd_io_group *oig,
- struct oig_callback_context *occ)
+void oig_add_one(struct obd_io_group *oig, struct oig_callback_context *occ)
{
unsigned long flags;
CDEBUG(D_CACHE, "oig %p ready to roll\n", oig);
rc2 = block_debug_check("test_brw",
addr + delta, OBD_ECHO_BLOCK_SIZE,
stripe_off, stripe_id);
- if (rc2 != 0)
+ if (rc2 != 0) {
+ CERROR ("Error in echo object "LPX64"\n", id);
rc = rc2;
+ }
}
kunmap(page);
if (iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
orig_uid = dentry->d_inode->i_uid;
orig_gid = dentry->d_inode->i_gid;
- handle = fsfilt_start_log(exp->exp_obd, dentry->d_inode,
+ handle = fsfilt_start_log(exp->exp_obd, dentry->d_inode,
FSFILT_OP_SETATTR, oti, 1);
} else {
handle = fsfilt_start(exp->exp_obd, dentry->d_inode,
recreate_obj = 1;
} else {
OBD_ALLOC(osfs, sizeof(*osfs));
- if(osfs == NULL)
+ if (osfs == NULL)
RETURN(-ENOMEM);
rc = filter_statfs(obd, osfs, jiffies-HZ);
- if(rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
- CDEBUG(D_HA, "This OST has not enough space! avail "LPU64"\n",
- osfs->os_bavail << filter->fo_sb->s_blocksize_bits);
+ if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
+ CDEBUG(D_HA, "OST out of space! avail "LPU64"\n",
+ osfs->os_bavail<<filter->fo_sb->s_blocksize_bits);
*num=0;
rc = -ENOSPC;
}
rc = class_register_type(&filter_obd_ops, lvars.module_vars,
OBD_FILTER_DEVICENAME);
- if (rc) {
+ if (rc)
GOTO(out, rc);
- return rc;
- }
rc = class_register_type(&filter_sanobd_ops, lvars.module_vars,
OBD_FILTER_SAN_DEVICENAME);
if (rc)
return rc;
- if (val < 0 || val > OSC_MAX_DIRTY_MB_MAX || val > num_physpages / 4)
+ if (val < 0 || val > OSC_MAX_DIRTY_MB_MAX ||
+ val > num_physpages >> (20 - PAGE_SHIFT + 2)) /* 1/4 of RAM */
return -ERANGE;
spin_lock(&cli->cl_loi_list_lock);
/* this is trying to propagate async writeback errors back up to the
* application. As an async write fails we record the error code for later if
- * the app does an fsync. as long as errors persist we force future rpcs to be
+ * the app does an fsync. As long as errors persist we force future rpcs to be
* sync so that the app can get a sync error and break the cycle of queueing
* pages for which writeback will fail. */
static void osc_process_ar(struct osc_async_rc *ar, struct ptlrpc_request *req,
spin_lock_irqsave(&imp->imp_lock, flags);
if (req->rq_transno != 0)
ptlrpc_retain_replayable_request(req, imp);
- else if (req->rq_commit_cb != NULL)
+ else if (req->rq_commit_cb != NULL) {
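+ /* the commit callback must not run with imp_lock held, so drop
+ * the lock around the call and re-take it afterwards */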
+ spin_unlock_irqrestore(&imp->imp_lock, flags);
req->rq_commit_cb(req);
+ spin_lock_irqsave(&imp->imp_lock, flags);
+ }
if (req->rq_transno > imp->imp_max_transno)
imp->imp_max_transno = req->rq_transno;
struct list_head pc_req_list;
wait_queue_head_t pc_waitq;
struct ptlrpc_request_set *pc_set;
+ char pc_name[16];
#ifndef __KERNEL__
int pc_recurred;
void *pc_callback;
unsigned long flags;
ENTRY;
- kportal_daemonize("ptlrpcd");
+ kportal_daemonize(pc->pc_name);
SIGNAL_MASK_LOCK(current, flags);
sigfillset(¤t->blocked);
}
#endif
-static int ptlrpcd_start(struct ptlrpcd_ctl *pc)
+static int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc)
{
int rc = 0;
pc->pc_flags = 0;
spin_lock_init(&pc->pc_lock);
INIT_LIST_HEAD(&pc->pc_req_list);
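+ /* record the name; ptlrpcd() hands it to kportal_daemonize() so the
+ * "ptlrpcd" and "ptlrpcd-recov" threads can be told apart */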
+ snprintf(pc->pc_name, sizeof(pc->pc_name), "%s", name);
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
if (++ptlrpcd_users != 1)
GOTO(out, rc);
- rc = ptlrpcd_start(&ptlrpcd_pc);
+ rc = ptlrpcd_start("ptlrpcd", &ptlrpcd_pc);
if (rc) {
--ptlrpcd_users;
GOTO(out, rc);
}
- rc = ptlrpcd_start(&ptlrpcd_recovery_pc);
+ rc = ptlrpcd_start("ptlrpcd-recov", &ptlrpcd_recovery_pc);
if (rc) {
ptlrpcd_stop(&ptlrpcd_pc);
--ptlrpcd_users;
}
void
-ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
+ptlrpc_free_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
{
struct ptlrpc_srv_ni *sni = rqbd->rqbd_srv_ni;
struct ptlrpc_service *svc = sni->sni_service;
}
void
-ptlrpc_save_lock (struct ptlrpc_request *req,
+ptlrpc_save_lock (struct ptlrpc_request *req,
struct lustre_handle *lock, int mode)
{
struct ptlrpc_reply_state *rs = req->rq_reply_state;
wake_up (&svc->srv_waitq);
}
-void
+void
ptlrpc_commit_replies (struct obd_device *obd)
{
struct list_head *tmp;
struct list_head *nxt;
unsigned long flags;
-
+
/* Find any replies that have been committed and get their service
* to attend to complete them. */
list_for_each_safe (tmp, nxt, &obd->obd_uncommitted_replies) {
struct ptlrpc_reply_state *rs =
- list_entry (tmp, struct ptlrpc_reply_state, rs_obd_list);
+ list_entry(tmp, struct ptlrpc_reply_state, rs_obd_list);
LASSERT (rs->rs_difficult);
spin_unlock (&svc->srv_lock);
}
}
-
+
spin_unlock_irqrestore (&obd->obd_uncommitted_replies_lock, flags);
}
LASSERT (ptlrpc_ninterfaces > 0);
LASSERT (nbufs > 0);
LASSERT (bufsize >= max_req_size);
-
+
ssize = offsetof (struct ptlrpc_service,
srv_interfaces[ptlrpc_ninterfaces]);
OBD_ALLOC(service, ssize);
spin_lock (&ptlrpc_all_services_lock);
list_add (&service->srv_list, &ptlrpc_all_services);
spin_unlock (&ptlrpc_all_services_lock);
-
+
/* Now allocate the request buffers, assuming all interfaces require
* the same number. */
for (i = 0; i < ptlrpc_ninterfaces; i++) {
spin_unlock_irqrestore (&svc->srv_lock, flags);
RETURN(0);
}
-
+
rs = list_entry (svc->srv_reply_queue.next,
struct ptlrpc_reply_state, rs_list);
been_handled = rs->rs_handled;
rs->rs_handled = 1;
-
+
nlocks = rs->rs_nlocks; /* atomic "steal", but */
rs->rs_nlocks = 0; /* locks still on rs_locks! */
if ((!been_handled && rs->rs_on_net) ||
nlocks > 0) {
spin_unlock_irqrestore(&svc->srv_lock, flags);
-
+
if (!been_handled && rs->rs_on_net) {
PtlMDUnlink(rs->rs_md_h);
/* Ignore return code; we're racing with
int rc;
struct list_head *tmp, *nxt;
ENTRY;
-
+
/* I'm relying on being single threaded, not to have to lock
* ptlrpc_all_services etc */
list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
struct ptlrpc_service *svc =
list_entry (tmp, struct ptlrpc_service, srv_list);
-
+
if (svc->srv_nthreads != 0) /* I've recursed */
continue;
* (arbitrarily) to recursing 1 stack frame per service.
* Note that the problem with recursion is that we have to
* unwind completely before our caller can resume. */
-
+
svc->srv_nthreads++;
-
+
do {
rc = ptlrpc_server_handle_reply(svc);
rc |= ptlrpc_server_handle_request(svc);
rc |= (ptlrpc_server_post_idle_rqbds(svc) > 0);
did_something |= rc;
} while (rc);
-
+
svc->srv_nthreads--;
}
ptlrpc_retry_rqbds(void *arg)
{
struct ptlrpc_service *svc = (struct ptlrpc_service *)arg;
-
+
svc->srv_rqbd_timeout = 0;
return (-ETIMEDOUT);
}
if (thread == NULL)
RETURN(-ENOMEM);
init_waitqueue_head(&thread->t_ctl_waitq);
-
+
d.dev = dev;
d.svc = svc;
d.name = name;
* event with its 'unlink' flag set for each posted rqbd */
list_for_each(tmp, &srv_ni->sni_active_rqbds) {
struct ptlrpc_request_buffer_desc *rqbd =
- list_entry(tmp, struct ptlrpc_request_buffer_desc,
+ list_entry(tmp, struct ptlrpc_request_buffer_desc,
rqbd_list);
rc = PtlMDUnlink(rqbd->rqbd_md_h);
if (rc == 0)
break;
-
+
/* Network access will complete in finite time but
* the HUGE timeout lets us CWARN for visibility of
* sluggish NALs */
while (!list_empty(&service->srv_idle_rqbds)) {
struct ptlrpc_request_buffer_desc *rqbd =
list_entry(service->srv_idle_rqbds.next,
- struct ptlrpc_request_buffer_desc,
+ struct ptlrpc_request_buffer_desc,
rqbd_list);
ptlrpc_free_rqbd(rqbd);
# Lustre test Makefile
-AM_CPPFLAGS = $(LLCPPFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+AM_CPPFLAGS = $(LLCPPFLAGS) -I/opt/lam/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
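+# /opt/lam/include supplies the LAM/MPI headers (mpi.h) for the MPITESTS
+# programs; the matching libraries come from LAM_LD_FLAGS below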
AM_CFLAGS = $(LLCFLAGS)
# LDADD = -lldap
# LDADD := -lreadline -ltermcap # -lefence
noinst_PROGRAMS += small_write multiop sleeptest ll_sparseness_verify cmknod
noinst_PROGRAMS += ll_sparseness_write mrename ll_dirstripe_verify mkdirmany rmdirmany
noinst_PROGRAMS += openfilleddirunlink rename_many memhog iopentest1 iopentest2
-noinst_PROGRAMS += mmap_sanity writemany
+noinst_PROGRAMS += mmap_sanity flock_test writemany
if MPITESTS
-noinst_PROGRAMS += write_append_truncate createmany_mpi
+noinst_PROGRAMS += parallel_grouplock write_append_truncate createmany_mpi
endif
# noinst_PROGRAMS += ldaptest copy_attr mkdirdeep
bin_PROGRAMS = mcreate munlink
endif # TESTS
-mmap_sanity_SOURCES= mmap_sanity.c
stat_SOURCES = stat.c stat_fs.h
# mkdirdeep_LDADD=-L$(top_builddir)/portals/utils -lptlctl $(LIBREADLINE)
+mmap_sanity_SOURCES= mmap_sanity.c
if MPITESTS
LAM_LD_FLAGS=-L/opt/lam/lib -lmpi -llam -lpthread
write_append_truncate_LDADD=$(LAM_LD_FLAGS)
createmany_mpi_SOURCES=createmany-mpi.c
createmany_mpi_LDADD=$(LAM_LD_FLAGS)
+parallel_grouplock_SOURCES=parallel_grouplock.c lp_utils.c
+parallel_grouplock_LDADD=$(LAM_LD_FLAGS)
endif
#copy_attr_LDADD= -lattr
set -e
+ONLY=${ONLY:-"$*"}
+# bug number for skipped test:
+ALWAYS_EXCEPT=${ALWAYS_EXCEPT:-""}
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
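+# Individual tests can be selected by listing their numbers as arguments
+# or in $ONLY; $EXCEPT (and ALWAYS_EXCEPT) skip tests.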
+
+[ "$ALWAYS_EXCEPT$EXCEPT" ] && echo "Skipping tests: $ALWAYS_EXCEPT $EXCEPT"
+
SRCDIR=`dirname $0`
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/wait.h>
+
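+/* flock_test: sanity test for flock() semantics.  The parent takes
+ * LOCK_EX|LOCK_NB on the given file, then forks; the child checks that
+ * it cannot lock the file on its own descriptor, releases the lock via
+ * the inherited descriptor with LOCK_UN, and verifies it can then take
+ * the lock itself.  Usage: flock_test <filepath>; exits 0 on success. */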
+void chd_lock_unlock(int);
+char fname[1024];
+
+int main(int argc, char **argv)
+{
+ pid_t pid;
+ int cfd, fd, rc;
+
+ if (argc != 2) {
+ fprintf(stderr, "\nUSAGE: flock_test filepath\n");
+ exit(2);
+ }
+ strncpy(fname, argv[1], 1023);
+ fname[1023] = '\0';
+ fd = open(fname, O_RDWR|O_CREAT, (mode_t)0666);
+ if (fd == -1) {
+ fprintf(stderr, "flock_test: failed to open %s : ", fname);
+ perror("");
+ exit(1);
+ }
+ if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
+ fprintf(stderr, "flock_test: parent attempt to lock %s failed : ", \
+ fname);
+ perror("");
+ exit(1);
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ fprintf(stderr, "flock_test: fork failed : ");
+ perror("");
+ exit(1);
+ }
+
+ if (pid == 0) {
+ pid = getpid();
+ sleep(2);
+ if ((cfd = open(fname, O_RDWR)) == -1) {
+ fprintf(stderr, "flock_test child (%d) cannot open %s: ", \
+ pid, fname);
+ perror("");
+ exit(1);
+ }
+ if (flock(cfd, LOCK_EX | LOCK_NB) != -1) {
+ fprintf(stderr, "flock_test child (%d): %s not yet locked : ", \
+ pid, fname);
+ exit(1);
+ }
+ if (flock(fd, LOCK_UN) == -1) {
+ fprintf(stderr, "flock_test child (%d): cannot unlock %s: ", \
+ pid, fname);
+ perror("");
+ exit(1);
+ }
+ if (flock(cfd, LOCK_EX | LOCK_NB) == -1) {
+ fprintf(stderr, \
+ "flock_test: child (%d) cannot re-lock %s after unlocking : ", \
+ pid, fname);
+ perror("");
+ exit(1);
+ }
+ close(cfd);
+ exit(0);
+ }
+
+ waitpid(pid, &rc, 0);
+ close(fd);
+ unlink(fname);
+ if (WIFEXITED(rc) && WEXITSTATUS(rc) != 0) {
+ fprintf(stderr, "flock_test: child (%d) exit code = %d\n", \
+ pid, WEXITSTATUS(rc));
+ exit(1);
+ }
+ exit(0);
+}
${LMC} --add net --node localhost --nid `hostname` --nettype $NETTYPE || exit 11
${LMC} --add net --node client --nid '*' --nettype $NETTYPE || exit 12
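+# wrap any user-supplied MDS mount options into the --mountfsoptions
+# argument pair that lmc expects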
+[ "x$MDS_MOUNT_OPTS" != "x" ] &&
+ MDS_MOUNT_OPTS="--mountfsoptions $MDS_MOUNT_OPTS"
+
# configure mds server
${LMC} --add mds --node localhost --mds mds1 --fstype $FSTYPE \
- --dev $MDSDEV --size $MDSSIZE $JARG $IARG $MDSOPT || exit 20
+ --dev $MDSDEV \
+ $MDS_MOUNT_OPTS --size $MDSSIZE $JARG $IARG $MDSOPT || exit 20
+
+[ "x$OST_MOUNT_OPTS" != "x" ] &&
+ OST_MOUNT_OPTS="--mountfsoptions $OST_MOUNT_OPTS"
# configure ost
${LMC} -m $config --add lov --lov lov1 --mds mds1 --stripe_sz $STRIPE_BYTES \
--stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0 $LOVOPT || exit 20
-${LMC} --add ost --nspath /mnt/ost_ns --node localhost --lov lov1 \
- --fstype $FSTYPE --dev $OSTDEV --size $OSTSIZE $JARG $OSTOPT || exit 30
+
+${LMC} --add ost --node localhost --lov lov1 --fstype $FSTYPE \
+ --dev $OSTDEV \
+ $OST_MOUNT_OPTS --size $OSTSIZE $JARG $OSTOPT || exit 30
# create client config
-${LMC} --add mtpt --node localhost --path $MOUNT --mds mds1 --lov lov1 \
- $CLIENTOPT || exit 40
-${LMC} --add mtpt --node client --path $MOUNT2 --mds mds1 --lov lov1 \
- $CLIENTOPT || exit 41
+${LMC} --add mtpt --node localhost --path $MOUNT \
+ --mds mds1 --lov lov1 $CLIENTOPT || exit 40
+${LMC} --add mtpt --node client --path $MOUNT2 \
+ --mds mds1 --lov lov1 $CLIENTOPT || exit 41
sysctl -w lustre.fail_loc=0
do_facet client "df $DIR"
# expect cmp to fail
- do_facet client "cmp /etc/termcap $DIR/$tfile" && return 1
- do_facet client "rm $DIR/$tfile" || return 2
+ do_facet client "cmp /etc/termcap $DIR/$tfile" && return 3
+ do_facet client "rm $DIR/$tfile" || return 4
return 0
}
run_test 17 "timeout bulk get, evict client (2732)"
--target_cluster_id $SERVER_CLUSTER
done
---dev $OSTDEV --size $OSTSIZE
# OSTNODE
COUNT=1
for OSTNODE in $OSTNODES; do
run_test 71 "Running dbench on lustre (don't segment fault) ===="
test_72() { # bug 5695 - Test that on 2.6 remove_suid works properly
- check_kernel_version 43 || return 0
+ check_kernel_version 43 || return 0
[ "$RUNAS_ID" = "$UID" ] && echo "skipping test 72" && return
touch $DIR/f72
chmod 777 $DIR/f72
test_18() {
./mmap_sanity -d $MOUNT1 -m $MOUNT2
+ sync; sleep 1; sync
}
run_test 18 "mmap sanity check ================================="
eval ONLY_${O}=true
done
[ "$EXCEPT$ALWAYS_EXCEPT" ] && \
- log "skipping `echo $EXCEPT $ALWAYS_EXCEPT`"
+ log "skipping test `echo $EXCEPT $ALWAYS_EXCEPT`"
for E in $EXCEPT $ALWAYS_EXCEPT; do
eval EXCEPT_${E}=true
done
AM_CFLAGS=$(LLCFLAGS)
AM_CPPFLAGS=$(LLCPPFLAGS) -DLUSTRE_UTILS=1
+AM_LDFLAGS := -L$(top_builddir)/portals/utils
LIBPTLCTL := $(top_builddir)/portals/utils/libptlctl.a
wiretest_SOURCES = wiretest.c
lctl_SOURCES = parser.c obd.c lustre_cfg.c lctl.c parser.h obdctl.h
-
lload_SOURCES = lload.c
obdio_SOURCES = obdio.c obdiolib.c obdiolib.h
obdbarrier_SOURCES = obdbarrier.c obdiolib.c obdiolib.h
lfs_SOURCES = lfs.c parser.c obd.c
-
llmount_SOURCES = llmount.c
llmount_LDADD = $(LIBREADLINE) $(LIBPTLCTL)
llmount_DEPENDENCIES := $(LIBPTLCTL)
#include <sys/syscall.h>
#ifdef HAVE_LINUX_TYPES_H
#include <linux/types.h>
-#else
-#include "types.h"
#endif
#ifdef HAVE_LINUX_UNISTD_H
#include <linux/unistd.h>
#include <liblustre.h>
#include <linux/obd.h>
#include <linux/lustre_lib.h>
-#include <lustre/lustre_user.h>
+#include <lustre/liblustreapi.h>
#include <linux/obd_lov.h>
static void err_msg(char *fmt, ...)
return errno;
}
- strncpy((char *)lum, fname, sizeof(*lum));
+ strcpy((char *)lum, fname);
if (ioctl(fd, IOC_MDC_GETSTRIPE, (void *)lum) == -1) {
close(fd);
free(dname);
$subsys->{GMNAL} = 1 << 19;
$subsys->{PTLROUTER} = 1 << 20;
$subsys->{COBD} = 1 << 21;
-$subsys->{IBNAL} = 1 << 22;
+$subsys->{OPENIBNAL} = 1 << 22;
$masks->{TRACE} = 1 << 0; # /* ENTRY/EXIT markers */
$linesubsys == $subsys->{PORTALS} ||
$linesubsys == $subsys->{SOCKNAL} ||
$linesubsys == $subsys->{QSWNAL} ||
- $linesubsys == $subsys->{IBNAL} ||
+ $linesubsys == $subsys->{OPENIBNAL} ||
$linesubsys == $subsys->{GMNAL}));
if(!$entering_rpc) {entering_rpc($_);}
if(!$leaving_rpc) {leaving_rpc($_);}
--node node_name
--mds mds_name
--dev path
- --fstype extN|ext3
+ --fstype ldiskfs|ext3
--size size
--nspath
--journal_size size
--add ost
--node node_name
--ost ost_name
+ --failover
--lov lov_name
--dev path
--size size
- --fstype extN|ext3
+ --fstype ldiskfs|ext3
--journal_size size
--inode_size size
--osdtype obdecho|obdfilter
('stripe_pattern', "Specify the stripe pattern. RAID 0 is the only one currently supported.", PARAM, 0),
# cobd
- ('real_obd', "", PARAM),
- ('cache_obd', "", PARAM),
+ ('real_obd', "Specify the real device for the cache obd system.", PARAM),
+ ('cache_obd', "Specify the cache device for the cache obd system.", PARAM),
('mgmt', "Specify management/monitoring service name.", PARAM, ""),
]
ldlm = self.newService("ldlm", name, uuid)
return ldlm
- def osd(self, name, uuid, fs, osdtype, devname, format, ost_uuid,
+ def osd(self, name, uuid, fstype, osdtype, devname, format, ost_uuid,
node_uuid, dev_size=0, journal_size=0, inode_size=0, nspath="",
mkfsoptions="", mountfsoptions=""):
osd = self.newService("osd", name, uuid)
osd.setAttribute('osdtype', osdtype)
osd.appendChild(self.ref("target", ost_uuid))
osd.appendChild(self.ref("node", node_uuid))
- if fs:
- self.addElement(osd, "fstype", fs)
+ if fstype:
+ self.addElement(osd, "fstype", fstype)
if devname:
dev = self.addElement(osd, "devpath", devname)
self.addElement(osd, "autoformat", format)
self.addElement(osd, "inodesize", "%s" % (inode_size))
if mkfsoptions:
self.addElement(osd, "mkfsoptions", mkfsoptions)
- if mountfsoptions:
+ if mountfsoptions:
self.addElement(osd, "mountfsoptions", mountfsoptions)
if nspath:
self.addElement(osd, "nspath", nspath)
self.addElement(mds, "group", group)
return mds
- def mdsdev(self, name, uuid, fs, devname, format, node_uuid,
+ def mdsdev(self, name, uuid, fstype, devname, format, node_uuid,
mds_uuid, dev_size=0, journal_size=0, inode_size=256,
nspath="", mkfsoptions="", mountfsoptions=""):
mdd = self.newService("mdsdev", name, uuid)
- self.addElement(mdd, "fstype", fs)
+ self.addElement(mdd, "fstype", fstype)
dev = self.addElement(mdd, "devpath", devname)
self.addElement(mdd, "autoformat", format)
if dev_size:
self.addElement(mdd, "nspath", nspath)
if mkfsoptions:
self.addElement(mdd, "mkfsoptions", mkfsoptions)
- if mountfsoptions:
+ if mountfsoptions:
self.addElement(mdd, "mountfsoptions", mountfsoptions)
mdd.appendChild(self.ref("node", node_uuid))
mtpt = self.newService("mountpoint", name, uuid)
mtpt.appendChild(self.ref("filesystem", fs_uuid))
self.addElement(mtpt, "path", path)
- if clientoptions:
+ if clientoptions:
self.addElement(mtpt, "clientoptions", clientoptions)
return mtpt
if net_type in ('tcp','openib','ra'):
port = get_option_int(options, 'port')
- elif net_type in ('elan','gm','iib','vib','lo'):
+ elif net_type in ('elan','gm','iib','vib','lo','cray_kern_nal'):
port = 0
else:
print "Unknown net_type: ", net_type
journal_size = ''
inode_size = ''
mkfsoptions = ''
- mountfsoptions = ''
+ mountfsoptions = ''
else:
devname = get_option(options, 'dev') # can be unset for bluearcs
size = get_option(options, 'size')
journal_size = get_option(options, 'journal_size')
inode_size = get_option(options, 'inode_size')
mkfsoptions = get_option(options, 'mkfsoptions')
- mountfsoptions = get_option(options, 'mountfsoptions')
+ mountfsoptions = get_option(options, 'mountfsoptions')
nspath = get_option(options, 'nspath')
{"setattr", jt_obd_setattr, 0, "setattr <id> <mode>"},
{"newconn", jt_obd_newconn, 0, "newconn <olduuid> [newuuid]"},
{"test_getattr", jt_obd_test_getattr, 0, "test_getattr <count> [verbose [[t]objid]]"},
+ {"test_setattr", jt_obd_test_setattr, 0, "test_setattr <count> [verbose [[t]objid]]"},
{"test_brw", jt_obd_test_brw, 0, "test_brw [t]<count> [write [verbose [pages [[t]objid]]]]"},
{"dump_ldlm", jt_obd_dump_ldlm, 0, "dump all lock manager state (no args)"},
CHECK_STRUCT(ldlm_extent);
CHECK_MEMBER(ldlm_extent, start);
CHECK_MEMBER(ldlm_extent, end);
+ CHECK_MEMBER(ldlm_extent, gid);
}
void
CHECK_STRUCT(ldlm_flock);
CHECK_MEMBER(ldlm_flock, start);
CHECK_MEMBER(ldlm_flock, end);
- CHECK_MEMBER(ldlm_flock, blocking_export);
CHECK_MEMBER(ldlm_flock, blocking_pid);
CHECK_MEMBER(ldlm_flock, pid);
}