/* In-memory descriptor for a log object or log catalog */
struct llog_handle {
- struct semaphore lgh_lock;
+ struct rw_semaphore lgh_lock;
struct llog_logid lgh_id; /* id of this log */
struct llog_log_hdr *lgh_hdr;
struct file *lgh_file;
int obd_llog_finish(struct obd_device *obd, int count);
+/* llog_ioctl.c */
+int llog_ioctl(struct llog_ctxt *ctxt, int cmd, struct obd_ioctl_data *data);
+int llog_catlog_list(struct obd_device *obd, int count,
+ struct obd_ioctl_data *data);
+
/* llog_net.c */
int llog_initiator_connect(struct llog_ctxt *ctxt);
int llog_receptor_accept(struct llog_ctxt *ctxt, struct obd_import *imp);
#include <linux/module.h>
#include <linux/random.h>
#include <linux/version.h>
+
#include <linux/lustre_lite.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_dlm.h>
-#include <linux/init.h>
-#include <linux/fs.h>
#include <linux/lprocfs_status.h>
#include "llite_internal.h"
GOTO(out_root, err = -EBADF);
}
+ err = ll_close_thread_start(&sbi->ll_lcq);
+ if (err) {
+ CERROR("cannot start close thread: rc %d\n", err);
+ GOTO(out_root, err);
+ }
+
sb->s_root = d_alloc_root(root);
RETURN(err);
struct hlist_node *tmp, *next;
ENTRY;
+ ll_close_thread_shutdown(sbi->ll_lcq);
+
list_del(&sbi->ll_conn_chain);
obd_disconnect(sbi->ll_osc_exp, 0);
sema_init(&lli->lli_open_sem, 1);
lli->lli_flags = 0;
lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
+ spin_lock_init(&lli->lli_lock);
+ INIT_LIST_HEAD(&lli->lli_pending_write_llaps);
}
int ll_fill_super(struct super_block *sb, void *data, int silent)
cfg.cfg_instance = sbi->ll_instance;
cfg.cfg_uuid = sbi->ll_sb_uuid;
+ cfg.cfg_local_nid = lmd->lmd_local_nid;
err = lustre_process_log(lmd, lmd->lmd_profile, &cfg);
if (err < 0) {
CERROR("Unable to process log: %s\n", lmd->lmd_profile);
/* XXX when we fix the AST intents to pass the discard-range
* XXX extent, make ast_flags always LDLM_AST_DISCARD_DATA
* XXX here. */
- if (extent.start == 0)
+ if (attr->ia_size == 0)
ast_flags = LDLM_AST_DISCARD_DATA;
/* bug 1639: avoid write/truncate i_sem/DLM deadlock */
set_bit(LLI_F_HAVE_OST_SIZE_LOCK,
&ll_i2info(inode)->lli_flags);
+ //ll_try_done_writing(inode);
+
/* unlock now as we don't mind others file lockers racing with
* the mds updates below? */
err = ll_extent_unlock(NULL, inode, lsm, LCK_PW, &lockh);
kdev_t_to_nr(inode->i_rdev));
#else
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+
+ lli->ll_save_ifop = inode->i_fop;
+ if (S_ISCHR(inode->i_mode)) {
+ inode->i_fop = &ll_special_chr_inode_fops;
+ }else if (S_ISBLK(inode->i_mode)) {
+ inode->i_fop = &ll_special_blk_inode_fops;
+ }else if (S_ISFIFO(inode->i_mode)){
+ inode->i_fop = &ll_special_fifo_inode_fops;
+ }else if (S_ISSOCK(inode->i_mode)){
+ inode->i_fop = &ll_special_sock_inode_fops;
+ }
+ inode->i_fop->owner = lli->ll_save_ifop->owner;
#endif
EXIT;
}
/* Data stored per server at the head of the last_rcvd file. In le32 order.
 * Try to keep this the same as mds_server_data so we might one day merge. */
struct filter_server_data {
- __u8 fsd_uuid[37]; /* server UUID */
- __u8 fsd_uuid_padding[3]; /* unused */
+ __u8 fsd_uuid[40]; /* server UUID (folds in old 3-byte pad; 40-byte footprint unchanged) */
__u64 fsd_unused; /* was fsd_last_objid - don't use for now */
__u64 fsd_last_transno; /* last completed transaction ID */
__u64 fsd_mount_count; /* FILTER incarnation number */
__u16 fsd_subdir_count; /* number of subdirectories for objects */
__u64 fsd_catalog_oid; /* recovery catalog object id */
__u32 fsd_catalog_ogen; /* recovery catalog inode generation */
- __u8 fsd_peeruuid[37]; /* UUID of MDS for this OST */
- __u8 peer_padding[3]; /* unused */
+ __u8 fsd_peeruuid[40]; /* UUID of MDS for this OST (folds in old 3-byte pad; on-disk layout unchanged, so the 140 in fsd_padding below still holds — TODO confirm no fields elided between hunks) */
__u8 fsd_padding[FILTER_LR_SERVER_SIZE - 140];
};
/* Data stored per client in the last_rcvd file. In le32 order. */
struct filter_client_data {
- __u8 fcd_uuid[37]; /* client UUID */
- __u8 fcd_uuid_padding[3]; /* unused */
+ __u8 fcd_uuid[40]; /* client UUID */
__u64 fcd_last_rcvd; /* last completed transaction ID */
__u64 fcd_mount_count; /* FILTER incarnation number */
__u64 fcd_last_xid; /* client RPC xid for the last transaction */