if (lock->l_refc == 0 && (lock->l_flags & LDLM_FL_DESTROYED)) {
if (lock->l_connection)
ptlrpc_put_connection(lock->l_connection);
+ CDEBUG(D_MALLOC, "kfreed 'lock': %d at %p (tot 1).\n",
+ (int)sizeof(*lock), lock);
kmem_cache_free(ldlm_lock_slab, lock);
}
l_unlock(nslock);
lock = kmem_cache_alloc(ldlm_lock_slab, SLAB_KERNEL);
if (lock == NULL)
RETURN(NULL);
+ CDEBUG(D_MALLOC, "kmalloced 'lock': %d at "
+ "%p (tot %d).\n", (int)sizeof(*lock), lock, 1);
memset(lock, 0, sizeof(*lock));
get_random_bytes(&lock->l_random, sizeof(__u64));
struct ldlm_ast_work *w;
ENTRY;
+ l_lock(&lock->l_resource->lr_namespace->ns_lock);
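+ /* For blocking ASTs (new != NULL), bail out before allocating the
+ * work item if an AST has already been queued for this lock. */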
+ if (new && (lock->l_flags & LDLM_FL_AST_SENT))
+ GOTO(out, 0);
+
OBD_ALLOC(w, sizeof(*w));
if (!w) {
LBUG();
return;
}
- l_lock(&lock->l_resource->lr_namespace->ns_lock);
if (new) {
- if (lock->l_flags & LDLM_FL_AST_SENT)
- GOTO(out, 0);
-
lock->l_flags |= LDLM_FL_AST_SENT;
w->w_blocking = 1;
ldlm_lock2desc(new, &w->w_desc);
}
+
w->w_lock = ldlm_lock_get(lock);
list_add(&w->w_list, lock->l_resource->lr_tmp);
out:
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
{
- struct ldlm_lock *lock;
+ struct ldlm_lock *lock;
- lock = ldlm_handle2lock(lockh);
- ldlm_lock_addref_internal(lock, mode);
- ldlm_lock_put(lock);
+ lock = ldlm_handle2lock(lockh);
+ ldlm_lock_addref_internal(lock, mode);
+ ldlm_lock_put(lock);
}
/* only called for local locks */
*
* All code that works with directory layout had been switched to pagecache
* and moved here. AV
- *
+ *
* Adapted for Lustre Light
* Copyright (C) 2002, Cluster File Systems, Inc.
- *
+ *
*/
#include <linux/fs.h>
char *buf;
__u64 offset;
int rc = 0;
- struct ptlrpc_request *request = NULL;
- struct lustre_handle lockh;
- struct lookup_intent it = {IT_READDIR };
+ struct ptlrpc_request *request;
+ struct lustre_handle lockh;
+ struct lookup_intent it = {IT_READDIR};
ENTRY;
if ((inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT <= page->index){
memset(kmap(page), 0, PAGE_CACHE_SIZE);
kunmap(page);
- EXIT;
- goto readpage_out;
+ GOTO(readpage_out, rc);
}
rc = ll_lock(inode, NULL, &it, &lockh);
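+ /* The reply request attached to the readdir intent is not needed
+ * here; release it before 'request' is reused for the readpage RPC. */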
+ request = (struct ptlrpc_request *)it.it_data;
+ ptlrpc_free_req(request);
if (rc != ELDLM_OK)
CERROR("lock enqueue: err: %d\n", rc);
ldlm_lock_dump((void *)(unsigned long)lockh.addr);
+
if (Page_Uptodate(page)) {
CERROR("Explain this please?\n");
- EXIT;
- goto readpage_out;
+ GOTO(readpage_out, rc);
}
- offset = page->index << PAGE_SHIFT;
+ offset = page->index << PAGE_SHIFT;
buf = kmap(page);
rc = mdc_readpage(&sbi->ll_mdc_conn, inode->i_ino,
S_IFDIR, offset, buf, &request);
- kunmap(page);
+ kunmap(page);
ptlrpc_free_req(request);
EXIT;
return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
-extern void set_page_clean(struct page *page);
+extern void set_page_clean(struct page *page);
static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
{
if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
limit = dir->i_size & ~PAGE_CACHE_MASK;
if (limit & (chunk_size - 1)) {
- CERROR("limit %d dir size %lld index %ld\n",
- limit, dir->i_size, page->index);
+ CERROR("limit %d dir size %lld index %ld\n",
+ limit, dir->i_size, page->index);
goto Ebadsize;
}
for (offs = limit; offs<PAGE_CACHE_SIZE; offs += chunk_size) {
return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
}
-static inline unsigned
+static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
ext2_dirent *de = (ext2_dirent*)(base + offset);
};
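+/* Map ext2 on-disk dirent file types (EXT2_FT_*) to S_IF* mode bits. */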
static unsigned int ll_dt2fmt[DT_WHT + 1] = {
- [EXT2_FT_UNKNOWN] 0,
+ [EXT2_FT_UNKNOWN] 0,
[EXT2_FT_REG_FILE] S_IFREG,
[EXT2_FT_DIR] S_IFDIR,
[EXT2_FT_CHRDEV] S_IFCHR,
- [EXT2_FT_BLKDEV] S_IFBLK,
+ [EXT2_FT_BLKDEV] S_IFBLK,
[EXT2_FT_FIFO] S_IFIFO,
[EXT2_FT_SOCK] S_IFSOCK,
[EXT2_FT_SYMLINK] S_IFLNK
};
-
+
#define S_SHIFT 12
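+/* Inverse table: S_IF* mode bits (>> S_SHIFT) to EXT2_FT_* dirent types. */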
static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] EXT2_FT_REG_FILE,
ino_t res = 0;
struct ext2_dir_entry_2 * de;
struct page *page;
-
+
de = ext2_find_entry (dir, dentry, &page);
if (de) {
res = le32_to_cpu(de->inode);
if (!page)
return -ENOMEM;
base = kmap(page);
- if (!base)
+ if (!base)
return -ENOMEM;
err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
{
struct page *page = NULL;
unsigned long i, npages = dir_pages(inode);
-
+
for (i = 0; i < npages; i++) {
char *kaddr;
ext2_dirent * de;
it->it_disposition && it->it_status)
GOTO(negative, NULL);
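+ /* it_data carries the request holding the MDS reply; fetch it once
+ * here instead of separately in each disposition branch below. */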
+ request = (struct ptlrpc_request *)it->it_data;
if (!it->it_disposition) {
struct ll_inode_info *lli = ll_i2info(dir);
memcpy(&lli->lli_intent_lock_handle, &lockh, sizeof(lockh));
offset = 0;
} else if (it->it_op == IT_UNLINK) {
struct obdo *obdo;
- request = (struct ptlrpc_request *)it->it_data;
obdo = lustre_msg_buf(request->rq_repmsg, 1);
inode = new_inode(dir->i_sb);
- ll_i2info(inode)->lli_obdo = obdo_alloc();
+ ll_i2info(inode)->lli_obdo = obdo_alloc();
/* XXX fix mem allocation error */
memcpy(ll_i2info(inode)->lli_obdo, obdo, sizeof(*obdo));
struct mds_body *body;
offset = 1;
- request = (struct ptlrpc_request *)it->it_data;
body = lustre_msg_buf(request->rq_repmsg, 1);
type = body->mode;
ino = body->fid1.id;