* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* allocate sa_entry and hash it to allow scanner process to find it */
static struct sa_entry *
-sa_alloc(struct ll_statahead_info *sai, __u64 index, const char *name, int len,
- const struct lu_fid *fid)
+sa_alloc(struct dentry *parent, struct ll_statahead_info *sai, __u64 index,
+ const char *name, int len, const struct lu_fid *fid)
{
struct ll_inode_info *lli;
struct sa_entry *entry;
dname = (char *)entry + sizeof(struct sa_entry);
memcpy(dname, name, len);
dname[len] = 0;
- entry->se_qstr.hash = full_name_hash(name, len);
+ entry->se_qstr.hash = ll_full_name_hash(parent, name, len);
entry->se_qstr.len = len;
entry->se_qstr.name = dname;
entry->se_fid = *fid;
RETURN_EXIT;
}
+ /* In case of restore, the MDT has the right size and has already
+ * sent it back without granting the layout lock, inode is up-to-date.
+ * Then AGL (async glimpse lock) is useless.
+ * Also to glimpse we need the layout, in case of a running restore
+ * the MDT holds the layout lock so the glimpse will block up to the
+ * end of restore (statahead/agl will block) */
+ if (ll_file_test_flag(lli, LLIF_FILE_RESTORING)) {
+ lli->lli_agl_index = 0;
+ iput(inode);
+ RETURN_EXIT;
+ }
+
/* Someone is in glimpse (sync or async), do nothing. */
rc = down_write_trylock(&lli->lli_glimpse_sem);
if (rc == 0) {
list_add_tail(&entry->se_list, &sai->sai_interim_entries);
}
sai->sai_replied++;
+
+ smp_mb();
if (waitq != NULL)
wake_up(waitq);
spin_unlock(&lli->lli_sa_lock);
int rc;
ENTRY;
- entry = sa_alloc(sai, sai->sai_index, name, len, fid);
+ entry = sa_alloc(parent, sai, sai->sai_index, name, len, fid);
if (IS_ERR(entry))
RETURN_EXIT;
if (IS_ERR(op_data))
GOTO(out, rc = PTR_ERR(op_data));
- op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
-
if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
ll_start_agl(parent, sai);
/*
* statahead thread may not quit yet because it needs to cache
* entries, now it's time to tell it to quit.
+ *
+ * In case sai is released, wake_up() is called inside spinlock,
+ * so we have to call smp_mb() explicitly to serialize ops.
*/
thread_set_flags(&sai->sai_thread, SVC_STOPPING);
+ smp_mb();
wake_up(&sai->sai_thread.t_ctl_waitq);
}
spin_unlock(&lli->lli_sa_lock);
/**
* FIXME: choose the start offset of the readdir
*/
- op_data->op_max_pages = ll_i2sbi(dir)->ll_md_brw_pages;
ll_dir_chain_init(&chain);
page = ll_get_dir_page(dir, op_data, 0, &chain);