if (rc == sizeof(*ff)) {
/* 2) delete the old XATTR_NAME_FID */
dquot_initialize(inode);
- rc = osd_removexattr(dentry, inode, XATTR_NAME_FID);
+ rc = ll_vfs_removexattr(dentry, inode, XATTR_NAME_FID);
if (rc)
GOTO(stop, rc);
GOTO(out, rc = 0);
}
- if (!scrub->os_partial_scan)
- scrub->os_full_speed = 1;
-
switch (val) {
case SCRUB_NEXT_NOLMA:
sf->sf_flags |= SF_UPGRADE;
GOTO(out, rc = 0);
} else {
- if (!scrub->os_partial_scan)
+ if (!scrub->os_partial_scan) {
+ spin_lock(&scrub->os_lock);
scrub->os_full_speed = 1;
-
+ spin_unlock(&scrub->os_lock);
+ }
sf->sf_flags |= SF_INCONSISTENT;
/* XXX: If the device is restored from file-level backup, then
static int osd_scrub_prep(const struct lu_env *env, struct osd_device *dev)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
__u32 flags = scrub->os_start_flags;
int rc;
if (flags & SS_RESET)
scrub_file_reset(scrub, dev->od_uuid, 0);
+ spin_lock(&scrub->os_lock);
if (flags & SS_AUTO_FULL) {
scrub->os_full_speed = 1;
scrub->os_partial_scan = 0;
scrub->os_partial_scan = 0;
}
- spin_lock(&scrub->os_lock);
scrub->os_in_prior = 0;
scrub->os_waiting = 0;
scrub->os_paused = 0;
rc = scrub_file_store(env, scrub);
if (rc == 0) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_RUNNING);
+ scrub->os_running = 1;
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
}
up_write(&scrub->os_rwsem);
down_write(&scrub->os_rwsem);
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
/* We should NOT find the same object more than once. */
CERROR("%s: scan the same object multiple times at the pos: "
"group = %u, base = %u, offset = %u, start = %u\n",
- param->sb->s_id, (__u32)param->bg, param->gbase,
+ osd_sb2name(param->sb), (__u32)param->bg, param->gbase,
offset, param->start);
goto again;
}
struct osd_idmap_cache **oic, const bool noslot)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct lu_fid *fid;
struct osd_inode_id *lid;
int rc;
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_DELAY) && cfs_fail_val > 0)
- wait_event_idle_timeout(
- thread->t_ctl_waitq,
+ wait_var_event_timeout(
+ scrub,
!list_empty(&scrub->os_inconsistent_items) ||
- !thread_is_running(thread),
+ kthread_should_stop(),
cfs_time_seconds(cfs_fail_val));
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
return SCRUB_NEXT_CRASH;
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_FATAL))
return SCRUB_NEXT_FATAL;
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
return SCRUB_NEXT_EXIT;
if (!list_empty(&scrub->os_inconsistent_items)) {
oii = list_entry(scrub->os_inconsistent_items.next,
struct osd_inconsistent_item, oii_list);
- spin_unlock(&scrub->os_lock);
*oic = &oii->oii_cache;
scrub->os_in_prior = 1;
+ spin_unlock(&scrub->os_lock);
return 0;
}
{
struct osd_otable_cache *ooc = &dev->od_otable_it->ooi_cache;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
int rc;
- if (thread_is_running(thread) &&
+ if (scrub->os_running &&
ooc->ooc_pos_preload >= scrub->os_pos_current)
return SCRUB_NEXT_EXIT;
spin_lock(&scrub->os_lock);
if (osd_scrub_has_window(scrub, &it->ooi_cache) ||
!list_empty(&scrub->os_inconsistent_items) ||
- it->ooi_waiting || !thread_is_running(&scrub->os_thread))
+ it->ooi_waiting || kthread_should_stop())
scrub->os_waiting = 0;
else
scrub->os_waiting = 1;
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
struct scrub_file *sf = &scrub->os_file;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = it ? &it->ooi_cache : NULL;
rc = osd_scrub_check_update(info, dev, oic, rc);
if (rc != 0) {
+ spin_lock(&scrub->os_lock);
scrub->os_in_prior = 0;
+ spin_unlock(&scrub->os_lock);
return rc;
}
}
if (scrub->os_in_prior) {
+ spin_lock(&scrub->os_lock);
scrub->os_in_prior = 0;
+ spin_unlock(&scrub->os_lock);
return 0;
}
ooc->ooc_pos_preload < scrub->os_pos_current) {
spin_lock(&scrub->os_lock);
it->ooi_waiting = 0;
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
spin_unlock(&scrub->os_lock);
}
return 0;
}
- if (it != NULL)
- wait_event_idle(thread->t_ctl_waitq,
- osd_scrub_wakeup(scrub, it));
+ if (it)
+ wait_var_event(scrub, osd_scrub_wakeup(scrub, it));
if (!ooc || osd_scrub_has_window(scrub, ooc))
*noslot = false;
__u32 flags, bool inconsistent)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
int rc;
ENTRY;
LASSERT(!(flags & SS_AUTO_PARTIAL));
down_write(&scrub->os_rwsem);
+ spin_lock(&scrub->os_lock);
scrub->os_in_join = 1;
if (flags & SS_SET_FAILOUT)
sf->sf_param |= SP_FAILOUT;
sf->sf_flags |= SF_AUTO;
scrub->os_full_speed = 1;
}
+ spin_unlock(&scrub->os_lock);
scrub->os_new_checked = 0;
if (sf->sf_pos_last_checkpoint != 0)
scrub->os_in_join = 0;
scrub->os_full_scrub = 0;
spin_unlock(&scrub->os_lock);
- wake_up_all(&thread->t_ctl_waitq);
+ wake_up_var(scrub);
up_write(&scrub->os_rwsem);
CDEBUG(D_LFSCK, "%s: joined in the OI scrub with flag %u: rc = %d\n",
struct osd_device *dev, __u32 max, bool preload)
{
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
struct scrub_file *sf = &scrub->os_file;
osd_iit_next_policy next;
osd_iit_exec_policy exec;
sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
SF_UPGRADE | SF_AUTO);
sf->sf_status = SS_COMPLETED;
- wait_event_idle(
- thread->t_ctl_waitq,
- !thread_is_running(thread) ||
+ wait_var_event(
+ scrub,
+ kthread_should_stop() ||
!scrub->os_partial_scan ||
scrub->os_in_join ||
!list_empty(&scrub->os_inconsistent_items));
sf->sf_flags = saved_flags;
sf->sf_status = SS_SCANNING;
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
RETURN(0);
if (!scrub->os_partial_scan || scrub->os_in_join)
full:
if (!preload) {
- wait_event_idle(thread->t_ctl_waitq,
- !thread_is_running(thread) ||
- !scrub->os_in_join);
+ wait_var_event(scrub,
+ kthread_should_stop() ||
+ !scrub->os_in_join);
- if (unlikely(!thread_is_running(thread)))
+ if (kthread_should_stop())
RETURN(0);
}
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
+ wake_up_var(scrub);
spin_unlock(&scrub->os_lock);
}
struct lu_env env;
struct osd_device *dev = (struct osd_device *)args;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct ptlrpc_thread *thread = &scrub->os_thread;
int rc;
ENTRY;
struct osd_otable_it *it = dev->od_otable_it;
struct osd_otable_cache *ooc = &it->ooi_cache;
- wait_event_idle(thread->t_ctl_waitq,
- it->ooi_user_ready ||
- !thread_is_running(thread));
- if (unlikely(!thread_is_running(thread)))
+ wait_var_event(scrub,
+ it->ooi_user_ready || kthread_should_stop());
+ if (kthread_should_stop())
GOTO(post, rc = 0);
scrub->os_pos_current = ooc->ooc_pos_preload;
rc = osd_inode_iteration(osd_oti_get(&env), dev, ~0U, false);
if (unlikely(rc == SCRUB_IT_CRASH)) {
spin_lock(&scrub->os_lock);
- thread_set_flags(&scrub->os_thread, SVC_STOPPING);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
GOTO(out, rc = -EINVAL);
}
noenv:
spin_lock(&scrub->os_lock);
- thread_set_flags(thread, SVC_STOPPED);
- wake_up_all(&thread->t_ctl_waitq);
+ scrub->os_running = 0;
spin_unlock(&scrub->os_lock);
+ if (xchg(&scrub->os_task, NULL) == NULL)
+ /* scrub_stop() is waiting, we need to synchronize */
+ wait_var_event(scrub, kthread_should_stop());
+ wake_up_var(scrub);
return rc;
}
osd_ios_general_scan(struct osd_thread_info *info, struct osd_device *dev,
struct dentry *dentry, filldir_t filldir)
{
- struct osd_ios_filldir_buf buf = {
- .ctx.actor = filldir,
- .oifb_info = info,
- .oifb_dev = dev,
- .oifb_dentry = dentry };
- struct file *filp = &info->oti_file;
- struct inode *inode = dentry->d_inode;
- const struct file_operations *fops = inode->i_fop;
- int rc;
+ struct osd_ios_filldir_buf buf = {
+ .ctx.actor = filldir,
+ .oifb_info = info,
+ .oifb_dev = dev,
+ .oifb_dentry = dentry
+ };
+ struct file *filp;
+ struct inode *inode = dentry->d_inode;
+ int rc;
+
ENTRY;
LASSERT(filldir != NULL);
- filp->f_pos = 0;
- filp->f_path.dentry = dentry;
- filp->f_flags |= O_NOATIME;
- filp->f_mode = FMODE_64BITHASH | FMODE_NONOTIFY;
- filp->f_mapping = inode->i_mapping;
- filp->f_op = fops;
- filp->private_data = NULL;
- filp->f_cred = current_cred();
- filp->f_inode = inode;
+ filp = osd_quasi_file_by_dentry(info->oti_env, dentry);
rc = osd_security_file_alloc(filp);
if (rc)
RETURN(rc);
rc = iterate_dir(filp, &buf.ctx);
} while (rc >= 0 && buf.oifb_items > 0 &&
filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
- fops->release(inode, filp);
+ inode->i_fop->release(inode, filp);
RETURN(rc);
}
* FID directly, instead, the OI scrub will scan the OI structure
* and try to re-generate the LMA from the OI mapping. But if the
* OI mapping crashed or lost also, then we have to give up under
- * double failure cases. */
+ * double failure cases.
+ */
+ spin_lock(&scrub->os_lock);
scrub->os_convert_igif = 1;
+ spin_unlock(&scrub->os_lock);
child = osd_lookup_one_len_unlocked(dev, dot_lustre_name, dentry,
strlen(dot_lustre_name));
if (IS_ERR(child)) {
/* od_otable_mutex: prevent concurrent start/stop */
mutex_lock(&dev->od_otable_mutex);
+ spin_lock(&scrub->os_lock);
scrub->os_paused = 1;
+ spin_unlock(&scrub->os_lock);
scrub_stop(scrub);
mutex_unlock(&dev->od_otable_mutex);
}
ctxt->pwd = dev->od_mnt->mnt_root;
ctxt->fs = KERNEL_DS;
- init_waitqueue_head(&scrub->os_thread.t_ctl_waitq);
init_rwsem(&scrub->os_rwsem);
spin_lock_init(&scrub->os_lock);
INIT_LIST_HEAD(&scrub->os_inconsistent_items);
}
inode = file_inode(filp);
+ ldiskfs_set_inode_flag(inode, LDISKFS_INODE_JOURNAL_DATA);
if (!dev->od_dt_dev.dd_rdonly) {
/* 'What the @fid is' is not important, because the object
	 * has no OI mapping, and is only visible inside the OSD.*/
RETURN(obj ? PTR_ERR(obj) : -ENOENT);
#ifndef HAVE_S_UUID_AS_UUID_T
- memcpy(dev->od_uuid.b, sb->s_uuid, UUID_SIZE);
+ memcpy(dev->od_uuid.b, sb->s_uuid, sizeof(dev->od_uuid));
#else
uuid_copy(&dev->od_uuid, &sb->s_uuid);
#endif
{
spin_lock(&scrub->os_lock);
if (it->ooi_cache.ooc_pos_preload < scrub->os_pos_current ||
- scrub->os_waiting ||
- !thread_is_running(&scrub->os_thread))
+ scrub->os_waiting || !scrub->os_running)
it->ooi_waiting = 0;
else
it->ooi_waiting = 1;
static int osd_otable_it_next(const struct lu_env *env, struct dt_it *di)
{
- struct osd_otable_it *it = (struct osd_otable_it *)di;
- struct osd_device *dev = it->ooi_dev;
+ struct osd_otable_it *it = (struct osd_otable_it *)di;
+ struct osd_device *dev = it->ooi_dev;
struct lustre_scrub *scrub = &dev->od_scrub.os_scrub;
- struct osd_otable_cache *ooc = &it->ooi_cache;
- struct ptlrpc_thread *thread = &scrub->os_thread;
- int rc;
+ struct osd_otable_cache *ooc = &it->ooi_cache;
+ int rc;
ENTRY;
LASSERT(it->ooi_user_ready);
again:
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
RETURN(1);
if (ooc->ooc_cached_items > 0) {
}
if (it->ooi_all_cached) {
- wait_event_idle(thread->t_ctl_waitq,
- !thread_is_running(thread));
+ wait_var_event(scrub, !scrub->os_running);
RETURN(1);
}
if (scrub->os_waiting && osd_scrub_has_window(scrub, ooc)) {
spin_lock(&scrub->os_lock);
scrub->os_waiting = 0;
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
+ wake_up_var(scrub);
spin_unlock(&scrub->os_lock);
}
if (it->ooi_cache.ooc_pos_preload >= scrub->os_pos_current)
- wait_event_idle(thread->t_ctl_waitq,
- osd_otable_it_wakeup(scrub, it));
+ wait_var_event(scrub, osd_otable_it_wakeup(scrub, it));
- if (!thread_is_running(thread) && !it->ooi_used_outside)
+ if (!scrub->os_running && !it->ooi_used_outside)
RETURN(1);
rc = osd_otable_it_preload(env, it);
it->ooi_user_ready = 1;
if (!scrub->os_full_speed)
- wake_up_all(&scrub->os_thread.t_ctl_waitq);
+ wake_up_var(scrub);
memset(param, 0, sizeof(*param));
param->sb = osd_sb(dev);
struct osd_inconsistent_item *oii;
struct osd_scrub *oscrub = &dev->od_scrub;
struct lustre_scrub *lscrub = &oscrub->os_scrub;
- struct ptlrpc_thread *thread = &lscrub->os_thread;
int wakeup = 0;
ENTRY;
oii->oii_cache = *oic;
oii->oii_insert = insert;
+ spin_lock(&lscrub->os_lock);
if (lscrub->os_partial_scan) {
__u64 now = ktime_get_real_seconds();
lscrub->os_full_scrub = 1;
}
- spin_lock(&lscrub->os_lock);
- if (unlikely(!thread_is_running(thread))) {
+ if (!lscrub->os_running) {
spin_unlock(&lscrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
list_add_tail(&oii->oii_list, &lscrub->os_inconsistent_items);
spin_unlock(&lscrub->os_lock);
- if (wakeup != 0)
- wake_up_all(&thread->t_ctl_waitq);
+ if (wakeup)
+ wake_up_var(lscrub);
RETURN(0);
}