* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include "filter_internal.h"
-/* Group 0 is no longer a legal group, to catch uninitialized IDs */
-#define FILTER_MIN_GROUPS FILTER_GROUP_MDS1_N_BASE
static struct lvfs_callback_ops filter_lvfs_ops;
cfs_mem_cache_t *ll_fmd_cachep;
static void filter_commit_cb(struct obd_device *obd, __u64 transno,
void *cb_data, int error)
{
- obd_transno_commit_cb(obd, transno, error);
+ struct obd_export *exp = cb_data;
+ LASSERT(exp->exp_obd == obd);
+ obd_transno_commit_cb(obd, transno, exp, error);
+ class_export_cb_put(exp);
+}
+
+int filter_version_get_check(struct obd_export *exp,
+ struct obd_trans_info *oti, struct inode *inode)
+{
+ __u64 curr_version;
+
+ if (inode == NULL || oti == NULL)
+ RETURN(0);
+
+ curr_version = fsfilt_get_version(exp->exp_obd, inode);
+ if ((__s64)curr_version == -EOPNOTSUPP)
+ RETURN(0);
+ /* VBR: version is always checked because it costs nothing */
+ if (oti->oti_pre_version != 0 &&
+ oti->oti_pre_version != curr_version) {
+ CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
+ oti->oti_pre_version, curr_version);
+ cfs_spin_lock(&exp->exp_lock);
+ exp->exp_vbr_failed = 1;
+ cfs_spin_unlock(&exp->exp_lock);
+ RETURN (-EOVERFLOW);
+ }
+ oti->oti_pre_version = curr_version;
+ RETURN(0);
}
/* Assumes caller has already pushed us into the kernel context. */
-int filter_finish_transno(struct obd_export *exp, struct obd_trans_info *oti,
- int rc, int force_sync)
+int filter_finish_transno(struct obd_export *exp, struct inode *inode,
+ struct obd_trans_info *oti, int rc, int force_sync)
{
- struct filter_obd *filter = &exp->exp_obd->u.filter;
- struct filter_export_data *fed = &exp->exp_filter_data;
- struct lsd_client_data *lcd = fed->fed_lcd;
+ struct obd_device_target *obt = &exp->exp_obd->u.obt;
+ struct tg_export_data *ted = &exp->exp_target_data;
+ struct lr_server_data *lsd = class_server_data(exp->exp_obd);
+ struct lsd_client_data *lcd;
__u64 last_rcvd;
loff_t off;
int err, log_pri = D_RPCTRACE;
if (!exp->exp_obd->obd_replayable || oti == NULL)
RETURN(rc);
+ cfs_mutex_down(&ted->ted_lcd_lock);
+ lcd = ted->ted_lcd;
+ /* If the export has already been disconnected, there is no last_rcvd slot;
+ * just update the server data with the latest transno */
+ if (lcd == NULL) {
+ cfs_mutex_up(&ted->ted_lcd_lock);
+ CWARN("commit transaction for disconnected client %s: rc %d\n",
+ exp->exp_client_uuid.uuid, rc);
+ err = filter_update_server_data(exp->exp_obd);
+ RETURN(err);
+ }
+
/* we don't allocate new transnos for replayed requests */
+ cfs_spin_lock(&obt->obt_lut->lut_translock);
if (oti->oti_transno == 0) {
- spin_lock(&filter->fo_translock);
- last_rcvd = le64_to_cpu(filter->fo_fsd->lsd_last_transno) + 1;
- filter->fo_fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
- spin_unlock(&filter->fo_translock);
- oti->oti_transno = last_rcvd;
+ last_rcvd = le64_to_cpu(lsd->lsd_last_transno) + 1;
+ lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
} else {
- spin_lock(&filter->fo_translock);
last_rcvd = oti->oti_transno;
- if (last_rcvd > le64_to_cpu(filter->fo_fsd->lsd_last_transno))
- filter->fo_fsd->lsd_last_transno =
- cpu_to_le64(last_rcvd);
- spin_unlock(&filter->fo_translock);
+ if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
+ lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
}
+ oti->oti_transno = last_rcvd;
+
+ LASSERT(last_rcvd >= le64_to_cpu(lcd->lcd_last_transno));
lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
+ lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
+ lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
+ cfs_spin_unlock(&obt->obt_lut->lut_translock);
- /* could get xid from oti, if it's ever needed */
- lcd->lcd_last_xid = 0;
+ if (inode)
+ fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
- off = fed->fed_lr_off;
+ off = ted->ted_lr_off;
if (off <= 0) {
CERROR("%s: client idx %d is %lld\n", exp->exp_obd->obd_name,
- fed->fed_lr_idx, fed->fed_lr_off);
+ ted->ted_lr_idx, ted->ted_lr_off);
err = -EINVAL;
} else {
+ class_export_cb_get(exp); /* released when the cb is called */
if (!force_sync)
force_sync = fsfilt_add_journal_cb(exp->exp_obd,
last_rcvd,
oti->oti_handle,
filter_commit_cb,
- NULL);
+ exp);
- err = fsfilt_write_record(exp->exp_obd, filter->fo_rcvd_filp,
+ err = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
lcd, sizeof(*lcd), &off,
force_sync | exp->exp_need_sync);
if (force_sync)
- filter_commit_cb(exp->exp_obd, last_rcvd, NULL, err);
+ filter_commit_cb(exp->exp_obd, last_rcvd, exp, err);
}
if (err) {
log_pri = D_ERROR;
}
CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
- last_rcvd, lcd->lcd_uuid, fed->fed_lr_idx, err);
-
+ last_rcvd, lcd->lcd_uuid, ted->ted_lr_idx, err);
+ cfs_mutex_up(&ted->ted_lcd_lock);
RETURN(rc);
}
{
int i;
for (i = 0; i < BRW_LAST; i++)
- spin_lock_init(&brw_stats->hist[i].oh_lock);
+ cfs_spin_lock_init(&brw_stats->hist[i].oh_lock);
}
static int lprocfs_init_rw_stats(struct obd_device *obd,
num_stats = (sizeof(*obd->obd_type->typ_dt_ops) / sizeof(void *)) +
LPROC_FILTER_LAST - 1;
- *stats = lprocfs_alloc_stats(num_stats, 0);
+ *stats = lprocfs_alloc_stats(num_stats, LPROCFS_STATS_FLAG_NOPERCPU);
if (*stats == NULL)
return -ENOMEM;
plus the procfs overhead :( */
static int filter_export_stats_init(struct obd_device *obd,
struct obd_export *exp,
+ int reconnect,
void *client_nid)
{
int rc, newnid = 0;
/* Self-export gets no proc entry */
RETURN(0);
- rc = lprocfs_exp_setup(exp, client_nid, &newnid);
+ rc = lprocfs_exp_setup(exp, client_nid, reconnect, &newnid);
if (rc) {
/* Mask error for already created
* /proc entries */
OBD_ALLOC(tmp->nid_brw_stats, sizeof(struct brw_stats));
if (tmp->nid_brw_stats == NULL)
- RETURN(-ENOMEM);
+ GOTO(clean, rc = -ENOMEM);
init_brw_stats(tmp->nid_brw_stats);
rc = lprocfs_seq_create(exp->exp_nid_stats->nid_proc, "brw_stats",
rc = lprocfs_init_rw_stats(obd, &exp->exp_nid_stats->nid_stats);
if (rc)
- RETURN(rc);
+ GOTO(clean, rc);
rc = lprocfs_register_stats(tmp->nid_proc, "stats",
tmp->nid_stats);
if (rc)
- RETURN(rc);
- /* Always add in ldlm_stats */
- tmp->nid_ldlm_stats = lprocfs_alloc_stats(LDLM_LAST_OPC -
- LDLM_FIRST_OPC, 0);
- if (tmp->nid_ldlm_stats == NULL)
- return -ENOMEM;
-
- lprocfs_init_ldlm_stats(tmp->nid_ldlm_stats);
- rc = lprocfs_register_stats(tmp->nid_proc, "ldlm_stats",
- tmp->nid_ldlm_stats);
+ GOTO(clean, rc);
+ rc = lprocfs_nid_ldlm_stats_init(tmp);
if (rc)
- RETURN(rc);
+ GOTO(clean, rc);
}
RETURN(0);
+ clean:
+ return rc;
}
/* Add client data to the FILTER. We use a bitmap to locate a free space
static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
int cl_idx)
{
- struct filter_obd *filter = &obd->u.filter;
- struct filter_export_data *fed = &exp->exp_filter_data;
- unsigned long *bitmap = filter->fo_last_rcvd_slots;
+ struct obd_device_target *obt = &obd->u.obt;
+ struct tg_export_data *ted = &exp->exp_target_data;
+ struct lr_server_data *lsd = class_server_data(obd);
+ unsigned long *bitmap = obt->obt_lut->lut_client_bitmap;
int new_client = (cl_idx == -1);
ENTRY;
LASSERTF(cl_idx > -2, "%d\n", cl_idx);
/* Self-export */
- if (strcmp(fed->fed_lcd->lcd_uuid, obd->obd_uuid.uuid) == 0)
+ if (strcmp(ted->ted_lcd->lcd_uuid, obd->obd_uuid.uuid) == 0)
RETURN(0);
/* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
* there's no need for extra complication here
*/
if (new_client) {
- cl_idx = find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
+ cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
repeat:
if (cl_idx >= LR_MAX_CLIENTS) {
CERROR("no room for %u client - fix LR_MAX_CLIENTS\n",
cl_idx);
RETURN(-EOVERFLOW);
}
- if (test_and_set_bit(cl_idx, bitmap)) {
- cl_idx = find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
- cl_idx);
+ if (cfs_test_and_set_bit(cl_idx, bitmap)) {
+ cl_idx = cfs_find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
+ cl_idx);
goto repeat;
}
} else {
- if (test_and_set_bit(cl_idx, bitmap)) {
+ if (cfs_test_and_set_bit(cl_idx, bitmap)) {
CERROR("FILTER client %d: bit already set in bitmap!\n",
cl_idx);
LBUG();
}
}
- fed->fed_lr_idx = cl_idx;
- fed->fed_lr_off = le32_to_cpu(filter->fo_fsd->lsd_client_start) +
- cl_idx * le16_to_cpu(filter->fo_fsd->lsd_client_size);
- LASSERTF(fed->fed_lr_off > 0, "fed_lr_off = %llu\n", fed->fed_lr_off);
+ ted->ted_lr_idx = cl_idx;
+ ted->ted_lr_off = le32_to_cpu(lsd->lsd_client_start) +
+ cl_idx * le16_to_cpu(lsd->lsd_client_size);
+ cfs_init_mutex(&ted->ted_lcd_lock);
+ LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
- fed->fed_lr_idx, fed->fed_lr_off, fed->fed_lcd->lcd_uuid);
+ ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
if (new_client) {
struct lvfs_run_ctxt saved;
- loff_t off = fed->fed_lr_off;
+ loff_t off = ted->ted_lr_off;
int rc;
void *handle;
CDEBUG(D_INFO, "writing client lcd at idx %u (%llu) (len %u)\n",
- fed->fed_lr_idx,off,(unsigned int)sizeof(*fed->fed_lcd));
+ ted->ted_lr_idx,off,(unsigned int)sizeof(*ted->ted_lcd));
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_TGT_CLIENT_ADD))
+ RETURN(-ENOSPC);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Transaction needed to fix bug 1403 */
handle = fsfilt_start(obd,
- filter->fo_rcvd_filp->f_dentry->d_inode,
+ obt->obt_rcvd_filp->f_dentry->d_inode,
FSFILT_OP_SETATTR, NULL);
if (IS_ERR(handle)) {
rc = PTR_ERR(handle);
CERROR("unable to start transaction: rc %d\n", rc);
} else {
+ ted->ted_lcd->lcd_last_epoch = lsd->lsd_start_epoch;
+ exp->exp_last_request_time = cfs_time_current_sec();
rc = fsfilt_add_journal_cb(obd, 0, handle,
- target_client_add_cb, exp);
+ target_client_add_cb,
+ class_export_cb_get(exp));
if (rc == 0) {
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_need_sync = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
}
- rc = fsfilt_write_record(obd, filter->fo_rcvd_filp,
- fed->fed_lcd,
- sizeof(*fed->fed_lcd),
+ rc = fsfilt_write_record(obd, obt->obt_rcvd_filp,
+ ted->ted_lcd,
+ sizeof(*ted->ted_lcd),
&off, rc /* sync if no cb */);
fsfilt_commit(obd,
- filter->fo_rcvd_filp->f_dentry->d_inode,
+ obt->obt_rcvd_filp->f_dentry->d_inode,
handle, 0);
}
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
if (rc) {
CERROR("error writing %s client idx %u: rc %d\n",
- LAST_RCVD, fed->fed_lr_idx, rc);
+ LAST_RCVD, ted->ted_lr_idx, rc);
RETURN(rc);
}
}
RETURN(0);
}
-struct lsd_client_data zero_lcd; /* globals are implicitly zeroed */
-
-static int filter_client_free(struct obd_export *exp)
+static int filter_client_del(struct obd_export *exp)
{
- struct filter_export_data *fed = &exp->exp_filter_data;
- struct filter_obd *filter = &exp->exp_obd->u.filter;
- struct obd_device *obd = exp->exp_obd;
+ struct tg_export_data *ted = &exp->exp_target_data;
+ struct obd_device_target *obt = &exp->exp_obd->u.obt;
struct lvfs_run_ctxt saved;
int rc;
loff_t off;
ENTRY;
- if (fed->fed_lcd == NULL)
+ if (ted->ted_lcd == NULL)
RETURN(0);
/* XXX if lcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
- if (strcmp(fed->fed_lcd->lcd_uuid, obd->obd_uuid.uuid ) == 0)
+ if (strcmp(ted->ted_lcd->lcd_uuid, exp->exp_obd->obd_uuid.uuid ) == 0)
GOTO(free, 0);
- LASSERT(filter->fo_last_rcvd_slots != NULL);
+ LASSERT(obt->obt_lut->lut_client_bitmap != NULL);
- off = fed->fed_lr_off;
+ off = ted->ted_lr_off;
CDEBUG(D_INFO, "freeing client at idx %u, offset %lld with UUID '%s'\n",
- fed->fed_lr_idx, fed->fed_lr_off, fed->fed_lcd->lcd_uuid);
+ ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
- /* Don't clear fed_lr_idx here as it is likely also unset. At worst
+ /* Don't clear ted_lr_idx here as it is likely also unset. At worst
* we leak a client slot that will be cleaned on the next recovery. */
if (off <= 0) {
CERROR("%s: client idx %d has med_off %lld\n",
- obd->obd_name, fed->fed_lr_idx, off);
+ exp->exp_obd->obd_name, ted->ted_lr_idx, off);
GOTO(free, rc = -EINVAL);
}
/* Clear the bit _after_ zeroing out the client so we don't
race with filter_client_add and zero out new clients.*/
- if (!test_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
+ if (!cfs_test_bit(ted->ted_lr_idx, obt->obt_lut->lut_client_bitmap)) {
CERROR("FILTER client %u: bit already clear in bitmap!!\n",
- fed->fed_lr_idx);
+ ted->ted_lr_idx);
LBUG();
}
- if (!(exp->exp_flags & OBD_OPT_FAILOVER)) {
- /* Don't force sync on disconnect if aborting recovery,
- * or it does num_clients * num_osts. b=17194 */
- int need_sync = (!exp->exp_libclient || exp->exp_need_sync) &&
- !(exp->exp_flags&OBD_OPT_ABORT_RECOV);
- push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- rc = fsfilt_write_record(obd, filter->fo_rcvd_filp, &zero_lcd,
- sizeof(zero_lcd), &off, 0);
-
- /* Make sure the server's last_transno is up to date. Do this
- * after the client is freed so we know all the client's
- * transactions have been committed. */
- if (rc == 0)
- filter_update_server_data(obd, filter->fo_rcvd_filp,
- filter->fo_fsd, need_sync);
- pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
-
- CDEBUG(rc == 0 ? D_INFO : D_ERROR,
- "zero out client %s at idx %u/%llu in %s %ssync rc %d\n",
- fed->fed_lcd->lcd_uuid, fed->fed_lr_idx, fed->fed_lr_off,
- LAST_RCVD, need_sync ? "" : "a", rc);
- }
-
- if (!test_and_clear_bit(fed->fed_lr_idx, filter->fo_last_rcvd_slots)) {
- CERROR("FILTER client %u: bit already clear in bitmap!!\n",
- fed->fed_lr_idx);
- LBUG();
- }
+ push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ /* Make sure the server's last_transno is up to date.
+ * This should be done before zeroing client slot so last_transno will
+ * be in server data or in client data in case of failure */
+ filter_update_server_data(exp->exp_obd);
+
+ cfs_mutex_down(&ted->ted_lcd_lock);
+ memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
+ rc = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
+ ted->ted_lcd,
+ sizeof(*ted->ted_lcd), &off, 0);
+ cfs_mutex_up(&ted->ted_lcd_lock);
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ CDEBUG(rc == 0 ? D_INFO : D_ERROR,
+ "zero out client %s at idx %u/%llu in %s, rc %d\n",
+ ted->ted_lcd->lcd_uuid, ted->ted_lr_idx, ted->ted_lr_off,
+ LAST_RCVD, rc);
EXIT;
free:
- OBD_FREE_PTR(fed->fed_lcd);
- fed->fed_lcd = NULL;
-
return 0;
}
/* XXX when we have persistent reservations and the handle
* is stored herein we need to drop it here. */
fed->fed_mod_count--;
- list_del(&fmd->fmd_list);
+ cfs_list_del(&fmd->fmd_list);
OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
}
}
return;
fed = &exp->exp_filter_data;
- spin_lock(&fed->fed_lock);
+ cfs_spin_lock(&fed->fed_lock);
filter_fmd_put_nolock(fed, fmd); /* caller reference */
- spin_unlock(&fed->fed_lock);
+ cfs_spin_unlock(&fed->fed_lock);
}
/* expire entries from the end of the list if there are too many
{
struct filter_mod_data *fmd, *tmp;
- list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+ cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
if (fmd == keep)
break;
- if (time_before(jiffies, fmd->fmd_expire) &&
+ if (cfs_time_before(jiffies, fmd->fmd_expire) &&
fed->fed_mod_count < filter->fo_fmd_max_num)
break;
- list_del_init(&fmd->fmd_list);
+ cfs_list_del_init(&fmd->fmd_list);
filter_fmd_put_nolock(fed, fmd); /* list reference */
}
}
void filter_fmd_expire(struct obd_export *exp)
{
- spin_lock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_lock(&exp->exp_filter_data.fed_lock);
filter_fmd_expire_nolock(&exp->exp_obd->u.filter,
&exp->exp_filter_data, NULL);
- spin_unlock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
}
/* find specified objid, group in export fmd list.
* caller must hold fed_lock and take fmd reference itself */
static struct filter_mod_data *filter_fmd_find_nolock(struct filter_obd *filter,
struct filter_export_data *fed,
- obd_id objid, obd_gr group)
+ obd_id objid, obd_seq group)
{
struct filter_mod_data *found = NULL, *fmd;
LASSERT_SPIN_LOCKED(&fed->fed_lock);
- list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
+ cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
if (fmd->fmd_id == objid && fmd->fmd_gr == group) {
found = fmd;
- list_del(&fmd->fmd_list);
- list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
+ cfs_list_del(&fmd->fmd_list);
+ cfs_list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
fmd->fmd_expire = jiffies + filter->fo_fmd_max_age;
break;
}
/* Find fmd based on objid and group, or return NULL if not found. */
struct filter_mod_data *filter_fmd_find(struct obd_export *exp,
- obd_id objid, obd_gr group)
+ obd_id objid, obd_seq group)
{
struct filter_mod_data *fmd;
- spin_lock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_lock(&exp->exp_filter_data.fed_lock);
fmd = filter_fmd_find_nolock(&exp->exp_obd->u.filter,
&exp->exp_filter_data, objid, group);
if (fmd)
fmd->fmd_refcount++; /* caller reference */
- spin_unlock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
return fmd;
}
* Currently this is not fatal because any fmd state is transient and
* may also be freed when it gets sufficiently old. */
struct filter_mod_data *filter_fmd_get(struct obd_export *exp,
- obd_id objid, obd_gr group)
+ obd_id objid, obd_seq group)
{
struct filter_export_data *fed = &exp->exp_filter_data;
struct filter_mod_data *found = NULL, *fmd_new = NULL;
OBD_SLAB_ALLOC_PTR_GFP(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO);
- spin_lock(&fed->fed_lock);
+ cfs_spin_lock(&fed->fed_lock);
found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
if (fmd_new) {
if (found == NULL) {
- list_add_tail(&fmd_new->fmd_list, &fed->fed_mod_list);
+ cfs_list_add_tail(&fmd_new->fmd_list,
+ &fed->fed_mod_list);
fmd_new->fmd_id = objid;
fmd_new->fmd_gr = group;
fmd_new->fmd_refcount++; /* list reference */
exp->exp_obd->u.filter.fo_fmd_max_age;
}
- spin_unlock(&fed->fed_lock);
+ cfs_spin_unlock(&fed->fed_lock);
return found;
}
* This isn't so critical because it would in fact only affect the one client
* that is doing the unlink and at worst we have an stale entry referencing
* an object that should never be used again. */
-static void filter_fmd_drop(struct obd_export *exp, obd_id objid, obd_gr group)
+static void filter_fmd_drop(struct obd_export *exp, obd_id objid, obd_seq group)
{
struct filter_mod_data *found = NULL;
- spin_lock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_lock(&exp->exp_filter_data.fed_lock);
found = filter_fmd_find_nolock(&exp->exp_filter_data, objid, group);
if (found) {
- list_del_init(&found->fmd_list);
+ cfs_list_del_init(&found->fmd_list);
filter_fmd_put_nolock(&exp->exp_filter_data, found);
}
- spin_unlock(&exp->exp_filter_data.fed_lock);
+ cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
}
#else
#define filter_fmd_drop(exp, objid, group)
struct filter_export_data *fed = &exp->exp_filter_data;
struct filter_mod_data *fmd = NULL, *tmp;
- spin_lock(&fed->fed_lock);
- list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
- list_del_init(&fmd->fmd_list);
+ cfs_spin_lock(&fed->fed_lock);
+ cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
+ cfs_list_del_init(&fmd->fmd_list);
filter_fmd_put_nolock(fed, fmd);
}
- spin_unlock(&fed->fed_lock);
+ cfs_spin_unlock(&fed->fed_lock);
}
static int filter_init_export(struct obd_export *exp)
{
- spin_lock_init(&exp->exp_filter_data.fed_lock);
+ int rc;
+ cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
+ rc = lut_client_alloc(exp);
+ if (rc == 0)
+ rc = ldlm_init_export(exp);
- return ldlm_init_export(exp);
+ return rc;
}
-static int filter_free_server_data(struct filter_obd *filter)
+static int filter_free_server_data(struct obd_device_target *obt)
{
- OBD_FREE(filter->fo_fsd, sizeof(*filter->fo_fsd));
- filter->fo_fsd = NULL;
- OBD_FREE(filter->fo_last_rcvd_slots, LR_MAX_CLIENTS / 8);
- filter->fo_last_rcvd_slots = NULL;
+ lut_fini(NULL, obt->obt_lut);
+ OBD_FREE_PTR(obt->obt_lut);
return 0;
}
/* assumes caller is already in kernel ctxt */
-int filter_update_server_data(struct obd_device *obd, struct file *filp,
- struct lr_server_data *fsd, int force_sync)
+int filter_update_server_data(struct obd_device *obd)
{
+ struct file *filp = obd->u.obt.obt_rcvd_filp;
+ struct lr_server_data *lsd = class_server_data(obd);
loff_t off = 0;
int rc;
ENTRY;
- CDEBUG(D_INODE, "server uuid : %s\n", fsd->lsd_uuid);
+ CDEBUG(D_INODE, "server uuid : %s\n", lsd->lsd_uuid);
CDEBUG(D_INODE, "server last_rcvd : "LPU64"\n",
- le64_to_cpu(fsd->lsd_last_transno));
+ le64_to_cpu(lsd->lsd_last_transno));
CDEBUG(D_INODE, "server last_mount: "LPU64"\n",
- le64_to_cpu(fsd->lsd_mount_count));
+ le64_to_cpu(lsd->lsd_mount_count));
- fsd->lsd_compat14 = fsd->lsd_last_transno;
- rc = fsfilt_write_record(obd, filp, fsd, sizeof(*fsd), &off, force_sync);
+ rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off, 0);
if (rc)
CERROR("error writing lr_server_data: rc = %d\n", rc);
RETURN(rc);
}
-int filter_update_last_objid(struct obd_device *obd, obd_gr group,
+int filter_update_last_objid(struct obd_device *obd, obd_seq group,
int force_sync)
{
struct filter_obd *filter = &obd->u.filter;
ENTRY;
if (filter->fo_last_objid_files[group] == NULL) {
- CERROR("Object group "LPU64" not fully setup; not updating "
+ CERROR("Object seq "LPU64" not fully setup; not updating "
"last_objid\n", group);
RETURN(-EINVAL);
}
- CDEBUG(D_INODE, "%s: server last_objid for group "LPU64": "LPU64"\n",
- obd->obd_name, group, filter->fo_last_objids[group]);
+ CDEBUG(D_INODE, "%s: server last_objid for "POSTID"\n",
+ obd->obd_name, filter->fo_last_objids[group], group);
tmp = cpu_to_le64(filter->fo_last_objids[group]);
rc = fsfilt_write_record(obd, filter->fo_last_objid_files[group],
&tmp, sizeof(tmp), &off, force_sync);
if (rc)
- CERROR("error writing group "LPU64" last objid: rc = %d\n",
+ CERROR("error writing seq "LPU64" last objid: rc = %d\n",
group, rc);
RETURN(rc);
}
static int filter_init_server_data(struct obd_device *obd, struct file * filp)
{
struct filter_obd *filter = &obd->u.filter;
- struct lr_server_data *fsd;
+ struct lr_server_data *lsd;
struct lsd_client_data *lcd = NULL;
struct inode *inode = filp->f_dentry->d_inode;
unsigned long last_rcvd_size = i_size_read(inode);
+ struct lu_target *lut;
__u64 mount_count;
+ __u32 start_epoch;
int cl_idx;
loff_t off = 0;
int rc;
/* ensure padding in the struct is the correct size */
CLASSERT (offsetof(struct lr_server_data, lsd_padding) +
- sizeof(fsd->lsd_padding) == LR_SERVER_SIZE);
+ sizeof(lsd->lsd_padding) == LR_SERVER_SIZE);
CLASSERT (offsetof(struct lsd_client_data, lcd_padding) +
sizeof(lcd->lcd_padding) == LR_CLIENT_SIZE);
- OBD_ALLOC(fsd, sizeof(*fsd));
- if (!fsd)
- RETURN(-ENOMEM);
- filter->fo_fsd = fsd;
-
- OBD_ALLOC(filter->fo_last_rcvd_slots, LR_MAX_CLIENTS / 8);
- if (filter->fo_last_rcvd_slots == NULL) {
- OBD_FREE(fsd, sizeof(*fsd));
+ /* allocate and initialize lu_target */
+ OBD_ALLOC_PTR(lut);
+ if (lut == NULL)
RETURN(-ENOMEM);
- }
-
+ rc = lut_init(NULL, lut, obd, NULL);
+ if (rc)
+ GOTO(err_lut, rc);
+ lsd = class_server_data(obd);
if (last_rcvd_size == 0) {
LCONSOLE_WARN("%s: new disk, initializing\n", obd->obd_name);
- memcpy(fsd->lsd_uuid, obd->obd_uuid.uuid,sizeof(fsd->lsd_uuid));
- fsd->lsd_last_transno = 0;
- mount_count = fsd->lsd_mount_count = 0;
- fsd->lsd_server_size = cpu_to_le32(LR_SERVER_SIZE);
- fsd->lsd_client_start = cpu_to_le32(LR_CLIENT_START);
- fsd->lsd_client_size = cpu_to_le16(LR_CLIENT_SIZE);
- fsd->lsd_subdir_count = cpu_to_le16(FILTER_SUBDIR_COUNT);
+ memcpy(lsd->lsd_uuid, obd->obd_uuid.uuid,sizeof(lsd->lsd_uuid));
+ lsd->lsd_last_transno = 0;
+ mount_count = lsd->lsd_mount_count = 0;
+ lsd->lsd_server_size = cpu_to_le32(LR_SERVER_SIZE);
+ lsd->lsd_client_start = cpu_to_le32(LR_CLIENT_START);
+ lsd->lsd_client_size = cpu_to_le16(LR_CLIENT_SIZE);
+ lsd->lsd_subdir_count = cpu_to_le16(FILTER_SUBDIR_COUNT);
filter->fo_subdir_count = FILTER_SUBDIR_COUNT;
- fsd->lsd_feature_incompat = cpu_to_le32(OBD_INCOMPAT_OST);
+ /* OBD_COMPAT_OST is set in filter_connect_internal when the
+ * MDS first connects and assigns the OST index number. */
+ lsd->lsd_feature_incompat = cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
+ OBD_INCOMPAT_OST);
} else {
- rc = fsfilt_read_record(obd, filp, fsd, sizeof(*fsd), &off);
+ rc = fsfilt_read_record(obd, filp, lsd, sizeof(*lsd), &off);
if (rc) {
CDEBUG(D_INODE,"OBD filter: error reading %s: rc %d\n",
LAST_RCVD, rc);
- GOTO(err_fsd, rc);
+ GOTO(err_lut, rc);
}
- if (strcmp(fsd->lsd_uuid, obd->obd_uuid.uuid) != 0) {
+ if (strcmp(lsd->lsd_uuid, obd->obd_uuid.uuid) != 0) {
LCONSOLE_ERROR_MSG(0x134, "Trying to start OBD %s "
"using the wrong disk %s. Were the "
"/dev/ assignments rearranged?\n",
- obd->obd_uuid.uuid, fsd->lsd_uuid);
- GOTO(err_fsd, rc = -EINVAL);
+ obd->obd_uuid.uuid, lsd->lsd_uuid);
+ GOTO(err_lut, rc = -EINVAL);
}
- mount_count = le64_to_cpu(fsd->lsd_mount_count);
- filter->fo_subdir_count = le16_to_cpu(fsd->lsd_subdir_count);
+ mount_count = le64_to_cpu(lsd->lsd_mount_count);
+ filter->fo_subdir_count = le16_to_cpu(lsd->lsd_subdir_count);
/* COMPAT_146 */
/* Assume old last_rcvd format unless I_C_LR is set */
- if (!(fsd->lsd_feature_incompat &
+ if (!(lsd->lsd_feature_incompat &
cpu_to_le32(OBD_INCOMPAT_COMMON_LR)))
- fsd->lsd_last_transno = fsd->lsd_compat14;
+ lsd->lsd_last_transno = lsd->lsd_compat14;
/* end COMPAT_146 */
+ /* OBD_COMPAT_OST is set in filter_connect_internal when the
+ * MDS first connects and assigns the OST index number. */
+ lsd->lsd_feature_incompat |= cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
+ OBD_INCOMPAT_OST);
}
- if (fsd->lsd_feature_incompat & ~cpu_to_le32(FILTER_INCOMPAT_SUPP)) {
+ if (lsd->lsd_feature_incompat & ~cpu_to_le32(FILTER_INCOMPAT_SUPP)) {
CERROR("%s: unsupported incompat filesystem feature(s) %x\n",
- obd->obd_name, le32_to_cpu(fsd->lsd_feature_incompat) &
+ obd->obd_name, le32_to_cpu(lsd->lsd_feature_incompat) &
~FILTER_INCOMPAT_SUPP);
- GOTO(err_fsd, rc = -EINVAL);
+ GOTO(err_lut, rc = -EINVAL);
}
- if (fsd->lsd_feature_rocompat & ~cpu_to_le32(FILTER_ROCOMPAT_SUPP)) {
+ if (lsd->lsd_feature_rocompat & ~cpu_to_le32(FILTER_ROCOMPAT_SUPP)) {
CERROR("%s: unsupported read-only filesystem feature(s) %x\n",
- obd->obd_name, le32_to_cpu(fsd->lsd_feature_rocompat) &
+ obd->obd_name, le32_to_cpu(lsd->lsd_feature_rocompat) &
~FILTER_ROCOMPAT_SUPP);
/* Do something like remount filesystem read-only */
- GOTO(err_fsd, rc = -EINVAL);
+ GOTO(err_lut, rc = -EINVAL);
}
- CDEBUG(D_INODE, "%s: server last_transno : "LPU64"\n",
- obd->obd_name, le64_to_cpu(fsd->lsd_last_transno));
+ start_epoch = le32_to_cpu(lsd->lsd_start_epoch);
+
+ CDEBUG(D_INODE, "%s: server start_epoch : %#x\n",
+ obd->obd_name, start_epoch);
+ CDEBUG(D_INODE, "%s: server last_transno : "LPX64"\n",
+ obd->obd_name, le64_to_cpu(lsd->lsd_last_transno));
CDEBUG(D_INODE, "%s: server mount_count: "LPU64"\n",
obd->obd_name, mount_count + 1);
CDEBUG(D_INODE, "%s: server data size: %u\n",
- obd->obd_name, le32_to_cpu(fsd->lsd_server_size));
+ obd->obd_name, le32_to_cpu(lsd->lsd_server_size));
CDEBUG(D_INODE, "%s: per-client data start: %u\n",
- obd->obd_name, le32_to_cpu(fsd->lsd_client_start));
+ obd->obd_name, le32_to_cpu(lsd->lsd_client_start));
CDEBUG(D_INODE, "%s: per-client data size: %u\n",
- obd->obd_name, le32_to_cpu(fsd->lsd_client_size));
+ obd->obd_name, le32_to_cpu(lsd->lsd_client_size));
CDEBUG(D_INODE, "%s: server subdir_count: %u\n",
- obd->obd_name, le16_to_cpu(fsd->lsd_subdir_count));
+ obd->obd_name, le16_to_cpu(lsd->lsd_subdir_count));
CDEBUG(D_INODE, "%s: last_rcvd clients: %lu\n", obd->obd_name,
- last_rcvd_size <= le32_to_cpu(fsd->lsd_client_start) ? 0 :
- (last_rcvd_size - le32_to_cpu(fsd->lsd_client_start)) /
- le16_to_cpu(fsd->lsd_client_size));
+ last_rcvd_size <= le32_to_cpu(lsd->lsd_client_start) ? 0 :
+ (last_rcvd_size - le32_to_cpu(lsd->lsd_client_start)) /
+ le16_to_cpu(lsd->lsd_client_size));
if (!obd->obd_replayable) {
CWARN("%s: recovery support OFF\n", obd->obd_name);
GOTO(out, rc = 0);
}
- for (cl_idx = 0, off = le32_to_cpu(fsd->lsd_client_start);
+ OBD_ALLOC_PTR(lcd);
+ if (!lcd)
+ GOTO(err_client, rc = -ENOMEM);
+
+ for (cl_idx = 0, off = le32_to_cpu(lsd->lsd_client_start);
off < last_rcvd_size; cl_idx++) {
__u64 last_rcvd;
struct obd_export *exp;
struct filter_export_data *fed;
- if (!lcd) {
- OBD_ALLOC_PTR(lcd);
- if (!lcd)
- GOTO(err_client, rc = -ENOMEM);
- }
-
/* Don't assume off is incremented properly by
* fsfilt_read_record(), in case sizeof(*lcd)
- * isn't the same as fsd->lsd_client_size. */
- off = le32_to_cpu(fsd->lsd_client_start) +
- cl_idx * le16_to_cpu(fsd->lsd_client_size);
+ * isn't the same as lsd->lsd_client_size. */
+ off = le32_to_cpu(lsd->lsd_client_start) +
+ cl_idx * le16_to_cpu(lsd->lsd_client_size);
rc = fsfilt_read_record(obd, filp, lcd, sizeof(*lcd), &off);
if (rc) {
CERROR("error reading FILT %s idx %d off %llu: rc %d\n",
continue;
}
+ check_lcd(obd->obd_name, cl_idx, lcd);
+
last_rcvd = le64_to_cpu(lcd->lcd_last_transno);
+ CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
+ " srv lr: "LPU64"\n", lcd->lcd_uuid, cl_idx,
+ last_rcvd, le64_to_cpu(lsd->lsd_last_transno));
+
/* These exports are cleaned up by filter_disconnect(), so they
* need to be set up like real exports as filter_connect() does.
*/
exp = class_new_export(obd, (struct obd_uuid *)lcd->lcd_uuid);
-
- CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
- " srv lr: "LPU64"\n", lcd->lcd_uuid, cl_idx,
- last_rcvd, le64_to_cpu(fsd->lsd_last_transno));
if (IS_ERR(exp)) {
if (PTR_ERR(exp) == -EALREADY) {
/* export already exists, zero out this one */
- CERROR("Zeroing out duplicate export due to "
- "bug 10479.\n");
- lcd->lcd_uuid[0] = '\0';
- } else {
- GOTO(err_client, rc = PTR_ERR(exp));
+ CERROR("Duplicate export %s!\n", lcd->lcd_uuid);
+ continue;
}
- } else {
- fed = &exp->exp_filter_data;
- fed->fed_lcd = lcd;
- fed->fed_group = 0; /* will be assigned at connect */
- filter_export_stats_init(obd, exp, NULL);
- rc = filter_client_add(obd, exp, cl_idx);
- /* can't fail for existing client */
- LASSERTF(rc == 0, "rc = %d\n", rc);
-
- lcd = NULL;
- spin_lock(&exp->exp_lock);
- exp->exp_connecting = 0;
- exp->exp_in_recovery = 0;
- spin_unlock(&exp->exp_lock);
- obd->obd_max_recoverable_clients++;
- class_export_put(exp);
+ OBD_FREE_PTR(lcd);
+ GOTO(err_client, rc = PTR_ERR(exp));
}
- /* Need to check last_rcvd even for duplicated exports. */
- CDEBUG(D_OTHER, "client at idx %d has last_rcvd = "LPU64"\n",
- cl_idx, last_rcvd);
-
- if (last_rcvd > le64_to_cpu(fsd->lsd_last_transno))
- fsd->lsd_last_transno = cpu_to_le64(last_rcvd);
- }
-
- if (lcd)
- OBD_FREE_PTR(lcd);
-
- obd->obd_last_committed = le64_to_cpu(fsd->lsd_last_transno);
-
- target_recovery_init(obd, ost_handle);
-
+ fed = &exp->exp_filter_data;
+ *fed->fed_ted.ted_lcd = *lcd;
+ fed->fed_group = 0; /* will be assigned at connect */
+ filter_export_stats_init(obd, exp, 0, NULL);
+ rc = filter_client_add(obd, exp, cl_idx);
+ /* can't fail for existing client */
+ LASSERTF(rc == 0, "rc = %d\n", rc);
+
+ /* VBR: set export last committed */
+ exp->exp_last_committed = last_rcvd;
+ cfs_spin_lock(&exp->exp_lock);
+ exp->exp_connecting = 0;
+ exp->exp_in_recovery = 0;
+ cfs_spin_unlock(&exp->exp_lock);
+ obd->obd_max_recoverable_clients++;
+ class_export_put(exp);
+
+ if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
+ lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
+ }
+ OBD_FREE_PTR(lcd);
+
+ obd->obd_last_committed = le64_to_cpu(lsd->lsd_last_transno);
out:
- filter->fo_mount_count = mount_count + 1;
- fsd->lsd_mount_count = cpu_to_le64(filter->fo_mount_count);
+ obd->u.obt.obt_mount_count = mount_count + 1;
+ lsd->lsd_mount_count = cpu_to_le64(obd->u.obt.obt_mount_count);
/* save it, so mount count and last_transno is current */
- rc = filter_update_server_data(obd, filp, filter->fo_fsd, 1);
+ rc = filter_update_server_data(obd);
if (rc)
GOTO(err_client, rc);
RETURN(0);
err_client:
- target_recovery_fini(obd);
-err_fsd:
- filter_free_server_data(filter);
+ class_disconnect_exports(obd);
+err_lut:
+ filter_free_server_data(&obd->u.obt);
RETURN(rc);
}
CDEBUG(D_INODE, "error reading LAST_GROUP: rc %d\n",rc);
GOTO(cleanup, rc);
}
- LASSERTF(off == 0 || CHECK_MDS_GROUP(last_group),
- "off = %llu and last_group = %d\n", off, last_group);
CDEBUG(D_INODE, "%s: previous %d, new %d\n",
obd->obd_name, last_group, group);
RETURN(PTR_ERR(dentry));
}
} else {
- dentry = simple_mkdir(filter->fo_dentry_O, filter->fo_vfsmnt,
- name, 0700, 1);
+ dentry = simple_mkdir(filter->fo_dentry_O,
+ obd->u.obt.obt_vfsmnt, name, 0700, 1);
if (IS_ERR(dentry)) {
CERROR("cannot lookup/create O/%s: rc = %ld\n", name,
PTR_ERR(dentry));
GOTO(cleanup, rc);
}
- if (filter->fo_subdir_count) {
+ if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
OBD_ALLOC(tmp_subdirs, sizeof(*tmp_subdirs));
if (tmp_subdirs == NULL)
GOTO(cleanup, rc = -ENOMEM);
snprintf(dir, sizeof(dir), "d%u", i);
tmp_subdirs->dentry[i] = simple_mkdir(dentry,
- filter->fo_vfsmnt,
+ obd->u.obt.obt_vfsmnt,
dir, 0700, 1);
if (IS_ERR(tmp_subdirs->dentry[i])) {
rc = PTR_ERR(tmp_subdirs->dentry[i]);
filter->fo_dentry_O_groups[group] = dentry;
filter->fo_last_objid_files[group] = filp;
- if (filter->fo_subdir_count) {
+ if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
filter->fo_dentry_O_sub[group] = *tmp_subdirs;
OBD_FREE(tmp_subdirs, sizeof(*tmp_subdirs));
}
if (new_files != NULL)
OBD_FREE(new_files, len * sizeof(*new_files));
case 3:
- if (filter->fo_subdir_count) {
+ if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
for (i = 0; i < filter->fo_subdir_count; i++) {
if (tmp_subdirs->dentry[i] != NULL)
dput(tmp_subdirs->dentry[i]);
struct filter_obd *filter = &obd->u.filter;
int old_count, group, rc = 0;
- down(&filter->fo_init_lock);
+ cfs_down(&filter->fo_init_lock);
old_count = filter->fo_group_count;
for (group = old_count; group <= last_group; group++) {
-
rc = filter_read_group_internal(obd, group, create);
if (rc != 0)
break;
}
- up(&filter->fo_init_lock);
+ cfs_up(&filter->fo_init_lock);
return rc;
}
static int filter_prep_groups(struct obd_device *obd)
{
struct filter_obd *filter = &obd->u.filter;
- struct dentry *dentry, *O_dentry;
+ struct dentry *O_dentry;
struct file *filp;
int last_group, rc = 0, cleanup_phase = 0;
loff_t off = 0;
ENTRY;
- O_dentry = simple_mkdir(current->fs->pwd, filter->fo_vfsmnt,
+ O_dentry = simple_mkdir(cfs_fs_pwd(current->fs), obd->u.obt.obt_vfsmnt,
"O", 0700, 1);
CDEBUG(D_INODE, "got/created O: %p\n", O_dentry);
if (IS_ERR(O_dentry)) {
filter->fo_dentry_O = O_dentry;
cleanup_phase = 1; /* O_dentry */
- /* Lookup "R" to tell if we're on an old OST FS and need to convert
- * from O/R/<dir>/<objid> to O/0/<dir>/<objid>. This can be removed
- * some time post 1.0 when all old-style OSTs have converted along
- * with the init_objid hack. */
- dentry = ll_lookup_one_len("R", O_dentry, 1);
- if (IS_ERR(dentry))
- GOTO(cleanup, rc = PTR_ERR(dentry));
- if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
- struct dentry *O0_dentry = lookup_one_len("0", O_dentry, 1);
- ENTRY;
-
- CWARN("converting OST to new object layout\n");
- if (IS_ERR(O0_dentry)) {
- rc = PTR_ERR(O0_dentry);
- CERROR("error looking up O/0: rc %d\n", rc);
- GOTO(cleanup_R, rc);
- }
-
- if (O0_dentry->d_inode) {
- CERROR("Both O/R and O/0 exist. Fix manually.\n");
- GOTO(cleanup_O0, rc = -EEXIST);
- }
-
- LOCK_INODE_MUTEX(O_dentry->d_inode);
- rc = ll_vfs_rename(O_dentry->d_inode, dentry, filter->fo_vfsmnt,
- O_dentry->d_inode, O0_dentry,
- filter->fo_vfsmnt);
- UNLOCK_INODE_MUTEX(O_dentry->d_inode);
-
- if (rc) {
- CERROR("error renaming O/R to O/0: rc %d\n", rc);
- GOTO(cleanup_O0, rc);
- }
- filter->fo_fsd->lsd_feature_incompat |=
- cpu_to_le32(OBD_INCOMPAT_GROUPS);
- rc = filter_update_server_data(obd, filter->fo_rcvd_filp,
- filter->fo_fsd, 1);
- GOTO(cleanup_O0, rc);
-
- cleanup_O0:
- f_dput(O0_dentry);
- cleanup_R:
- f_dput(dentry);
- if (rc)
- GOTO(cleanup, rc);
- } else {
- f_dput(dentry);
- }
-
- cleanup_phase = 2; /* groups */
-
/* we have to initialize all groups before first connections from
* clients because they may send create/destroy for any group -bzzz */
filp = filp_open("LAST_GROUP", O_CREAT | O_RDWR, 0700);
CERROR("cannot create LAST_GROUP: rc = %ld\n", PTR_ERR(filp));
GOTO(cleanup, rc = PTR_ERR(filp));
}
- cleanup_phase = 3; /* filp */
+ cleanup_phase = 2; /* filp */
rc = fsfilt_read_record(obd, filp, &last_group, sizeof(__u32), &off);
if (rc) {
CDEBUG(D_INODE, "error reading LAST_GROUP: rc %d\n", rc);
GOTO(cleanup, rc);
}
- if (off == 0) {
- last_group = FILTER_MIN_GROUPS;
- } else {
- LASSERT_MDS_GROUP(last_group);
- }
+
+ if (off == 0)
+ last_group = FID_SEQ_OST_MDT0;
CWARN("%s: initialize groups [%d,%d]\n", obd->obd_name,
- FILTER_MIN_GROUPS, last_group);
+ FID_SEQ_OST_MDT0, last_group);
filter->fo_committed_group = last_group;
rc = filter_read_groups(obd, last_group, 1);
if (rc)
cleanup:
switch (cleanup_phase) {
- case 3:
- filp_close(filp, 0);
case 2:
- filter_cleanup_groups(obd);
+ filp_close(filp, 0);
case 1:
+ filter_cleanup_groups(obd);
f_dput(filter->fo_dentry_O);
filter->fo_dentry_O = NULL;
default:
LAST_RCVD, rc);
GOTO(out, rc);
}
- filter->fo_rcvd_filp = file;
+ obd->u.obt.obt_rcvd_filp = file;
if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
CERROR("%s is not a regular file!: mode = %o\n", LAST_RCVD,
file->f_dentry->d_inode->i_mode);
CERROR("cannot read %s: rc = %d\n", LAST_RCVD, rc);
GOTO(err_filp, rc);
}
+ LASSERT(obd->u.obt.obt_lut);
+ target_recovery_init(obd->u.obt.obt_lut, ost_handle);
+
/* open and create health check io file*/
file = filp_open(HEALTH_CHECK, O_RDWR | O_CREAT, 0644);
if (IS_ERR(file)) {
HEALTH_CHECK, rc);
GOTO(err_server_data, rc);
}
- filter->fo_health_check_filp = file;
+ filter->fo_obt.obt_health_check_filp = file;
if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
CERROR("%s is not a regular file!: mode = %o\n", HEALTH_CHECK,
file->f_dentry->d_inode->i_mode);
return(rc);
err_health_check:
- if (filp_close(filter->fo_health_check_filp, 0))
+ if (filp_close(filter->fo_obt.obt_health_check_filp, 0))
CERROR("can't close %s after error\n", HEALTH_CHECK);
- filter->fo_health_check_filp = NULL;
+ filter->fo_obt.obt_health_check_filp = NULL;
err_server_data:
target_recovery_fini(obd);
- filter_free_server_data(filter);
+ filter_free_server_data(&obd->u.obt);
err_filp:
- if (filp_close(filter->fo_rcvd_filp, 0))
+ if (filp_close(obd->u.obt.obt_rcvd_filp, 0))
CERROR("can't close %s after error\n", LAST_RCVD);
- filter->fo_rcvd_filp = NULL;
+ obd->u.obt.obt_rcvd_filp = NULL;
goto out;
}
* from lastobjid */
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- rc = filter_update_server_data(obd, filter->fo_rcvd_filp,
- filter->fo_fsd, 0);
+ rc = filter_update_server_data(obd);
if (rc)
CERROR("error writing server data: rc = %d\n", rc);
i, rc);
}
- rc = filp_close(filter->fo_rcvd_filp, 0);
- filter->fo_rcvd_filp = NULL;
+ rc = filp_close(obd->u.obt.obt_rcvd_filp, 0);
+ obd->u.obt.obt_rcvd_filp = NULL;
if (rc)
CERROR("error closing %s: rc = %d\n", LAST_RCVD, rc);
- rc = filp_close(filter->fo_health_check_filp, 0);
- filter->fo_health_check_filp = NULL;
+ rc = filp_close(filter->fo_obt.obt_health_check_filp, 0);
+ filter->fo_obt.obt_health_check_filp = NULL;
if (rc)
CERROR("error closing %s: rc = %d\n", HEALTH_CHECK, rc);
filter_cleanup_groups(obd);
- filter_free_server_data(filter);
+ filter_free_server_data(&obd->u.obt);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
filter_free_capa_keys(filter);
}
static void filter_set_last_id(struct filter_obd *filter,
- obd_id id, obd_gr group)
+ obd_id id, obd_seq group)
{
- LASSERT(filter->fo_fsd != NULL);
LASSERT(group <= filter->fo_group_count);
- spin_lock(&filter->fo_objidlock);
+ cfs_spin_lock(&filter->fo_objidlock);
filter->fo_last_objids[group] = id;
- spin_unlock(&filter->fo_objidlock);
+ cfs_spin_unlock(&filter->fo_objidlock);
}
-obd_id filter_last_id(struct filter_obd *filter, obd_gr group)
+obd_id filter_last_id(struct filter_obd *filter, obd_seq group)
{
obd_id id;
- LASSERT(filter->fo_fsd != NULL);
LASSERT(group <= filter->fo_group_count);
+ LASSERT(filter->fo_last_objids != NULL);
/* FIXME: object groups */
- spin_lock(&filter->fo_objidlock);
+ cfs_spin_lock(&filter->fo_objidlock);
id = filter->fo_last_objids[group];
- spin_unlock(&filter->fo_objidlock);
+ cfs_spin_unlock(&filter->fo_objidlock);
return id;
}
}
/* We never dget the object parent, so DON'T dput it either */
-struct dentry *filter_parent(struct obd_device *obd, obd_gr group, obd_id objid)
+struct dentry *filter_parent(struct obd_device *obd, obd_seq group, obd_id objid)
{
struct filter_obd *filter = &obd->u.filter;
struct filter_subdirs *subdirs;
LASSERT(group < filter->fo_group_count); /* FIXME: object groups */
- if ((group > FILTER_GROUP_MDS0 && group < FILTER_GROUP_MDS1_N_BASE) ||
- filter->fo_subdir_count == 0)
+ if (!fid_seq_is_mdt(group) || filter->fo_subdir_count == 0)
return filter->fo_dentry_O_groups[group];
subdirs = &filter->fo_dentry_O_sub[group];
}
/* We never dget the object parent, so DON'T dput it either */
-struct dentry *filter_parent_lock(struct obd_device *obd, obd_gr group,
+struct dentry *filter_parent_lock(struct obd_device *obd, obd_seq group,
obd_id objid)
{
unsigned long now = jiffies;
* internal to the filesystem code. */
struct dentry *filter_fid2dentry(struct obd_device *obd,
struct dentry *dir_dentry,
- obd_gr group, obd_id id)
+ obd_seq group, obd_id id)
{
struct dentry *dparent = dir_dentry;
struct dentry *dchild;
if (dir_dentry == NULL) {
dparent = filter_parent_lock(obd, group, id);
if (IS_ERR(dparent)) {
- CERROR("%s: error getting object "LPU64":"LPU64
+ CERROR("%s: error getting object "POSTID
" parent: rc %ld\n", obd->obd_name,
id, group, PTR_ERR(dparent));
RETURN(dparent);
if (dir_dentry == NULL)
filter_parent_unlock(dparent);
if (IS_ERR(dchild)) {
- CERROR("%s: child lookup error %ld\n", obd->obd_name,
- PTR_ERR(dchild));
+ CERROR("%s: object "LPU64":"LPU64" lookup error: rc %ld\n",
+ obd->obd_name, id, group, PTR_ERR(dchild));
RETURN(dchild);
}
}
static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
- obd_id group)
+ obd_id group, struct lustre_handle *lockh)
{
- struct lustre_handle lockh;
int flags = LDLM_AST_DISCARD_DATA, rc;
struct ldlm_res_id res_id;
ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_EXTENT,
&policy, LCK_PW, &flags, ldlm_blocking_ast,
ldlm_completion_ast, NULL, NULL, 0, NULL,
- NULL, &lockh);
-
- /* We only care about the side-effects, just drop the lock. */
- if (rc == ELDLM_OK)
- ldlm_lock_decref(&lockh, LCK_PW);
-
+ lockh);
+ if (rc != ELDLM_OK)
+ lockh->cookie = 0;
RETURN(rc);
}
+static void filter_fini_destroy(struct obd_device *obd,
+ struct lustre_handle *lockh)
+{
+ if (lustre_handle_is_used(lockh))
+ ldlm_lock_decref(lockh, LCK_PW);
+}
+
/* This is vfs_unlink() without down(i_sem). If we call regular vfs_unlink()
* we have 2.6 lock ordering issues with filter_commitrw_write() as it takes
* i_sem before starting a handle, while filter_destroy() + vfs_unlink do the
GOTO(out, rc = -EPERM);
/* check_sticky() */
- if ((dentry->d_inode->i_uid != current->fsuid &&
+ if ((dentry->d_inode->i_uid != cfs_curproc_fsuid() &&
!cfs_capable(CFS_CAP_FOWNER)) || IS_APPEND(dentry->d_inode) ||
IS_IMMUTABLE(dentry->d_inode))
GOTO(out, rc = -EPERM);
/* NOTE: This might need to go outside i_mutex, though it isn't clear if
* that was done because of journal_start (which is already done
* here) or some other ordering issue. */
- DQUOT_INIT(dir);
+ ll_vfs_dq_init(dir);
rc = ll_security_inode_unlink(dir, dentry, mnt);
if (rc)
* Caller must hold child i_mutex, we drop it always.
* Caller is also required to ensure that dchild->d_inode exists. */
static int filter_destroy_internal(struct obd_device *obd, obd_id objid,
- obd_gr group, struct dentry *dparent,
+ obd_seq group, struct dentry *dparent,
struct dentry *dchild)
{
struct inode *inode = dchild->d_inode;
- struct filter_obd *filter = &obd->u.filter;
int rc;
- if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 1) {
+ /* There should be 2 references to the inode:
+ * 1) taken by filter_prepare_destroy
+ * 2) taken by filter_destroy */
+ if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 2) {
CERROR("destroying objid %.*s ino %lu nlink %lu count %d\n",
dchild->d_name.len, dchild->d_name.name, inode->i_ino,
(unsigned long)inode->i_nlink,
atomic_read(&inode->i_count));
}
- rc = filter_vfs_unlink(dparent->d_inode, dchild, filter->fo_vfsmnt);
+ rc = filter_vfs_unlink(dparent->d_inode, dchild, obd->u.obt.obt_vfsmnt);
if (rc)
CERROR("error unlinking objid %.*s: rc %d\n",
dchild->d_name.len, dchild->d_name.name, rc);
if (interval_high(n) <= size)
return INTERVAL_ITER_STOP;
- list_for_each_entry(lck, &node->li_group, l_sl_policy) {
+ cfs_list_for_each_entry(lck, &node->li_group, l_sl_policy) {
/* Don't send glimpse ASTs to liblustre clients.
* They aren't listening for them, and they do
* entirely synchronous I/O anyways. */
* lock, and should not be granted if the lock will be blocked.
*/
- LASSERT(ns == res->lr_namespace);
+ LASSERT(ns == ldlm_res_to_ns(res));
lock_res(res);
rc = policy(lock, &tmpflags, 0, &err, &rpc_list);
check_res_locked(res);
/* FIXME: we should change the policy function slightly, to not make
* this list at all, since we just turn around and free it */
- while (!list_empty(&rpc_list)) {
+ while (!cfs_list_empty(&rpc_list)) {
struct ldlm_lock *wlock =
- list_entry(rpc_list.next, struct ldlm_lock, l_cp_ast);
+ cfs_list_entry(rpc_list.next, struct ldlm_lock,
+ l_cp_ast);
LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
lock->l_flags &= ~LDLM_FL_CP_REQD;
- list_del_init(&wlock->l_cp_ast);
+ cfs_list_del_init(&wlock->l_cp_ast);
LDLM_LOCK_RELEASE(wlock);
}
if (rc == LDLM_ITER_CONTINUE) {
/* do not grant locks to the liblustre clients: they cannot
* handle ASTs robustly. We need to do this while still
- * holding ns_lock to avoid the lock remaining on the res_link
+ * holding lr_lock to avoid the lock remaining on the res_link
* list (and potentially being added to l_pending_list by an
* AST) when we are going to drop this lock ASAP. */
if (lock->l_export->exp_libclient ||
*reply_lvb = *res_lvb;
/*
- * ->ns_lock guarantees that no new locks are granted, and,
+ * lr_lock guarantees that no new locks are granted, and,
* therefore, that res->lr_lvb_data cannot increase beyond the
* end of already granted lock. As a result, it is safe to
* check against "stale" reply_lvb->lvb_size value without
*
* Of course, this will all disappear when we switch to
* taking liblustre locks on the OST. */
- ldlm_res_lvbo_update(res, NULL, 0, 1);
+ ldlm_res_lvbo_update(res, NULL, 1);
}
RETURN(ELDLM_LOCK_ABORTED);
}
LASSERTF(l->l_glimpse_ast != NULL, "l == %p", l);
rc = l->l_glimpse_ast(l, NULL); /* this will update the LVB */
- /* Update the LVB from disk if the AST failed (this is a legal race) */
- /*
- * XXX nikita: situation when ldlm_server_glimpse_ast() failed before
- * sending ast is not handled. This can result in lost client writes.
- */
- if (rc != 0)
- ldlm_res_lvbo_update(res, NULL, 0, 1);
lock_res(res);
*reply_lvb = *res_lvb;
sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
- write_lock(&filter->fo_sptlrpc_lock);
+ cfs_write_lock(&filter->fo_sptlrpc_lock);
sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
filter->fo_sptlrpc_rset = tmp_rset;
- write_unlock(&filter->fo_sptlrpc_lock);
+ cfs_write_unlock(&filter->fo_sptlrpc_lock);
return 0;
}
__u8 *uuid_ptr;
char *str, *label;
char ns_name[48];
- request_queue_t *q;
+ struct request_queue *q;
int rc, i;
ENTRY;
struct lustre_sb_info *lsi = s2lsi(lmi->lmi_sb);
mnt = lmi->lmi_mnt;
obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
+
+ /* Get the recovery timeouts from the mount data, if provided */
+ if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_soft)
+ obd->obd_recovery_timeout =
+ lsi->lsi_lmd->lmd_recovery_time_soft;
+ if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_hard)
+ obd->obd_recovery_time_hard =
+ lsi->lsi_lmd->lmd_recovery_time_hard;
} else {
/* old path - used by lctl */
CERROR("Using old MDS mount method\n");
}
}
- filter->fo_vfsmnt = mnt;
+ obd->u.obt.obt_vfsmnt = mnt;
obd->u.obt.obt_sb = mnt->mnt_sb;
+ obd->u.obt.obt_magic = OBT_MAGIC;
filter->fo_fstype = mnt->mnt_sb->s_type->name;
CDEBUG(D_SUPER, "%s: mnt = %p\n", filter->fo_fstype, mnt);
obd->obd_lvfs_ctxt.fs = get_ds();
obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
- init_mutex(&filter->fo_init_lock);
+ cfs_init_mutex(&filter->fo_init_lock);
filter->fo_committed_group = 0;
-
- rc = filter_prep(obd);
- if (rc)
- GOTO(err_ops, rc);
-
filter->fo_destroys_in_progress = 0;
for (i = 0; i < 32; i++)
- sema_init(&filter->fo_create_locks[i], 1);
+ cfs_sema_init(&filter->fo_create_locks[i], 1);
- spin_lock_init(&filter->fo_translock);
- spin_lock_init(&filter->fo_objidlock);
+ cfs_spin_lock_init(&filter->fo_objidlock);
CFS_INIT_LIST_HEAD(&filter->fo_export_list);
- sema_init(&filter->fo_alloc_lock, 1);
+ cfs_sema_init(&filter->fo_alloc_lock, 1);
init_brw_stats(&filter->fo_filter_stats);
+ cfs_spin_lock_init(&filter->fo_flags_lock);
filter->fo_read_cache = 1; /* enable read-only cache by default */
filter->fo_writethrough_cache = 1; /* enable writethrough cache */
filter->fo_readcache_max_filesize = FILTER_MAX_CACHE_SIZE;
filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
+ filter->fo_syncjournal = 0; /* Don't sync journals on i/o by default */
+ filter_slc_set(filter); /* initialize sync on lock cancel */
+
+ rc = filter_prep(obd);
+ if (rc)
+ GOTO(err_ops, rc);
CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
- spin_lock_init(&filter->fo_llog_list_lock);
+ cfs_spin_lock_init(&filter->fo_llog_list_lock);
filter->fo_fl_oss_capa = 1;
+
CFS_INIT_LIST_HEAD(&filter->fo_capa_keys);
filter->fo_capa_hash = init_capa_hash();
if (filter->fo_capa_hash == NULL)
- GOTO(err_ops, rc = -ENOMEM);
+ GOTO(err_post, rc = -ENOMEM);
sprintf(ns_name, "filter-%s", obd->obd_uuid.uuid);
- obd->obd_namespace = ldlm_namespace_new(obd, ns_name, LDLM_NAMESPACE_SERVER,
- LDLM_NAMESPACE_GREEDY);
+ obd->obd_namespace = ldlm_namespace_new(obd, ns_name,
+ LDLM_NAMESPACE_SERVER,
+ LDLM_NAMESPACE_GREEDY,
+ LDLM_NS_TYPE_OST);
if (obd->obd_namespace == NULL)
GOTO(err_post, rc = -ENOMEM);
obd->obd_namespace->ns_lvbp = obd;
ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
"filter_ldlm_cb_client", &obd->obd_ldlm_client);
- rc = obd_llog_init(obd, &obd->obd_olg, obd, 1, NULL, NULL);
+ rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
if (rc) {
CERROR("failed to setup llogging subsystems\n");
GOTO(err_post, rc);
}
- rwlock_init(&filter->fo_sptlrpc_lock);
+ cfs_rwlock_init(&filter->fo_sptlrpc_lock);
sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
/* do this after llog being initialized */
filter_adapt_sptlrpc_conf(obd, 1);
GOTO(err_post, rc);
q = bdev_get_queue(mnt->mnt_sb->s_bdev);
- if (q->max_sectors < q->max_hw_sectors &&
- q->max_sectors < PTLRPC_MAX_BRW_SIZE >> 9)
+ if (queue_max_sectors(q) < queue_max_hw_sectors(q) &&
+ queue_max_sectors(q) < PTLRPC_MAX_BRW_SIZE >> 9)
LCONSOLE_INFO("%s: underlying device %s should be tuned "
"for larger I/O requests: max_sectors = %u "
"could be up to max_hw_sectors=%u\n",
obd->obd_name, mnt->mnt_sb->s_id,
- q->max_sectors, q->max_hw_sectors);
+ queue_max_sectors(q), queue_max_hw_sectors(q));
uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
if (uuid_ptr != NULL) {
}
label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
-
- if (obd->obd_recovering) {
- LCONSOLE_WARN("OST %s now serving %s (%s%s%s), but will be in "
- "recovery for at least %d:%.02d, or until %d "
- "client%s reconnect. During this time new clients"
- " will not be allowed to connect. "
- "Recovery progress can be monitored by watching "
- "/proc/fs/lustre/obdfilter/%s/recovery_status.\n",
- obd->obd_name, lustre_cfg_string(lcfg, 1),
- label ?: "", label ? "/" : "", str,
+ LCONSOLE_INFO("%s: Now serving %s %s%s with recovery %s\n",
+ obd->obd_name, label ?: str, lmi ? "on " : "",
+ lmi ? s2lsi(lmi->lmi_sb)->lsi_lmd->lmd_dev : "",
+ obd->obd_replayable ? "enabled" : "disabled");
+
+ if (obd->obd_recovering)
+ LCONSOLE_WARN("%s: Will be in recovery for at least %d:%.02d, "
+ "or until %d client%s reconnect%s\n",
+ obd->obd_name,
obd->obd_recovery_timeout / 60,
obd->obd_recovery_timeout % 60,
obd->obd_max_recoverable_clients,
- (obd->obd_max_recoverable_clients == 1) ? "":"s",
- obd->obd_name);
- } else {
- LCONSOLE_INFO("OST %s now serving %s (%s%s%s) with recovery "
- "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
- label ?: "", label ? "/" : "", str,
- obd->obd_replayable ? "enabled" : "disabled");
- }
+ (obd->obd_max_recoverable_clients == 1) ? "" : "s",
+ (obd->obd_max_recoverable_clients == 1) ? "s": "");
+
RETURN(0);
LLOG_MDS_OST_REPL_CTXT);
GOTO(cleanup_olg, rc = -ENODEV);
}
- ctxt->loc_lcm = filter->fo_lcm;
+ ctxt->loc_lcm = lcm_get(filter->fo_lcm);
ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
llog_ctxt_put(ctxt);
static int
filter_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
- struct obd_device *tgt, int count, struct llog_catid *catid,
- struct obd_uuid *uuid)
+ struct obd_device *tgt, int *index)
{
struct filter_obd *filter = &obd->u.filter;
struct llog_ctxt *ctxt;
RETURN(-ENODEV);
}
ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
- ctxt->loc_lcm = filter->fo_lcm;
+ ctxt->loc_lcm = lcm_get(filter->fo_lcm);
llog_ctxt_put(ctxt);
RETURN(rc);
}
* This is safe to do, as llog is already synchronized
* and its import may go.
*/
- mutex_down(&ctxt->loc_sem);
+ cfs_mutex_down(&ctxt->loc_sem);
if (ctxt->loc_imp) {
class_import_put(ctxt->loc_imp);
ctxt->loc_imp = NULL;
}
- mutex_up(&ctxt->loc_sem);
+ cfs_mutex_up(&ctxt->loc_sem);
llog_ctxt_put(ctxt);
}
if (filter->fo_lcm) {
+ cfs_mutex_down(&ctxt->loc_sem);
llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
filter->fo_lcm = NULL;
+ cfs_mutex_up(&ctxt->loc_sem);
}
RETURN(filter_olg_fini(&obd->obd_olg));
}
struct obd_llog_group *olg;
LASSERT_SPIN_LOCKED(&filter->fo_llog_list_lock);
- list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
- if (olg->olg_group == group)
+ cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+ if (olg->olg_seq == group)
RETURN(olg);
}
RETURN(NULL);
filter = &obd->u.filter;
- if (group == FILTER_GROUP_LLOG)
+ if (group == FID_SEQ_LLOG)
RETURN(&obd->obd_olg);
- spin_lock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
olg = filter_find_olg_internal(filter, group);
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
RETURN(olg);
}
filter = &obd->u.filter;
- if (group == FILTER_GROUP_LLOG)
+ if (group == FID_SEQ_LLOG)
RETURN(&obd->obd_olg);
- spin_lock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
olg = filter_find_olg_internal(filter, group);
if (olg) {
if (olg->olg_initializing) {
GOTO(out_unlock, olg = ERR_PTR(-ENOMEM));
llog_group_init(olg, group);
- list_add(&olg->olg_list, &filter->fo_llog_list);
+ cfs_list_add(&olg->olg_list, &filter->fo_llog_list);
olg->olg_initializing = 1;
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
- rc = llog_cat_initialize(obd, olg, 1, NULL);
+ rc = obd_llog_init(obd, olg, obd, NULL);
if (rc) {
- spin_lock(&filter->fo_llog_list_lock);
- list_del(&olg->olg_list);
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
+ cfs_list_del(&olg->olg_list);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
OBD_FREE_PTR(olg);
GOTO(out, olg = ERR_PTR(-ENOMEM));
}
- spin_lock(&filter->fo_llog_list_lock);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
olg->olg_initializing = 0;
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
CDEBUG(D_OTHER, "%s: new llog group %u (0x%p)\n",
obd->obd_name, group, olg);
out:
RETURN(olg);
out_unlock:
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
GOTO(out, olg);
}
CDEBUG(D_OTHER, "%s: LLog connect for: "LPX64"/"LPX64":%x\n",
obd->obd_name, body->lgdc_logid.lgl_oid,
- body->lgdc_logid.lgl_ogr, body->lgdc_logid.lgl_ogen);
+ body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
- olg = filter_find_olg(obd, body->lgdc_logid.lgl_ogr);
+ olg = filter_find_olg(obd, body->lgdc_logid.lgl_oseq);
if (!olg) {
CERROR(" %s: can not find olg of group %d\n",
- obd->obd_name, (int)body->lgdc_logid.lgl_ogr);
+ obd->obd_name, (int)body->lgdc_logid.lgl_oseq);
RETURN(-ENOENT);
}
llog_group_set_export(olg, exp);
CWARN("%s: Recovery from log "LPX64"/"LPX64":%x\n",
obd->obd_name, body->lgdc_logid.lgl_oid,
- body->lgdc_logid.lgl_ogr, body->lgdc_logid.lgl_ogen);
+ body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
+ cfs_spin_lock(&obd->u.filter.fo_flags_lock);
+ obd->u.filter.fo_mds_ost_sync = 1;
+ cfs_spin_unlock(&obd->u.filter.fo_flags_lock);
rc = llog_connect(ctxt, &body->lgdc_logid,
&body->lgdc_gen, NULL);
llog_ctxt_put(ctxt);
{
struct obd_llog_group *olg, *tmp;
struct filter_obd *filter;
- struct list_head remove_list;
+ cfs_list_t remove_list;
int rc = 0;
ENTRY;
filter = &obd->u.filter;
CFS_INIT_LIST_HEAD(&remove_list);
- spin_lock(&filter->fo_llog_list_lock);
- while (!list_empty(&filter->fo_llog_list)) {
- olg = list_entry(filter->fo_llog_list.next,
- struct obd_llog_group, olg_list);
- list_del(&olg->olg_list);
- list_add(&olg->olg_list, &remove_list);
+ cfs_spin_lock(&filter->fo_llog_list_lock);
+ while (!cfs_list_empty(&filter->fo_llog_list)) {
+ olg = cfs_list_entry(filter->fo_llog_list.next,
+ struct obd_llog_group, olg_list);
+ cfs_list_del(&olg->olg_list);
+ cfs_list_add(&olg->olg_list, &remove_list);
}
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
- list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
- list_del_init(&olg->olg_list);
+ cfs_list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
+ cfs_list_del_init(&olg->olg_list);
rc = filter_olg_fini(olg);
if (rc)
CERROR("failed to cleanup llogging subsystem for %u\n",
- olg->olg_group);
+ olg->olg_seq);
OBD_FREE_PTR(olg);
}
case OBD_CLEANUP_EARLY:
break;
case OBD_CLEANUP_EXPORTS:
- target_cleanup_recovery(obd);
+ /* Stop recovery before namespace cleanup. */
+ target_recovery_fini(obd);
rc = filter_llog_preclean(obd);
break;
}
LCONSOLE_WARN("%s: shutting down for failover; client state "
"will be preserved.\n", obd->obd_name);
- if (!list_empty(&obd->obd_exports)) {
- CERROR("%s: still has clients!\n", obd->obd_name);
- class_disconnect_exports(obd);
- if (!list_empty(&obd->obd_exports)) {
- CERROR("still has exports after forced cleanup?\n");
- RETURN(-EBUSY);
- }
- }
+ obd_exports_barrier(obd);
+ obd_zombie_barrier();
lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
lprocfs_free_per_client_stats(obd);
lprocfs_obd_cleanup(obd);
lquota_cleanup(filter_quota_interface_ref, obd);
- /* Stop recovery before namespace cleanup. */
- target_stop_recovery_thread(obd);
- target_cleanup_recovery(obd);
-
ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
obd->obd_namespace = NULL;
filter_post(obd);
- LL_DQUOT_OFF(obd->u.obt.obt_sb);
+ ll_vfs_dq_off(obd->u.obt.obt_sb, 0);
shrink_dcache_sb(obd->u.obt.obt_sb);
- server_put_mount(obd->obd_name, filter->fo_vfsmnt);
+ server_put_mount(obd->obd_name, obd->u.obt.obt_vfsmnt);
obd->u.obt.obt_sb = NULL;
fsfilt_put_ops(obd->obd_fsops);
}
static int filter_connect_internal(struct obd_export *exp,
- struct obd_connect_data *data)
+ struct obd_connect_data *data,
+ int reconnect)
{
struct filter_export_data *fed = &exp->exp_filter_data;
struct filter_obd *filter = &exp->exp_obd->u.filter;
obd_size left, want;
- spin_lock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
left = filter_grant_space_left(exp);
want = data->ocd_grant;
- filter_grant(exp, fed->fed_grant, want, left);
+ filter_grant(exp, fed->fed_grant, want, left, (reconnect == 0));
data->ocd_grant = fed->fed_grant;
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %d want: "
LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
exp->exp_client_uuid.uuid, exp,
data->ocd_grant, want, left);
-
+
filter->fo_tot_granted_clients ++;
}
if (data->ocd_connect_flags & OBD_CONNECT_INDEX) {
- struct filter_obd *filter = &exp->exp_obd->u.filter;
- struct lr_server_data *lsd = filter->fo_fsd;
+ struct lr_server_data *lsd = class_server_data(exp->exp_obd);
int index = le32_to_cpu(lsd->lsd_ost_index);
if (!(lsd->lsd_feature_compat &
/* this will only happen on the first connect */
lsd->lsd_ost_index = cpu_to_le32(data->ocd_index);
lsd->lsd_feature_compat |= cpu_to_le32(OBD_COMPAT_OST);
- filter_update_server_data(exp->exp_obd,
- filter->fo_rcvd_filp, lsd, 1);
+ /* sync is not needed here as filter_client_add will
+ * set exp_need_sync flag */
+ filter_update_server_data(exp->exp_obd);
} else if (index != data->ocd_index) {
LCONSOLE_ERROR_MSG(0x136, "Connection from %s to index"
" %u doesn't match actual OST index"
data->ocd_index);
RETURN(-EBADF);
}
+ /* FIXME: Do the same with the MDS UUID and lsd_peeruuid.
+ * FIXME: We don't strictly need the COMPAT flag for that,
+ * FIXME: as lsd_peeruuid[0] will tell us if that is set.
+ * FIXME: We needed it for the index, as index 0 is valid. */
}
if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_SIZE)) {
obd_export_nid2str(exp));
}
- /* FIXME: Do the same with the MDS UUID and fsd_peeruuid.
- * FIXME: We don't strictly need the COMPAT flag for that,
- * FIXME: as fsd_peeruuid[0] will tell us if that is set.
- * FIXME: We needed it for the index, as index 0 is valid. */
-
RETURN(0);
}
if (exp == NULL || obd == NULL || cluuid == NULL)
RETURN(-EINVAL);
- rc = filter_connect_internal(exp, data);
+ rc = filter_connect_internal(exp, data, 1);
if (rc == 0)
- filter_export_stats_init(obd, exp, localdata);
+ filter_export_stats_init(obd, exp, 1, localdata);
RETURN(rc);
}
struct lvfs_run_ctxt saved;
struct lustre_handle conn = { 0 };
struct obd_export *lexp;
- struct filter_export_data *fed;
- struct lsd_client_data *lcd = NULL;
__u32 group;
int rc;
ENTRY;
lexp = class_conn2export(&conn);
LASSERT(lexp != NULL);
- fed = &lexp->exp_filter_data;
-
- rc = filter_connect_internal(lexp, data);
+ rc = filter_connect_internal(lexp, data, 0);
if (rc)
GOTO(cleanup, rc);
- filter_export_stats_init(obd, lexp, localdata);
+ filter_export_stats_init(obd, lexp, 0, localdata);
if (obd->obd_replayable) {
- OBD_ALLOC(lcd, sizeof(*lcd));
- if (!lcd) {
- CERROR("filter: out of memory for client data\n");
- GOTO(cleanup, rc = -ENOMEM);
- }
-
+ struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd;
+ LASSERT(lcd);
memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
- fed->fed_lcd = lcd;
rc = filter_client_add(obd, lexp, -1);
if (rc)
GOTO(cleanup, rc);
cleanup:
if (rc) {
- if (lcd) {
- OBD_FREE_PTR(lcd);
- fed->fed_lcd = NULL;
- }
class_disconnect(lexp);
*exp = NULL;
} else {
obd_size tot_dirty = 0, tot_pending = 0, tot_granted = 0;
obd_size fo_tot_dirty, fo_tot_pending, fo_tot_granted;
- if (list_empty(&obd->obd_exports))
+ if (cfs_list_empty(&obd->obd_exports))
return;
/* We don't want to do this for large machines that do lots of
if (obd->obd_num_exports > 100)
return;
- spin_lock(&obd->obd_osfs_lock);
- spin_lock(&obd->obd_dev_lock);
- list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ cfs_spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
int error = 0;
fed = &exp->exp_filter_data;
if (fed->fed_grant < 0 || fed->fed_pending < 0 ||
fo_tot_granted = obd->u.filter.fo_tot_granted;
fo_tot_pending = obd->u.filter.fo_tot_pending;
fo_tot_dirty = obd->u.filter.fo_tot_dirty;
- spin_unlock(&obd->obd_dev_lock);
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
/* Do these assertions outside the spinlocks so we don't kill system */
if (tot_granted != fo_tot_granted)
struct filter_obd *filter = &obd->u.filter;
struct filter_export_data *fed = &exp->exp_filter_data;
- spin_lock(&obd->obd_osfs_lock);
- spin_lock(&obd->obd_dev_lock);
- list_del_init(&exp->exp_obd_chain);
- spin_unlock(&obd->obd_dev_lock);
-
+ cfs_spin_lock(&obd->obd_osfs_lock);
LASSERTF(filter->fo_tot_granted >= fed->fed_grant,
"%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
obd->obd_name, filter->fo_tot_granted,
fed->fed_dirty = 0;
fed->fed_grant = 0;
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
}
static int filter_destroy_export(struct obd_export *exp)
{
+ struct filter_export_data *fed = &exp->exp_filter_data;
ENTRY;
- if (exp->exp_filter_data.fed_pending)
+ if (fed->fed_pending)
CERROR("%s: cli %s/%p has %lu pending on destroyed export\n",
exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
- exp, exp->exp_filter_data.fed_pending);
+ exp, fed->fed_pending);
lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
target_destroy_export(exp);
ldlm_destroy_export(exp);
+ lut_client_free(exp);
if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid))
RETURN(0);
-
- if (exp->exp_obd->obd_replayable)
- filter_client_free(exp);
- else
+ if (!exp->exp_obd->obd_replayable)
fsfilt_sync(exp->exp_obd, exp->exp_obd->u.obt.obt_sb);
filter_grant_discard(exp);
}
if (!(exp->exp_flags & OBD_OPT_FORCE))
- filter_grant_sanity_check(exp->exp_obd, __FUNCTION__);
+ filter_grant_sanity_check(exp->exp_obd, __func__);
RETURN(0);
}
/* look for group with min. number, but > worked */
olg_min = NULL;
group = 1 << 30;
- spin_lock(&filter->fo_llog_list_lock);
- list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
- if (olg->olg_group <= worked) {
+ cfs_spin_lock(&filter->fo_llog_list_lock);
+ cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
+ if (olg->olg_seq <= worked) {
/* this group is already synced */
continue;
}
- if (group < olg->olg_group) {
+ if (group < olg->olg_seq) {
/* we have group with smaller number to sync */
continue;
}
/* store current minimal group */
olg_min = olg;
- group = olg->olg_group;
+ group = olg->olg_seq;
}
- spin_unlock(&filter->fo_llog_list_lock);
+ cfs_spin_unlock(&filter->fo_llog_list_lock);
if (olg_min == NULL)
break;
- worked = olg_min->olg_group;
+ worked = olg_min->olg_seq;
if (olg_min->olg_exp &&
(dexp == olg_min->olg_exp || dexp == NULL)) {
int err;
class_export_get(exp);
if (!(exp->exp_flags & OBD_OPT_FORCE))
- filter_grant_sanity_check(obd, __FUNCTION__);
+ filter_grant_sanity_check(obd, __func__);
filter_grant_discard(exp);
/* Flush any remaining cancel messages out to the target */
lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
- /* Disconnect early so that clients can't keep using export */
- rc = class_disconnect(exp);
- if (exp->exp_obd->obd_namespace != NULL)
- ldlm_cancel_locks_for_export(exp);
+ rc = server_disconnect_export(exp);
- fsfilt_sync(obd, obd->u.obt.obt_sb);
-
- lprocfs_exp_cleanup(exp);
+ /* Do not erase record for recoverable client. */
+ if (obd->obd_replayable && (!obd->obd_fail || exp->exp_failed))
+ filter_client_del(exp);
+ else
+ fsfilt_sync(obd, obd->u.obt.obt_sb);
class_export_put(exp);
RETURN(rc);
static int filter_ping(struct obd_export *exp)
{
filter_fmd_expire(exp);
-
return 0;
}
-struct dentry *__filter_oa2dentry(struct obd_device *obd, struct obdo *oa,
+struct dentry *__filter_oa2dentry(struct obd_device *obd, struct ost_id *ostid,
const char *what, int quiet)
{
struct dentry *dchild = NULL;
- obd_gr group = 0;
-
- if (oa->o_valid & OBD_MD_FLGROUP)
- group = oa->o_gr;
- dchild = filter_fid2dentry(obd, NULL, group, oa->o_id);
+ dchild = filter_fid2dentry(obd, NULL, ostid->oi_seq, ostid->oi_id);
if (IS_ERR(dchild)) {
- CERROR("%s error looking up object: "LPU64":"LPU64"\n",
- what, group, oa->o_id);
+ CERROR("%s error looking up object: "POSTID"\n",
+ what, ostid->oi_id, ostid->oi_seq);
RETURN(dchild);
}
if (dchild->d_inode == NULL) {
if (!quiet)
- CERROR("%s: %s on non-existent object: "LPU64"\n",
- obd->obd_name, what, oa->o_id);
+ CERROR("%s: %s on non-existent object: "POSTID" \n",
+ obd->obd_name, what, ostid->oi_id,ostid->oi_seq);
f_dput(dchild);
RETURN(ERR_PTR(-ENOENT));
}
int rc = 0;
ENTRY;
- rc = filter_auth_capa(exp, NULL, oinfo_mdsno(oinfo),
+ rc = filter_auth_capa(exp, NULL, oinfo->oi_oa->o_seq,
oinfo_capa(oinfo), CAPA_OPC_META_READ);
if (rc)
RETURN(rc);
RETURN(-EINVAL);
}
- dentry = filter_oa2dentry(obd, oinfo->oi_oa);
+ dentry = filter_oa2dentry(obd, &oinfo->oi_oa->o_oi);
if (IS_ERR(dentry))
RETURN(PTR_ERR(dentry));
/* Limit the valid bits in the return data to what we actually use */
oinfo->oi_oa->o_valid = OBD_MD_FLID;
- obdo_from_inode(oinfo->oi_oa, dentry->d_inode, FILTER_VALID_FLAGS);
+ obdo_from_inode(oinfo->oi_oa, dentry->d_inode, NULL, FILTER_VALID_FLAGS);
f_dput(dentry);
RETURN(rc);
struct filter_fid ff;
if (!(oa->o_valid & OBD_MD_FLGROUP))
- oa->o_gr = 0;
+ oa->o_seq = 0;
/* packing fid and converting it to LE for storing into EA.
* Here ->o_stripe_idx should be filled by LOV and rest of
* fields - by client. */
- ff.ff_fid.id = cpu_to_le64(oa->o_fid);
- ff.ff_fid.f_type = cpu_to_le32(oa->o_stripe_idx);
- ff.ff_fid.generation = cpu_to_le32(oa->o_generation);
+ ff.ff_parent.f_seq = cpu_to_le64(oa->o_parent_seq);
+ ff.ff_parent.f_oid = cpu_to_le32(oa->o_parent_oid);
+ /* XXX: we are ignoring o_parent_ver here, since this should
+ * be the same for all objects in this fileset. */
+ ff.ff_parent.f_ver = cpu_to_le32(oa->o_stripe_idx);
ff.ff_objid = cpu_to_le64(oa->o_id);
- ff.ff_group = cpu_to_le64(oa->o_gr);
+ ff.ff_seq = cpu_to_le64(oa->o_seq);
- CDEBUG(D_INODE, "storing filter fid EA ("LPU64"/%u/%u"
- LPU64"/"LPU64")\n", oa->o_fid, oa->o_stripe_idx,
- oa->o_generation, oa->o_id, oa->o_gr);
+ CDEBUG(D_INODE, "storing filter fid EA (parent "DFID" "
+ LPU64"/"LPU64")\n", PFID(&ff.ff_parent), oa->o_id,
+ oa->o_seq);
rc = fsfilt_set_md(obd, inode, handle, &ff, sizeof(ff), "fid");
if (rc)
unsigned int orig_ids[MAXQUOTAS] = {0, 0};
struct llog_cookie *fcc = NULL;
struct filter_obd *filter;
- int rc, err, locked = 0, sync = 0;
+ int rc, err, sync = 0;
loff_t old_size = 0;
unsigned int ia_valid;
struct inode *inode;
+ struct page *page = NULL;
struct iattr iattr;
void *handle;
ENTRY;
if (fcc != NULL)
*fcc = oa->o_lcookie;
}
-
- if (ia_valid & ATTR_SIZE || ia_valid & (ATTR_UID | ATTR_GID)) {
- DQUOT_INIT(inode);
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID)) {
+ unsigned long now = jiffies;
+ ll_vfs_dq_init(inode);
+ /* Filter truncates and writes are serialized by
+ * i_alloc_sem, see the comment in
+ * filter_preprw_write.*/
+ if (ia_valid & ATTR_SIZE)
+ down_write(&inode->i_alloc_sem);
LOCK_INODE_MUTEX(inode);
+ fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
old_size = i_size_read(inode);
- locked = 1;
+ }
+
+ /* VBR: version recovery check */
+ rc = filter_version_get_check(exp, oti, inode);
+ if (rc)
+ GOTO(out_unlock, rc);
+
+ /* Let's pin the last page so that ldiskfs_truncate
+ * should not start GFP_FS allocation. */
+ if (ia_valid & ATTR_SIZE) {
+ page = grab_cache_page(inode->i_mapping,
+ iattr.ia_size >> PAGE_CACHE_SHIFT);
+ if (page == NULL)
+ GOTO(out_unlock, rc = -ENOMEM);
+
+ unlock_page(page);
}
/* If the inode still has SUID+SGID bits set (see filter_precreate())
GOTO(out_unlock, rc = PTR_ERR(handle));
}
if (oa->o_valid & OBD_MD_FLFLAGS) {
- rc = fsfilt_iocontrol(exp->exp_obd, inode, NULL,
- EXT3_IOC_SETFLAGS, (long)&oa->o_flags);
+ rc = fsfilt_iocontrol(exp->exp_obd, dentry,
+ FSFILT_IOC_SETFLAGS, (long)&oa->o_flags);
} else {
rc = fsfilt_setattr(exp->exp_obd, dentry, handle, &iattr, 1);
if (fcc != NULL)
if (OBD_FAIL_CHECK(OBD_FAIL_OST_SETATTR_CREDITS))
fsfilt_extend(exp->exp_obd, inode, 0, handle);
- /* The truncate might have used up our transaction credits. Make
- * sure we have one left for the last_rcvd update. */
- err = fsfilt_extend(exp->exp_obd, inode, 1, handle);
+ /* The truncate might have used up our transaction credits. Make sure
+ * we have two left for the last_rcvd and VBR inode version updates. */
+ err = fsfilt_extend(exp->exp_obd, inode, 2, handle);
+
+ /* Update inode version only if data has changed => size has changed */
+ rc = filter_finish_transno(exp, ia_valid & ATTR_SIZE ? inode : NULL,
+ oti, rc, sync);
- rc = filter_finish_transno(exp, oti, rc, sync);
if (sync) {
filter_cancel_cookies_cb(exp->exp_obd, 0, fcc, rc);
fcc = NULL;
rc = err;
}
- if (locked) {
- UNLOCK_INODE_MUTEX(inode);
- locked = 0;
- }
-
EXIT;
+
out_unlock:
- if (locked)
- UNLOCK_INODE_MUTEX(inode);
+ if (page)
+ page_cache_release(page);
+ if (ia_valid & (ATTR_SIZE | ATTR_UID | ATTR_GID))
+ UNLOCK_INODE_MUTEX(inode);
+ if (ia_valid & ATTR_SIZE)
+ up_write(&inode->i_alloc_sem);
if (fcc)
OBD_FREE(fcc, sizeof(*fcc));
if (oa->o_valid & OBD_FL_TRUNC)
opc |= CAPA_OPC_OSS_TRUNC;
- rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa, opc);
+
+ rc = filter_auth_capa(exp, NULL, oa->o_seq, capa, opc);
if (rc)
RETURN(rc);
if (oa->o_valid & (OBD_MD_FLUID | OBD_MD_FLGID)) {
- rc = filter_capa_fixoa(exp, oa, obdo_mdsno(oa), capa);
+ rc = filter_capa_fixoa(exp, oa, oa->o_seq, capa);
if (rc)
RETURN(rc);
}
- osc_build_res_name(oa->o_id, oa->o_gr, &res_id);
+ osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
/* This would be very bad - accidentally truncating a file when
* changing the time or similar - bug 12203. */
if (oa->o_valid & OBD_MD_FLSIZE &&
static char mdsinum[48];
if (oa->o_valid & OBD_MD_FLFID)
- snprintf(mdsinum, sizeof(mdsinum) - 1,
- " of inode "LPU64"/%u", oa->o_fid,
- oa->o_generation);
+ snprintf(mdsinum, sizeof(mdsinum) - 1, " of inode "DFID,
+ oa->o_parent_seq, oa->o_parent_oid,
+ oa->o_parent_ver);
else
mdsinum[0] = '\0';
- CERROR("%s: setattr from %s trying to truncate objid "LPU64
- " %s\n",
- exp->exp_obd->obd_name, obd_export_nid2str(exp),
- oa->o_id, mdsinum);
+ CERROR("%s: setattr from %s trying to truncate objid "POSTID
+ "%s\n", exp->exp_obd->obd_name, obd_export_nid2str(exp),
+ oa->o_id, oa->o_seq, mdsinum);
RETURN(-EPERM);
}
- dentry = __filter_oa2dentry(exp->exp_obd, oa, __FUNCTION__, 1);
+ dentry = __filter_oa2dentry(exp->exp_obd, &oinfo->oi_oa->o_oi, __func__, 1);
if (IS_ERR(dentry))
RETURN(PTR_ERR(dentry));
filter = &exp->exp_obd->u.filter;
push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
- lock_kernel();
+ /*
+ * We need to be atomic against a concurrent write
+ * (which takes the semaphore for reading). fmd_mactime_xid
+ * checks will have no effect if a write request with lower
+ * xid starts just before a setattr and finishes later than
+ * the setattr (see bug 21489, comment 27).
+ */
if (oa->o_valid &
(OBD_MD_FLMTIME | OBD_MD_FLATIME | OBD_MD_FLCTIME)) {
- fmd = filter_fmd_get(exp, oa->o_id, oa->o_gr);
+ unsigned long now = jiffies;
+ down_write(&dentry->d_inode->i_alloc_sem);
+ fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem");
+ fmd = filter_fmd_get(exp, oa->o_id, oa->o_seq);
if (fmd && fmd->fmd_mactime_xid < oti->oti_xid)
fmd->fmd_mactime_xid = oti->oti_xid;
filter_fmd_put(exp, fmd);
+ up_write(&dentry->d_inode->i_alloc_sem);
}
/* setting objects attributes (including owner/group) */
if (res != NULL) {
LDLM_RESOURCE_ADDREF(res);
- rc = ldlm_res_lvbo_update(res, NULL, 0, 0);
+ rc = ldlm_res_lvbo_update(res, NULL, 0);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
}
oa->o_valid = OBD_MD_FLID;
/* Quota release need uid/gid info */
- obdo_from_inode(oa, dentry->d_inode,
+ obdo_from_inode(oa, dentry->d_inode, NULL,
FILTER_VALID_FLAGS | OBD_MD_FLUID | OBD_MD_FLGID);
EXIT;
out_unlock:
- unlock_kernel();
f_dput(dentry);
pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
return rc;
RETURN(lsm_size);
}
-/* caller must hold fo_create_locks[oa->o_gr] */
+/* caller must hold fo_create_locks[oa->o_seq] */
static int filter_destroy_precreated(struct obd_export *exp, struct obdo *oa,
struct filter_obd *filter)
{
- struct obdo doa; /* XXX obdo on stack */
+ struct obdo doa = { 0 }; /* XXX obdo on stack */
obd_id last, id;
int rc = 0;
int skip_orphan;
ENTRY;
- LASSERT(oa);
- LASSERT_MDS_GROUP(oa->o_gr);
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
- LASSERT(down_trylock(&filter->fo_create_locks[oa->o_gr]) != 0);
+ LASSERT(down_trylock(&filter->fo_create_locks[oa->o_seq]) != 0);
memset(&doa, 0, sizeof(doa));
doa.o_valid |= OBD_MD_FLGROUP;
- doa.o_gr = oa->o_gr;
+ doa.o_seq = oa->o_seq;
doa.o_mode = S_IFREG;
- if (!test_bit(doa.o_gr, &filter->fo_destroys_in_progress)) {
+ if (!cfs_test_bit(doa.o_seq, &filter->fo_destroys_in_progress)) {
CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
- exp->exp_obd->obd_name, doa.o_gr);
+ exp->exp_obd->obd_name, doa.o_seq);
RETURN(0);
}
- last = filter_last_id(filter, doa.o_gr);
+ last = filter_last_id(filter, doa.o_seq);
skip_orphan = !!(exp->exp_connect_flags & OBD_CONNECT_SKIP_ORPHAN);
- CWARN("%s: deleting orphan objects from "LPU64" to "LPU64"%s\n",
+ CDEBUG(D_HA, "%s: deleting orphan objects from "LPU64" to "LPU64"%s\n",
exp->exp_obd->obd_name, oa->o_id + 1, last,
skip_orphan ? ", orphan objids won't be reused any more." : ".");
/* update last_id on disk periodically so that if we restart
* we don't need to re-scan all of the just-deleted objects. */
if ((id & 511) == 0 && !skip_orphan) {
- filter_set_last_id(filter, id - 1, doa.o_gr);
- filter_update_last_objid(exp->exp_obd, doa.o_gr, 0);
+ filter_set_last_id(filter, id - 1, doa.o_seq);
+ filter_update_last_objid(exp->exp_obd, doa.o_seq, 0);
}
}
CDEBUG(D_HA, "%s: after destroy: set last_objids["LPU64"] = "LPU64"\n",
- exp->exp_obd->obd_name, doa.o_gr, oa->o_id);
+ exp->exp_obd->obd_name, doa.o_seq, oa->o_id);
if (!skip_orphan) {
- filter_set_last_id(filter, id, doa.o_gr);
- rc = filter_update_last_objid(exp->exp_obd, doa.o_gr, 1);
+ filter_set_last_id(filter, id, doa.o_seq);
+ rc = filter_update_last_objid(exp->exp_obd, doa.o_seq, 1);
} else {
- /* don't reuse orphan object, return last used objid */
+ /*
+ * We have destroyed orphan objects, but don't want to reuse
+ * them. Therefore we don't reset last_id to the last created
+ * objects. Instead, we report back to the MDS the object id
+ * of the last orphan, so that the MDS can restart allocating
+ * objects from this id + 1 and thus skip the whole orphan
+ * object id range
+ */
oa->o_id = last;
rc = 0;
}
- clear_bit(doa.o_gr, &filter->fo_destroys_in_progress);
+ cfs_clear_bit(doa.o_seq, &filter->fo_destroys_in_progress);
RETURN(rc);
}
static int filter_precreate(struct obd_device *obd, struct obdo *oa,
- obd_gr group, int *num);
+ obd_seq group, int *num);
/* returns a negative error or a nonnegative number of files to create */
static int filter_handle_precreate(struct obd_export *exp, struct obdo *oa,
- obd_gr group, struct obd_trans_info *oti)
+ obd_seq group, struct obd_trans_info *oti)
{
struct obd_device *obd = exp->exp_obd;
struct filter_obd *filter = &obd->u.filter;
RETURN(0);
}
/* This causes inflight precreates to abort and drop lock */
- set_bit(group, &filter->fo_destroys_in_progress);
- down(&filter->fo_create_locks[group]);
- if (!test_bit(group, &filter->fo_destroys_in_progress)) {
+ cfs_set_bit(group, &filter->fo_destroys_in_progress);
+ cfs_down(&filter->fo_create_locks[group]);
+ if (!cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
exp->exp_obd->obd_name, group);
- up(&filter->fo_create_locks[group]);
+ cfs_up(&filter->fo_create_locks[group]);
RETURN(0);
}
diff = oa->o_id - last;
GOTO(out, rc);
} else {
/* XXX: Used by MDS for the first time! */
- clear_bit(group, &filter->fo_destroys_in_progress);
+ cfs_clear_bit(group, &filter->fo_destroys_in_progress);
}
} else {
- down(&filter->fo_create_locks[group]);
+ cfs_down(&filter->fo_create_locks[group]);
if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
CERROR("%s: dropping old precreate request\n",
obd->obd_name);
GOTO(out, rc = 0);
}
/* only precreate if group == 0 and o_id is specfied */
- if (group == FILTER_GROUP_LLOG || oa->o_id == 0)
+ if (!fid_seq_is_mdt(group) || oa->o_id == 0)
diff = 1;
else
diff = oa->o_id - filter_last_id(filter, group);
oa->o_id = filter_last_id(&obd->u.filter, group);
rc = filter_precreate(obd, oa, group, &diff);
oa->o_id = filter_last_id(&obd->u.filter, group);
- oa->o_gr = group;
+ oa->o_seq = group;
oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
GOTO(out, rc);
}
/* else diff == 0 */
GOTO(out, rc = 0);
out:
- up(&filter->fo_create_locks[group]);
+ cfs_up(&filter->fo_create_locks[group]);
return rc;
}
/* at least try to account for cached pages. its still racey and
* might be under-reporting if clients haven't announced their
* caches with brw recently */
- spin_lock(&obd->obd_osfs_lock);
+ cfs_spin_lock(&obd->obd_osfs_lock);
rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
- spin_unlock(&obd->obd_osfs_lock);
+ cfs_spin_unlock(&obd->obd_osfs_lock);
CDEBUG(D_SUPER | D_CACHE, "blocks cached "LPU64" granted "LPU64
" pending "LPU64" free "LPU64" avail "LPU64"\n",
filter->fo_tot_pending,
osfs->os_bfree << blockbits, osfs->os_bavail << blockbits);
- filter_grant_sanity_check(obd, __FUNCTION__);
+ filter_grant_sanity_check(obd, __func__);
osfs->os_bavail -= min(osfs->os_bavail, GRANT_FOR_LLOG(obd) +
((filter->fo_tot_dirty + filter->fo_tot_pending +
osfs->os_bsize - 1) >> blockbits));
+ if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOSPC)) {
+ struct lr_server_data *lsd = class_server_data(obd);
+ int index = le32_to_cpu(lsd->lsd_ost_index);
+
+ if (obd_fail_val == -1 ||
+ index == obd_fail_val)
+ osfs->os_bfree = osfs->os_bavail = 2;
+ else if (obd_fail_loc & OBD_FAIL_ONCE)
+ obd_fail_loc &= ~OBD_FAILED; /* reset flag */
+ }
+
/* set EROFS to state field if FS is mounted as RDONLY. The goal is to
* stop creating files on MDS if OST is not good shape to create
* objects.*/
- osfs->os_state = (filter->fo_obt.obt_sb->s_flags & MS_RDONLY) ?
- EROFS : 0;
+ osfs->os_state = 0;
+
+ if (filter->fo_obt.obt_sb->s_flags & MS_RDONLY)
+ osfs->os_state = OS_STATE_READONLY;
+
+ if (filter->fo_raid_degraded)
+ osfs->os_state |= OS_STATE_DEGRADED;
RETURN(rc);
}
return rc;
}
+/* Re-read statfs on the backing filesystem and report the current number
+ * of free inodes.  Called for diagnostics when object creation fails with
+ * -ENOSPC (see filter_precreate).  Returns (__u64)-1 if statfs fails. */
+static __u64 filter_calc_free_inodes(struct obd_device *obd)
+{
+ int rc;
+ __u64 os_ffree = -1; /* sentinel: "unknown" if statfs fails below */
+
+ /* obd_osfs_lock serializes access to the cached obd_osfs data */
+ cfs_spin_lock(&obd->obd_osfs_lock);
+ rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, cfs_time_shift_64(1));
+ if (rc == 0)
+ os_ffree = obd->obd_osfs.os_ffree;
+ cfs_spin_unlock(&obd->obd_osfs_lock);
+
+ return os_ffree;
+}
/* We rely on the fact that only one thread will be creating files in a given
* group at a time, which is why we don't need an atomic filter_get_new_id.
* Caller must hold fo_create_locks[group]
*/
static int filter_precreate(struct obd_device *obd, struct obdo *oa,
- obd_gr group, int *num)
+ obd_seq group, int *num)
{
struct dentry *dchild = NULL, *dparent = NULL;
struct filter_obd *filter;
struct obd_statfs *osfs;
int err = 0, rc = 0, recreate_obj = 0, i;
cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT/2);
+ __u64 os_ffree;
obd_id next_id;
void *handle = NULL;
ENTRY;
OBD_ALLOC(osfs, sizeof(*osfs));
if (osfs == NULL)
RETURN(-ENOMEM);
- rc = filter_statfs(obd, osfs, cfs_time_current_64() - HZ, 0);
+ rc = filter_statfs(obd, osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
if (rc == 0 && osfs->os_bavail < (osfs->os_blocks >> 10)) {
CDEBUG(D_RPCTRACE,"%s: not enough space for create "
LPU64"\n", obd->obd_name, osfs->os_bavail <<
- filter->fo_vfsmnt->mnt_sb->s_blocksize_bits);
+ obd->u.obt.obt_vfsmnt->mnt_sb->s_blocksize_bits);
*num = 0;
rc = -ENOSPC;
}
for (i = 0; i < *num && err == 0; i++) {
int cleanup_phase = 0;
- if (test_bit(group, &filter->fo_destroys_in_progress)) {
+ if (cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
CWARN("%s: create aborted by destroy\n",
obd->obd_name);
rc = -EAGAIN;
} else
next_id = filter_last_id(filter, group) + 1;
+ /* Don't create objects beyond the valid range for this SEQ */
+ if (unlikely(fid_seq_is_mdt0(group) &&
+ next_id >= IDIF_MAX_OID)) {
+ CERROR("%s:"POSTID" hit the IDIF_MAX_OID (1<<48)!\n",
+ obd->obd_name, next_id, group);
+ GOTO(cleanup, rc = -ENOSPC);
+ } else if (unlikely(!fid_seq_is_mdt0(group) &&
+ next_id >= OBIF_MAX_OID)) {
+ CERROR("%s:"POSTID" hit the OBIF_MAX_OID (1<<32)!\n",
+ obd->obd_name, next_id, group);
+ GOTO(cleanup, rc = -ENOSPC);
+ }
+
dparent = filter_parent_lock(obd, group, next_id);
if (IS_ERR(dparent))
GOTO(cleanup, rc = PTR_ERR(dparent));
GOTO(cleanup, rc = PTR_ERR(handle));
cleanup_phase = 3;
- CDEBUG(D_INODE, "%s: filter_precreate(od->o_gr="LPU64
+ CDEBUG(D_INODE, "%s: filter_precreate(od->o_seq="LPU64
",od->o_id="LPU64")\n", obd->obd_name, group,
next_id);
S_IFREG | S_ISUID | S_ISGID | 0666, NULL);
if (rc) {
CERROR("create failed rc = %d\n", rc);
+ if (rc == -ENOSPC) {
+ os_ffree = filter_calc_free_inodes(obd);
+ if (os_ffree != -1)
+ CERROR("%s: free inode "LPU64"\n",
+ obd->obd_name, os_ffree);
+ }
GOTO(cleanup, rc);
}
+ if (dchild->d_inode)
+ CDEBUG(D_INFO, "objid "LPU64" got inum %lu\n", next_id,
+ dchild->d_inode->i_ino);
+
set_last_id:
if (!recreate_obj) {
filter_set_last_id(filter, next_id, group);
if (rc)
break;
- if (time_after(jiffies, enough_time)) {
+ if (cfs_time_after(jiffies, enough_time)) {
+ i++;
CDEBUG(D_RPCTRACE,
"%s: precreate slow - want %d got %d \n",
obd->obd_name, *num, i);
*num = i;
CDEBUG(D_RPCTRACE,
- "%s: created %d objects for group "LPU64": "LPU64" rc %d\n",
- obd->obd_name, i, group, filter->fo_last_objids[group], rc);
+ "%s: created %d objects for group "POSTID" rc %d\n",
+ obd->obd_name, i, filter->fo_last_objids[group], group, rc);
RETURN(rc);
}
-static int filter_create(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md **ea, struct obd_trans_info *oti)
+int filter_create(struct obd_export *exp, struct obdo *oa,
+ struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
struct obd_device *obd = exp->exp_obd;
struct filter_export_data *fed;
struct filter_obd *filter;
struct lvfs_run_ctxt saved;
struct lov_stripe_md *lsm = NULL;
- int rc = 0, diff, group = oa->o_gr;
+ int rc = 0, diff;
ENTRY;
- CDEBUG(D_INODE, "%s: filter_create(od->o_gr="LPU64",od->o_id="
- LPU64")\n", obd->obd_name, oa->o_gr, oa->o_id);
-
- if (!(oa->o_valid & OBD_MD_FLGROUP)) {
- CERROR("!!! nid %s sent invalid object group %d\n",
- obd_export_nid2str(exp), group);
- RETURN(-EINVAL);
- }
+ CDEBUG(D_INODE, "%s: filter_create(group="LPU64",id="
+ LPU64")\n", obd->obd_name, oa->o_seq, oa->o_id);
fed = &exp->exp_filter_data;
filter = &obd->u.filter;
- if (fed->fed_group != group) {
- CERROR("!!! this export (nid %s) used object group %d "
- "earlier; now it's trying to use group %d! This could "
- "be a bug in the MDS. Please report to "
- "http://bugzilla.lustre.org/\n",
- obd_export_nid2str(exp), fed->fed_group, group);
+ if (fed->fed_group != oa->o_seq) {
+ CERROR("%s: this export (nid %s) used object group %d "
+ "earlier; now it's trying to use group "LPU64"!"
+ " This could be a bug in the MDS. Please report to "
+ "http://bugzilla.lustre.org/\n", obd->obd_name,
+ obd_export_nid2str(exp), fed->fed_group, oa->o_seq);
RETURN(-ENOTUNIQ);
}
if ((oa->o_valid & OBD_MD_FLFLAGS) &&
(oa->o_flags & OBD_FL_RECREATE_OBJS)) {
- if (oa->o_id > filter_last_id(filter, oa->o_gr)) {
+ if (!obd->obd_recovering ||
+ oa->o_id > filter_last_id(filter, oa->o_seq)) {
CERROR("recreate objid "LPU64" > last id "LPU64"\n",
- oa->o_id, filter_last_id(filter,
- oa->o_gr));
+ oa->o_id, filter_last_id(filter, oa->o_seq));
rc = -EINVAL;
} else {
diff = 1;
- down(&filter->fo_create_locks[oa->o_gr]);
- rc = filter_precreate(obd, oa, oa->o_gr, &diff);
- up(&filter->fo_create_locks[oa->o_gr]);
+ cfs_down(&filter->fo_create_locks[oa->o_seq]);
+ rc = filter_precreate(obd, oa, oa->o_seq, &diff);
+ cfs_up(&filter->fo_create_locks[oa->o_seq]);
}
} else {
- rc = filter_handle_precreate(exp, oa, oa->o_gr, oti);
+ rc = filter_handle_precreate(exp, oa, oa->o_seq, oti);
}
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
struct obd_device *obd;
struct filter_obd *filter;
struct dentry *dchild = NULL, *dparent = NULL;
+ struct lustre_handle lockh = { 0 };
struct lvfs_run_ctxt saved;
void *handle = NULL;
struct llog_cookie *fcc = NULL;
int rc, rc2, cleanup_phase = 0, sync = 0;
struct iattr iattr;
+ unsigned long now;
ENTRY;
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
-
- rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa),
+ rc = filter_auth_capa(exp, NULL, oa->o_seq,
(struct lustre_capa *)capa, CAPA_OPC_OSS_DESTROY);
if (rc)
RETURN(rc);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
cleanup_phase = 1;
- CDEBUG(D_INODE, "%s: filter_destroy(od->o_gr="LPU64",od->o_id="
- LPU64")\n", obd->obd_name, oa->o_gr, oa->o_id);
+ CDEBUG(D_INODE, "%s: filter_destroy(group="LPU64",oid="
+ LPU64")\n", obd->obd_name, oa->o_seq, oa->o_id);
- dchild = filter_fid2dentry(obd, NULL, oa->o_gr, oa->o_id);
+ dchild = filter_fid2dentry(obd, NULL, oa->o_seq, oa->o_id);
if (IS_ERR(dchild))
GOTO(cleanup, rc = PTR_ERR(dchild));
cleanup_phase = 2;
if (dchild->d_inode == NULL) {
- CDEBUG(D_INODE, "destroying non-existent object "LPU64"\n",
- oa->o_id);
+ CDEBUG(D_INODE, "destroying non-existent object "POSTID"\n",
+ oa->o_id, oa->o_seq);
/* If object already gone, cancel cookie right now */
if (oa->o_valid & OBD_MD_FLCOOKIE) {
struct llog_ctxt *ctxt;
struct obd_llog_group *olg;
- fcc = &oa->o_lcookie;
- olg = filter_find_olg(obd, oa->o_gr);
+
+ olg = filter_find_olg(obd, oa->o_seq);
if (!olg) {
CERROR(" %s: can not find olg of group %d\n",
- obd->obd_name, (int)oa->o_gr);
+ obd->obd_name, (int)oa->o_seq);
GOTO(cleanup, rc = PTR_ERR(olg));
}
- llog_group_set_export(olg, exp);
-
+ fcc = &oa->o_lcookie;
ctxt = llog_group_get_ctxt(olg, fcc->lgc_subsys + 1);
llog_cancel(ctxt, NULL, 1, fcc, 0);
llog_ctxt_put(ctxt);
GOTO(cleanup, rc = -ENOENT);
}
- filter_prepare_destroy(obd, oa->o_id, oa->o_gr);
+ rc = filter_prepare_destroy(obd, oa->o_id, oa->o_seq, &lockh);
+ if (rc)
+ GOTO(cleanup, rc);
/* Our MDC connection is established by the MDS to us */
if (oa->o_valid & OBD_MD_FLCOOKIE) {
if (fcc != NULL)
*fcc = oa->o_lcookie;
}
- DQUOT_INIT(dchild->d_inode);
+ ll_vfs_dq_init(dchild->d_inode);
/* we're gonna truncate it first in order to avoid possible deadlock:
* P1 P2
* down(i_zombie) down(i_zombie)
* restart transaction
* (see BUG 4180) -bzzz
+ *
+ * take i_alloc_sem too to prevent other threads from writing to the
+ * file while we are truncating it. This can cause lock ordering issue
+ * between page lock, i_mutex & starting new journal handle.
+ * (see bug 20321) -johann
*/
+ now = jiffies;
+ down_write(&dchild->d_inode->i_alloc_sem);
LOCK_INODE_MUTEX(dchild->d_inode);
+ fsfilt_check_slow(exp->exp_obd, now, "i_alloc_sem and i_mutex");
+
+ /* VBR: version recovery check */
+ rc = filter_version_get_check(exp, oti, dchild->d_inode);
+ if (rc) {
+ UNLOCK_INODE_MUTEX(dchild->d_inode);
+ up_write(&dchild->d_inode->i_alloc_sem);
+ GOTO(cleanup, rc);
+ }
+
handle = fsfilt_start_log(obd, dchild->d_inode, FSFILT_OP_SETATTR,
NULL, 1);
if (IS_ERR(handle)) {
UNLOCK_INODE_MUTEX(dchild->d_inode);
+ up_write(&dchild->d_inode->i_alloc_sem);
GOTO(cleanup, rc = PTR_ERR(handle));
}
rc = fsfilt_setattr(obd, dchild, handle, &iattr, 1);
rc2 = fsfilt_commit(obd, dchild->d_inode, handle, 0);
UNLOCK_INODE_MUTEX(dchild->d_inode);
+ up_write(&dchild->d_inode->i_alloc_sem);
if (rc)
GOTO(cleanup, rc);
if (rc2)
* here, and not while truncating above. That avoids holding the
* parent lock for a long time during truncate, which can block other
* threads from doing anything to objects in that directory. bug 7171 */
- dparent = filter_parent_lock(obd, oa->o_gr, oa->o_id);
+ dparent = filter_parent_lock(obd, oa->o_seq, oa->o_id);
if (IS_ERR(dparent))
GOTO(cleanup, rc = PTR_ERR(dparent));
cleanup_phase = 3; /* filter_parent_unlock */
cleanup_phase = 4; /* fsfilt_commit */
/* Quota release need uid/gid of inode */
- obdo_from_inode(oa, dchild->d_inode, OBD_MD_FLUID|OBD_MD_FLGID);
+ obdo_from_inode(oa, dchild->d_inode, NULL, OBD_MD_FLUID|OBD_MD_FLGID);
- filter_fmd_drop(exp, oa->o_id, oa->o_gr);
+ filter_fmd_drop(exp, oa->o_id, oa->o_seq);
/* this drops dchild->d_inode->i_mutex unconditionally */
- rc = filter_destroy_internal(obd, oa->o_id, oa->o_gr, dparent, dchild);
+ rc = filter_destroy_internal(obd, oa->o_id, oa->o_seq, dparent, dchild);
EXIT;
cleanup:
* on commit. then we call callback directly to free
* the fcc.
*/
- rc = filter_finish_transno(exp, oti, rc, sync);
+ rc = filter_finish_transno(exp, NULL, oti, rc, sync);
if (sync) {
filter_cancel_cookies_cb(obd, 0, fcc, rc);
fcc = NULL;
case 3:
filter_parent_unlock(dparent);
case 2:
+ filter_fini_destroy(obd, &lockh);
+
f_dput(dchild);
if (fcc != NULL)
OBD_FREE(fcc, sizeof(*fcc));
}
CDEBUG(D_INODE, "calling truncate for object "LPU64", valid = "LPX64
- ", o_size = "LPD64"\n", oinfo->oi_oa->o_id,
- oinfo->oi_oa->o_valid, oinfo->oi_policy.l_extent.start);
+ ", o_size = "LPD64"\n", oinfo->oi_oa->o_id,oinfo->oi_oa->o_valid,
+ oinfo->oi_policy.l_extent.start);
oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
oinfo->oi_oa->o_valid |= OBD_FL_TRUNC;
void *capa)
{
struct lvfs_run_ctxt saved;
- struct filter_obd *filter;
+ struct obd_device_target *obt;
struct dentry *dentry;
int rc, rc2;
ENTRY;
- rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa),
+ rc = filter_auth_capa(exp, NULL, oa->o_seq,
(struct lustre_capa *)capa, CAPA_OPC_OSS_WRITE);
if (rc)
RETURN(rc);
- filter = &exp->exp_obd->u.filter;
+ obt = &exp->exp_obd->u.obt;
/* An objid of zero is taken to mean "sync whole filesystem" */
if (!oa || !(oa->o_valid & OBD_MD_FLID)) {
- rc = fsfilt_sync(exp->exp_obd, filter->fo_obt.obt_sb);
+ rc = fsfilt_sync(exp->exp_obd, obt->obt_sb);
/* Flush any remaining cancel messages out to the target */
filter_sync_llogs(exp->exp_obd, exp);
RETURN(rc);
}
- dentry = filter_oa2dentry(exp->exp_obd, oa);
+ dentry = filter_oa2dentry(exp->exp_obd, &oa->o_oi);
if (IS_ERR(dentry))
RETURN(PTR_ERR(dentry));
rc = filemap_fdatawrite(dentry->d_inode->i_mapping);
if (rc == 0) {
/* just any file to grab fsync method - "file" arg unused */
- struct file *file = filter->fo_rcvd_filp;
+ struct file *file = obt->obt_rcvd_filp;
if (file->f_op && file->f_op->fsync)
rc = file->f_op->fsync(NULL, dentry, 1);
UNLOCK_INODE_MUTEX(dentry->d_inode);
oa->o_valid = OBD_MD_FLID;
- obdo_from_inode(oa, dentry->d_inode, FILTER_VALID_FLAGS);
+ obdo_from_inode(oa, dentry->d_inode, NULL, FILTER_VALID_FLAGS);
pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
RETURN(0);
}
- dentry = __filter_oa2dentry(exp->exp_obd, &fm_key->oa,
- __FUNCTION__, 1);
+ dentry = __filter_oa2dentry(exp->exp_obd, &fm_key->oa.o_oi,
+ __func__, 1);
if (IS_ERR(dentry))
RETURN(PTR_ERR(dentry));
memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- rc = fsfilt_iocontrol(obd, dentry->d_inode, NULL,
- EXT3_IOC_FIEMAP, (long)fiemap);
+ rc = fsfilt_iocontrol(obd, dentry, FSFILT_IOC_FIEMAP,
+ (long)fiemap);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
f_dput(dentry);
RETURN(rc);
}
+ if (KEY_IS(KEY_SYNC_LOCK_CANCEL)) {
+ *((__u32 *) val) = obd->u.filter.fo_sync_lock_cancel;
+ *vallen = sizeof(__u32);
+ RETURN(0);
+ }
+
CDEBUG(D_IOCTL, "invalid key\n");
RETURN(-EINVAL);
}
llog_ctxt_put(ctxt);
return rc;
}
+
+/* Handle a grant-shrink request from a client (KEY_GRANT_SHRINK):
+ * fold the grant values carried in body->oa back into this export's
+ * grant accounting under obd_osfs_lock.  Always returns 0. */
+static int filter_set_grant_shrink(struct obd_export *exp,
+ struct ost_body *body)
+{
+ ENTRY;
+
+ /* handle shrink grant */
+ cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
+ filter_grant_incoming(exp, &body->oa);
+ cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
+
+ /* RETURN pairs with the ENTRY above for Lustre's debug tracing */
+ RETURN(0);
+}
+
+/* Handle KEY_MDS_CONN: register an incoming MDS connection on this OST.
+ * Records the export handle cookie in fo_mdc_conn, sets up the llog
+ * group named by @val (an optional __u32 sequence; 0 if absent), and
+ * hooks up quota info for the export.  Returns 0 or a negative errno
+ * from filter_setup_llog_group(). */
+static int filter_set_mds_conn(struct obd_export *exp, void *val)
+{
+ struct obd_device *obd;
+ int rc = 0, group;
+ ENTRY;
+
+ obd = exp->exp_obd;
+ if (obd == NULL) {
+ CDEBUG(D_IOCTL, "invalid export %p\n", exp);
+ RETURN(-EINVAL);
+ }
+
+ LCONSOLE_WARN("%s: received MDS connection from %s\n", obd->obd_name,
+ obd_export_nid2str(exp));
+ /* remember which export is the MDS connection */
+ obd->u.filter.fo_mdc_conn.cookie = exp->exp_handle.h_cookie;
+
+ /* setup llog imports */
+ if (val != NULL)
+ group = (int)(*(__u32 *)val);
+ else
+ group = 0; /* default value */
+
+ LASSERT_SEQ_IS_MDT(group);
+ rc = filter_setup_llog_group(exp, obd, group);
+ if (rc)
+ goto out;
+
+ if (group == FID_SEQ_OST_MDT0) {
+ /* setup llog group 1 for interop */
+ filter_setup_llog_group(exp, obd, FID_SEQ_LLOG);
+ }
+
+ lquota_setinfo(filter_quota_interface_ref, obd, exp);
+out:
+ RETURN(rc);
+}
+
static int filter_set_info_async(struct obd_export *exp, __u32 keylen,
void *key, __u32 vallen, void *val,
struct ptlrpc_request_set *set)
{
struct obd_device *obd;
- int rc = 0, group;
ENTRY;
obd = exp->exp_obd;
}
if (KEY_IS(KEY_CAPA_KEY)) {
+ int rc;
rc = filter_update_capa_key(obd, (struct lustre_capa_key *)val);
if (rc)
CERROR("filter update capability key failed: %d\n", rc);
RETURN(0);
}
- if (KEY_IS(KEY_GRANT_SHRINK)) {
- struct ost_body *body = (struct ost_body *)val;
- /* handle shrink grant */
- spin_lock(&exp->exp_obd->obd_osfs_lock);
- filter_grant_incoming(exp, &body->oa);
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
- RETURN(rc);
- }
+ if (KEY_IS(KEY_MDS_CONN))
+ RETURN(filter_set_mds_conn(exp, val));
- if (!KEY_IS(KEY_MDS_CONN))
- RETURN(-EINVAL);
+ if (KEY_IS(KEY_GRANT_SHRINK))
+ RETURN(filter_set_grant_shrink(exp, val));
- LCONSOLE_WARN("%s: received MDS connection from %s\n", obd->obd_name,
- obd_export_nid2str(exp));
- obd->u.filter.fo_mdc_conn.cookie = exp->exp_handle.h_cookie;
-
- /* setup llog imports */
- if (val != NULL)
- group = (int)(*(__u32 *)val);
- else
- group = 0; /* default value */
-
- LASSERT_MDS_GROUP(group);
- rc = filter_setup_llog_group(exp, obd, group);
- if (rc)
- goto out;
-
- lquota_setinfo(filter_quota_interface_ref, obd, exp);
-
- if (group == FILTER_GROUP_MDS0) {
- /* setup llog group 1 for interop */
- filter_setup_llog_group(exp, obd, FILTER_GROUP_LLOG);
- }
-out:
- RETURN(rc);
+ RETURN(-EINVAL);
}
int filter_iocontrol(unsigned int cmd, struct obd_export *exp,
switch (cmd) {
case OBD_IOC_ABORT_RECOVERY: {
- CERROR("aborting recovery for device %s\n", obd->obd_name);
+ LCONSOLE_WARN("%s: Aborting recovery.\n", obd->obd_name);
target_stop_recovery_thread(obd);
RETURN(0);
}
rc = 1;
#ifdef USE_HEALTH_CHECK_WRITE
- LASSERT(filter->fo_health_check_filp != NULL);
- rc |= !!lvfs_check_io_health(obd, filter->fo_health_check_filp);
+ LASSERT(filter->fo_obt.obt_health_check_filp != NULL);
+ rc |= !!lvfs_check_io_health(obd, filter->fo_obt.obt_health_check_filp);
#endif
return rc;
}
static int __init obdfilter_init(void)
{
struct lprocfs_static_vars lvars;
- int rc;
+ int rc, i;
+
+ /** sanity check for group<->mdsno conversion */
+ for (i = 0; i < MAX_MDT_COUNT; i++)
+ LASSERT(objseq_to_mdsno(mdt_to_obd_objseq(i)) == i);
lprocfs_filter_init_vars(&lvars);
- request_module("lquota");
+ cfs_request_module("%s", "lquota");
OBD_ALLOC(obdfilter_created_scratchpad,
OBDFILTER_CREATED_SCRATCHPAD_ENTRIES *
sizeof(*obdfilter_created_scratchpad));