*
* 2) send synchronous truncate RPC with just assigned id
*/
- LASSERT(attr != NULL);
+
+	/* there are a few places in MDD code still passing NULL
+ * XXX: to be fixed soon */
+ if (attr == NULL)
+ RETURN(0);
+
if (attr->la_valid & LA_SIZE && attr->la_size > 0) {
LASSERT(!dt_object_exists(dt));
osp_object_assign_id(env, d, o);
RETURN(rc);
}
+ if (o->opo_new) {
+		/* no need to log new objects being created */
+ RETURN(0);
+ }
+
if (!(attr->la_valid & (LA_UID | LA_GID)))
RETURN(0);
if (!(attr->la_valid & (LA_UID | LA_GID)))
RETURN(0);
+ /* new object, the very first ->attr_set()
+ * initializing attributes needs no logging
+	 * all subsequent ones are subject to the
+ * logging and synchronization with OST */
+ if (o->opo_new) {
+ o->opo_new = 0;
+ RETURN(0);
+ }
+
/*
* once transaction is committed put proper command on
* the queue going to our OST
ENTRY;
+	/* this should happen on a non-0 OSP only, so that at least one
+	 * object has already been declared in the scenario and LOD
+	 * should clean that up */
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_CREATE_FAIL) && d->opd_index == 1)
+ RETURN(-ENOSPC);
+
LASSERT(d->opd_last_used_file);
fid = lu_object_fid(&dt->do_lu);
/*
* There can be gaps in precreated ids and record to unlink llog
+	 * XXX: we do not handle gaps yet; the earlier implementation
+	 * was found to be racy, so it has been disabled. There is no
+	 * point in making a useless but expensive llog declaration.
*/
- rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
+ /* rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th); */
if (unlikely(!fid_is_zero(fid))) {
/* replay case: caller knows fid */
th);
} else {
/* not needed in the cache anymore */
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE,
+ set_bit(LU_OBJECT_HEARD_BANSHEE,
&dt->do_lu.lo_header->loh_flags);
}
RETURN(rc);
rc = fid_ostid_pack(lu_object_fid(&dt->do_lu), &osi->osi_oi);
LASSERT(rc == 0);
osi->osi_id = ostid_id(&osi->osi_oi);
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
osp_update_last_id(d, osi->osi_id);
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
}
LASSERT(osi->osi_id);
/* we might have lost precreated objects */
if (unlikely(d->opd_gap_count) > 0) {
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
if (d->opd_gap_count > 0) {
int count = d->opd_gap_count;
osi->osi_oi.oi_id = d->opd_gap_start;
d->opd_gap_count = 0;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
CDEBUG(D_HA, "Found gap "LPU64"+%d in objids\n",
d->opd_gap_start, count);
/* real gap handling is disabled intil ORI-692 will be
* fixed, now we only report gaps */
} else {
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
}
}
+ /* new object, the very first ->attr_set()
+ * initializing attributes needs no logging */
+ o->opo_new = 1;
+
osp_objid_buf_prep(osi, d, d->opd_index);
rc = dt_record_write(env, d->opd_last_used_file, &osi->osi_lb,
&osi->osi_off, th);
+ CDEBUG(D_HA, "%s: Wrote last used ID: "LPU64": %d\n",
+ d->opd_obd->obd_name, le64_to_cpu(d->opd_last_used_id), rc);
+
RETURN(rc);
}
rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
/* not needed in cache any more */
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
RETURN(rc);
}
*/
if (unlikely(po->opo_reserved)) {
LASSERT(d->opd_pre_reserved > 0);
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
d->opd_pre_reserved--;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
/* not needed in cache any more */
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
}
EXIT;
}