Description: Heavy concurrent write and delete load may cause data corruption.
Details    : It was possible under high-load situations to have an extent
             metadata block in the block device cache from a just-unlinked
             file overwrite a newly-allocated data block. We now unmap any
             metadata buffers that alias just-allocated data blocks.
b=6998
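(For reference, a minimal sketch of the pattern used by the fix further down.
The wrapper function name is invented for illustration; unmap_underlying_metadata()
is the stock 2.6 kernel primitive the patch calls to evict stale buffers that
alias a block.)

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    /* Illustrative only: once a freshly allocated block is destined to hold
     * file data, evict any buffer_head in the block device cache that still
     * maps the same physical block (e.g. extent metadata of a just-unlinked
     * file), so a delayed writeback of that stale buffer cannot overwrite
     * the new data. */
    void sketch_claim_new_block(struct inode *inode, sector_t block)
    {
            struct block_device *bdev = inode->i_sb->s_bdev;

            unmap_underlying_metadata(bdev, block);
    }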
Description: lconf+mke2fs fail when formatting filesystem with > 77 stripes
Details    : lconf specifies an inode size of 4096 bytes when the default
             stripe_count is larger than 77. This conflicts with the default
             inode density of 1 per 4096 bytes. Allocate smaller inodes in
             this case to avoid pinning too much memory for large EAs.
b=7241
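(Roughly, the remedy is to pick an inode size that stays below the default
bytes-per-inode density. A sketch in lconf-style Python, with the helper name
invented for illustration and the thresholds taken from the lconf hunk below:)

    # Not lconf's actual code -- just the shape of the fix.  With the default
    # density of one inode per 4096 bytes, a 4096-byte inode size cannot work,
    # so very wide stripings fall back to small inodes instead of pinning that
    # much memory per inode for the striping EA.
    def pick_inode_size(stripe_count):
        if stripe_count > 77:
            return 512      # value chosen by the fix for > 77 stripes
        elif stripe_count > 34:
            return 2048
        # ... lower thresholds unchanged by this patch ...
        return 512          # illustrative default only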
to complete instead of consuming its dirty limit. With this change
we get improved performance when max_dirty_mb is small.
-tbd Cluster File Systems, Inc. <info@clusterfs.com>
+2005-08-08 Cluster File Systems, Inc. <info@clusterfs.com>
* version 1.4.4
* bug fixes
code in the 2.6 kernel caused the inode and/or EA data not to
be read from disk, causing single-file corruption.
+Severity   : critical
+Bugzilla   : 6998
+Frequency  : rare, only 2.6 filesystems using extents
+Description: Heavy concurrent write and delete load may cause data corruption.
+Details    : It was possible under high-load situations to have an extent
+             metadata block in the block device cache from a just-unlinked
+             file overwrite a newly-allocated data block. We now unmap any
+             metadata buffers that alias just-allocated data blocks.
+
+Severity   : minor
+Bugzilla   : 7241
+Frequency  : filesystems with default stripe_count larger than 77
+Description: lconf+mke2fs fail when formatting filesystem with > 77 stripes
+Details    : lconf specifies an inode size of 4096 bytes when the default
+             stripe_count is larger than 77. This conflicts with the default
+             inode density of 1 per 4096 bytes. Allocate smaller inodes in
+             this case to avoid pinning too much memory for large EAs.
+
------------------------------------------------------------------------------
2005-07-07 Cluster File Systems, Inc. <info@clusterfs.com>
-m4_define([LUSTRE_VERSION],[1.4.3.6])
+m4_define([LUSTRE_VERSION],[1.4.4.1])
loi = &lsm->lsm_oinfo[req->rq_stripe];
if (rc && lov->tgts[req->rq_idx].active) {
- CERROR("error creating objid "LPX64" sub-object"
+ CERROR("error creating fid "LPX64" sub-object"
" on OST idx %d/%d: rc = %d\n",
set->set_oa->o_id, req->rq_idx,
lsm->lsm_stripe_count, rc);
unlock_24kernel();
map:
if (err >= 0) {
+ struct block_device *bdev = inode->i_sb->s_bdev;
+
/* map blocks */
if (bp->num == 0) {
CERROR("hmm. why do we find this extent?\n");
CERROR("nothing to do?! i = %d, e_num = %u\n",
i, cex->ec_len);
for (; i < cex->ec_len && bp->num; i++) {
- if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
+ *(bp->blocks) = cex->ec_start + i;
+ if (cex->ec_type == EXT3_EXT_CACHE_EXTENT) {
*(bp->created) = 0;
- else
+ } else {
*(bp->created) = 1;
+ /* unmap any possible underlying metadata from
+ * the block device mapping. bug 6998.
+ * This only compiles on 2.6, but there are
+ * no users of mballoc on 2.4. */
+ unmap_underlying_metadata(bdev, *(bp->blocks));
+ }
bp->created++;
- *(bp->blocks) = cex->ec_start + i;
bp->blocks++;
bp->num--;
bp->start++;
{
void **pool;
int i;
- int result;
+ int result = 0;
ENTRY;
else:
stripe_count = 1
if stripe_count > 77:
- self.inode_size = 4096
+ self.inode_size = 512
elif stripe_count > 34:
self.inode_size = 2048
elif stripe_count > 13:
if not node_db:
panic("No node entry for " + node_uuid + " was found.")
- new_services += add_clumanager_node(node_db, nodes, services)
+ new_services = new_services + add_clumanager_node(node_db, nodes, services)
nodenames = []
for node in nodes.keys():
i = 0
for node in nodenames:
print " <member id=\"%d\" name=\"%s\" watchdog=\"yes\"/>" % (i, node)
- i += 1
+ i = i + 1
print " </members>\n <failoverdomains>"
for service in servicekeys:
svcdb = lustreDB.lookup(service)
print " <failoverdomain id=\"%d\" name=\"%s\" ordered=\"yes\" restricted=\"yes\">" % (i, svcdb.getName())
- i += 1
+ i = i + 1
j = 0
active_uuid = get_active_target(svcdb)
svc_nodedb = lustreDB.lookup(svc_node_uuid)
print " <failoverdomainnode id=\"%d\" name=\"%s\"/>" % (j, svc_nodedb.getName())
- j += 1
+ j = j + 1
print " </failoverdomain>"
print " <service checkinterval=\"30\" failoverdomain=\"%s\" id=\"%d\" name=\"%s\" userscript=\"%s/%s\">" \
% ( svcdb.getName(), i, svcdb.getName(), config.service_scripts, svcdb.getName())
print " <service_ipaddresses/>\n </service>"
- i += 1
+ i = i + 1
print " </services>\n</cluconfig>"