free(cache);
}
+/* generic hashing taken from the Linux kernel */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
+/*
+ * __hash_32 - full-width multiplicative hash of a 32-bit value.
+ * Multiplies by the 32-bit golden-ratio constant; callers that want
+ * fewer bits should take the HIGH bits of the result, which are the
+ * best mixed.
+ */
+_INLINE_ __u32 __hash_32(__u32 val)
+{
+ return val * GOLDEN_RATIO_32;
+}
+
+/*
+ * hash_32 - hash a 32-bit value down to @bits bits.
+ * Keeps the top @bits bits of the multiplicative hash.
+ * NOTE(review): assumes 0 < bits <= 32 — a shift by 32 (bits == 0)
+ * would be undefined behavior; confirm callers never pass 0.
+ */
+_INLINE_ __u32 hash_32(__u32 val, unsigned int bits)
+{
+ /* High bits are more random, so use them. */
+ return __hash_32(val) >> (32 - bits);
+}
+
+/*
+ * hash_64 - hash a 64-bit value down to @bits bits.
+ * On 64-bit hosts (sizeof(long) >= 8, resolved at compile time) this is
+ * a single 64x64 multiply keeping the top @bits bits.  On 32-bit hosts
+ * the two halves are folded together first so only 32x32 multiplies
+ * are needed.
+ * NOTE(review): like hash_32, assumes bits > 0 (shift by 64 is UB).
+ */
+_INLINE_ __u32 hash_64(__u64 val, unsigned int bits)
+{
+ if (sizeof(long) >= 8) {
+ /* 64x64-bit multiply is efficient on all 64-bit processors */
+ return val * GOLDEN_RATIO_64 >> (64 - bits);
+ } else {
+ /* Hash 64 bits using only 32x32-bit multiply. */
+ return hash_32((__u32)val ^ __hash_32(val >> 32), bits);
+ }
+}
+
#undef _INLINE_
#endif
}
/*
 * Return true when the (xtime, extra) pair encodes a pre-1970
 * (negative) timestamp: the sign bit of xtime is set and every bit of
 * EXT4_EPOCH_MASK is set in the extra field.  The 1U constant avoids
 * the undefined behavior of left-shifting a signed 1 into the sign bit.
 */
static int check_inode_extra_negative_epoch(__u32 xtime, __u32 extra) {
- return (xtime & (1 << 31)) != 0 &&
+ return (xtime & (1U << 31)) != 0 &&
 (extra & EXT4_EPOCH_MASK) == EXT4_EPOCH_MASK;
}
pb.previous_block = 0;
pb.is_dir = LINUX_S_ISDIR(inode->i_mode);
pb.is_reg = LINUX_S_ISREG(inode->i_mode);
- pb.max_blocks = 1 << (31 - fs->super->s_log_block_size);
+ pb.max_blocks = 1U << (31 - fs->super->s_log_block_size);
pb.inode = inode;
pb.pctx = pctx;
pb.ctx = ctx;
#endif /* __KERNEL__ */
+/*
+ * get_be32 - read a big-endian 32-bit value byte by byte.
+ * Accessing through unsigned char avoids both alignment faults and
+ * strict-aliasing issues, and works regardless of host endianness.
+ */
+static inline __u32 get_be32(__be32 *p)
+{
+ unsigned char *cp = (unsigned char *) p;
+ __u32 ret;
+
+ ret = *cp++;
+ ret = (ret << 8) + *cp++;
+ ret = (ret << 8) + *cp++;
+ ret = (ret << 8) + *cp++;
+ return ret;
+}
+
+/*
+ * get_be16 - read a big-endian 16-bit value byte by byte.
+ * Same rationale as get_be32: endian- and alignment-safe access to
+ * on-disk journal fields.
+ */
+static inline __u16 get_be16(__be16 *p)
+{
+ unsigned char *cp = (unsigned char *) p;
+ __u16 ret;
+
+ ret = *cp++;
+ ret = (ret << 8) + *cp++;
+ return ret;
+}
/*
* Read a block from the journal
nr++;
tagp += tag_bytes;
- if (!(tag->t_flags & ext2fs_cpu_to_be16(JFS_FLAG_SAME_UUID)))
+ if (!(get_be16(&tag->t_flags) & JFS_FLAG_SAME_UUID))
tagp += 16;
- if (tag->t_flags & ext2fs_cpu_to_be16(JFS_FLAG_LAST_TAG))
+ if (get_be16(&tag->t_flags) & JFS_FLAG_LAST_TAG)
break;
}
return err;
}
-static inline __u32 get_be32(__be32 *p)
-{
- unsigned char *cp = (unsigned char *) p;
- __u32 ret;
-
- ret = *cp++;
- ret = (ret << 8) + *cp++;
- ret = (ret << 8) + *cp++;
- ret = (ret << 8) + *cp++;
- return ret;
-}
-
static inline unsigned long long read_tag_block(journal_t *journal,
journal_block_tag_t *tag)
{
csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
if (jfs_has_feature_csum3(j))
- return tag3->t_checksum == ext2fs_cpu_to_be32(csum32);
+ return get_be32(&tag3->t_checksum) == csum32;
- return tag->t_checksum == ext2fs_cpu_to_be16(csum32);
+ return get_be16(&tag->t_checksum) == (csum32 & 0xFFFF);
}
static int do_one_pass(journal_t *journal,
unsigned long io_block;
tag = (journal_block_tag_t *) tagp;
- flags = ext2fs_be16_to_cpu(tag->t_flags);
+ flags = get_be16(&tag->t_flags);
io_block = next_log_block++;
wrap(journal, next_log_block);
/*
 * hash - map a journal block number to a bucket in the revoke hash
 * table, keeping hash_shift bits.  The new code delegates to the
 * kernel-style hash_64(); the removed ad-hoc shift/xor mix could
 * invoke undefined shifts when hash_shift < 12 (negative shift count).
 */
static inline int hash(journal_t *journal, unsigned long long block)
{
 struct jbd2_revoke_table_s *table = journal->j_revoke;
- int hash_shift = table->hash_shift;
- int hash = (int)block ^ (int)((block >> 31) >> 1);
- return ((hash << (hash_shift - 6)) ^
- (hash >> 13) ^
- (hash << (hash_shift - 12))) & (table->hash_size - 1);
+ return (hash_64(block, table->hash_shift));
}
static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
}
check_for_ro_violation_return(ctx, ret);
if (!*tind_block || (ret & BLOCK_ABORT)) {
- ctx->bcount += limit*limit*limit;
+ ctx->bcount += ((unsigned long long) limit)*limit*limit;
return ret;
}
if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
* need to read in more blocks.
*/
if (scan->bytes_left < scan->inode_size) {
- memcpy(scan->temp_buffer, scan->ptr, scan->bytes_left);
+ if (scan->bytes_left)
+ memcpy(scan->temp_buffer, scan->ptr, scan->bytes_left);
extra_bytes = scan->bytes_left;
retval = get_next_blocks(scan);