On 32 bit platforms, 64 bit divide and modulo operations must
use the do_div() macro; otherwise the build fails with
undefined reference errors for the missing __umoddi3 helper.
Those errors prevent Lustre from working properly on 32 bit
machines. The fix for o2iblnd is a backport of upstream kernel
commit:
Linux-commit: 5f43264c5320624f3b458c5794f37220c4fc2934
Change-Id: Iee44fb707ef11be5e484989f0455951e2c84ceb5
Signed-off-by: Bob Glossman <bob.glossman@intel.com>
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: http://review.whamcloud.com/18819
Tested-by: Jenkins
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
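
For reference, do_div(n, base) from asm/div64.h divides the 64 bit
lvalue n in place and returns the 32 bit remainder, so a single call
provides both the quotient and the modulo without emitting calls to
libgcc helpers such as __umoddi3. A minimal sketch of the pattern
(the function and variable names below are illustrative, not part of
this patch):

	#include <linux/types.h>
	#include <asm/div64.h>

	static u32 do_div_sketch(void)
	{
		u64 nbytes = 1000000007ULL;	/* 64 bit dividend */
		u32 chunk = 4096;		/* 32 bit divisor */
		u32 rem;

		/* after the call, nbytes holds the quotient */
		rem = do_div(nbytes, chunk);
		return rem;			/* the old nbytes % chunk */
	}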
int vectors;
int off;
int i;
+ lnet_nid_t ibp_nid;
vectors = conn->ibc_cmid->device->num_comp_vectors;
if (vectors <= 1)
mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
/* hash NID to CPU id in this partition... */
- off = conn->ibc_peer->ibp_nid % cpumask_weight(mask);
+ ibp_nid = conn->ibc_peer->ibp_nid;
+ off = do_div(ibp_nid, cpumask_weight(mask));
for_each_cpu(i, mask) {
if (off-- == 0)
return i % vectors;
}
} else { /* rate based drop */
- drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
- if (rule->dr_stat.fs_count % attr->u.drop.da_rate == 0) {
+ drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
+ count = rule->dr_stat.fs_count;
+ if (do_div(count, attr->u.drop.da_rate) == 0) {
rule->dr_drop_at = rule->dr_stat.fs_count +
cfs_rand() % attr->u.drop.da_rate;
CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
}
} else { /* rate based delay */
delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
/* generate the next random rate sequence */
- if (rule->dl_stat.fs_count % attr->u.delay.la_rate == 0) {
+ count = rule->dl_stat.fs_count;
+ if (do_div(count, attr->u.delay.la_rate) == 0) {
rule->dl_delay_at = rule->dl_stat.fs_count +
cfs_rand() % attr->u.delay.la_rate;
CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
static inline unsigned int
lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
{
hash = lustre_hash_fnv_1a_64(name, namelen);
- hash = hash % count;
-
- return hash;
+ return do_div(hash, count);
}
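
With this change lmv_hash_fnv1a() returns hash % count computed via
do_div(), i.e. a stripe index in [0, count). An illustrative call,
with a hypothetical stripe_count (not taken from this patch):

	/* map a file name onto one of stripe_count stripes */
	idx = lmv_hash_fnv1a(stripe_count, name, namelen);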
static inline int lmv_name_to_stripe_index(__u32 lmv_hash_type,
struct llog_process_cat_data *cd = lpi->lpi_catdata;
char *buf;
size_t chunk_size;
+ __u64 cur_offset, tmp_offset;
int rc = 0, index = 1, last_index;
int saved_index = 0;
int last_called_index = 0;
* The absolute offset of the current chunk is calculated
* from cur_offset value and stored in chunk_offset variable.
*/
- if (cur_offset % chunk_size != 0) {
+ tmp_offset = cur_offset;
+ if (do_div(tmp_offset, chunk_size) != 0) {
partial_chunk = true;
chunk_offset = cur_offset & ~(chunk_size - 1);
} else {
test_3_rec_off = lgh->lgh_cur_offset;
if (lgh->lgh_cur_offset != test_3_rec_off) {
+ __u64 tmp = lgh->lgh_cur_offset;
+
/* there can be padding record */
- if ((lgh->lgh_cur_offset % chunk_size == 0) &&
+ if ((do_div(tmp, chunk_size) == 0) &&
(lgh->lgh_cur_offset - test_3_rec_off <
rec->lrh_len + LLOG_MIN_REC_SIZE)) {
test_3_rec_off = lgh->lgh_cur_offset;
memcpy(rule->tr_name, start->tc_name, strlen(start->tc_name));
rule->tr_rpc_rate = start->tc_rpc_rate;
- rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
+ rule->tr_nsecs = NSEC_PER_SEC;
+ do_div(rule->tr_nsecs, rule->tr_rpc_rate);
rule->tr_depth = tbf_depth;
atomic_set(&rule->tr_ref, 1);
INIT_LIST_HEAD(&rule->tr_cli_list);
return -ENOENT;
rule->tr_rpc_rate = change->tc_rpc_rate;
- rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
+ rule->tr_nsecs = NSEC_PER_SEC;
+ do_div(rule->tr_nsecs, rule->tr_rpc_rate);
rule->tr_generation++;
nrs_tbf_rule_put(rule);
} else {
__u64 now = ktime_to_ns(ktime_get());
__u64 passed;
__u64 deadline;
deadline = cli->tc_check_time +
cli->tc_nsecs;
LASSERT(now >= cli->tc_check_time);
passed = now - cli->tc_check_time;
- ntoken = (passed * cli->tc_rpc_rate) / NSEC_PER_SEC;
+ ntoken = passed * cli->tc_rpc_rate;
+ do_div(ntoken, NSEC_PER_SEC);
ntoken += cli->tc_ntoken;
if (ntoken > cli->tc_depth)
ntoken = cli->tc_depth;
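
For context, the converted line is the token bucket refill: tokens
accrue at tc_rpc_rate per second over the elapsed nanoseconds and are
capped at tc_depth. A worked example with assumed values (not from
the patch):

	/* illustrative refill math; names mirror the hunk above */
	u64 ntoken = 2500000ULL * 10000;	/* passed(ns) * tc_rpc_rate */
	do_div(ntoken, NSEC_PER_SEC);		/* ntoken == 25 tokens earned */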