From: Bob Glossman Date: Wed, 23 Mar 2016 22:35:16 +0000 (-0400) Subject: LU-6163 kernel: use do_div64() for 64 bit divides X-Git-Tag: 2.8.52~26 X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=refs%2Fchanges%2F19%2F18819%2F8;p=fs%2Flustre-release.git LU-6163 kernel: use do_div64() for 64 bit divides For 32 bit platforms to perform 64 bit divides or modulo operations the do_div64() macro has to be used to avoid the __umoddi3 missing errors. Those errors prevent lustre from working properly on 32 bit machines. The fix for o2iblnd is a backport of upstream kernel commit: Linux-commit: 5f43264c5320624f3b458c5794f37220c4fc2934 Change-Id: Iee44fb707ef11be5e484989f0455951e2c84ceb5 Signed-off-by: Bob Glossman Signed-off-by: James Simmons Reviewed-on: http://review.whamcloud.com/18819 Tested-by: Jenkins Reviewed-by: Dmitry Eremin Tested-by: Maloo Reviewed-by: John L. Hammond Reviewed-by: Oleg Drokin --- diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c index 4d25dd4..d13ace0 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.c +++ b/lnet/klnds/o2iblnd/o2iblnd.c @@ -675,6 +675,7 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) int vectors; int off; int i; + lnet_nid_t ibp_nid; vectors = conn->ibc_cmid->device->num_comp_vectors; if (vectors <= 1) @@ -683,7 +684,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt); /* hash NID to CPU id in this partition... 
*/ - off = conn->ibc_peer->ibp_nid % cpumask_weight(mask); + ibp_nid = conn->ibc_peer->ibp_nid; + off = do_div(ibp_nid, cpumask_weight(mask)); for_each_cpu(i, mask) { if (off-- == 0) return i % vectors; diff --git a/lnet/lnet/net_fault.c b/lnet/lnet/net_fault.c index 661cc5d..b5c69f2 100644 --- a/lnet/lnet/net_fault.c +++ b/lnet/lnet/net_fault.c @@ -334,9 +334,11 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src, } } else { /* rate based drop */ - drop = rule->dr_stat.fs_count++ == rule->dr_drop_at; + __u64 count; - if (rule->dr_stat.fs_count % attr->u.drop.da_rate == 0) { + drop = rule->dr_stat.fs_count++ == rule->dr_drop_at; + count = rule->dr_stat.fs_count; + if (do_div(count, attr->u.drop.da_rate) == 0) { rule->dr_drop_at = rule->dr_stat.fs_count + cfs_rand() % attr->u.drop.da_rate; CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n", @@ -502,9 +504,12 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src, } } else { /* rate based delay */ + __u64 count; + delay = rule->dl_stat.fs_count++ == rule->dl_delay_at; /* generate the next random rate sequence */ - if (rule->dl_stat.fs_count % attr->u.delay.la_rate == 0) { + count = rule->dl_stat.fs_count; + if (do_div(count, attr->u.delay.la_rate) == 0) { rule->dl_delay_at = rule->dl_stat.fs_count + cfs_rand() % attr->u.delay.la_rate; CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n", diff --git a/lustre/include/lustre_lmv.h b/lustre/include/lustre_lmv.h index bc71c04..1cf102a 100644 --- a/lustre/include/lustre_lmv.h +++ b/lustre/include/lustre_lmv.h @@ -130,13 +130,11 @@ lmv_hash_all_chars(unsigned int count, const char *name, int namelen) static inline unsigned int lmv_hash_fnv1a(unsigned int count, const char *name, int namelen) { - __u64 hash; + __u64 hash; hash = lustre_hash_fnv_1a_64(name, namelen); - hash = hash % count; - - return hash; + return do_div(hash, count); } static inline int lmv_name_to_stripe_index(__u32 lmv_hash_type, diff --git a/lustre/obdclass/llog.c 
b/lustre/obdclass/llog.c index 68ee8fa..971c643 100644 --- a/lustre/obdclass/llog.c +++ b/lustre/obdclass/llog.c @@ -422,7 +422,7 @@ static int llog_process_thread(void *arg) struct llog_process_cat_data *cd = lpi->lpi_catdata; char *buf; size_t chunk_size; - __u64 cur_offset; + __u64 cur_offset, tmp_offset; int rc = 0, index = 1, last_index; int saved_index = 0; int last_called_index = 0; @@ -482,7 +482,8 @@ repeat: * The absolute offset of the current chunk is calculated * from cur_offset value and stored in chunk_offset variable. */ - if (cur_offset % chunk_size != 0) { + tmp_offset = cur_offset; + if (do_div(tmp_offset, chunk_size) != 0) { partial_chunk = true; chunk_offset = cur_offset & ~(chunk_size - 1); } else { diff --git a/lustre/obdclass/llog_test.c b/lustre/obdclass/llog_test.c index befea8d..a7c79a6 100644 --- a/lustre/obdclass/llog_test.c +++ b/lustre/obdclass/llog_test.c @@ -311,8 +311,10 @@ static int test3_check_n_add_cb(const struct lu_env *env, test_3_rec_off = lgh->lgh_cur_offset; if (lgh->lgh_cur_offset != test_3_rec_off) { + __u64 tmp = lgh->lgh_cur_offset; + /* there can be padding record */ - if ((lgh->lgh_cur_offset % chunk_size == 0) && + if ((do_div(tmp, chunk_size) == 0) && (lgh->lgh_cur_offset - test_3_rec_off < rec->lrh_len + LLOG_MIN_REC_SIZE)) { test_3_rec_off = lgh->lgh_cur_offset; diff --git a/lustre/ptlrpc/nrs_tbf.c b/lustre/ptlrpc/nrs_tbf.c index 7e6868d..4e3db7e 100644 --- a/lustre/ptlrpc/nrs_tbf.c +++ b/lustre/ptlrpc/nrs_tbf.c @@ -296,7 +296,8 @@ nrs_tbf_rule_start(struct ptlrpc_nrs_policy *policy, memcpy(rule->tr_name, start->tc_name, strlen(start->tc_name)); rule->tr_rpc_rate = start->tc_rpc_rate; - rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate; + rule->tr_nsecs = NSEC_PER_SEC; + do_div(rule->tr_nsecs, rule->tr_rpc_rate); rule->tr_depth = tbf_depth; atomic_set(&rule->tr_ref, 1); INIT_LIST_HEAD(&rule->tr_cli_list); @@ -346,7 +347,8 @@ nrs_tbf_rule_change(struct ptlrpc_nrs_policy *policy, return -ENOENT; rule->tr_rpc_rate = 
change->tc_rpc_rate; - rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate; + rule->tr_nsecs = NSEC_PER_SEC; + do_div(rule->tr_nsecs, rule->tr_rpc_rate); rule->tr_generation++; nrs_tbf_rule_put(rule); @@ -1399,14 +1401,15 @@ struct ptlrpc_nrs_request *nrs_tbf_req_get(struct ptlrpc_nrs_policy *policy, } else { __u64 now = ktime_to_ns(ktime_get()); __u64 passed; - long ntoken; + __u64 ntoken; __u64 deadline; deadline = cli->tc_check_time + cli->tc_nsecs; LASSERT(now >= cli->tc_check_time); passed = now - cli->tc_check_time; - ntoken = (passed * cli->tc_rpc_rate) / NSEC_PER_SEC; + ntoken = passed * cli->tc_rpc_rate; + do_div(ntoken, NSEC_PER_SEC); ntoken += cli->tc_ntoken; if (ntoken > cli->tc_depth) ntoken = cli->tc_depth;