LU-6163 kernel: use do_div() for 64 bit divides
author Bob Glossman <bob.glossman@intel.com>
Wed, 23 Mar 2016 22:35:16 +0000 (18:35 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Wed, 6 Apr 2016 01:40:14 +0000 (01:40 +0000)
On 32-bit platforms, 64-bit divide and modulo operations must go
through the do_div() macro to avoid link errors about the missing
__umoddi3/__udivdi3 libgcc helpers. Those errors prevent Lustre from
working properly on 32-bit machines. The fix for o2iblnd is a
backport of the upstream kernel commit:

Linux-commit: 5f43264c5320624f3b458c5794f37220c4fc2934
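
For reference, on 32-bit kernels do_div() takes a 64-bit dividend and
a 32-bit divisor, stores the quotient back into the dividend, and
returns the remainder. A minimal sketch of the idiom (hypothetical
function and values, not part of this patch):

#include <linux/types.h>
#include <asm/div64.h>          /* do_div() */

static u32 div_mod_sketch(u64 bytes, u32 chunk)
{
        u32 rem;

        /*
         * do_div() divides 'bytes' in place and returns the
         * remainder, avoiding the libgcc helpers (__udivdi3,
         * __umoddi3) that the kernel does not link against.
         */
        rem = do_div(bytes, chunk);
        return rem;             /* 'bytes' now holds the quotient */
}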

Change-Id: Iee44fb707ef11be5e484989f0455951e2c84ceb5
Signed-off-by: Bob Glossman <bob.glossman@intel.com>
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: http://review.whamcloud.com/18819
Tested-by: Jenkins
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lnet/klnds/o2iblnd/o2iblnd.c
lnet/lnet/net_fault.c
lustre/include/lustre_lmv.h
lustre/obdclass/llog.c
lustre/obdclass/llog_test.c
lustre/ptlrpc/nrs_tbf.c

lnet/klnds/o2iblnd/o2iblnd.c
index 4d25dd4..d13ace0 100644
@@ -675,6 +675,7 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
        int             vectors;
        int             off;
        int             i;
+       lnet_nid_t      ibp_nid;
 
        vectors = conn->ibc_cmid->device->num_comp_vectors;
        if (vectors <= 1)
@@ -683,7 +684,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
        mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
 
        /* hash NID to CPU id in this partition... */
-       off = conn->ibc_peer->ibp_nid % cpumask_weight(mask);
+       ibp_nid = conn->ibc_peer->ibp_nid;
+       off = do_div(ibp_nid, cpumask_weight(mask));
        for_each_cpu(i, mask) {
                if (off-- == 0)
                        return i % vectors;
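
Note on the hunk above: do_div() overwrites its dividend with the
quotient, so the patch first copies conn->ibc_peer->ibp_nid into the
local ibp_nid and then uses the returned remainder as the offset. The
same pattern in isolation (sketch, hypothetical name):

static unsigned int nid_to_off_sketch(u64 nid, u32 ncpus)
{
        u64 tmp = nid;                  /* keep the caller's NID intact */

        return do_div(tmp, ncpus);      /* remainder of nid / ncpus */
}
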
lnet/lnet/net_fault.c
index 661cc5d..b5c69f2 100644
@@ -334,9 +334,11 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
                }
 
        } else { /* rate based drop */
-               drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
+               __u64 count;
 
-               if (rule->dr_stat.fs_count % attr->u.drop.da_rate == 0) {
+               drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
+               count = rule->dr_stat.fs_count;
+               if (do_div(count, attr->u.drop.da_rate) == 0) {
                        rule->dr_drop_at = rule->dr_stat.fs_count +
                                           cfs_rand() % attr->u.drop.da_rate;
                        CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
@@ -502,9 +504,12 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
                }
 
        } else { /* rate based delay */
+               __u64 count;
+
                delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
                /* generate the next random rate sequence */
-               if (rule->dl_stat.fs_count % attr->u.delay.la_rate == 0) {
+               count = rule->dl_stat.fs_count;
+               if (do_div(count, attr->u.delay.la_rate) == 0) {
                        rule->dl_delay_at = rule->dl_stat.fs_count +
                                            cfs_rand() % attr->u.delay.la_rate;
                        CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
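
Both rate-based paths above use the same idiom: fs_count is a 64-bit
counter, so the every-Nth-event test cannot stay a plain modulo on
32-bit builds. The counter is copied into a scratch variable and the
remainder is taken with do_div(). A hedged sketch (hypothetical name):

/* Return true once every 'rate' events. */
static bool rate_hit_sketch(u64 fs_count, u32 rate)
{
        u64 count = fs_count;   /* do_div() would clobber fs_count */

        return do_div(count, rate) == 0;
}
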
lustre/include/lustre_lmv.h
index bc71c04..1cf102a 100644
@@ -130,13 +130,11 @@ lmv_hash_all_chars(unsigned int count, const char *name, int namelen)
 static inline unsigned int
 lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
 {
-       __u64   hash;
+       __u64 hash;
 
        hash = lustre_hash_fnv_1a_64(name, namelen);
 
-       hash = hash % count;
-
-       return hash;
+       return do_div(hash, count);
 }
 
 static inline int lmv_name_to_stripe_index(__u32 lmv_hash_type,
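
Since do_div() returns the remainder, the new return statement is the
64-bit-safe equivalent of the removed "hash % count". Spelled out as a
sketch (hypothetical name):

static unsigned int fnv1a_mod_sketch(u64 hash, unsigned int count)
{
        unsigned int rem = do_div(hash, count); /* old hash % count */

        return rem;     /* stripe index in [0, count) */
}
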
lustre/obdclass/llog.c
index 68ee8fa..971c643 100644
@@ -422,7 +422,7 @@ static int llog_process_thread(void *arg)
        struct llog_process_cat_data    *cd  = lpi->lpi_catdata;
        char                            *buf;
        size_t                           chunk_size;
-       __u64                            cur_offset;
+       __u64                            cur_offset, tmp_offset;
        int                              rc = 0, index = 1, last_index;
        int                              saved_index = 0;
        int                              last_called_index = 0;
@@ -482,7 +482,8 @@ repeat:
                 * The absolute offset of the current chunk is calculated
                 * from cur_offset value and stored in chunk_offset variable.
                 */
-               if (cur_offset % chunk_size != 0) {
+               tmp_offset = cur_offset;
+               if (do_div(tmp_offset, chunk_size) != 0) {
                        partial_chunk = true;
                        chunk_offset = cur_offset & ~(chunk_size - 1);
                } else {
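
The test above runs do_div() on tmp_offset so that cur_offset itself
stays intact for the rounding that follows; note the mask on the next
line still assumes chunk_size is a power of two. The boundary check in
isolation (sketch, hypothetical name):

/* True when 'offset' does not sit on a chunk_size boundary. */
static bool is_partial_chunk_sketch(u64 offset, u32 chunk_size)
{
        u64 tmp = offset;       /* preserve the caller's offset */

        return do_div(tmp, chunk_size) != 0;
}
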
lustre/obdclass/llog_test.c
index befea8d..a7c79a6 100644
@@ -311,8 +311,10 @@ static int test3_check_n_add_cb(const struct lu_env *env,
                        test_3_rec_off = lgh->lgh_cur_offset;
 
                if (lgh->lgh_cur_offset != test_3_rec_off) {
+                       __u64 tmp = lgh->lgh_cur_offset;
+
                        /* there can be padding record */
-                       if ((lgh->lgh_cur_offset % chunk_size == 0) &&
+                       if ((do_div(tmp, chunk_size) == 0) &&
                            (lgh->lgh_cur_offset - test_3_rec_off <
                             rec->lrh_len + LLOG_MIN_REC_SIZE)) {
                                test_3_rec_off = lgh->lgh_cur_offset;
lustre/ptlrpc/nrs_tbf.c
index 7e6868d..4e3db7e 100644
@@ -296,7 +296,8 @@ nrs_tbf_rule_start(struct ptlrpc_nrs_policy *policy,
 
        memcpy(rule->tr_name, start->tc_name, strlen(start->tc_name));
        rule->tr_rpc_rate = start->tc_rpc_rate;
-       rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
+       rule->tr_nsecs = NSEC_PER_SEC;
+       do_div(rule->tr_nsecs, rule->tr_rpc_rate);
        rule->tr_depth = tbf_depth;
        atomic_set(&rule->tr_ref, 1);
        INIT_LIST_HEAD(&rule->tr_cli_list);
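
The tr_nsecs assignment above (and the identical change in the next
hunk) computes the per-token interval as NSEC_PER_SEC / tr_rpc_rate;
since tr_nsecs is 64-bit, do_div() divides it in place and the
remainder is discarded. Stand-alone sketch (hypothetical name):

#include <linux/time.h>         /* NSEC_PER_SEC */

static u64 rate_to_nsecs_sketch(u32 rpc_rate)
{
        u64 nsecs = NSEC_PER_SEC;

        do_div(nsecs, rpc_rate);        /* nsecs = NSEC_PER_SEC / rate */
        return nsecs;
}
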
@@ -346,7 +347,8 @@ nrs_tbf_rule_change(struct ptlrpc_nrs_policy *policy,
                return -ENOENT;
 
        rule->tr_rpc_rate = change->tc_rpc_rate;
-       rule->tr_nsecs = NSEC_PER_SEC / rule->tr_rpc_rate;
+       rule->tr_nsecs = NSEC_PER_SEC;
+       do_div(rule->tr_nsecs, rule->tr_rpc_rate);
        rule->tr_generation++;
        nrs_tbf_rule_put(rule);
 
@@ -1399,14 +1401,15 @@ struct ptlrpc_nrs_request *nrs_tbf_req_get(struct ptlrpc_nrs_policy *policy,
        } else {
                __u64 now = ktime_to_ns(ktime_get());
                __u64 passed;
-               long  ntoken;
+               __u64 ntoken;
                __u64 deadline;
 
                deadline = cli->tc_check_time +
                          cli->tc_nsecs;
                LASSERT(now >= cli->tc_check_time);
                passed = now - cli->tc_check_time;
-               ntoken = (passed * cli->tc_rpc_rate) / NSEC_PER_SEC;
+               ntoken = passed * cli->tc_rpc_rate;
+               do_div(ntoken, NSEC_PER_SEC);
                ntoken += cli->tc_ntoken;
                if (ntoken > cli->tc_depth)
                        ntoken = cli->tc_depth;
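
The refill above widens ntoken to __u64 so do_div() can operate on it:
tokens accrue at tc_rpc_rate per second over 'passed' nanoseconds and
are capped at the bucket depth. Condensed sketch (hypothetical names):

static u64 refill_tokens_sketch(u64 passed, u32 rate, u64 cur, u64 depth)
{
        u64 ntoken = passed * rate;

        do_div(ntoken, NSEC_PER_SEC);   /* ntoken /= 10^9 */
        ntoken += cur;
        return ntoken > depth ? depth : ntoken;
}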