LU-5342 lmv: fix some byte order issues
fs/lustre-release.git: lustre/ptlrpc/pack_generic.c
index 262ee3b..03515ca 100644
--- a/lustre/ptlrpc/pack_generic.c
+++ b/lustre/ptlrpc/pack_generic.c
@@ -96,46 +96,6 @@ int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
 }
 EXPORT_SYMBOL(ptlrpc_buf_need_swab);
 
-/* This enlarges the req buffer of request \a req to the next power of 2
- * multiple of \a newbuf_size.
- * Returns zero on success or ENOMEM if it failed to allocate the new buffer.
- *
- * This is used in the reply path on the client if the server responded
- * with a bigger message than we expected so we can save the new state for
- * a possible future replay where we'll need to present this new info
- * (usually striping that's not available at create time) */
-int ptlrpc_enlarge_req_buffer(struct ptlrpc_request *req, int newbuf_size)
-{
-       struct lustre_msg *newbuf;
-
-       newbuf_size = size_roundup_power2(newbuf_size);
-
-       OBD_ALLOC_LARGE(newbuf, newbuf_size);
-       if (newbuf == NULL)
-               return -ENOMEM;
-
-       /* Must lock this, so that otherwise unprotected change of
-        * rq_reqmsg is not racing with parallel processing of
-        * imp_replay_list traversing threads. See LU-3333
-        * This is a bandaid at best, we really need to deal with this
-        * in request enlarging code before unpacking what's already
-        * there */
-       if (req->rq_import)
-               spin_lock(&req->rq_import->imp_lock);
-
-       memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
-
-       OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
-       req->rq_reqbuf = newbuf;
-       req->rq_reqbuf_len = newbuf_size;
-       req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
-
-       if (req->rq_import)
-               spin_unlock(&req->rq_import->imp_lock);
-
-       return 0;
-}
-
 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
                                               __u32 version)
 {
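
The hunk above removes ptlrpc_enlarge_req_buffer(), which rounded the requested size up to the next power of two, allocated a new request buffer, copied the old contents across while holding the import lock, and swapped the pointers. For reference only, here is a minimal user-space sketch of that grow-and-copy pattern; round_up_pow2() and grow_buffer() are made-up names for illustration, not Lustre APIs, and the locking is omitted.

/*
 * Minimal user-space sketch of the grow-and-copy pattern used by the
 * removed helper.  Names are illustrative only; the real code also
 * serializes against imp_replay_list walkers via the import lock.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Round n up to the next power of two. */
static size_t round_up_pow2(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Replace *buf (currently *len bytes) with a copy living in a buffer of
 * at least newsize bytes, rounded up to a power of two. */
static int grow_buffer(void **buf, size_t *len, size_t newsize)
{
	size_t rounded = round_up_pow2(newsize);
	void *newbuf = malloc(rounded);

	if (newbuf == NULL)
		return -ENOMEM;

	memcpy(newbuf, *buf, *len);
	free(*buf);
	*buf = newbuf;
	*len = rounded;
	return 0;
}

int main(void)
{
	size_t len = 64;
	void *buf = calloc(1, len);

	/* After this call len is 256 (200 rounded up to a power of two). */
	return buf == NULL || grow_buffer(&buf, &len, 200) != 0;
}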
@@ -301,20 +261,21 @@ int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
 EXPORT_SYMBOL(lustre_pack_request);
 
 #if RS_DEBUG
-CFS_LIST_HEAD(ptlrpc_rs_debug_lru);
+struct list_head ptlrpc_rs_debug_lru =
+       LIST_HEAD_INIT(ptlrpc_rs_debug_lru);
 spinlock_t ptlrpc_rs_debug_lock;
 
 #define PTLRPC_RS_DEBUG_LRU_ADD(rs)                                    \
 do {                                                                   \
        spin_lock(&ptlrpc_rs_debug_lock);                               \
-       cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);  \
+       list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);      \
        spin_unlock(&ptlrpc_rs_debug_lock);                             \
 } while (0)
 
 #define PTLRPC_RS_DEBUG_LRU_DEL(rs)                                    \
 do {                                                                   \
        spin_lock(&ptlrpc_rs_debug_lock);                               \
-       cfs_list_del(&(rs)->rs_debug_list);                             \
+       list_del(&(rs)->rs_debug_list);                         \
        spin_unlock(&ptlrpc_rs_debug_lock);                             \
 } while (0)
 #else
@@ -330,7 +291,7 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
        spin_lock(&svcpt->scp_rep_lock);
 
        /* See if we have anything in a pool, and wait if nothing */
-       while (cfs_list_empty(&svcpt->scp_rep_idle)) {
+       while (list_empty(&svcpt->scp_rep_idle)) {
                struct l_wait_info      lwi;
                int                     rc;
 
@@ -339,15 +300,15 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
                 * bail out instead of waiting infinitely */
                lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
                rc = l_wait_event(svcpt->scp_rep_waitq,
-                                 !cfs_list_empty(&svcpt->scp_rep_idle), &lwi);
+                                 !list_empty(&svcpt->scp_rep_idle), &lwi);
                if (rc != 0)
                        goto out;
                spin_lock(&svcpt->scp_rep_lock);
        }
 
-       rs = cfs_list_entry(svcpt->scp_rep_idle.next,
+       rs = list_entry(svcpt->scp_rep_idle.next,
                            struct ptlrpc_reply_state, rs_list);
-       cfs_list_del(&rs->rs_list);
+       list_del(&rs->rs_list);
 
        spin_unlock(&svcpt->scp_rep_lock);
 
@@ -364,7 +325,7 @@ void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
 
        spin_lock(&svcpt->scp_rep_lock);
-       cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
+       list_add(&rs->rs_list, &svcpt->scp_rep_idle);
        spin_unlock(&svcpt->scp_rep_lock);
        wake_up(&svcpt->scp_rep_waitq);
 }
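
The list handling above (and in the hunks that follow) moves from the legacy cfs_list_* wrappers to the plain Linux list_head API: LIST_HEAD_INIT for static heads, list_add_tail()/list_add() and list_del() to queue and dequeue reply states, list_empty() for the wait condition, and list_entry() to get from the embedded rs_list member back to its container. A small self-contained sketch of the idiom follows; the tiny list implementation and struct reply_state are stand-ins for <linux/list.h> and struct ptlrpc_reply_state, not the real definitions.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Stand-in for struct ptlrpc_reply_state with its embedded rs_list. */
struct reply_state {
	int id;
	struct list_head rs_list;
};

static struct list_head idle = LIST_HEAD_INIT(idle);

int main(void)
{
	struct reply_state rs = { .id = 1 };
	struct reply_state *got;

	/* As in lustre_put_emerg_rs(): return a reply state to the pool. */
	list_add_tail(&rs.rs_list, &idle);

	/* As in lustre_get_emerg_rs(): take the first idle entry. */
	got = list_entry(idle.next, struct reply_state, rs_list);
	list_del(&got->rs_list);

	printf("got rs %d, pool empty: %d\n", got->id, list_empty(&idle));
	return 0;
}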
@@ -394,9 +355,9 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
        rs->rs_cb_id.cbid_fn = reply_out_callback;
        rs->rs_cb_id.cbid_arg = rs;
        rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
-       CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
-       CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
-       CFS_INIT_LIST_HEAD(&rs->rs_list);
+       INIT_LIST_HEAD(&rs->rs_exp_list);
+       INIT_LIST_HEAD(&rs->rs_obd_list);
+       INIT_LIST_HEAD(&rs->rs_list);
        spin_lock_init(&rs->rs_lock);
 
         req->rq_replen = msg_len;
@@ -559,8 +520,8 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
        LASSERT(!rs->rs_scheduled);
        LASSERT(rs->rs_export == NULL);
        LASSERT(rs->rs_nlocks == 0);
-       LASSERT(cfs_list_empty(&rs->rs_exp_list));
-       LASSERT(cfs_list_empty(&rs->rs_obd_list));
+       LASSERT(list_empty(&rs->rs_exp_list));
+       LASSERT(list_empty(&rs->rs_obd_list));
 
        sptlrpc_svc_free_rs(rs);
 }
@@ -1838,11 +1799,11 @@ void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
 }
 EXPORT_SYMBOL(lustre_swab_obd_ioobj);
 
-void lustre_swab_niobuf_remote (struct niobuf_remote *nbr)
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
 {
-        __swab64s (&nbr->offset);
-        __swab32s (&nbr->len);
-        __swab32s (&nbr->flags);
+       __swab64s(&nbr->rnb_offset);
+       __swab32s(&nbr->rnb_len);
+       __swab32s(&nbr->rnb_flags);
 }
 EXPORT_SYMBOL(lustre_swab_niobuf_remote);
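
lustre_swab_niobuf_remote() is the basic byte-order fixup pattern used throughout this file: when a message arrives from a peer of the opposite endianness, every fixed-width field of the on-wire struct is byte-reversed in place (here the fields only gain the rnb_ prefix). A minimal user-space sketch of that pattern, assuming __builtin_bswap32/64 as stand-ins for the kernel's __swab32s()/__swab64s() and a simplified copy of the struct:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct niobuf_remote as sent on the wire. */
struct niobuf_remote_wire {
	uint64_t rnb_offset;
	uint32_t rnb_len;
	uint32_t rnb_flags;
};

/* In-place swaps, standing in for __swab64s()/__swab32s(). */
static void swab64s(uint64_t *p) { *p = __builtin_bswap64(*p); }
static void swab32s(uint32_t *p) { *p = __builtin_bswap32(*p); }

static void swab_niobuf_remote(struct niobuf_remote_wire *nbr)
{
	swab64s(&nbr->rnb_offset);
	swab32s(&nbr->rnb_len);
	swab32s(&nbr->rnb_flags);
}

int main(void)
{
	struct niobuf_remote_wire nbr = { 0x1122334455667788ULL, 0x1000, 0x1 };

	/* Done only when the sender's byte order differs from ours. */
	swab_niobuf_remote(&nbr);
	printf("offset=%llx len=%x flags=%x\n",
	       (unsigned long long)nbr.rnb_offset, nbr.rnb_len, nbr.rnb_flags);
	return 0;
}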
 
@@ -1911,35 +1872,35 @@ EXPORT_SYMBOL(lustre_swab_lquota_lvb);
 
 void lustre_swab_mdt_body (struct mdt_body *b)
 {
-       lustre_swab_lu_fid(&b->fid1);
-       lustre_swab_lu_fid(&b->fid2);
+       lustre_swab_lu_fid(&b->mbo_fid1);
+       lustre_swab_lu_fid(&b->mbo_fid2);
        /* handle is opaque */
-       __swab64s(&b->valid);
-       __swab64s(&b->size);
-       __swab64s(&b->mtime);
-       __swab64s(&b->atime);
-       __swab64s(&b->ctime);
-       __swab64s(&b->blocks);
-       __swab64s(&b->ioepoch);
-       __swab64s(&b->t_state);
-       __swab32s(&b->fsuid);
-       __swab32s(&b->fsgid);
-       __swab32s(&b->capability);
-       __swab32s(&b->mode);
-       __swab32s(&b->uid);
-       __swab32s(&b->gid);
-       __swab32s(&b->flags);
-       __swab32s(&b->rdev);
-       __swab32s(&b->nlink);
-       CLASSERT(offsetof(typeof(*b), unused2) != 0);
-       __swab32s(&b->suppgid);
-       __swab32s(&b->eadatasize);
-       __swab32s(&b->aclsize);
-       __swab32s(&b->max_mdsize);
-       __swab32s(&b->max_cookiesize);
-       __swab32s(&b->uid_h);
-       __swab32s(&b->gid_h);
-       CLASSERT(offsetof(typeof(*b), padding_5) != 0);
+       __swab64s(&b->mbo_valid);
+       __swab64s(&b->mbo_size);
+       __swab64s(&b->mbo_mtime);
+       __swab64s(&b->mbo_atime);
+       __swab64s(&b->mbo_ctime);
+       __swab64s(&b->mbo_blocks);
+       __swab64s(&b->mbo_ioepoch);
+       __swab64s(&b->mbo_t_state);
+       __swab32s(&b->mbo_fsuid);
+       __swab32s(&b->mbo_fsgid);
+       __swab32s(&b->mbo_capability);
+       __swab32s(&b->mbo_mode);
+       __swab32s(&b->mbo_uid);
+       __swab32s(&b->mbo_gid);
+       __swab32s(&b->mbo_flags);
+       __swab32s(&b->mbo_rdev);
+       __swab32s(&b->mbo_nlink);
+       CLASSERT(offsetof(typeof(*b), mbo_unused2) != 0);
+       __swab32s(&b->mbo_suppgid);
+       __swab32s(&b->mbo_eadatasize);
+       __swab32s(&b->mbo_aclsize);
+       __swab32s(&b->mbo_max_mdsize);
+       __swab32s(&b->mbo_max_cookiesize);
+       __swab32s(&b->mbo_uid_h);
+       __swab32s(&b->mbo_gid_h);
+       CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0);
 }
 EXPORT_SYMBOL(lustre_swab_mdt_body);
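
The CLASSERT(offsetof(typeof(*b), mbo_unused2) != 0) lines above are compile-time checks. Since any member other than the first has a non-zero offset, the condition itself is trivially true; the value of the assertion is that it names the reserved field, so renaming or dropping it breaks the build instead of letting the swab routine silently drift from the wire layout. A toy sketch of the same idea, assuming C11 _Static_assert as a stand-in for Lustre's CLASSERT() and a made-up struct in place of mdt_body:

#include <stddef.h>
#include <stdint.h>

/* Made-up stand-in for the wire struct; not the real mdt_body layout. */
struct wire_body {
	uint64_t mbo_valid;	/* swabbed */
	uint32_t mbo_unused2;	/* reserved, deliberately not swabbed */
	uint32_t mbo_padding_5;	/* reserved, deliberately not swabbed */
};

/* Build fails if a reserved field is renamed, removed, or moved to
 * offset 0, so the swab routine cannot silently fall out of sync. */
_Static_assert(offsetof(struct wire_body, mbo_unused2) != 0,
	       "mbo_unused2 missing or at offset 0");
_Static_assert(offsetof(struct wire_body, mbo_padding_5) != 0,
	       "mbo_padding_5 missing or at offset 0");

int main(void)
{
	return 0;
}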
 
@@ -2185,22 +2146,16 @@ void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
                break;
        }
 }
+EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
 
 void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
 {
-       int i;
-
        __swab32s(&lum->lum_magic);
        __swab32s(&lum->lum_stripe_count);
        __swab32s(&lum->lum_stripe_offset);
        __swab32s(&lum->lum_hash_type);
        __swab32s(&lum->lum_type);
        CLASSERT(offsetof(typeof(*lum), lum_padding1) != 0);
-       for (i = 0; i < lum->lum_stripe_count; i++) {
-               __swab32s(&lum->lum_objects[i].lum_mds);
-               lustre_swab_lu_fid(&lum->lum_objects[i].lum_fid);
-       }
-
 }
 EXPORT_SYMBOL(lustre_swab_lmv_user_md);
 
@@ -2370,8 +2325,8 @@ EXPORT_SYMBOL(dump_ioo);
 
 void dump_rniobuf(struct niobuf_remote *nb)
 {
-        CDEBUG(D_RPCTRACE, "niobuf_remote: offset="LPU64", len=%d, flags=%x\n",
-               nb->offset, nb->len, nb->flags);
+       CDEBUG(D_RPCTRACE, "niobuf_remote: offset="LPU64", len=%d, flags=%x\n",
+              nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
 }
 EXPORT_SYMBOL(dump_rniobuf);