4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/ptlrpc/pack_generic.c
34 * (Un)packing of OST requests
36 * Author: Peter J. Braam <braam@clusterfs.com>
37 * Author: Phil Schwan <phil@clusterfs.com>
38 * Author: Eric Barton <eeb@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_RPC
43 #include <libcfs/libcfs.h>
45 #include <llog_swab.h>
46 #include <lustre_net.h>
47 #include <lustre_swab.h>
48 #include <obd_cksum.h>
49 #include <obd_class.h>
50 #include <obd_support.h>
51 #include <obj_update.h>
53 #include "ptlrpc_internal.h"
55 static inline __u32 lustre_msg_hdr_size_v2(__u32 count)
57 return cfs_size_round(offsetof(struct lustre_msg_v2, lm_buflens[count]));
61 __u32 lustre_msg_hdr_size(__u32 magic, __u32 count)
64 case LUSTRE_MSG_MAGIC_V2:
65 return lustre_msg_hdr_size_v2(count);
67 LASSERTF(0, "incorrect message magic: %08x\n", magic);
72 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
76 lustre_set_req_swabbed(req, index);
78 lustre_set_rep_swabbed(req, index);
81 bool ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
85 return (ptlrpc_req_need_swab(req) &&
86 !lustre_req_swabbed(req, index));
88 return (ptlrpc_rep_need_swab(req) && !lustre_rep_swabbed(req, index));
91 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
92 enum lustre_msg_version version)
94 enum lustre_msg_version ver = lustre_msg_get_version(msg);
96 return (ver & LUSTRE_VERSION_MASK) != version;
99 int lustre_msg_check_version(struct lustre_msg *msg,
100 enum lustre_msg_version version)
102 #define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
103 switch (msg->lm_magic) {
104 case LUSTRE_MSG_MAGIC_V1:
105 CERROR("msg v1 not supported - please upgrade you system\n");
107 case LUSTRE_MSG_MAGIC_V2:
108 return lustre_msg_check_version_v2(msg, version);
110 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
113 #undef LUSTRE_MSG_MAGIC_V1
116 /* early reply size */
117 __u32 lustre_msg_early_size(void)
121 /* Always reply with the old ptlrpc_body_v2 to keep interoperability
122 * with old clients (< 2.3) that don't have pb_jobid
123 * in the ptlrpc_body.
125 * XXX Remove this whenever we drop interoperability with such
128 __u32 pblen = sizeof(struct ptlrpc_body_v2);
129 size = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
133 EXPORT_SYMBOL(lustre_msg_early_size);
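
/*
 * A minimal sketch of what the computation above amounts to, assuming (as the
 * comment says) that an early reply carries a single ptlrpc_body_v2 buffer:
 * the v2 header for one buffer plus the rounded body length.  This helper is
 * illustrative only.
 */
static inline __u32 lustre_msg_early_size_sketch(void)
{
        /* equivalent to lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen) */
        return lustre_msg_hdr_size_v2(1) +
               cfs_size_round(sizeof(struct ptlrpc_body_v2));
}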
135 __u32 lustre_msg_size_v2(int count, __u32 *lengths)
140 size = lustre_msg_hdr_size_v2(count);
141 for (i = 0; i < count; i++)
142 size += cfs_size_round(lengths[i]);
146 EXPORT_SYMBOL(lustre_msg_size_v2);
148 /* This returns the size of the buffer that is required to hold a lustre_msg
149 * with the given sub-buffer lengths.
150 * NOTE: this should only be used for NEW requests, and should always be
151 * in the form of a v2 request. If this is a connection to a v1
152 * target then the first buffer will be stripped because the ptlrpc
153 * data is part of the lustre_msg_v1 header. b=14043 */
154 __u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
156 __u32 size[] = { sizeof(struct ptlrpc_body) };
164 LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
167 case LUSTRE_MSG_MAGIC_V2:
168 return lustre_msg_size_v2(count, lens);
170 LASSERTF(0, "incorrect message magic: %08x\n", magic);
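
/*
 * A minimal usage sketch of the sizing rule described above: the first length
 * is always the ptlrpc_body, and every buffer length is rounded up to an
 * 8-byte boundary.  The payload lengths here are arbitrary example values.
 */
static inline __u32 lustre_msg_size_sketch(void)
{
        __u32 lens[3] = { sizeof(struct ptlrpc_body), 40, 13 };

        /* header for 3 buffers + round(body) + round(40) + round(13) */
        return lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 3, lens);
}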
175 /* This is used to determine the size of a buffer that was already packed
176 * and will correctly handle the different message formats. */
177 __u32 lustre_packed_msg_size(struct lustre_msg *msg)
179 switch (msg->lm_magic) {
180 case LUSTRE_MSG_MAGIC_V2:
181 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
183 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
187 EXPORT_SYMBOL(lustre_packed_msg_size);
189 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
195 msg->lm_bufcount = count;
196 /* XXX: lm_secflvr uninitialized here */
197 msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
199 for (i = 0; i < count; i++)
200 msg->lm_buflens[i] = lens[i];
205 ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
206 for (i = 0; i < count; i++) {
210 memcpy(ptr, tmp, lens[i]);
211 ptr += cfs_size_round(lens[i]);
214 EXPORT_SYMBOL(lustre_init_msg_v2);
216 static int lustre_pack_request_v2(struct ptlrpc_request *req,
217 int count, __u32 *lens, char **bufs)
221 reqlen = lustre_msg_size_v2(count, lens);
223 rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
227 req->rq_reqlen = reqlen;
229 lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
230 lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
234 int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
235 __u32 *lens, char **bufs)
237 __u32 size[] = { sizeof(struct ptlrpc_body) };
245 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
247 /* only use new format, we don't need to be compatible with 1.4 */
248 magic = LUSTRE_MSG_MAGIC_V2;
251 case LUSTRE_MSG_MAGIC_V2:
252 return lustre_pack_request_v2(req, count, lens, bufs);
254 LASSERTF(0, "incorrect message magic: %08x\n", magic);
260 struct list_head ptlrpc_rs_debug_lru =
261 LIST_HEAD_INIT(ptlrpc_rs_debug_lru);
262 spinlock_t ptlrpc_rs_debug_lock;
264 #define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
266 spin_lock(&ptlrpc_rs_debug_lock); \
267 list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
268 spin_unlock(&ptlrpc_rs_debug_lock); \
271 #define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
273 spin_lock(&ptlrpc_rs_debug_lock); \
274 list_del(&(rs)->rs_debug_list); \
275 spin_unlock(&ptlrpc_rs_debug_lock); \
278 # define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
279 # define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while(0)
282 struct ptlrpc_reply_state *
283 lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
285 struct ptlrpc_reply_state *rs = NULL;
287 spin_lock(&svcpt->scp_rep_lock);
289 /* See if we have anything in the pool, and wait if there is nothing */
290 while (list_empty(&svcpt->scp_rep_idle)) {
291 struct l_wait_info lwi;
294 spin_unlock(&svcpt->scp_rep_lock);
295 /* If we cannot get anything for a long time, we had better
296 * bail out instead of waiting indefinitely */
297 lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
298 rc = l_wait_event(svcpt->scp_rep_waitq,
299 !list_empty(&svcpt->scp_rep_idle), &lwi);
302 spin_lock(&svcpt->scp_rep_lock);
305 rs = list_entry(svcpt->scp_rep_idle.next,
306 struct ptlrpc_reply_state, rs_list);
307 list_del(&rs->rs_list);
309 spin_unlock(&svcpt->scp_rep_lock);
311 memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
312 rs->rs_size = svcpt->scp_service->srv_max_reply_size;
313 rs->rs_svcpt = svcpt;
319 void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
321 struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
323 spin_lock(&svcpt->scp_rep_lock);
324 list_add(&rs->rs_list, &svcpt->scp_rep_idle);
325 spin_unlock(&svcpt->scp_rep_lock);
326 wake_up(&svcpt->scp_rep_waitq);
329 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
330 __u32 *lens, char **bufs, int flags)
332 struct ptlrpc_reply_state *rs;
336 LASSERT(req->rq_reply_state == NULL);
338 if ((flags & LPRFL_EARLY_REPLY) == 0) {
339 spin_lock(&req->rq_lock);
340 req->rq_packed_final = 1;
341 spin_unlock(&req->rq_lock);
344 msg_len = lustre_msg_size_v2(count, lens);
345 rc = sptlrpc_svc_alloc_rs(req, msg_len);
349 rs = req->rq_reply_state;
350 atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
351 rs->rs_cb_id.cbid_fn = reply_out_callback;
352 rs->rs_cb_id.cbid_arg = rs;
353 rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
354 INIT_LIST_HEAD(&rs->rs_exp_list);
355 INIT_LIST_HEAD(&rs->rs_obd_list);
356 INIT_LIST_HEAD(&rs->rs_list);
357 spin_lock_init(&rs->rs_lock);
359 req->rq_replen = msg_len;
360 req->rq_reply_state = rs;
361 req->rq_repmsg = rs->rs_msg;
363 lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
364 lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
366 PTLRPC_RS_DEBUG_LRU_ADD(rs);
370 EXPORT_SYMBOL(lustre_pack_reply_v2);
372 int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
373 char **bufs, int flags)
376 __u32 size[] = { sizeof(struct ptlrpc_body) };
384 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
386 switch (req->rq_reqmsg->lm_magic) {
387 case LUSTRE_MSG_MAGIC_V2:
388 rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
391 LASSERTF(0, "incorrect message magic: %08x\n",
392 req->rq_reqmsg->lm_magic);
396 CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
397 lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
401 int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
404 return lustre_pack_reply_flags(req, count, lens, bufs, 0);
406 EXPORT_SYMBOL(lustre_pack_reply);
408 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size)
410 __u32 i, offset, buflen, bufcount;
414 bufcount = m->lm_bufcount;
415 if (unlikely(n >= bufcount)) {
416 CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
421 buflen = m->lm_buflens[n];
422 if (unlikely(buflen < min_size)) {
423 CERROR("msg %p buffer[%d] size %d too small "
424 "(required %d, opc=%d)\n", m, n, buflen, min_size,
425 n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
429 offset = lustre_msg_hdr_size_v2(bufcount);
430 for (i = 0; i < n; i++)
431 offset += cfs_size_round(m->lm_buflens[i]);
433 return (char *)m + offset;
436 void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 min_size)
438 switch (m->lm_magic) {
439 case LUSTRE_MSG_MAGIC_V2:
440 return lustre_msg_buf_v2(m, n, min_size);
442 LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n",
447 EXPORT_SYMBOL(lustre_msg_buf);
449 static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
450 unsigned int newlen, int move_data)
452 char *tail = NULL, *newpos;
456 LASSERT(msg->lm_bufcount > segment);
457 LASSERT(msg->lm_buflens[segment] >= newlen);
459 if (msg->lm_buflens[segment] == newlen)
462 if (move_data && msg->lm_bufcount > segment + 1) {
463 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
464 for (n = segment + 1; n < msg->lm_bufcount; n++)
465 tail_len += cfs_size_round(msg->lm_buflens[n]);
468 msg->lm_buflens[segment] = newlen;
470 if (tail && tail_len) {
471 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
472 LASSERT(newpos <= tail);
474 memmove(newpos, tail, tail_len);
477 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
481 * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
482 * we also move data forward from @segment + 1.
484 * if @newlen == 0, we remove the segment completely, but we still keep the
485 * total bufcount the same to avoid unnecessary data moving. this will leave an
486 * unused segment with size 0 at the tail, but that's ok.
488 * return new msg size after shrinking.
491 * + if any buffers higher than @segment have been filled in, the caller must
492 * call shrink with non-zero @move_data.
493 * + the caller should NOT keep pointers to msg buffers higher than @segment
496 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
497 unsigned int newlen, int move_data)
499 switch (msg->lm_magic) {
500 case LUSTRE_MSG_MAGIC_V2:
501 return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
503 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
506 EXPORT_SYMBOL(lustre_shrink_msg);
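
/*
 * A minimal sketch of the usage described in the comment above: once a reply
 * segment that was sized for the worst case has been filled, trim it to the
 * bytes actually used and let later segments move forward.  The segment index
 * and the non-zero move_data flag are example choices.
 */
static inline void lustre_shrink_msg_sketch(struct ptlrpc_request *req,
                                            unsigned int newlen)
{
        /* the returned total size becomes the new reply length */
        req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 1, newlen, 1);
}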
508 void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
510 PTLRPC_RS_DEBUG_LRU_DEL(rs);
512 LASSERT(atomic_read(&rs->rs_refcount) == 0);
513 LASSERT(!rs->rs_difficult || rs->rs_handled);
514 LASSERT(!rs->rs_on_net);
515 LASSERT(!rs->rs_scheduled);
516 LASSERT(rs->rs_export == NULL);
517 LASSERT(rs->rs_nlocks == 0);
518 LASSERT(list_empty(&rs->rs_exp_list));
519 LASSERT(list_empty(&rs->rs_obd_list));
521 sptlrpc_svc_free_rs(rs);
524 static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
526 int swabbed, required_len, i;
528 /* Now we know the sender speaks my language. */
529 required_len = lustre_msg_hdr_size_v2(0);
530 if (len < required_len) {
531 /* can't even look inside the message */
532 CERROR("message length %d too small for lustre_msg\n", len);
536 swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
539 __swab32s(&m->lm_magic);
540 __swab32s(&m->lm_bufcount);
541 __swab32s(&m->lm_secflvr);
542 __swab32s(&m->lm_repsize);
543 __swab32s(&m->lm_cksum);
544 __swab32s(&m->lm_flags);
545 CLASSERT(offsetof(typeof(*m), lm_padding_2) != 0);
546 CLASSERT(offsetof(typeof(*m), lm_padding_3) != 0);
549 required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
550 if (len < required_len) {
551 /* didn't receive all the buffer lengths */
552 CERROR ("message length %d too small for %d buflens\n",
553 len, m->lm_bufcount);
557 for (i = 0; i < m->lm_bufcount; i++) {
559 __swab32s(&m->lm_buflens[i]);
560 required_len += cfs_size_round(m->lm_buflens[i]);
563 if (len < required_len) {
564 CERROR("len: %d, required_len %d\n", len, required_len);
565 CERROR("bufcount: %d\n", m->lm_bufcount);
566 for (i = 0; i < m->lm_bufcount; i++)
567 CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
574 int __lustre_unpack_msg(struct lustre_msg *m, int len)
576 int required_len, rc;
579 /* We can provide a slightly better error log if we check the
580 * message magic and version first. In the future, struct
581 * lustre_msg may grow, and we'd like to log a version mismatch,
582 * rather than a short message.
585 required_len = offsetof(struct lustre_msg, lm_magic) +
587 if (len < required_len) {
588 /* can't even look inside the message */
589 CERROR("message length %d too small for magic/version check\n",
594 rc = lustre_unpack_msg_v2(m, len);
598 EXPORT_SYMBOL(__lustre_unpack_msg);
600 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
603 rc = __lustre_unpack_msg(req->rq_reqmsg, len);
605 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
611 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
614 rc = __lustre_unpack_msg(req->rq_repmsg, len);
616 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
622 static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
623 const int inout, int offset)
625 struct ptlrpc_body *pb;
626 struct lustre_msg_v2 *m = inout ? req->rq_reqmsg : req->rq_repmsg;
628 pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
630 CERROR("error unpacking ptlrpc body\n");
633 if (ptlrpc_buf_need_swab(req, inout, offset)) {
634 lustre_swab_ptlrpc_body(pb);
635 ptlrpc_buf_set_swabbed(req, inout, offset);
638 if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
639 CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
644 pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
649 int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
651 switch (req->rq_reqmsg->lm_magic) {
652 case LUSTRE_MSG_MAGIC_V2:
653 return lustre_unpack_ptlrpc_body_v2(req, 1, offset);
655 CERROR("bad lustre msg magic: %08x\n",
656 req->rq_reqmsg->lm_magic);
661 int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
663 switch (req->rq_repmsg->lm_magic) {
664 case LUSTRE_MSG_MAGIC_V2:
665 return lustre_unpack_ptlrpc_body_v2(req, 0, offset);
667 CERROR("bad lustre msg magic: %08x\n",
668 req->rq_repmsg->lm_magic);
673 static inline __u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, __u32 n)
675 if (n >= m->lm_bufcount)
678 return m->lm_buflens[n];
682 * lustre_msg_buflen - return the length of buffer \a n in message \a m
683 * \param m lustre_msg (request or reply) to look at
684 * \param n message index (base 0)
686 * returns zero for non-existent message indices
688 __u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n)
690 switch (m->lm_magic) {
691 case LUSTRE_MSG_MAGIC_V2:
692 return lustre_msg_buflen_v2(m, n);
694 CERROR("incorrect message magic: %08x\n", m->lm_magic);
698 EXPORT_SYMBOL(lustre_msg_buflen);
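
/*
 * A minimal sketch of the defensive access pattern the helpers above enable:
 * check the packed length of a segment before taking a pointer to it.  The
 * segment index is an example value, not tied to any particular RPC format.
 */
static inline void *lustre_msg_buf_sketch(struct lustre_msg *msg)
{
        __u32 len = lustre_msg_buflen(msg, 1);

        if (len == 0)           /* segment absent or empty */
                return NULL;

        /* returns NULL if the segment is smaller than the requested size */
        return lustre_msg_buf(msg, 1, len);
}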
701 lustre_msg_set_buflen_v2(struct lustre_msg_v2 *m, __u32 n, __u32 len)
703 if (n >= m->lm_bufcount)
706 m->lm_buflens[n] = len;
709 void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len)
711 switch (m->lm_magic) {
712 case LUSTRE_MSG_MAGIC_V2:
713 lustre_msg_set_buflen_v2(m, n, len);
716 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
720 /* NB: return the bufcount for lustre_msg_v2 format, so if the message is packed
721 * in V1 format, the result is one bigger (it includes struct ptlrpc_body). */
722 __u32 lustre_msg_bufcount(struct lustre_msg *m)
724 switch (m->lm_magic) {
725 case LUSTRE_MSG_MAGIC_V2:
726 return m->lm_bufcount;
728 CERROR("incorrect message magic: %08x\n", m->lm_magic);
733 char *lustre_msg_string(struct lustre_msg *m, __u32 index, __u32 max_len)
735 /* max_len == 0 means the string should fill the buffer */
739 switch (m->lm_magic) {
740 case LUSTRE_MSG_MAGIC_V2:
741 str = lustre_msg_buf_v2(m, index, 0);
742 blen = lustre_msg_buflen_v2(m, index);
745 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
749 CERROR ("can't unpack string in msg %p buffer[%d]\n", m, index);
753 slen = strnlen(str, blen);
755 if (slen == blen) { /* not NULL terminated */
756 CERROR("can't unpack non-NULL terminated string in "
757 "msg %p buffer[%d] len %d\n", m, index, blen);
762 if (slen != blen - 1) {
763 CERROR("can't unpack short string in msg %p "
764 "buffer[%d] len %d: strlen %d\n",
765 m, index, blen, slen);
768 } else if (slen > max_len) {
769 CERROR("can't unpack oversized string in msg %p "
770 "buffer[%d] len %d strlen %d: max %d expected\n",
771 m, index, blen, slen, max_len);
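
/*
 * A minimal sketch of using lustre_msg_string(): fetch a NUL-terminated name
 * from a message buffer, or fail if it is missing or unterminated.  The
 * segment index is an example value.
 */
static inline char *lustre_msg_string_sketch(struct lustre_msg *msg)
{
        /* max_len == 0: the string must exactly fill its buffer */
        char *name = lustre_msg_string(msg, 1, 0);

        if (name == NULL)
                CERROR("missing or malformed string buffer\n");
        return name;
}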
778 /* Wrap up the normal fixed length cases */
779 static inline void *__lustre_swab_buf(struct lustre_msg *msg, __u32 index,
780 __u32 min_size, void *swabber)
784 LASSERT(msg != NULL);
785 switch (msg->lm_magic) {
786 case LUSTRE_MSG_MAGIC_V2:
787 ptr = lustre_msg_buf_v2(msg, index, min_size);
790 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
793 if (ptr != NULL && swabber != NULL)
794 ((void (*)(void *))swabber)(ptr);
799 static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
801 return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
802 sizeof(struct ptlrpc_body_v2));
805 enum lustre_msghdr lustre_msghdr_get_flags(struct lustre_msg *msg)
807 switch (msg->lm_magic) {
808 case LUSTRE_MSG_MAGIC_V2:
809 /* already in host endian */
810 return msg->lm_flags;
812 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
816 EXPORT_SYMBOL(lustre_msghdr_get_flags);
818 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
820 switch (msg->lm_magic) {
821 case LUSTRE_MSG_MAGIC_V2:
822 msg->lm_flags = flags;
825 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
829 __u32 lustre_msg_get_flags(struct lustre_msg *msg)
831 switch (msg->lm_magic) {
832 case LUSTRE_MSG_MAGIC_V2: {
833 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
837 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
841 /* flags might be printed in debug code while the message is uninitialized */
846 EXPORT_SYMBOL(lustre_msg_get_flags);
848 void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags)
850 switch (msg->lm_magic) {
851 case LUSTRE_MSG_MAGIC_V2: {
852 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
853 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
854 pb->pb_flags |= flags;
858 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
861 EXPORT_SYMBOL(lustre_msg_add_flags);
863 void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags)
865 switch (msg->lm_magic) {
866 case LUSTRE_MSG_MAGIC_V2: {
867 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
868 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
869 pb->pb_flags = flags;
873 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
877 void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags)
879 switch (msg->lm_magic) {
880 case LUSTRE_MSG_MAGIC_V2: {
881 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
882 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
883 pb->pb_flags &= ~flags;
888 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
891 EXPORT_SYMBOL(lustre_msg_clear_flags);
893 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
895 switch (msg->lm_magic) {
896 case LUSTRE_MSG_MAGIC_V2: {
897 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
899 return pb->pb_op_flags;
901 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
909 void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags)
911 switch (msg->lm_magic) {
912 case LUSTRE_MSG_MAGIC_V2: {
913 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
914 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
915 pb->pb_op_flags |= flags;
919 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
922 EXPORT_SYMBOL(lustre_msg_add_op_flags);
924 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
926 switch (msg->lm_magic) {
927 case LUSTRE_MSG_MAGIC_V2: {
928 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
930 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
933 return &pb->pb_handle;
936 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
941 __u32 lustre_msg_get_type(struct lustre_msg *msg)
943 switch (msg->lm_magic) {
944 case LUSTRE_MSG_MAGIC_V2: {
945 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
947 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
948 return PTL_RPC_MSG_ERR;
953 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
954 return PTL_RPC_MSG_ERR;
957 EXPORT_SYMBOL(lustre_msg_get_type);
959 enum lustre_msg_version lustre_msg_get_version(struct lustre_msg *msg)
961 switch (msg->lm_magic) {
962 case LUSTRE_MSG_MAGIC_V2: {
963 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
965 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
968 return pb->pb_version;
971 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
976 void lustre_msg_add_version(struct lustre_msg *msg, __u32 version)
978 switch (msg->lm_magic) {
979 case LUSTRE_MSG_MAGIC_V2: {
980 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
981 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
982 pb->pb_version |= version;
986 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
990 __u32 lustre_msg_get_opc(struct lustre_msg *msg)
992 switch (msg->lm_magic) {
993 case LUSTRE_MSG_MAGIC_V2: {
994 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
996 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1002 CERROR("incorrect message magic: %08x (msg:%p)\n",
1003 msg->lm_magic, msg);
1007 EXPORT_SYMBOL(lustre_msg_get_opc);
1009 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg)
1011 switch (msg->lm_magic) {
1012 case LUSTRE_MSG_MAGIC_V2: {
1013 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1015 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1018 return pb->pb_last_xid;
1021 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1025 EXPORT_SYMBOL(lustre_msg_get_last_xid);
1027 __u16 lustre_msg_get_tag(struct lustre_msg *msg)
1029 switch (msg->lm_magic) {
1030 case LUSTRE_MSG_MAGIC_V2: {
1031 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1033 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1039 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1043 EXPORT_SYMBOL(lustre_msg_get_tag);
1045 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
1047 switch (msg->lm_magic) {
1048 case LUSTRE_MSG_MAGIC_V2: {
1049 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1051 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1054 return pb->pb_last_committed;
1057 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1061 EXPORT_SYMBOL(lustre_msg_get_last_committed);
1063 __u64 *lustre_msg_get_versions(struct lustre_msg *msg)
1065 switch (msg->lm_magic) {
1066 case LUSTRE_MSG_MAGIC_V2: {
1067 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1069 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1072 return pb->pb_pre_versions;
1075 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1079 EXPORT_SYMBOL(lustre_msg_get_versions);
1081 __u64 lustre_msg_get_transno(struct lustre_msg *msg)
1083 switch (msg->lm_magic) {
1084 case LUSTRE_MSG_MAGIC_V2: {
1085 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1087 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1090 return pb->pb_transno;
1093 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1097 EXPORT_SYMBOL(lustre_msg_get_transno);
1099 int lustre_msg_get_status(struct lustre_msg *msg)
1101 switch (msg->lm_magic) {
1102 case LUSTRE_MSG_MAGIC_V2: {
1103 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1105 return pb->pb_status;
1106 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1110 /* status might be printed in debug code while the message is uninitialized */
1115 EXPORT_SYMBOL(lustre_msg_get_status);
1117 __u64 lustre_msg_get_slv(struct lustre_msg *msg)
1119 switch (msg->lm_magic) {
1120 case LUSTRE_MSG_MAGIC_V2: {
1121 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1123 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1129 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1135 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
1137 switch (msg->lm_magic) {
1138 case LUSTRE_MSG_MAGIC_V2: {
1139 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1141 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1148 CERROR("invalid msg magic %x\n", msg->lm_magic);
1153 __u32 lustre_msg_get_limit(struct lustre_msg *msg)
1155 switch (msg->lm_magic) {
1156 case LUSTRE_MSG_MAGIC_V2: {
1157 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1159 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1162 return pb->pb_limit;
1165 CERROR("invalid msg magic %x\n", msg->lm_magic);
1171 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
1173 switch (msg->lm_magic) {
1174 case LUSTRE_MSG_MAGIC_V2: {
1175 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1177 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1180 pb->pb_limit = limit;
1184 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1189 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
1191 switch (msg->lm_magic) {
1192 case LUSTRE_MSG_MAGIC_V2: {
1193 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1195 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1198 return pb->pb_conn_cnt;
1201 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1205 EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
1207 __u32 lustre_msg_get_magic(struct lustre_msg *msg)
1209 switch (msg->lm_magic) {
1210 case LUSTRE_MSG_MAGIC_V2:
1211 return msg->lm_magic;
1213 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1218 __u32 lustre_msg_get_timeout(struct lustre_msg *msg)
1220 switch (msg->lm_magic) {
1221 case LUSTRE_MSG_MAGIC_V2: {
1222 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1224 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1227 return pb->pb_timeout;
1230 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1235 __u32 lustre_msg_get_service_time(struct lustre_msg *msg)
1237 switch (msg->lm_magic) {
1238 case LUSTRE_MSG_MAGIC_V2: {
1239 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1241 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1244 return pb->pb_service_time;
1247 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1252 char *lustre_msg_get_jobid(struct lustre_msg *msg)
1254 switch (msg->lm_magic) {
1255 case LUSTRE_MSG_MAGIC_V2: {
1256 struct ptlrpc_body *pb =
1257 lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1258 sizeof(struct ptlrpc_body));
1262 return pb->pb_jobid;
1265 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1269 EXPORT_SYMBOL(lustre_msg_get_jobid);
1271 __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
1273 switch (msg->lm_magic) {
1274 case LUSTRE_MSG_MAGIC_V2:
1275 return msg->lm_cksum;
1277 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1282 __u64 lustre_msg_get_mbits(struct lustre_msg *msg)
1284 switch (msg->lm_magic) {
1285 case LUSTRE_MSG_MAGIC_V2: {
1286 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1288 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1291 return pb->pb_mbits;
1294 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1299 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
1301 switch (msg->lm_magic) {
1302 case LUSTRE_MSG_MAGIC_V2: {
1303 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1304 __u32 len = lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF);
1306 unsigned int hsize = 4;
1309 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1310 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
1311 len, NULL, 0, (unsigned char *)&crc,
1316 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1321 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
1323 switch (msg->lm_magic) {
1324 case LUSTRE_MSG_MAGIC_V2: {
1325 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1326 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1327 pb->pb_handle = *handle;
1331 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1335 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
1337 switch (msg->lm_magic) {
1338 case LUSTRE_MSG_MAGIC_V2: {
1339 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1340 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1345 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1349 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
1351 switch (msg->lm_magic) {
1352 case LUSTRE_MSG_MAGIC_V2: {
1353 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1354 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1359 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1363 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid)
1365 switch (msg->lm_magic) {
1366 case LUSTRE_MSG_MAGIC_V2: {
1367 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1368 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1369 pb->pb_last_xid = last_xid;
1373 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1376 EXPORT_SYMBOL(lustre_msg_set_last_xid);
1378 void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
1380 switch (msg->lm_magic) {
1381 case LUSTRE_MSG_MAGIC_V2: {
1382 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1383 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1388 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1391 EXPORT_SYMBOL(lustre_msg_set_tag);
1393 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed)
1395 switch (msg->lm_magic) {
1396 case LUSTRE_MSG_MAGIC_V2: {
1397 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1398 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1399 pb->pb_last_committed = last_committed;
1403 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1407 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
1409 switch (msg->lm_magic) {
1410 case LUSTRE_MSG_MAGIC_V2: {
1411 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1412 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1413 pb->pb_pre_versions[0] = versions[0];
1414 pb->pb_pre_versions[1] = versions[1];
1415 pb->pb_pre_versions[2] = versions[2];
1416 pb->pb_pre_versions[3] = versions[3];
1420 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1423 EXPORT_SYMBOL(lustre_msg_set_versions);
1425 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
1427 switch (msg->lm_magic) {
1428 case LUSTRE_MSG_MAGIC_V2: {
1429 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1430 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1431 pb->pb_transno = transno;
1435 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1438 EXPORT_SYMBOL(lustre_msg_set_transno);
1440 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
1442 switch (msg->lm_magic) {
1443 case LUSTRE_MSG_MAGIC_V2: {
1444 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1445 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1446 pb->pb_status = status;
1450 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1453 EXPORT_SYMBOL(lustre_msg_set_status);
1455 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
1457 switch (msg->lm_magic) {
1458 case LUSTRE_MSG_MAGIC_V2: {
1459 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1460 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1461 pb->pb_conn_cnt = conn_cnt;
1465 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1469 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout)
1471 switch (msg->lm_magic) {
1472 case LUSTRE_MSG_MAGIC_V2: {
1473 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1474 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1475 pb->pb_timeout = timeout;
1479 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1483 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time)
1485 switch (msg->lm_magic) {
1486 case LUSTRE_MSG_MAGIC_V2: {
1487 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1488 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1489 pb->pb_service_time = service_time;
1493 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1497 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
1499 switch (msg->lm_magic) {
1500 case LUSTRE_MSG_MAGIC_V2: {
1501 __u32 opc = lustre_msg_get_opc(msg);
1502 struct ptlrpc_body *pb;
1504 /* Don't set the jobid for LDLM AST RPCs; they have been shrunk.
1505 * See the comment in ptlrpc_request_pack(). */
1506 if (!opc || opc == LDLM_BL_CALLBACK ||
1507 opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
1510 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1511 sizeof(struct ptlrpc_body));
1512 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1515 memcpy(pb->pb_jobid, jobid, sizeof(pb->pb_jobid));
1516 else if (pb->pb_jobid[0] == '\0')
1517 lustre_get_jobid(pb->pb_jobid, sizeof(pb->pb_jobid));
1521 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1524 EXPORT_SYMBOL(lustre_msg_set_jobid);
1526 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
1528 switch (msg->lm_magic) {
1529 case LUSTRE_MSG_MAGIC_V2:
1530 msg->lm_cksum = cksum;
1533 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1537 void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
1539 switch (msg->lm_magic) {
1540 case LUSTRE_MSG_MAGIC_V2: {
1541 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1543 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1544 pb->pb_mbits = mbits;
1548 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1552 void ptlrpc_request_set_replen(struct ptlrpc_request *req)
1554 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
1556 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
1557 req->rq_pill.rc_area[RCL_SERVER]);
1558 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1559 req->rq_reqmsg->lm_repsize = req->rq_replen;
1561 EXPORT_SYMBOL(ptlrpc_request_set_replen);
1563 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *lens)
1565 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens);
1566 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1567 req->rq_reqmsg->lm_repsize = req->rq_replen;
1571 * Send a remote set_info_async.
1573 * This may go from client to server or server to client.
1575 int do_set_info_async(struct obd_import *imp,
1576 int opcode, int version,
1577 size_t keylen, void *key,
1578 size_t vallen, void *val,
1579 struct ptlrpc_request_set *set)
1581 struct ptlrpc_request *req;
1586 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
1590 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
1591 RCL_CLIENT, keylen);
1592 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
1593 RCL_CLIENT, vallen);
1594 rc = ptlrpc_request_pack(req, version, opcode);
1596 ptlrpc_request_free(req);
1600 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1601 memcpy(tmp, key, keylen);
1602 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1603 memcpy(tmp, val, vallen);
1605 ptlrpc_request_set_replen(req);
1608 ptlrpc_set_add_req(set, req);
1609 ptlrpc_check_set(NULL, set);
1611 rc = ptlrpc_queue_wait(req);
1612 ptlrpc_req_finished(req);
1617 EXPORT_SYMBOL(do_set_info_async);
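
/*
 * A minimal sketch of a synchronous caller of do_set_info_async().  The key
 * and value below are hypothetical placeholders; real callers pass one of the
 * well-known KEY_* strings and an import for the target to be updated.  An
 * OST import is assumed here, and a NULL set makes the call wait for the
 * reply.
 */
static inline int do_set_info_async_sketch(struct obd_import *imp)
{
        char key[] = "example_key";     /* hypothetical key */
        __u32 val = 1;                  /* hypothetical value */

        return do_set_info_async(imp, OST_SET_INFO, LUSTRE_OST_VERSION,
                                 sizeof(key), key, sizeof(val), &val, NULL);
}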
1619 /* Byte flipping routines for all wire types declared in
1620 * lustre_idl.h are implemented here.
1622 void lustre_swab_ptlrpc_body(struct ptlrpc_body *body)
1624 __swab32s(&body->pb_type);
1625 __swab32s(&body->pb_version);
1626 __swab32s(&body->pb_opc);
1627 __swab32s(&body->pb_status);
1628 __swab64s(&body->pb_last_xid);
1629 __swab16s(&body->pb_tag);
1630 CLASSERT(offsetof(typeof(*body), pb_padding0) != 0);
1631 CLASSERT(offsetof(typeof(*body), pb_padding1) != 0);
1632 __swab64s(&body->pb_last_committed);
1633 __swab64s(&body->pb_transno);
1634 __swab32s(&body->pb_flags);
1635 __swab32s(&body->pb_op_flags);
1636 __swab32s(&body->pb_conn_cnt);
1637 __swab32s(&body->pb_timeout);
1638 __swab32s(&body->pb_service_time);
1639 __swab32s(&body->pb_limit);
1640 __swab64s(&body->pb_slv);
1641 __swab64s(&body->pb_pre_versions[0]);
1642 __swab64s(&body->pb_pre_versions[1]);
1643 __swab64s(&body->pb_pre_versions[2]);
1644 __swab64s(&body->pb_pre_versions[3]);
1645 __swab64s(&body->pb_mbits);
1646 CLASSERT(offsetof(typeof(*body), pb_padding64_0) != 0);
1647 CLASSERT(offsetof(typeof(*body), pb_padding64_1) != 0);
1648 CLASSERT(offsetof(typeof(*body), pb_padding64_2) != 0);
1649 /* While we need to maintain compatibility between
1650 * clients and servers without ptlrpc_body_v2 (< 2.3),
1651 * do not swab any fields beyond pb_jobid, as we are
1652 * using this swab function for both ptlrpc_body
1653 * and ptlrpc_body_v2. */
1654 /* pb_jobid is an ASCII string and should not be swabbed */
1655 CLASSERT(offsetof(typeof(*body), pb_jobid) != 0);
1658 void lustre_swab_connect(struct obd_connect_data *ocd)
1660 __swab64s(&ocd->ocd_connect_flags);
1661 __swab32s(&ocd->ocd_version);
1662 __swab32s(&ocd->ocd_grant);
1663 __swab64s(&ocd->ocd_ibits_known);
1664 __swab32s(&ocd->ocd_index);
1665 __swab32s(&ocd->ocd_brw_size);
1666 /* ocd_blocksize and ocd_inodespace don't need to be swabbed because
1667 * they are single-byte values */
1668 __swab16s(&ocd->ocd_grant_tax_kb);
1669 __swab32s(&ocd->ocd_grant_max_blks);
1670 __swab64s(&ocd->ocd_transno);
1671 __swab32s(&ocd->ocd_group);
1672 __swab32s(&ocd->ocd_cksum_types);
1673 __swab32s(&ocd->ocd_instance);
1674 /* Fields after ocd_cksum_types are only accessible by the receiver
1675 * if the corresponding flag in ocd_connect_flags is set. Accessing
1676 * any field after ocd_maxbytes on the receiver without a valid flag
1677 * may result in out-of-bound memory access and kernel oops. */
1678 if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
1679 __swab32s(&ocd->ocd_max_easize);
1680 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
1681 __swab64s(&ocd->ocd_maxbytes);
1682 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1683 __swab16s(&ocd->ocd_maxmodrpcs);
1684 CLASSERT(offsetof(typeof(*ocd), padding0) != 0);
1685 CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
1686 if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
1687 __swab64s(&ocd->ocd_connect_flags2);
1688 CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
1689 CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
1690 CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
1691 CLASSERT(offsetof(typeof(*ocd), padding6) != 0);
1692 CLASSERT(offsetof(typeof(*ocd), padding7) != 0);
1693 CLASSERT(offsetof(typeof(*ocd), padding8) != 0);
1694 CLASSERT(offsetof(typeof(*ocd), padding9) != 0);
1695 CLASSERT(offsetof(typeof(*ocd), paddingA) != 0);
1696 CLASSERT(offsetof(typeof(*ocd), paddingB) != 0);
1697 CLASSERT(offsetof(typeof(*ocd), paddingC) != 0);
1698 CLASSERT(offsetof(typeof(*ocd), paddingD) != 0);
1699 CLASSERT(offsetof(typeof(*ocd), paddingE) != 0);
1700 CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
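
/*
 * A minimal sketch of the receiver-side rule stated in the comment above: a
 * flag-guarded field of obd_connect_data may only be read once the matching
 * bit in ocd_connect_flags has been checked.  The fallback value is an
 * example choice.
 */
static inline __u64 ocd_maxbytes_sketch(struct obd_connect_data *ocd)
{
        if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
                return ocd->ocd_maxbytes;
        return 0;       /* peer did not send the field; use a default */
}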
1703 static void lustre_swab_ost_layout(struct ost_layout *ol)
1705 __swab32s(&ol->ol_stripe_size);
1706 __swab32s(&ol->ol_stripe_count);
1707 __swab64s(&ol->ol_comp_start);
1708 __swab64s(&ol->ol_comp_end);
1709 __swab32s(&ol->ol_comp_id);
1712 void lustre_swab_obdo (struct obdo *o)
1714 __swab64s(&o->o_valid);
1715 lustre_swab_ost_id(&o->o_oi);
1716 __swab64s(&o->o_parent_seq);
1717 __swab64s(&o->o_size);
1718 __swab64s(&o->o_mtime);
1719 __swab64s(&o->o_atime);
1720 __swab64s(&o->o_ctime);
1721 __swab64s(&o->o_blocks);
1722 __swab64s(&o->o_grant);
1723 __swab32s(&o->o_blksize);
1724 __swab32s(&o->o_mode);
1725 __swab32s(&o->o_uid);
1726 __swab32s(&o->o_gid);
1727 __swab32s(&o->o_flags);
1728 __swab32s(&o->o_nlink);
1729 __swab32s(&o->o_parent_oid);
1730 __swab32s(&o->o_misc);
1731 __swab64s(&o->o_ioepoch);
1732 __swab32s(&o->o_stripe_idx);
1733 __swab32s(&o->o_parent_ver);
1734 lustre_swab_ost_layout(&o->o_layout);
1735 __swab32s(&o->o_layout_version);
1736 __swab32s(&o->o_uid_h);
1737 __swab32s(&o->o_gid_h);
1738 __swab64s(&o->o_data_version);
1739 __swab32s(&o->o_projid);
1740 CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
1741 CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
1742 CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
1745 EXPORT_SYMBOL(lustre_swab_obdo);
1747 void lustre_swab_obd_statfs (struct obd_statfs *os)
1749 __swab64s(&os->os_type);
1750 __swab64s(&os->os_blocks);
1751 __swab64s(&os->os_bfree);
1752 __swab64s(&os->os_bavail);
1753 __swab64s(&os->os_files);
1754 __swab64s(&os->os_ffree);
1755 /* no need to swab os_fsid */
1756 __swab32s(&os->os_bsize);
1757 __swab32s(&os->os_namelen);
1758 __swab64s(&os->os_maxbytes);
1759 __swab32s(&os->os_state);
1760 __swab32s(&os->os_fprecreated);
1761 __swab32s(&os->os_granted);
1762 CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
1763 CLASSERT(offsetof(typeof(*os), os_spare4) != 0);
1764 CLASSERT(offsetof(typeof(*os), os_spare5) != 0);
1765 CLASSERT(offsetof(typeof(*os), os_spare6) != 0);
1766 CLASSERT(offsetof(typeof(*os), os_spare7) != 0);
1767 CLASSERT(offsetof(typeof(*os), os_spare8) != 0);
1768 CLASSERT(offsetof(typeof(*os), os_spare9) != 0);
1771 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
1773 lustre_swab_ost_id(&ioo->ioo_oid);
1774 __swab32s(&ioo->ioo_max_brw);
1775 __swab32s(&ioo->ioo_bufcnt);
1778 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
1780 __swab64s(&nbr->rnb_offset);
1781 __swab32s(&nbr->rnb_len);
1782 __swab32s(&nbr->rnb_flags);
1785 void lustre_swab_ost_body (struct ost_body *b)
1787 lustre_swab_obdo (&b->oa);
1790 void lustre_swab_ost_last_id(u64 *id)
1795 void lustre_swab_generic_32s(__u32 *val)
1800 void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *desc)
1802 lustre_swab_lu_fid(&desc->gl_id.qid_fid);
1803 __swab64s(&desc->gl_flags);
1804 __swab64s(&desc->gl_ver);
1805 __swab64s(&desc->gl_hardlimit);
1806 __swab64s(&desc->gl_softlimit);
1807 __swab64s(&desc->gl_time);
1808 CLASSERT(offsetof(typeof(*desc), gl_pad2) != 0);
1810 EXPORT_SYMBOL(lustre_swab_gl_lquota_desc);
1812 void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *desc)
1814 __swab32s(&desc->lgbd_status);
1815 __swab32s(&desc->lgbd_timeout);
1816 CLASSERT(offsetof(typeof(*desc), lgbd_padding) != 0);
1818 EXPORT_SYMBOL(lustre_swab_gl_barrier_desc);
1820 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
1822 __swab64s(&lvb->lvb_size);
1823 __swab64s(&lvb->lvb_mtime);
1824 __swab64s(&lvb->lvb_atime);
1825 __swab64s(&lvb->lvb_ctime);
1826 __swab64s(&lvb->lvb_blocks);
1828 EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
1830 void lustre_swab_ost_lvb(struct ost_lvb *lvb)
1832 __swab64s(&lvb->lvb_size);
1833 __swab64s(&lvb->lvb_mtime);
1834 __swab64s(&lvb->lvb_atime);
1835 __swab64s(&lvb->lvb_ctime);
1836 __swab64s(&lvb->lvb_blocks);
1837 __swab32s(&lvb->lvb_mtime_ns);
1838 __swab32s(&lvb->lvb_atime_ns);
1839 __swab32s(&lvb->lvb_ctime_ns);
1840 __swab32s(&lvb->lvb_padding);
1842 EXPORT_SYMBOL(lustre_swab_ost_lvb);
1844 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
1846 __swab64s(&lvb->lvb_flags);
1847 __swab64s(&lvb->lvb_id_may_rel);
1848 __swab64s(&lvb->lvb_id_rel);
1849 __swab64s(&lvb->lvb_id_qunit);
1850 __swab64s(&lvb->lvb_pad1);
1852 EXPORT_SYMBOL(lustre_swab_lquota_lvb);
1854 void lustre_swab_barrier_lvb(struct barrier_lvb *lvb)
1856 __swab32s(&lvb->lvb_status);
1857 __swab32s(&lvb->lvb_index);
1858 CLASSERT(offsetof(typeof(*lvb), lvb_padding) != 0);
1860 EXPORT_SYMBOL(lustre_swab_barrier_lvb);
1862 void lustre_swab_mdt_body (struct mdt_body *b)
1864 lustre_swab_lu_fid(&b->mbo_fid1);
1865 lustre_swab_lu_fid(&b->mbo_fid2);
1866 /* handle is opaque */
1867 __swab64s(&b->mbo_valid);
1868 __swab64s(&b->mbo_size);
1869 __swab64s(&b->mbo_mtime);
1870 __swab64s(&b->mbo_atime);
1871 __swab64s(&b->mbo_ctime);
1872 __swab64s(&b->mbo_blocks);
1873 __swab64s(&b->mbo_version);
1874 __swab64s(&b->mbo_t_state);
1875 __swab32s(&b->mbo_fsuid);
1876 __swab32s(&b->mbo_fsgid);
1877 __swab32s(&b->mbo_capability);
1878 __swab32s(&b->mbo_mode);
1879 __swab32s(&b->mbo_uid);
1880 __swab32s(&b->mbo_gid);
1881 __swab32s(&b->mbo_flags);
1882 __swab32s(&b->mbo_rdev);
1883 __swab32s(&b->mbo_nlink);
1884 __swab32s(&b->mbo_layout_gen);
1885 __swab32s(&b->mbo_suppgid);
1886 __swab32s(&b->mbo_eadatasize);
1887 __swab32s(&b->mbo_aclsize);
1888 __swab32s(&b->mbo_max_mdsize);
1889 CLASSERT(offsetof(typeof(*b), mbo_unused3) != 0);
1890 __swab32s(&b->mbo_uid_h);
1891 __swab32s(&b->mbo_gid_h);
1892 __swab32s(&b->mbo_projid);
1893 __swab64s(&b->mbo_dom_size);
1894 __swab64s(&b->mbo_dom_blocks);
1895 CLASSERT(offsetof(typeof(*b), mbo_padding_8) != 0);
1896 CLASSERT(offsetof(typeof(*b), mbo_padding_9) != 0);
1897 CLASSERT(offsetof(typeof(*b), mbo_padding_10) != 0);
1900 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
1902 /* mio_open_handle is opaque */
1903 CLASSERT(offsetof(typeof(*b), mio_unused1) != 0);
1904 CLASSERT(offsetof(typeof(*b), mio_unused2) != 0);
1905 CLASSERT(offsetof(typeof(*b), mio_padding) != 0);
1908 void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
1912 __swab32s(&mti->mti_lustre_ver);
1913 __swab32s(&mti->mti_stripe_index);
1914 __swab32s(&mti->mti_config_ver);
1915 __swab32s(&mti->mti_flags);
1916 __swab32s(&mti->mti_instance);
1917 __swab32s(&mti->mti_nid_count);
1918 CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
1919 for (i = 0; i < MTI_NIDS_MAX; i++)
1920 __swab64s(&mti->mti_nids[i]);
1923 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
1927 __swab64s(&entry->mne_version);
1928 __swab32s(&entry->mne_instance);
1929 __swab32s(&entry->mne_index);
1930 __swab32s(&entry->mne_length);
1932 /* mne_nid_(count|type) must be one byte in size because we
1933 * access them without swapping. */
1934 CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
1935 CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
1937 /* remove this assertion when IPv6 is supported. */
1938 LASSERT(entry->mne_nid_type == 0);
1939 for (i = 0; i < entry->mne_nid_count; i++) {
1940 CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
1941 __swab64s(&entry->u.nids[i]);
1944 EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
1946 void lustre_swab_mgs_config_body(struct mgs_config_body *body)
1948 __swab64s(&body->mcb_offset);
1949 __swab32s(&body->mcb_units);
1950 __swab16s(&body->mcb_type);
1953 void lustre_swab_mgs_config_res(struct mgs_config_res *body)
1955 __swab64s(&body->mcr_offset);
1956 __swab64s(&body->mcr_size);
1959 static void lustre_swab_obd_dqinfo (struct obd_dqinfo *i)
1961 __swab64s (&i->dqi_bgrace);
1962 __swab64s (&i->dqi_igrace);
1963 __swab32s (&i->dqi_flags);
1964 __swab32s (&i->dqi_valid);
1967 static void lustre_swab_obd_dqblk (struct obd_dqblk *b)
1969 __swab64s (&b->dqb_ihardlimit);
1970 __swab64s (&b->dqb_isoftlimit);
1971 __swab64s (&b->dqb_curinodes);
1972 __swab64s (&b->dqb_bhardlimit);
1973 __swab64s (&b->dqb_bsoftlimit);
1974 __swab64s (&b->dqb_curspace);
1975 __swab64s (&b->dqb_btime);
1976 __swab64s (&b->dqb_itime);
1977 __swab32s (&b->dqb_valid);
1978 CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
1981 void lustre_swab_obd_quotactl (struct obd_quotactl *q)
1983 __swab32s (&q->qc_cmd);
1984 __swab32s (&q->qc_type);
1985 __swab32s (&q->qc_id);
1986 __swab32s (&q->qc_stat);
1987 lustre_swab_obd_dqinfo (&q->qc_dqinfo);
1988 lustre_swab_obd_dqblk (&q->qc_dqblk);
1991 void lustre_swab_fid2path(struct getinfo_fid2path *gf)
1993 lustre_swab_lu_fid(&gf->gf_fid);
1994 __swab64s(&gf->gf_recno);
1995 __swab32s(&gf->gf_linkno);
1996 __swab32s(&gf->gf_pathlen);
1998 EXPORT_SYMBOL(lustre_swab_fid2path);
2000 static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
2002 __swab64s(&fm_extent->fe_logical);
2003 __swab64s(&fm_extent->fe_physical);
2004 __swab64s(&fm_extent->fe_length);
2005 __swab32s(&fm_extent->fe_flags);
2006 __swab32s(&fm_extent->fe_device);
2009 void lustre_swab_fiemap(struct fiemap *fiemap)
2013 __swab64s(&fiemap->fm_start);
2014 __swab64s(&fiemap->fm_length);
2015 __swab32s(&fiemap->fm_flags);
2016 __swab32s(&fiemap->fm_mapped_extents);
2017 __swab32s(&fiemap->fm_extent_count);
2018 __swab32s(&fiemap->fm_reserved);
2020 for (i = 0; i < fiemap->fm_mapped_extents; i++)
2021 lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
2024 void lustre_swab_idx_info(struct idx_info *ii)
2026 __swab32s(&ii->ii_magic);
2027 __swab32s(&ii->ii_flags);
2028 __swab16s(&ii->ii_count);
2029 __swab32s(&ii->ii_attrs);
2030 lustre_swab_lu_fid(&ii->ii_fid);
2031 __swab64s(&ii->ii_version);
2032 __swab64s(&ii->ii_hash_start);
2033 __swab64s(&ii->ii_hash_end);
2034 __swab16s(&ii->ii_keysize);
2035 __swab16s(&ii->ii_recsize);
2038 void lustre_swab_lip_header(struct lu_idxpage *lip)
2041 __swab32s(&lip->lip_magic);
2042 __swab16s(&lip->lip_flags);
2043 __swab16s(&lip->lip_nr);
2045 EXPORT_SYMBOL(lustre_swab_lip_header);
2047 void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
2049 __swab32s(&rr->rr_opcode);
2050 __swab32s(&rr->rr_cap);
2051 __swab32s(&rr->rr_fsuid);
2052 /* rr_fsuid_h is unused */
2053 __swab32s(&rr->rr_fsgid);
2054 /* rr_fsgid_h is unused */
2055 __swab32s(&rr->rr_suppgid1);
2056 /* rr_suppgid1_h is unused */
2057 __swab32s(&rr->rr_suppgid2);
2058 /* rr_suppgid2_h is unused */
2059 lustre_swab_lu_fid(&rr->rr_fid1);
2060 lustre_swab_lu_fid(&rr->rr_fid2);
2061 __swab64s(&rr->rr_mtime);
2062 __swab64s(&rr->rr_atime);
2063 __swab64s(&rr->rr_ctime);
2064 __swab64s(&rr->rr_size);
2065 __swab64s(&rr->rr_blocks);
2066 __swab32s(&rr->rr_bias);
2067 __swab32s(&rr->rr_mode);
2068 __swab32s(&rr->rr_flags);
2069 __swab32s(&rr->rr_flags_h);
2070 __swab32s(&rr->rr_umask);
2071 __swab16s(&rr->rr_mirror_id);
2073 CLASSERT(offsetof(typeof(*rr), rr_padding_4) != 0);
2076 void lustre_swab_lov_desc (struct lov_desc *ld)
2078 __swab32s (&ld->ld_tgt_count);
2079 __swab32s (&ld->ld_active_tgt_count);
2080 __swab32s (&ld->ld_default_stripe_count);
2081 __swab32s (&ld->ld_pattern);
2082 __swab64s (&ld->ld_default_stripe_size);
2083 __swab64s (&ld->ld_default_stripe_offset);
2084 __swab32s (&ld->ld_qos_maxage);
2085 /* uuid endian insensitive */
2087 EXPORT_SYMBOL(lustre_swab_lov_desc);
2089 void lustre_swab_lmv_desc (struct lmv_desc *ld)
2091 __swab32s (&ld->ld_tgt_count);
2092 __swab32s (&ld->ld_active_tgt_count);
2093 __swab32s (&ld->ld_default_stripe_count);
2094 __swab32s (&ld->ld_pattern);
2095 __swab64s (&ld->ld_default_hash_size);
2096 __swab32s (&ld->ld_qos_maxage);
2097 /* uuid endian insensitive */
2100 /* This structure is always in little-endian */
2101 static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
2105 __swab32s(&lmm1->lmv_magic);
2106 __swab32s(&lmm1->lmv_stripe_count);
2107 __swab32s(&lmm1->lmv_master_mdt_index);
2108 __swab32s(&lmm1->lmv_hash_type);
2109 __swab32s(&lmm1->lmv_layout_version);
2110 for (i = 0; i < lmm1->lmv_stripe_count; i++)
2111 lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
2114 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
2116 switch (lmm->lmv_magic) {
2118 lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
2124 EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
2126 void lustre_swab_lmv_user_md_objects(struct lmv_user_mds_data *lmd,
2131 for (i = 0; i < stripe_count; i++)
2132 __swab32s(&(lmd[i].lum_mds));
2134 EXPORT_SYMBOL(lustre_swab_lmv_user_md_objects);
2137 void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
2141 if (lum->lum_magic == LMV_MAGIC_FOREIGN) {
2142 __swab32s(&lum->lum_magic);
2143 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_length);
2147 count = lum->lum_stripe_count;
2148 __swab32s(&lum->lum_magic);
2149 __swab32s(&lum->lum_stripe_count);
2150 __swab32s(&lum->lum_stripe_offset);
2151 __swab32s(&lum->lum_hash_type);
2152 __swab32s(&lum->lum_type);
2153 CLASSERT(offsetof(typeof(*lum), lum_padding1) != 0);
2154 switch (lum->lum_magic) {
2155 case LMV_USER_MAGIC_SPECIFIC:
2156 count = lum->lum_stripe_count;
2157 case __swab32(LMV_USER_MAGIC_SPECIFIC):
2158 lustre_swab_lmv_user_md_objects(lum->lum_objects, count);
2164 EXPORT_SYMBOL(lustre_swab_lmv_user_md);
2166 static void lustre_print_v1v3(unsigned int lvl, struct lov_user_md *lum,
2169 CDEBUG(lvl, "%s lov_user_md %p:\n", msg, lum);
2170 CDEBUG(lvl, "\tlmm_magic: %#x\n", lum->lmm_magic);
2171 CDEBUG(lvl, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
2172 CDEBUG(lvl, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
2173 CDEBUG(lvl, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
2174 CDEBUG(lvl, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
2175 CDEBUG(lvl, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
2176 CDEBUG(lvl, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
2177 lum->lmm_stripe_offset);
2178 if (lum->lmm_magic == LOV_USER_MAGIC_V3) {
2179 struct lov_user_md_v3 *v3 = (void *)lum;
2180 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2182 if (lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2183 struct lov_user_md_v3 *v3 = (void *)lum;
2186 if (v3->lmm_pool_name[0] != '\0')
2187 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2189 CDEBUG(lvl, "\ttarget list:\n");
2190 for (i = 0; i < v3->lmm_stripe_count; i++)
2191 CDEBUG(lvl, "\t\t%u\n", v3->lmm_objects[i].l_ost_idx);
2195 void lustre_print_user_md(unsigned int lvl, struct lov_user_md *lum,
2198 struct lov_comp_md_v1 *comp_v1;
2201 if (likely(!cfs_cdebug_show(lvl, DEBUG_SUBSYSTEM)))
2204 if (lum->lmm_magic == LOV_USER_MAGIC_V1 ||
2205 lum->lmm_magic == LOV_USER_MAGIC_V3) {
2206 lustre_print_v1v3(lvl, lum, msg);
2210 if (lum->lmm_magic != LOV_USER_MAGIC_COMP_V1) {
2211 CDEBUG(lvl, "%s: bad magic: %x\n", msg, lum->lmm_magic);
2215 comp_v1 = (struct lov_comp_md_v1 *)lum;
2216 CDEBUG(lvl, "%s: lov_comp_md_v1 %p:\n", msg, lum);
2217 CDEBUG(lvl, "\tlcm_magic: %#x\n", comp_v1->lcm_magic);
2218 CDEBUG(lvl, "\tlcm_size: %#x\n", comp_v1->lcm_size);
2219 CDEBUG(lvl, "\tlcm_layout_gen: %#x\n", comp_v1->lcm_layout_gen);
2220 CDEBUG(lvl, "\tlcm_flags: %#x\n", comp_v1->lcm_flags);
2221 CDEBUG(lvl, "\tlcm_entry_count: %#x\n\n", comp_v1->lcm_entry_count);
2222 CDEBUG(lvl, "\tlcm_mirror_count: %#x\n\n", comp_v1->lcm_mirror_count);
2224 for (i = 0; i < comp_v1->lcm_entry_count; i++) {
2225 struct lov_comp_md_entry_v1 *ent = &comp_v1->lcm_entries[i];
2226 struct lov_user_md *v1;
2228 CDEBUG(lvl, "\tentry %d:\n", i);
2229 CDEBUG(lvl, "\tlcme_id: %#x\n", ent->lcme_id);
2230 CDEBUG(lvl, "\tlcme_flags: %#x\n", ent->lcme_flags);
2231 if (ent->lcme_flags & LCME_FL_NOSYNC)
2232 CDEBUG(lvl, "\tlcme_timestamp: %llu\n",
2233 ent->lcme_timestamp);
2234 CDEBUG(lvl, "\tlcme_extent.e_start: %llu\n",
2235 ent->lcme_extent.e_start);
2236 CDEBUG(lvl, "\tlcme_extent.e_end: %llu\n",
2237 ent->lcme_extent.e_end);
2238 CDEBUG(lvl, "\tlcme_offset: %#x\n", ent->lcme_offset);
2239 CDEBUG(lvl, "\tlcme_size: %#x\n\n", ent->lcme_size);
2241 v1 = (struct lov_user_md *)((char *)comp_v1 +
2242 comp_v1->lcm_entries[i].lcme_offset);
2243 lustre_print_v1v3(lvl, v1, msg);
2246 EXPORT_SYMBOL(lustre_print_user_md);
2248 static void lustre_swab_lmm_oi(struct ost_id *oi)
2250 __swab64s(&oi->oi.oi_id);
2251 __swab64s(&oi->oi.oi_seq);
2254 static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
2257 __swab32s(&lum->lmm_magic);
2258 __swab32s(&lum->lmm_pattern);
2259 lustre_swab_lmm_oi(&lum->lmm_oi);
2260 __swab32s(&lum->lmm_stripe_size);
2261 __swab16s(&lum->lmm_stripe_count);
2262 __swab16s(&lum->lmm_stripe_offset);
2266 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
2269 CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
2270 lustre_swab_lov_user_md_common(lum);
2273 EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
2275 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
2278 CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
2279 lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
2280 /* lmm_pool_name is a char array, so there is nothing to swab */
2283 EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
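/*
 * Byte-swap a composite (PFL) layout in place.  The entry count and each
 * component's stripe count are sampled before the headers are swabbed and
 * adjusted separately when the buffer is not already CPU-endian (see the
 * cpu_endian flag below); every lov_comp_md_entry_v1 and the lov_user_md it
 * points to via lcme_offset is then swabbed according to its own magic.
 */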
2285 void lustre_swab_lov_comp_md_v1(struct lov_comp_md_v1 *lum)
2287 struct lov_comp_md_entry_v1 *ent;
2288 struct lov_user_md_v1 *v1;
2289 struct lov_user_md_v3 *v3;
2293 __u16 ent_count, stripe_count;
2296 cpu_endian = lum->lcm_magic == LOV_USER_MAGIC_COMP_V1;
2297 ent_count = lum->lcm_entry_count;
2299 __swab16s(&ent_count);
2301 CDEBUG(D_IOCTL, "swabbing lov_user_comp_md v1\n");
2302 __swab32s(&lum->lcm_magic);
2303 __swab32s(&lum->lcm_size);
2304 __swab32s(&lum->lcm_layout_gen);
2305 __swab16s(&lum->lcm_flags);
2306 __swab16s(&lum->lcm_entry_count);
2307 __swab16s(&lum->lcm_mirror_count);
2308 CLASSERT(offsetof(typeof(*lum), lcm_padding1) != 0);
2309 CLASSERT(offsetof(typeof(*lum), lcm_padding2) != 0);
2311 for (i = 0; i < ent_count; i++) {
2312 ent = &lum->lcm_entries[i];
2313 off = ent->lcme_offset;
2314 size = ent->lcme_size;
2320 __swab32s(&ent->lcme_id);
2321 __swab32s(&ent->lcme_flags);
2322 __swab64s(&ent->lcme_timestamp);
2323 __swab64s(&ent->lcme_extent.e_start);
2324 __swab64s(&ent->lcme_extent.e_end);
2325 __swab32s(&ent->lcme_offset);
2326 __swab32s(&ent->lcme_size);
2327 __swab32s(&ent->lcme_layout_gen);
2328 CLASSERT(offsetof(typeof(*ent), lcme_padding_1) != 0);
2330 v1 = (struct lov_user_md_v1 *)((char *)lum + off);
2331 stripe_count = v1->lmm_stripe_count;
2333 __swab16s(&stripe_count);
2335 if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V1) ||
2336 v1->lmm_magic == LOV_USER_MAGIC_V1) {
2337 lustre_swab_lov_user_md_v1(v1);
2338 if (size > sizeof(*v1))
2339 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2341 } else if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V3) ||
2342 v1->lmm_magic == LOV_USER_MAGIC_V3 ||
2343 v1->lmm_magic == __swab32(LOV_USER_MAGIC_SPECIFIC) ||
2344 v1->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2345 v3 = (struct lov_user_md_v3 *)v1;
2346 lustre_swab_lov_user_md_v3(v3);
2347 if (size > sizeof(*v3))
2348 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2351 CERROR("Invalid magic %#x\n", v1->lmm_magic);
2355 EXPORT_SYMBOL(lustre_swab_lov_comp_md_v1);
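/*
 * A minimal illustrative sketch (not code from this file) of the magic-check
 * idiom used above: __swab32() of a constant is itself a compile-time
 * constant, so a buffer of unknown byte order can be classified without any
 * runtime conversion, e.g.
 *
 *	if (magic == LOV_USER_MAGIC_V1 ||
 *	    magic == __swab32(LOV_USER_MAGIC_V1))
 *		handle_v1_layout(buf);
 *
 * where handle_v1_layout() is a hypothetical caller-side helper.
 */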
2357 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
2360 CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
2361 __swab32s(&lmm->lmm_magic);
2362 __swab32s(&lmm->lmm_pattern);
2363 lustre_swab_lmm_oi(&lmm->lmm_oi);
2364 __swab32s(&lmm->lmm_stripe_size);
2365 __swab16s(&lmm->lmm_stripe_count);
2366 __swab16s(&lmm->lmm_layout_gen);
2369 EXPORT_SYMBOL(lustre_swab_lov_mds_md);
2371 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, int stripe_count)
2376 for (i = 0; i < stripe_count; i++) {
2377 lustre_swab_ost_id(&(lod[i].l_ost_oi));
2378 __swab32s(&(lod[i].l_ost_gen));
2379 __swab32s(&(lod[i].l_ost_idx));
2383 EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
2385 void lustre_swab_ldlm_res_id (struct ldlm_res_id *id)
2389 for (i = 0; i < RES_NAME_SIZE; i++)
2390 __swab64s (&id->name[i]);
2393 void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
2395 /* The lock data is a union and the first two fields are always an
2396 * extent, so it is safe to process LDLM_EXTENT and LDLM_FLOCK lock
2397 * data the same way. */
2398 __swab64s(&d->l_extent.start);
2399 __swab64s(&d->l_extent.end);
2400 __swab64s(&d->l_extent.gid);
2401 __swab64s(&d->l_flock.lfw_owner);
2402 __swab32s(&d->l_flock.lfw_pid);
2405 void lustre_swab_ldlm_intent (struct ldlm_intent *i)
2410 void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
2412 __swab32s(&r->lr_type);
2413 CLASSERT(offsetof(typeof(*r), lr_pad) != 0);
2414 lustre_swab_ldlm_res_id(&r->lr_name);
2417 void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l)
2419 lustre_swab_ldlm_resource_desc (&l->l_resource);
2420 __swab32s (&l->l_req_mode);
2421 __swab32s (&l->l_granted_mode);
2422 lustre_swab_ldlm_policy_data (&l->l_policy_data);
2425 void lustre_swab_ldlm_request (struct ldlm_request *rq)
2427 __swab32s (&rq->lock_flags);
2428 lustre_swab_ldlm_lock_desc (&rq->lock_desc);
2429 __swab32s (&rq->lock_count);
2430 /* lock_handle[] opaque */
2433 void lustre_swab_ldlm_reply (struct ldlm_reply *r)
2435 __swab32s (&r->lock_flags);
2436 CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
2437 lustre_swab_ldlm_lock_desc (&r->lock_desc);
2438 /* lock_handle opaque */
2439 __swab64s (&r->lock_policy_res1);
2440 __swab64s (&r->lock_policy_res2);
2443 void lustre_swab_quota_body(struct quota_body *b)
2445 lustre_swab_lu_fid(&b->qb_fid);
2446 lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
2447 __swab32s(&b->qb_flags);
2448 __swab64s(&b->qb_count);
2449 __swab64s(&b->qb_usage);
2450 __swab64s(&b->qb_slv_ver);
2453 /* Dump functions */
2454 void dump_ioo(struct obd_ioobj *ioo)
2457 "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
2458 "ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
2462 void dump_rniobuf(struct niobuf_remote *nb)
2464 CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
2465 nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
2468 void dump_obdo(struct obdo *oa)
2470 u64 valid = oa->o_valid;
2472 CDEBUG(D_RPCTRACE, "obdo: o_valid = %#llx\n", valid);
2473 if (valid & OBD_MD_FLID)
2474 CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
2475 if (valid & OBD_MD_FLFID)
2476 CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
2478 if (valid & OBD_MD_FLSIZE)
2479 CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
2480 if (valid & OBD_MD_FLMTIME)
2481 CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
2482 if (valid & OBD_MD_FLATIME)
2483 CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
2484 if (valid & OBD_MD_FLCTIME)
2485 CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
2486 if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
2487 CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
2488 if (valid & OBD_MD_FLGRANT)
2489 CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
2490 if (valid & OBD_MD_FLBLKSZ)
2491 CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
2492 if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
2493 CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
2494 oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
2495 (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
2496 if (valid & OBD_MD_FLUID)
2497 CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
2498 if (valid & OBD_MD_FLUID)
2499 CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
2500 if (valid & OBD_MD_FLGID)
2501 CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
2502 if (valid & OBD_MD_FLGID)
2503 CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
2504 if (valid & OBD_MD_FLFLAGS)
2505 CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
2506 if (valid & OBD_MD_FLNLINK)
2507 CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
2508 else if (valid & OBD_MD_FLCKSUM)
2509 CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
2511 if (valid & OBD_MD_FLPARENT)
2512 CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
2514 if (valid & OBD_MD_FLFID) {
2515 CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
2517 CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
2520 if (valid & OBD_MD_FLHANDLE)
2521 CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
2522 oa->o_handle.cookie);
2525 void dump_ost_body(struct ost_body *ob)
2530 void dump_rcs(__u32 *rc)
2532 CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
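/*
 * req_ptlrpc_body_swabbed()/rep_ptlrpc_body_swabbed() report whether the
 * ptlrpc_body buffer of the request or reply message has already been
 * byte-swapped; _debug_req() below uses them to decide which message fields
 * are safe to read.
 */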
2535 static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2537 LASSERT(req->rq_reqmsg);
2539 switch (req->rq_reqmsg->lm_magic) {
2540 case LUSTRE_MSG_MAGIC_V2:
2541 return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
2543 CERROR("bad lustre msg magic: %#08X\n",
2544 req->rq_reqmsg->lm_magic);
2549 static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2551 if (unlikely(!req->rq_repmsg))
2554 switch (req->rq_repmsg->lm_magic) {
2555 case LUSTRE_MSG_MAGIC_V2:
2556 return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF);
2558 /* uninitialized yet */
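/*
 * Backend of the DEBUG_REQ() macro: emit the standard one-line request
 * trace.  Fields of rq_reqmsg/rq_repmsg are only dereferenced when the
 * corresponding message exists and is known to be in CPU byte order, so
 * this can be called at any point in a request's life cycle.
 */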
2563 void _debug_req(struct ptlrpc_request *req,
2564 struct libcfs_debug_msg_data *msgdata, const char *fmt, ...)
2566 bool req_ok = req->rq_reqmsg != NULL;
2567 bool rep_ok = false;
2568 lnet_nid_t nid = LNET_NID_ANY;
2571 int rep_status = -1;
2573 spin_lock(&req->rq_early_free_lock);
2577 if (ptlrpc_req_need_swab(req)) {
2578 req_ok = req_ok && req_ptlrpc_body_swabbed(req);
2579 rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
2583 rep_flags = lustre_msg_get_flags(req->rq_repmsg);
2584 rep_status = lustre_msg_get_status(req->rq_repmsg);
2586 spin_unlock(&req->rq_early_free_lock);
2588 if (req->rq_import && req->rq_import->imp_connection)
2589 nid = req->rq_import->imp_connection->c_peer.nid;
2590 else if (req->rq_export && req->rq_export->exp_connection)
2591 nid = req->rq_export->exp_connection->c_peer.nid;
2593 va_start(args, fmt);
2594 libcfs_debug_vmsg2(msgdata, fmt, args,
2595 " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
2596 req, req->rq_xid, req->rq_transno,
2597 req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
2598 req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
2600 req->rq_import->imp_obd->obd_name :
2602 req->rq_export->exp_client_uuid.uuid :
2604 libcfs_nid2str(nid),
2605 req->rq_request_portal, req->rq_reply_portal,
2606 req->rq_reqlen, req->rq_replen,
2607 req->rq_early_count, (s64)req->rq_timedout,
2608 (s64)req->rq_deadline,
2609 atomic_read(&req->rq_refcount),
2610 DEBUG_REQ_FLAGS(req),
2611 req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
2612 rep_flags, req->rq_status, rep_status);
2615 EXPORT_SYMBOL(_debug_req);
2617 void lustre_swab_lustre_capa(struct lustre_capa *c)
2619 lustre_swab_lu_fid(&c->lc_fid);
2620 __swab64s (&c->lc_opc);
2621 __swab64s (&c->lc_uid);
2622 __swab64s (&c->lc_gid);
2623 __swab32s (&c->lc_flags);
2624 __swab32s (&c->lc_keyid);
2625 __swab32s (&c->lc_timeout);
2626 __swab32s (&c->lc_expiry);
2629 void lustre_swab_lustre_capa_key(struct lustre_capa_key *k)
2631 __swab64s (&k->lk_seq);
2632 __swab32s (&k->lk_keyid);
2633 CLASSERT(offsetof(typeof(*k), lk_padding) != 0);
2636 void lustre_swab_hsm_user_state(struct hsm_user_state *state)
2638 __swab32s(&state->hus_states);
2639 __swab32s(&state->hus_archive_id);
2642 void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
2644 __swab32s(&hss->hss_valid);
2645 __swab64s(&hss->hss_setmask);
2646 __swab64s(&hss->hss_clearmask);
2647 __swab32s(&hss->hss_archive_id);
2650 static void lustre_swab_hsm_extent(struct hsm_extent *extent)
2652 __swab64s(&extent->offset);
2653 __swab64s(&extent->length);
2656 void lustre_swab_hsm_current_action(struct hsm_current_action *action)
2658 __swab32s(&action->hca_state);
2659 __swab32s(&action->hca_action);
2660 lustre_swab_hsm_extent(&action->hca_location);
2663 void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
2665 lustre_swab_lu_fid(&hui->hui_fid);
2666 lustre_swab_hsm_extent(&hui->hui_extent);
2669 void lustre_swab_lu_extent(struct lu_extent *le)
2671 __swab64s(&le->e_start);
2672 __swab64s(&le->e_end);
2675 void lustre_swab_layout_intent(struct layout_intent *li)
2677 __swab32s(&li->li_opc);
2678 __swab32s(&li->li_flags);
2679 lustre_swab_lu_extent(&li->li_extent);
2682 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
2684 lustre_swab_lu_fid(&hpk->hpk_fid);
2685 __swab64s(&hpk->hpk_cookie);
2686 __swab64s(&hpk->hpk_extent.offset);
2687 __swab64s(&hpk->hpk_extent.length);
2688 __swab16s(&hpk->hpk_flags);
2689 __swab16s(&hpk->hpk_errval);
2692 void lustre_swab_hsm_request(struct hsm_request *hr)
2694 __swab32s(&hr->hr_action);
2695 __swab32s(&hr->hr_archive_id);
2696 __swab64s(&hr->hr_flags);
2697 __swab32s(&hr->hr_itemcount);
2698 __swab32s(&hr->hr_data_len);
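/*
 * Swab a single object_update.  Its parameters are variable-sized, so the
 * loop advances by object_update_param_size() after swabbing each
 * parameter header.
 */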
2701 void lustre_swab_object_update(struct object_update *ou)
2703 struct object_update_param *param;
2706 __swab16s(&ou->ou_type);
2707 __swab16s(&ou->ou_params_count);
2708 __swab32s(&ou->ou_result_size);
2709 __swab32s(&ou->ou_flags);
2710 __swab32s(&ou->ou_padding1);
2711 __swab64s(&ou->ou_batchid);
2712 lustre_swab_lu_fid(&ou->ou_fid);
2713 param = &ou->ou_params[0];
2714 for (i = 0; i < ou->ou_params_count; i++) {
2715 __swab16s(&param->oup_len);
2716 __swab16s(&param->oup_padding);
2717 __swab32s(&param->oup_padding2);
2718 param = (struct object_update_param *)((char *)param +
2719 object_update_param_size(param));
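/*
 * Swab an object_update_request header and every update it carries;
 * individual updates are located with object_update_request_get().
 */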
2723 void lustre_swab_object_update_request(struct object_update_request *our)
2726 __swab32s(&our->ourq_magic);
2727 __swab16s(&our->ourq_count);
2728 __swab16s(&our->ourq_padding);
2729 for (i = 0; i < our->ourq_count; i++) {
2730 struct object_update *ou;
2732 ou = object_update_request_get(our, i, NULL);
2735 lustre_swab_object_update(ou);
2739 void lustre_swab_object_update_result(struct object_update_result *our)
2741 __swab32s(&our->our_rc);
2742 __swab16s(&our->our_datalen);
2743 __swab16s(&our->our_padding);
2746 void lustre_swab_object_update_reply(struct object_update_reply *our)
2750 __swab32s(&our->ourp_magic);
2751 __swab16s(&our->ourp_count);
2752 __swab16s(&our->ourp_padding);
2753 for (i = 0; i < our->ourp_count; i++) {
2754 struct object_update_result *ourp;
2756 __swab16s(&our->ourp_lens[i]);
2757 ourp = object_update_result_get(our, i, NULL);
2760 lustre_swab_object_update_result(ourp);
2764 void lustre_swab_out_update_header(struct out_update_header *ouh)
2766 __swab32s(&ouh->ouh_magic);
2767 __swab32s(&ouh->ouh_count);
2768 __swab32s(&ouh->ouh_inline_length);
2769 __swab32s(&ouh->ouh_reply_size);
2771 EXPORT_SYMBOL(lustre_swab_out_update_header);
2773 void lustre_swab_out_update_buffer(struct out_update_buffer *oub)
2775 __swab32s(&oub->oub_size);
2776 __swab32s(&oub->oub_padding);
2778 EXPORT_SYMBOL(lustre_swab_out_update_buffer);
2780 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
2782 __swab64s(&msl->msl_flags);
2785 void lustre_swab_close_data(struct close_data *cd)
2787 lustre_swab_lu_fid(&cd->cd_fid);
2788 __swab64s(&cd->cd_data_version);
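/*
 * Swab the layout-resync close payload: resync_count first, then the
 * resync id array, which is swabbed in place only when it fits in the
 * inline buffer (INLINE_RESYNC_ARRAY_SIZE).
 */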
2791 void lustre_swab_close_data_resync_done(struct close_data_resync_done *resync)
2795 __swab32s(&resync->resync_count);
2796 /* after the swab above, resync_count is in CPU endianness */
2797 if (resync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
2798 for (i = 0; i < resync->resync_count; i++)
2799 __swab32s(&resync->resync_ids_inline[i]);
2802 EXPORT_SYMBOL(lustre_swab_close_data_resync_done);
2804 void lustre_swab_lfsck_request(struct lfsck_request *lr)
2806 __swab32s(&lr->lr_event);
2807 __swab32s(&lr->lr_index);
2808 __swab32s(&lr->lr_flags);
2809 __swab32s(&lr->lr_valid);
2810 __swab32s(&lr->lr_speed);
2811 __swab16s(&lr->lr_version);
2812 __swab16s(&lr->lr_active);
2813 __swab16s(&lr->lr_param);
2814 __swab16s(&lr->lr_async_windows);
2815 __swab32s(&lr->lr_flags);
2816 lustre_swab_lu_fid(&lr->lr_fid);
2817 lustre_swab_lu_fid(&lr->lr_fid2);
2818 __swab32s(&lr->lr_comp_id);
2819 CLASSERT(offsetof(typeof(*lr), lr_padding_0) != 0);
2820 CLASSERT(offsetof(typeof(*lr), lr_padding_1) != 0);
2821 CLASSERT(offsetof(typeof(*lr), lr_padding_2) != 0);
2822 CLASSERT(offsetof(typeof(*lr), lr_padding_3) != 0);
2825 void lustre_swab_lfsck_reply(struct lfsck_reply *lr)
2827 __swab32s(&lr->lr_status);
2828 CLASSERT(offsetof(typeof(*lr), lr_padding_1) != 0);
2829 __swab64s(&lr->lr_repaired);
2832 static void lustre_swab_orphan_rec(struct lu_orphan_rec *rec)
2834 lustre_swab_lu_fid(&rec->lor_fid);
2835 __swab32s(&rec->lor_uid);
2836 __swab32s(&rec->lor_gid);
2839 void lustre_swab_orphan_ent(struct lu_orphan_ent *ent)
2841 lustre_swab_lu_fid(&ent->loe_key);
2842 lustre_swab_orphan_rec(&ent->loe_rec);
2844 EXPORT_SYMBOL(lustre_swab_orphan_ent);
2846 void lustre_swab_orphan_ent_v2(struct lu_orphan_ent_v2 *ent)
2848 lustre_swab_lu_fid(&ent->loe_key);
2849 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
2850 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
2851 CLASSERT(offsetof(typeof(ent->loe_rec), lor_padding) != 0);
2853 EXPORT_SYMBOL(lustre_swab_orphan_ent_v2);
2855 void lustre_swab_orphan_ent_v3(struct lu_orphan_ent_v3 *ent)
2857 lustre_swab_lu_fid(&ent->loe_key);
2858 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
2859 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
2860 __swab32s(&ent->loe_rec.lor_layout_version);
2861 __swab32s(&ent->loe_rec.lor_range);
2862 CLASSERT(offsetof(typeof(ent->loe_rec), lor_padding_1) != 0);
2863 CLASSERT(offsetof(typeof(ent->loe_rec), lor_padding_2) != 0);
2865 EXPORT_SYMBOL(lustre_swab_orphan_ent_v3);
2867 void lustre_swab_ladvise(struct lu_ladvise *ladvise)
2869 __swab16s(&ladvise->lla_advice);
2870 __swab16s(&ladvise->lla_value1);
2871 __swab32s(&ladvise->lla_value2);
2872 __swab64s(&ladvise->lla_start);
2873 __swab64s(&ladvise->lla_end);
2874 __swab32s(&ladvise->lla_value3);
2875 __swab32s(&ladvise->lla_value4);
2877 EXPORT_SYMBOL(lustre_swab_ladvise);
2879 void lustre_swab_ladvise_hdr(struct ladvise_hdr *ladvise_hdr)
2881 __swab32s(&ladvise_hdr->lah_magic);
2882 __swab32s(&ladvise_hdr->lah_count);
2883 __swab64s(&ladvise_hdr->lah_flags);
2884 __swab32s(&ladvise_hdr->lah_value1);
2885 __swab32s(&ladvise_hdr->lah_value2);
2886 __swab64s(&ladvise_hdr->lah_value3);
2888 EXPORT_SYMBOL(lustre_swab_ladvise_hdr);