4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/ptlrpc/pack_generic.c
34 * (Un)packing of OST requests
36 * Author: Peter J. Braam <braam@clusterfs.com>
37 * Author: Phil Schwan <phil@clusterfs.com>
38 * Author: Eric Barton <eeb@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_RPC
43 #include <libcfs/libcfs.h>
45 #include <llog_swab.h>
46 #include <lustre_net.h>
47 #include <lustre_swab.h>
48 #include <obd_cksum.h>
49 #include <obd_class.h>
50 #include <obd_support.h>
51 #include <obj_update.h>
53 #include "ptlrpc_internal.h"
55 static inline __u32 lustre_msg_hdr_size_v2(__u32 count)
57 return cfs_size_round(offsetof(struct lustre_msg_v2,
61 __u32 lustre_msg_hdr_size(__u32 magic, __u32 count)
64 case LUSTRE_MSG_MAGIC_V2:
65 return lustre_msg_hdr_size_v2(count);
67 LASSERTF(0, "incorrect message magic: %08x\n", magic);
72 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
76 lustre_set_req_swabbed(req, index);
78 lustre_set_rep_swabbed(req, index);
81 int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
85 return (ptlrpc_req_need_swab(req) &&
86 !lustre_req_swabbed(req, index));
88 return (ptlrpc_rep_need_swab(req) &&
89 !lustre_rep_swabbed(req, index));
92 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
95 __u32 ver = lustre_msg_get_version(msg);
96 return (ver & LUSTRE_VERSION_MASK) != version;
99 int lustre_msg_check_version(struct lustre_msg *msg, __u32 version)
101 #define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
102 switch (msg->lm_magic) {
103 case LUSTRE_MSG_MAGIC_V1:
104 CERROR("msg v1 not supported - please upgrade your system\n");
106 case LUSTRE_MSG_MAGIC_V2:
107 return lustre_msg_check_version_v2(msg, version);
109 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
112 #undef LUSTRE_MSG_MAGIC_V1
115 /* early reply size */
116 __u32 lustre_msg_early_size()
120 /* Always reply with the old ptlrpc_body_v2 to keep interoperability
121 * with old clients (< 2.3) that don't have pb_jobid
122 * in the ptlrpc_body.
124 * XXX Remove this whenever we drop interoperability with such
127 __u32 pblen = sizeof(struct ptlrpc_body_v2);
128 size = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
132 EXPORT_SYMBOL(lustre_msg_early_size);
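/*
 * Usage sketch (editor's illustration, not part of the original code):
 * callers that preallocate an early-reply buffer size it with
 * lustre_msg_early_size().  The variable name below is hypothetical.
 */
#if 0
	__u32 early_replen = lustre_msg_early_size();
	/* early_replen covers one ptlrpc_body_v2 segment in V2 format */
#endif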
134 __u32 lustre_msg_size_v2(int count, __u32 *lengths)
139 size = lustre_msg_hdr_size_v2(count);
140 for (i = 0; i < count; i++)
141 size += cfs_size_round(lengths[i]);
145 EXPORT_SYMBOL(lustre_msg_size_v2);
147 /* This returns the size of the buffer that is required to hold a lustre_msg
148 * with the given sub-buffer lengths.
149 * NOTE: this should only be used for NEW requests, and should always be
150 * in the form of a v2 request. If this is a connection to a v1
151 * target then the first buffer will be stripped because the ptlrpc
152 * data is part of the lustre_msg_v1 header. b=14043 */
153 __u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
155 __u32 size[] = { sizeof(struct ptlrpc_body) };
163 LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
166 case LUSTRE_MSG_MAGIC_V2:
167 return lustre_msg_size_v2(count, lens);
169 LASSERTF(0, "incorrect message magic: %08x\n", magic);
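/*
 * Usage sketch (editor's illustration): sizing a new request that carries
 * the mandatory ptlrpc_body plus one payload segment.  "payload_len" is a
 * hypothetical length supplied by the caller.
 */
#if 0
	__u32 lens[2] = { sizeof(struct ptlrpc_body), payload_len };
	__u32 bufsize = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 2, lens);
#endif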
174 /* This is used to determine the size of a buffer that was already packed
175 * and will correctly handle the different message formats. */
176 __u32 lustre_packed_msg_size(struct lustre_msg *msg)
178 switch (msg->lm_magic) {
179 case LUSTRE_MSG_MAGIC_V2:
180 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
182 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
187 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
193 msg->lm_bufcount = count;
194 /* XXX: lm_secflvr uninitialized here */
195 msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
197 for (i = 0; i < count; i++)
198 msg->lm_buflens[i] = lens[i];
203 ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
204 for (i = 0; i < count; i++) {
208 memcpy(ptr, tmp, lens[i]);
209 ptr += cfs_size_round(lens[i]);
212 EXPORT_SYMBOL(lustre_init_msg_v2);
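/*
 * Usage sketch (editor's illustration): lustre_init_msg_v2() lays out the
 * segment lengths and copies any initial data provided in "bufs".  A caller
 * that has allocated "msg" of at least lustre_msg_size_v2(2, lens) bytes
 * might initialize it as below; "key_len" and "key_data" are hypothetical.
 */
#if 0
	__u32 lens[2] = { sizeof(struct ptlrpc_body), key_len };
	char *bufs[2] = { NULL, key_data };

	lustre_init_msg_v2(msg, 2, lens, bufs);
#endif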
214 static int lustre_pack_request_v2(struct ptlrpc_request *req,
215 int count, __u32 *lens, char **bufs)
219 reqlen = lustre_msg_size_v2(count, lens);
221 rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
225 req->rq_reqlen = reqlen;
227 lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
228 lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
232 int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
233 __u32 *lens, char **bufs)
235 __u32 size[] = { sizeof(struct ptlrpc_body) };
243 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
245 /* we only use the new format; we don't need to be compatible with 1.4 */
246 magic = LUSTRE_MSG_MAGIC_V2;
249 case LUSTRE_MSG_MAGIC_V2:
250 return lustre_pack_request_v2(req, count, lens, bufs);
252 LASSERTF(0, "incorrect message magic: %08x\n", magic);
258 struct list_head ptlrpc_rs_debug_lru =
259 LIST_HEAD_INIT(ptlrpc_rs_debug_lru);
260 spinlock_t ptlrpc_rs_debug_lock;
262 #define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
264 spin_lock(&ptlrpc_rs_debug_lock); \
265 list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
266 spin_unlock(&ptlrpc_rs_debug_lock); \
269 #define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
271 spin_lock(&ptlrpc_rs_debug_lock); \
272 list_del(&(rs)->rs_debug_list); \
273 spin_unlock(&ptlrpc_rs_debug_lock); \
276 # define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
277 # define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while(0)
280 struct ptlrpc_reply_state *
281 lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
283 struct ptlrpc_reply_state *rs = NULL;
285 spin_lock(&svcpt->scp_rep_lock);
287 /* See if we have anything in the pool, and wait if not */
288 while (list_empty(&svcpt->scp_rep_idle)) {
289 struct l_wait_info lwi;
292 spin_unlock(&svcpt->scp_rep_lock);
293 /* If we cannot get anything for a long time, we had better
294 * bail out instead of waiting indefinitely */
295 lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
296 rc = l_wait_event(svcpt->scp_rep_waitq,
297 !list_empty(&svcpt->scp_rep_idle), &lwi);
300 spin_lock(&svcpt->scp_rep_lock);
303 rs = list_entry(svcpt->scp_rep_idle.next,
304 struct ptlrpc_reply_state, rs_list);
305 list_del(&rs->rs_list);
307 spin_unlock(&svcpt->scp_rep_lock);
309 memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
310 rs->rs_size = svcpt->scp_service->srv_max_reply_size;
311 rs->rs_svcpt = svcpt;
317 void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
319 struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
321 spin_lock(&svcpt->scp_rep_lock);
322 list_add(&rs->rs_list, &svcpt->scp_rep_idle);
323 spin_unlock(&svcpt->scp_rep_lock);
324 wake_up(&svcpt->scp_rep_waitq);
327 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
328 __u32 *lens, char **bufs, int flags)
330 struct ptlrpc_reply_state *rs;
334 LASSERT(req->rq_reply_state == NULL);
336 if ((flags & LPRFL_EARLY_REPLY) == 0) {
337 spin_lock(&req->rq_lock);
338 req->rq_packed_final = 1;
339 spin_unlock(&req->rq_lock);
342 msg_len = lustre_msg_size_v2(count, lens);
343 rc = sptlrpc_svc_alloc_rs(req, msg_len);
347 rs = req->rq_reply_state;
348 atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
349 rs->rs_cb_id.cbid_fn = reply_out_callback;
350 rs->rs_cb_id.cbid_arg = rs;
351 rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
352 INIT_LIST_HEAD(&rs->rs_exp_list);
353 INIT_LIST_HEAD(&rs->rs_obd_list);
354 INIT_LIST_HEAD(&rs->rs_list);
355 spin_lock_init(&rs->rs_lock);
357 req->rq_replen = msg_len;
358 req->rq_reply_state = rs;
359 req->rq_repmsg = rs->rs_msg;
361 lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
362 lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
364 PTLRPC_RS_DEBUG_LRU_ADD(rs);
368 EXPORT_SYMBOL(lustre_pack_reply_v2);
370 int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
371 char **bufs, int flags)
374 __u32 size[] = { sizeof(struct ptlrpc_body) };
382 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
384 switch (req->rq_reqmsg->lm_magic) {
385 case LUSTRE_MSG_MAGIC_V2:
386 rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
389 LASSERTF(0, "incorrect message magic: %08x\n",
390 req->rq_reqmsg->lm_magic);
394 CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
395 lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
399 int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
402 return lustre_pack_reply_flags(req, count, lens, bufs, 0);
404 EXPORT_SYMBOL(lustre_pack_reply);
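/*
 * Usage sketch (editor's illustration): a server-side handler normally packs
 * its reply before filling in the reply segments.  "body_len" is a
 * hypothetical length for the second segment.
 */
#if 0
	__u32 lens[2] = { sizeof(struct ptlrpc_body), body_len };
	int rc = lustre_pack_reply(req, 2, lens, NULL);

	if (rc != 0)
		return rc;
#endif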
406 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size)
408 __u32 i, offset, buflen, bufcount;
412 bufcount = m->lm_bufcount;
413 if (unlikely(n >= bufcount)) {
414 CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
419 buflen = m->lm_buflens[n];
420 if (unlikely(buflen < min_size)) {
421 CERROR("msg %p buffer[%d] size %d too small "
422 "(required %d, opc=%d)\n", m, n, buflen, min_size,
423 n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
427 offset = lustre_msg_hdr_size_v2(bufcount);
428 for (i = 0; i < n; i++)
429 offset += cfs_size_round(m->lm_buflens[i]);
431 return (char *)m + offset;
434 void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 min_size)
436 switch (m->lm_magic) {
437 case LUSTRE_MSG_MAGIC_V2:
438 return lustre_msg_buf_v2(m, n, min_size);
440 LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n",
445 EXPORT_SYMBOL(lustre_msg_buf);
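/*
 * Usage sketch (editor's illustration): pulling a typed sub-buffer out of a
 * packed message; NULL is returned if the segment is missing or smaller than
 * the requested minimum size.  The error handling shown is only an example.
 */
#if 0
	struct ptlrpc_body *pb = lustre_msg_buf(msg, MSG_PTLRPC_BODY_OFF,
						 sizeof(struct ptlrpc_body));
	if (pb == NULL)
		return -EPROTO;
#endif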
447 static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
448 unsigned int newlen, int move_data)
450 char *tail = NULL, *newpos;
454 LASSERT(msg->lm_bufcount > segment);
455 LASSERT(msg->lm_buflens[segment] >= newlen);
457 if (msg->lm_buflens[segment] == newlen)
460 if (move_data && msg->lm_bufcount > segment + 1) {
461 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
462 for (n = segment + 1; n < msg->lm_bufcount; n++)
463 tail_len += cfs_size_round(msg->lm_buflens[n]);
466 msg->lm_buflens[segment] = newlen;
468 if (tail && tail_len) {
469 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
470 LASSERT(newpos <= tail);
472 memmove(newpos, tail, tail_len);
475 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
479 * For @msg, shrink @segment to size @newlen. If @move_data is non-zero,
480 * we also move data forward from @segment + 1.
482 * If @newlen == 0, we remove the segment completely, but we still keep the
483 * total bufcount the same to avoid possible data moving. This leaves an
484 * unused segment of size 0 at the tail, but that's OK.
486 * Returns the new message size after shrinking.
489 * + if any buffers higher than @segment have been filled in, shrink must be
490 * called with non-zero @move_data.
491 * + the caller should NOT keep pointers to msg buffers higher than @segment
494 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
495 unsigned int newlen, int move_data)
497 switch (msg->lm_magic) {
498 case LUSTRE_MSG_MAGIC_V2:
499 return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
501 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
504 EXPORT_SYMBOL(lustre_shrink_msg);
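/*
 * Usage sketch (editor's illustration): trimming an over-allocated reply
 * segment down to the bytes actually used, moving later segments forward and
 * updating the reply length.  "segment" and "used_len" are hypothetical
 * values computed by the caller.
 */
#if 0
	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
					   used_len, 1);
#endif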
506 void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
508 PTLRPC_RS_DEBUG_LRU_DEL(rs);
510 LASSERT(atomic_read(&rs->rs_refcount) == 0);
511 LASSERT(!rs->rs_difficult || rs->rs_handled);
512 LASSERT(!rs->rs_on_net);
513 LASSERT(!rs->rs_scheduled);
514 LASSERT(rs->rs_export == NULL);
515 LASSERT(rs->rs_nlocks == 0);
516 LASSERT(list_empty(&rs->rs_exp_list));
517 LASSERT(list_empty(&rs->rs_obd_list));
519 sptlrpc_svc_free_rs(rs);
522 static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
524 int swabbed, required_len, i;
526 /* Now we know the sender speaks my language. */
527 required_len = lustre_msg_hdr_size_v2(0);
528 if (len < required_len) {
529 /* can't even look inside the message */
530 CERROR("message length %d too small for lustre_msg\n", len);
534 swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
537 __swab32s(&m->lm_magic);
538 __swab32s(&m->lm_bufcount);
539 __swab32s(&m->lm_secflvr);
540 __swab32s(&m->lm_repsize);
541 __swab32s(&m->lm_cksum);
542 __swab32s(&m->lm_flags);
543 CLASSERT(offsetof(typeof(*m), lm_padding_2) != 0);
544 CLASSERT(offsetof(typeof(*m), lm_padding_3) != 0);
547 required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
548 if (len < required_len) {
549 /* didn't receive all the buffer lengths */
550 CERROR ("message length %d too small for %d buflens\n",
551 len, m->lm_bufcount);
555 for (i = 0; i < m->lm_bufcount; i++) {
557 __swab32s(&m->lm_buflens[i]);
558 required_len += cfs_size_round(m->lm_buflens[i]);
561 if (len < required_len) {
562 CERROR("len: %d, required_len %d\n", len, required_len);
563 CERROR("bufcount: %d\n", m->lm_bufcount);
564 for (i = 0; i < m->lm_bufcount; i++)
565 CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
572 int __lustre_unpack_msg(struct lustre_msg *m, int len)
574 int required_len, rc;
577 /* We can provide a slightly better error log if we check the
578 * message magic and version first. In the future, struct
579 * lustre_msg may grow, and we'd like to log a version mismatch,
580 * rather than a short message.
583 required_len = offsetof(struct lustre_msg, lm_magic) +
585 if (len < required_len) {
586 /* can't even look inside the message */
587 CERROR("message length %d too small for magic/version check\n",
592 rc = lustre_unpack_msg_v2(m, len);
596 EXPORT_SYMBOL(__lustre_unpack_msg);
598 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
601 rc = __lustre_unpack_msg(req->rq_reqmsg, len);
603 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
609 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
612 rc = __lustre_unpack_msg(req->rq_repmsg, len);
614 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
620 static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
621 const int inout, int offset)
623 struct ptlrpc_body *pb;
624 struct lustre_msg_v2 *m = inout ? req->rq_reqmsg : req->rq_repmsg;
626 pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
628 CERROR("error unpacking ptlrpc body\n");
631 if (ptlrpc_buf_need_swab(req, inout, offset)) {
632 lustre_swab_ptlrpc_body(pb);
633 ptlrpc_buf_set_swabbed(req, inout, offset);
636 if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
637 CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
642 pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
647 int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
649 switch (req->rq_reqmsg->lm_magic) {
650 case LUSTRE_MSG_MAGIC_V2:
651 return lustre_unpack_ptlrpc_body_v2(req, 1, offset);
653 CERROR("bad lustre msg magic: %08x\n",
654 req->rq_reqmsg->lm_magic);
659 int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
661 switch (req->rq_repmsg->lm_magic) {
662 case LUSTRE_MSG_MAGIC_V2:
663 return lustre_unpack_ptlrpc_body_v2(req, 0, offset);
665 CERROR("bad lustre msg magic: %08x\n",
666 req->rq_repmsg->lm_magic);
671 static inline __u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, __u32 n)
673 if (n >= m->lm_bufcount)
676 return m->lm_buflens[n];
680 * lustre_msg_buflen - return the length of buffer \a n in message \a m
681 * \param m lustre_msg (request or reply) to look at
682 * \param n message index (base 0)
684 * returns zero for non-existent message indices
686 __u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n)
688 switch (m->lm_magic) {
689 case LUSTRE_MSG_MAGIC_V2:
690 return lustre_msg_buflen_v2(m, n);
692 CERROR("incorrect message magic: %08x\n", m->lm_magic);
696 EXPORT_SYMBOL(lustre_msg_buflen);
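/*
 * Usage sketch (editor's illustration): checking how much data the peer
 * actually packed in a segment before touching it; a length of zero means
 * the segment is absent.  The error handling shown is only an example.
 */
#if 0
	__u32 len = lustre_msg_buflen(msg, n);

	if (len == 0)
		return -EPROTO;	/* segment n was not packed by the sender */
#endif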
699 lustre_msg_set_buflen_v2(struct lustre_msg_v2 *m, __u32 n, __u32 len)
701 if (n >= m->lm_bufcount)
704 m->lm_buflens[n] = len;
707 void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len)
709 switch (m->lm_magic) {
710 case LUSTRE_MSG_MAGIC_V2:
711 lustre_msg_set_buflen_v2(m, n, len);
714 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
718 /* NB: returns the bufcount for the lustre_msg_v2 format, so if the message is
719 * packed in V1 format the result is one bigger (it adds struct ptlrpc_body). */
720 __u32 lustre_msg_bufcount(struct lustre_msg *m)
722 switch (m->lm_magic) {
723 case LUSTRE_MSG_MAGIC_V2:
724 return m->lm_bufcount;
726 CERROR("incorrect message magic: %08x\n", m->lm_magic);
731 char *lustre_msg_string(struct lustre_msg *m, __u32 index, __u32 max_len)
733 /* max_len == 0 means the string should fill the buffer */
737 switch (m->lm_magic) {
738 case LUSTRE_MSG_MAGIC_V2:
739 str = lustre_msg_buf_v2(m, index, 0);
740 blen = lustre_msg_buflen_v2(m, index);
743 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
747 CERROR ("can't unpack string in msg %p buffer[%d]\n", m, index);
751 slen = strnlen(str, blen);
753 if (slen == blen) { /* not NULL terminated */
754 CERROR("can't unpack non-NULL terminated string in "
755 "msg %p buffer[%d] len %d\n", m, index, blen);
760 if (slen != blen - 1) {
761 CERROR("can't unpack short string in msg %p "
762 "buffer[%d] len %d: strlen %d\n",
763 m, index, blen, slen);
766 } else if (slen > max_len) {
767 CERROR("can't unpack oversized string in msg %p "
768 "buffer[%d] len %d strlen %d: max %d expected\n",
769 m, index, blen, slen, max_len);
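/*
 * Usage sketch (editor's illustration): extracting a NUL-terminated string
 * from a message segment.  With max_len == 0 the string must exactly fill
 * the buffer; otherwise it must fit within max_len.  NAME_MAX is only an
 * example limit, and the error handling is illustrative.
 */
#if 0
	char *name = lustre_msg_string(msg, index, NAME_MAX);

	if (name == NULL)
		return -EPROTO;	/* missing, unterminated or oversized */
#endif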
776 /* Wrap up the normal fixed length cases */
777 static inline void *__lustre_swab_buf(struct lustre_msg *msg, __u32 index,
778 __u32 min_size, void *swabber)
782 LASSERT(msg != NULL);
783 switch (msg->lm_magic) {
784 case LUSTRE_MSG_MAGIC_V2:
785 ptr = lustre_msg_buf_v2(msg, index, min_size);
788 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
791 if (ptr != NULL && swabber != NULL)
792 ((void (*)(void *))swabber)(ptr);
797 static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
799 return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
800 sizeof(struct ptlrpc_body_v2));
803 __u32 lustre_msghdr_get_flags(struct lustre_msg *msg)
805 switch (msg->lm_magic) {
806 case LUSTRE_MSG_MAGIC_V2:
807 /* already in host endian */
808 return msg->lm_flags;
810 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
814 EXPORT_SYMBOL(lustre_msghdr_get_flags);
816 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
818 switch (msg->lm_magic) {
819 case LUSTRE_MSG_MAGIC_V2:
820 msg->lm_flags = flags;
823 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
827 __u32 lustre_msg_get_flags(struct lustre_msg *msg)
829 switch (msg->lm_magic) {
830 case LUSTRE_MSG_MAGIC_V2: {
831 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
835 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
839 /* flags might be printed in debug code while message
844 EXPORT_SYMBOL(lustre_msg_get_flags);
846 void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags)
848 switch (msg->lm_magic) {
849 case LUSTRE_MSG_MAGIC_V2: {
850 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
851 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
852 pb->pb_flags |= flags;
856 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
859 EXPORT_SYMBOL(lustre_msg_add_flags);
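/*
 * Usage sketch (editor's illustration): the flag accessors operate on the
 * ptlrpc_body of the message, e.g. marking a request as resent and later
 * testing for it (MSG_RESENT is one of the generic MSG_* flags; the handler
 * below is hypothetical).
 */
#if 0
	lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

	if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT)
		handle_resend(req);	/* hypothetical handler */
#endif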
861 void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags)
863 switch (msg->lm_magic) {
864 case LUSTRE_MSG_MAGIC_V2: {
865 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
866 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
867 pb->pb_flags = flags;
871 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
875 void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags)
877 switch (msg->lm_magic) {
878 case LUSTRE_MSG_MAGIC_V2: {
879 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
880 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
881 pb->pb_flags &= ~(MSG_GEN_FLAG_MASK & flags);
885 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
888 EXPORT_SYMBOL(lustre_msg_clear_flags);
890 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
892 switch (msg->lm_magic) {
893 case LUSTRE_MSG_MAGIC_V2: {
894 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
896 return pb->pb_op_flags;
898 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
906 void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags)
908 switch (msg->lm_magic) {
909 case LUSTRE_MSG_MAGIC_V2: {
910 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
911 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
912 pb->pb_op_flags |= flags;
916 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
919 EXPORT_SYMBOL(lustre_msg_add_op_flags);
921 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
923 switch (msg->lm_magic) {
924 case LUSTRE_MSG_MAGIC_V2: {
925 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
927 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
930 return &pb->pb_handle;
933 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
938 __u32 lustre_msg_get_type(struct lustre_msg *msg)
940 switch (msg->lm_magic) {
941 case LUSTRE_MSG_MAGIC_V2: {
942 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
944 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
945 return PTL_RPC_MSG_ERR;
950 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
951 return PTL_RPC_MSG_ERR;
954 EXPORT_SYMBOL(lustre_msg_get_type);
956 __u32 lustre_msg_get_version(struct lustre_msg *msg)
958 switch (msg->lm_magic) {
959 case LUSTRE_MSG_MAGIC_V2: {
960 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
962 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
965 return pb->pb_version;
968 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
973 void lustre_msg_add_version(struct lustre_msg *msg, __u32 version)
975 switch (msg->lm_magic) {
976 case LUSTRE_MSG_MAGIC_V2: {
977 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
978 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
979 pb->pb_version |= version;
983 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
987 __u32 lustre_msg_get_opc(struct lustre_msg *msg)
989 switch (msg->lm_magic) {
990 case LUSTRE_MSG_MAGIC_V2: {
991 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
993 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
999 CERROR("incorrect message magic: %08x (msg:%p)\n",
1000 msg->lm_magic, msg);
1004 EXPORT_SYMBOL(lustre_msg_get_opc);
1006 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg)
1008 switch (msg->lm_magic) {
1009 case LUSTRE_MSG_MAGIC_V2: {
1010 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1012 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1015 return pb->pb_last_xid;
1018 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1022 EXPORT_SYMBOL(lustre_msg_get_last_xid);
1024 __u16 lustre_msg_get_tag(struct lustre_msg *msg)
1026 switch (msg->lm_magic) {
1027 case LUSTRE_MSG_MAGIC_V2: {
1028 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1030 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1036 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1040 EXPORT_SYMBOL(lustre_msg_get_tag);
1042 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
1044 switch (msg->lm_magic) {
1045 case LUSTRE_MSG_MAGIC_V2: {
1046 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1048 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1051 return pb->pb_last_committed;
1054 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1058 EXPORT_SYMBOL(lustre_msg_get_last_committed);
1060 __u64 *lustre_msg_get_versions(struct lustre_msg *msg)
1062 switch (msg->lm_magic) {
1063 case LUSTRE_MSG_MAGIC_V2: {
1064 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1066 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1069 return pb->pb_pre_versions;
1072 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1076 EXPORT_SYMBOL(lustre_msg_get_versions);
1078 __u64 lustre_msg_get_transno(struct lustre_msg *msg)
1080 switch (msg->lm_magic) {
1081 case LUSTRE_MSG_MAGIC_V2: {
1082 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1084 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1087 return pb->pb_transno;
1090 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1094 EXPORT_SYMBOL(lustre_msg_get_transno);
1096 int lustre_msg_get_status(struct lustre_msg *msg)
1098 switch (msg->lm_magic) {
1099 case LUSTRE_MSG_MAGIC_V2: {
1100 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1102 return pb->pb_status;
1103 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1107 /* status might be printed in debug code while message
1112 EXPORT_SYMBOL(lustre_msg_get_status);
1114 __u64 lustre_msg_get_slv(struct lustre_msg *msg)
1116 switch (msg->lm_magic) {
1117 case LUSTRE_MSG_MAGIC_V2: {
1118 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1120 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1126 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1132 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
1134 switch (msg->lm_magic) {
1135 case LUSTRE_MSG_MAGIC_V2: {
1136 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1138 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1145 CERROR("invalid msg magic %x\n", msg->lm_magic);
1150 __u32 lustre_msg_get_limit(struct lustre_msg *msg)
1152 switch (msg->lm_magic) {
1153 case LUSTRE_MSG_MAGIC_V2: {
1154 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1156 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1159 return pb->pb_limit;
1162 CERROR("invalid msg magic %x\n", msg->lm_magic);
1168 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
1170 switch (msg->lm_magic) {
1171 case LUSTRE_MSG_MAGIC_V2: {
1172 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1174 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1177 pb->pb_limit = limit;
1181 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1186 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
1188 switch (msg->lm_magic) {
1189 case LUSTRE_MSG_MAGIC_V2: {
1190 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1192 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1195 return pb->pb_conn_cnt;
1198 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1202 EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
1204 __u32 lustre_msg_get_magic(struct lustre_msg *msg)
1206 switch (msg->lm_magic) {
1207 case LUSTRE_MSG_MAGIC_V2:
1208 return msg->lm_magic;
1210 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1215 __u32 lustre_msg_get_timeout(struct lustre_msg *msg)
1217 switch (msg->lm_magic) {
1218 case LUSTRE_MSG_MAGIC_V2: {
1219 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1221 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1224 return pb->pb_timeout;
1227 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1232 __u32 lustre_msg_get_service_time(struct lustre_msg *msg)
1234 switch (msg->lm_magic) {
1235 case LUSTRE_MSG_MAGIC_V2: {
1236 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1238 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1241 return pb->pb_service_time;
1244 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1249 char *lustre_msg_get_jobid(struct lustre_msg *msg)
1251 switch (msg->lm_magic) {
1252 case LUSTRE_MSG_MAGIC_V2: {
1253 struct ptlrpc_body *pb =
1254 lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1255 sizeof(struct ptlrpc_body));
1259 return pb->pb_jobid;
1262 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1266 EXPORT_SYMBOL(lustre_msg_get_jobid);
1268 __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
1270 switch (msg->lm_magic) {
1271 case LUSTRE_MSG_MAGIC_V2:
1272 return msg->lm_cksum;
1274 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1279 __u64 lustre_msg_get_mbits(struct lustre_msg *msg)
1281 switch (msg->lm_magic) {
1282 case LUSTRE_MSG_MAGIC_V2: {
1283 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1285 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1288 return pb->pb_mbits;
1291 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1296 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
1298 switch (msg->lm_magic) {
1299 case LUSTRE_MSG_MAGIC_V2: {
1300 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1301 __u32 len = lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF);
1303 unsigned int hsize = 4;
1306 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1307 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
1308 len, NULL, 0, (unsigned char *)&crc,
1313 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1318 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
1320 switch (msg->lm_magic) {
1321 case LUSTRE_MSG_MAGIC_V2: {
1322 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1323 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1324 pb->pb_handle = *handle;
1328 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1332 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
1334 switch (msg->lm_magic) {
1335 case LUSTRE_MSG_MAGIC_V2: {
1336 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1337 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1342 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1346 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
1348 switch (msg->lm_magic) {
1349 case LUSTRE_MSG_MAGIC_V2: {
1350 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1351 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1356 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1360 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid)
1362 switch (msg->lm_magic) {
1363 case LUSTRE_MSG_MAGIC_V2: {
1364 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1365 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1366 pb->pb_last_xid = last_xid;
1370 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1373 EXPORT_SYMBOL(lustre_msg_set_last_xid);
1375 void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
1377 switch (msg->lm_magic) {
1378 case LUSTRE_MSG_MAGIC_V2: {
1379 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1380 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1385 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1388 EXPORT_SYMBOL(lustre_msg_set_tag);
1390 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed)
1392 switch (msg->lm_magic) {
1393 case LUSTRE_MSG_MAGIC_V2: {
1394 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1395 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1396 pb->pb_last_committed = last_committed;
1400 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1404 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
1406 switch (msg->lm_magic) {
1407 case LUSTRE_MSG_MAGIC_V2: {
1408 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1409 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1410 pb->pb_pre_versions[0] = versions[0];
1411 pb->pb_pre_versions[1] = versions[1];
1412 pb->pb_pre_versions[2] = versions[2];
1413 pb->pb_pre_versions[3] = versions[3];
1417 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1420 EXPORT_SYMBOL(lustre_msg_set_versions);
1422 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
1424 switch (msg->lm_magic) {
1425 case LUSTRE_MSG_MAGIC_V2: {
1426 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1427 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1428 pb->pb_transno = transno;
1432 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1435 EXPORT_SYMBOL(lustre_msg_set_transno);
1437 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
1439 switch (msg->lm_magic) {
1440 case LUSTRE_MSG_MAGIC_V2: {
1441 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1442 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1443 pb->pb_status = status;
1447 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1450 EXPORT_SYMBOL(lustre_msg_set_status);
1452 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
1454 switch (msg->lm_magic) {
1455 case LUSTRE_MSG_MAGIC_V2: {
1456 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1457 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1458 pb->pb_conn_cnt = conn_cnt;
1462 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1466 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout)
1468 switch (msg->lm_magic) {
1469 case LUSTRE_MSG_MAGIC_V2: {
1470 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1471 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1472 pb->pb_timeout = timeout;
1476 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1480 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time)
1482 switch (msg->lm_magic) {
1483 case LUSTRE_MSG_MAGIC_V2: {
1484 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1485 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1486 pb->pb_service_time = service_time;
1490 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1494 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
1496 switch (msg->lm_magic) {
1497 case LUSTRE_MSG_MAGIC_V2: {
1498 __u32 opc = lustre_msg_get_opc(msg);
1499 struct ptlrpc_body *pb;
1501 /* Don't set the jobid for LDLM AST RPCs, they have been shrunk.
1502 * See the comment in ptlrpc_request_pack(). */
1503 if (!opc || opc == LDLM_BL_CALLBACK ||
1504 opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
1507 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1508 sizeof(struct ptlrpc_body));
1509 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1512 memcpy(pb->pb_jobid, jobid, LUSTRE_JOBID_SIZE);
1513 else if (pb->pb_jobid[0] == '\0')
1514 lustre_get_jobid(pb->pb_jobid);
1518 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1521 EXPORT_SYMBOL(lustre_msg_set_jobid);
1523 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
1525 switch (msg->lm_magic) {
1526 case LUSTRE_MSG_MAGIC_V2:
1527 msg->lm_cksum = cksum;
1530 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1534 void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
1536 switch (msg->lm_magic) {
1537 case LUSTRE_MSG_MAGIC_V2: {
1538 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1540 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1541 pb->pb_mbits = mbits;
1545 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1549 void ptlrpc_request_set_replen(struct ptlrpc_request *req)
1551 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
1553 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
1554 req->rq_pill.rc_area[RCL_SERVER]);
1555 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1556 req->rq_reqmsg->lm_repsize = req->rq_replen;
1558 EXPORT_SYMBOL(ptlrpc_request_set_replen);
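/*
 * Usage sketch (editor's illustration): after declaring the expected reply
 * segment sizes in the request capsule, a client computes the reply buffer
 * length once with ptlrpc_request_set_replen() before sending.  The
 * RMF_MDT_MD field and "ea_size" below are hypothetical.
 */
#if 0
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, ea_size);
	ptlrpc_request_set_replen(req);
#endif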
1560 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *lens)
1562 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens);
1563 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1564 req->rq_reqmsg->lm_repsize = req->rq_replen;
1568 * Send a remote set_info_async.
1570 * This may go from client to server or server to client.
1572 int do_set_info_async(struct obd_import *imp,
1573 int opcode, int version,
1574 size_t keylen, void *key,
1575 size_t vallen, void *val,
1576 struct ptlrpc_request_set *set)
1578 struct ptlrpc_request *req;
1583 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
1587 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
1588 RCL_CLIENT, keylen);
1589 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
1590 RCL_CLIENT, vallen);
1591 rc = ptlrpc_request_pack(req, version, opcode);
1593 ptlrpc_request_free(req);
1597 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1598 memcpy(tmp, key, keylen);
1599 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1600 memcpy(tmp, val, vallen);
1602 ptlrpc_request_set_replen(req);
1605 ptlrpc_set_add_req(set, req);
1606 ptlrpc_check_set(NULL, set);
1608 rc = ptlrpc_queue_wait(req);
1609 ptlrpc_req_finished(req);
1614 EXPORT_SYMBOL(do_set_info_async);
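/*
 * Usage sketch (editor's illustration): with a NULL request set,
 * do_set_info_async() queues the request and waits for it synchronously.
 * The opcode, version and key/val names below are only examples.
 */
#if 0
	rc = do_set_info_async(imp, OST_SET_INFO, LUSTRE_OST_VERSION,
			       strlen(key) + 1, key, sizeof(val), &val, NULL);
#endif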
1616 /* Byte-flipping routines for all wire types declared in
1617 * lustre_idl.h are implemented here.
1619 void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
1621 __swab32s (&b->pb_type);
1622 __swab32s (&b->pb_version);
1623 __swab32s (&b->pb_opc);
1624 __swab32s (&b->pb_status);
1625 __swab64s (&b->pb_last_xid);
1626 __swab16s (&b->pb_tag);
1627 __swab64s (&b->pb_last_committed);
1628 __swab64s (&b->pb_transno);
1629 __swab32s (&b->pb_flags);
1630 __swab32s (&b->pb_op_flags);
1631 __swab32s (&b->pb_conn_cnt);
1632 __swab32s (&b->pb_timeout);
1633 __swab32s (&b->pb_service_time);
1634 __swab32s (&b->pb_limit);
1635 __swab64s (&b->pb_slv);
1636 __swab64s (&b->pb_pre_versions[0]);
1637 __swab64s (&b->pb_pre_versions[1]);
1638 __swab64s (&b->pb_pre_versions[2]);
1639 __swab64s (&b->pb_pre_versions[3]);
1640 __swab64s(&b->pb_mbits);
1641 CLASSERT(offsetof(typeof(*b), pb_padding0) != 0);
1642 CLASSERT(offsetof(typeof(*b), pb_padding1) != 0);
1643 CLASSERT(offsetof(typeof(*b), pb_padding64_0) != 0);
1644 CLASSERT(offsetof(typeof(*b), pb_padding64_1) != 0);
1645 CLASSERT(offsetof(typeof(*b), pb_padding64_2) != 0);
1646 /* While we need to maintain compatibility with
1647 * clients and servers without ptlrpc_body_v2 (< 2.3),
1648 * do not swab any fields beyond pb_jobid, as we are
1649 * using this swab function for both ptlrpc_body
1650 * and ptlrpc_body_v2. */
1651 CLASSERT(offsetof(typeof(*b), pb_jobid) != 0);
1654 void lustre_swab_connect(struct obd_connect_data *ocd)
1656 __swab64s(&ocd->ocd_connect_flags);
1657 __swab32s(&ocd->ocd_version);
1658 __swab32s(&ocd->ocd_grant);
1659 __swab64s(&ocd->ocd_ibits_known);
1660 __swab32s(&ocd->ocd_index);
1661 __swab32s(&ocd->ocd_brw_size);
1662 /* ocd_blocksize and ocd_inodespace don't need to be swabbed because
1663 * they are single-byte values */
1664 __swab16s(&ocd->ocd_grant_tax_kb);
1665 __swab32s(&ocd->ocd_grant_max_blks);
1666 __swab64s(&ocd->ocd_transno);
1667 __swab32s(&ocd->ocd_group);
1668 __swab32s(&ocd->ocd_cksum_types);
1669 __swab32s(&ocd->ocd_instance);
1670 /* Fields after ocd_cksum_types are only accessible by the receiver
1671 * if the corresponding flag in ocd_connect_flags is set. Accessing
1672 * any field after ocd_maxbytes on the receiver without a valid flag
1673 * may result in an out-of-bounds memory access and a kernel oops. */
1674 if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
1675 __swab32s(&ocd->ocd_max_easize);
1676 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
1677 __swab64s(&ocd->ocd_maxbytes);
1678 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1679 __swab16s(&ocd->ocd_maxmodrpcs);
1680 CLASSERT(offsetof(typeof(*ocd), padding0) != 0);
1681 CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
1682 if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
1683 __swab64s(&ocd->ocd_connect_flags2);
1684 CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
1685 CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
1686 CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
1687 CLASSERT(offsetof(typeof(*ocd), padding6) != 0);
1688 CLASSERT(offsetof(typeof(*ocd), padding7) != 0);
1689 CLASSERT(offsetof(typeof(*ocd), padding8) != 0);
1690 CLASSERT(offsetof(typeof(*ocd), padding9) != 0);
1691 CLASSERT(offsetof(typeof(*ocd), paddingA) != 0);
1692 CLASSERT(offsetof(typeof(*ocd), paddingB) != 0);
1693 CLASSERT(offsetof(typeof(*ocd), paddingC) != 0);
1694 CLASSERT(offsetof(typeof(*ocd), paddingD) != 0);
1695 CLASSERT(offsetof(typeof(*ocd), paddingE) != 0);
1696 CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
1699 static void lustre_swab_ost_layout(struct ost_layout *ol)
1701 __swab32s(&ol->ol_stripe_size);
1702 __swab32s(&ol->ol_stripe_count);
1703 __swab64s(&ol->ol_comp_start);
1704 __swab64s(&ol->ol_comp_end);
1705 __swab32s(&ol->ol_comp_id);
1708 void lustre_swab_obdo (struct obdo *o)
1710 __swab64s(&o->o_valid);
1711 lustre_swab_ost_id(&o->o_oi);
1712 __swab64s(&o->o_parent_seq);
1713 __swab64s(&o->o_size);
1714 __swab64s(&o->o_mtime);
1715 __swab64s(&o->o_atime);
1716 __swab64s(&o->o_ctime);
1717 __swab64s(&o->o_blocks);
1718 __swab64s(&o->o_grant);
1719 __swab32s(&o->o_blksize);
1720 __swab32s(&o->o_mode);
1721 __swab32s(&o->o_uid);
1722 __swab32s(&o->o_gid);
1723 __swab32s(&o->o_flags);
1724 __swab32s(&o->o_nlink);
1725 __swab32s(&o->o_parent_oid);
1726 __swab32s(&o->o_misc);
1727 __swab64s(&o->o_ioepoch);
1728 __swab32s(&o->o_stripe_idx);
1729 __swab32s(&o->o_parent_ver);
1730 lustre_swab_ost_layout(&o->o_layout);
1731 __swab32s(&o->o_layout_version);
1732 __swab32s(&o->o_uid_h);
1733 __swab32s(&o->o_gid_h);
1734 __swab64s(&o->o_data_version);
1735 __swab32s(&o->o_projid);
1736 CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
1737 CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
1738 CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
1741 EXPORT_SYMBOL(lustre_swab_obdo);
1743 void lustre_swab_obd_statfs (struct obd_statfs *os)
1745 __swab64s (&os->os_type);
1746 __swab64s (&os->os_blocks);
1747 __swab64s (&os->os_bfree);
1748 __swab64s (&os->os_bavail);
1749 __swab64s (&os->os_files);
1750 __swab64s (&os->os_ffree);
1751 /* no need to swab os_fsid */
1752 __swab32s (&os->os_bsize);
1753 __swab32s (&os->os_namelen);
1754 __swab64s (&os->os_maxbytes);
1755 __swab32s (&os->os_state);
1756 CLASSERT(offsetof(typeof(*os), os_fprecreated) != 0);
1757 CLASSERT(offsetof(typeof(*os), os_spare2) != 0);
1758 CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
1759 CLASSERT(offsetof(typeof(*os), os_spare4) != 0);
1760 CLASSERT(offsetof(typeof(*os), os_spare5) != 0);
1761 CLASSERT(offsetof(typeof(*os), os_spare6) != 0);
1762 CLASSERT(offsetof(typeof(*os), os_spare7) != 0);
1763 CLASSERT(offsetof(typeof(*os), os_spare8) != 0);
1764 CLASSERT(offsetof(typeof(*os), os_spare9) != 0);
1767 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
1769 lustre_swab_ost_id(&ioo->ioo_oid);
1770 __swab32s(&ioo->ioo_max_brw);
1771 __swab32s(&ioo->ioo_bufcnt);
1774 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
1776 __swab64s(&nbr->rnb_offset);
1777 __swab32s(&nbr->rnb_len);
1778 __swab32s(&nbr->rnb_flags);
1781 void lustre_swab_ost_body (struct ost_body *b)
1783 lustre_swab_obdo (&b->oa);
1786 void lustre_swab_ost_last_id(u64 *id)
1791 void lustre_swab_generic_32s(__u32 *val)
1796 void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *desc)
1798 lustre_swab_lu_fid(&desc->gl_id.qid_fid);
1799 __swab64s(&desc->gl_flags);
1800 __swab64s(&desc->gl_ver);
1801 __swab64s(&desc->gl_hardlimit);
1802 __swab64s(&desc->gl_softlimit);
1803 __swab64s(&desc->gl_time);
1804 CLASSERT(offsetof(typeof(*desc), gl_pad2) != 0);
1806 EXPORT_SYMBOL(lustre_swab_gl_lquota_desc);
1808 void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *desc)
1810 __swab32s(&desc->lgbd_status);
1811 __swab32s(&desc->lgbd_timeout);
1812 CLASSERT(offsetof(typeof(*desc), lgbd_padding) != 0);
1814 EXPORT_SYMBOL(lustre_swab_gl_barrier_desc);
1816 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
1818 __swab64s(&lvb->lvb_size);
1819 __swab64s(&lvb->lvb_mtime);
1820 __swab64s(&lvb->lvb_atime);
1821 __swab64s(&lvb->lvb_ctime);
1822 __swab64s(&lvb->lvb_blocks);
1824 EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
1826 void lustre_swab_ost_lvb(struct ost_lvb *lvb)
1828 __swab64s(&lvb->lvb_size);
1829 __swab64s(&lvb->lvb_mtime);
1830 __swab64s(&lvb->lvb_atime);
1831 __swab64s(&lvb->lvb_ctime);
1832 __swab64s(&lvb->lvb_blocks);
1833 __swab32s(&lvb->lvb_mtime_ns);
1834 __swab32s(&lvb->lvb_atime_ns);
1835 __swab32s(&lvb->lvb_ctime_ns);
1836 __swab32s(&lvb->lvb_padding);
1838 EXPORT_SYMBOL(lustre_swab_ost_lvb);
1840 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
1842 __swab64s(&lvb->lvb_flags);
1843 __swab64s(&lvb->lvb_id_may_rel);
1844 __swab64s(&lvb->lvb_id_rel);
1845 __swab64s(&lvb->lvb_id_qunit);
1846 __swab64s(&lvb->lvb_pad1);
1848 EXPORT_SYMBOL(lustre_swab_lquota_lvb);
1850 void lustre_swab_barrier_lvb(struct barrier_lvb *lvb)
1852 __swab32s(&lvb->lvb_status);
1853 __swab32s(&lvb->lvb_index);
1854 CLASSERT(offsetof(typeof(*lvb), lvb_padding) != 0);
1856 EXPORT_SYMBOL(lustre_swab_barrier_lvb);
1858 void lustre_swab_mdt_body (struct mdt_body *b)
1860 lustre_swab_lu_fid(&b->mbo_fid1);
1861 lustre_swab_lu_fid(&b->mbo_fid2);
1862 /* handle is opaque */
1863 __swab64s(&b->mbo_valid);
1864 __swab64s(&b->mbo_size);
1865 __swab64s(&b->mbo_mtime);
1866 __swab64s(&b->mbo_atime);
1867 __swab64s(&b->mbo_ctime);
1868 __swab64s(&b->mbo_blocks);
1869 __swab64s(&b->mbo_ioepoch);
1870 __swab64s(&b->mbo_t_state);
1871 __swab32s(&b->mbo_fsuid);
1872 __swab32s(&b->mbo_fsgid);
1873 __swab32s(&b->mbo_capability);
1874 __swab32s(&b->mbo_mode);
1875 __swab32s(&b->mbo_uid);
1876 __swab32s(&b->mbo_gid);
1877 __swab32s(&b->mbo_flags);
1878 __swab32s(&b->mbo_rdev);
1879 __swab32s(&b->mbo_nlink);
1880 CLASSERT(offsetof(typeof(*b), mbo_unused2) != 0);
1881 __swab32s(&b->mbo_suppgid);
1882 __swab32s(&b->mbo_eadatasize);
1883 __swab32s(&b->mbo_aclsize);
1884 __swab32s(&b->mbo_max_mdsize);
1885 CLASSERT(offsetof(typeof(*b), mbo_unused3) != 0);
1886 __swab32s(&b->mbo_uid_h);
1887 __swab32s(&b->mbo_gid_h);
1888 __swab32s(&b->mbo_projid);
1889 __swab64s(&b->mbo_dom_size);
1890 __swab64s(&b->mbo_dom_blocks);
1891 CLASSERT(offsetof(typeof(*b), mbo_padding_8) != 0);
1892 CLASSERT(offsetof(typeof(*b), mbo_padding_9) != 0);
1893 CLASSERT(offsetof(typeof(*b), mbo_padding_10) != 0);
1896 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
1898 /* mio_handle is opaque */
1899 CLASSERT(offsetof(typeof(*b), mio_unused1) != 0);
1900 CLASSERT(offsetof(typeof(*b), mio_unused2) != 0);
1901 CLASSERT(offsetof(typeof(*b), mio_padding) != 0);
1904 void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
1908 __swab32s(&mti->mti_lustre_ver);
1909 __swab32s(&mti->mti_stripe_index);
1910 __swab32s(&mti->mti_config_ver);
1911 __swab32s(&mti->mti_flags);
1912 __swab32s(&mti->mti_instance);
1913 __swab32s(&mti->mti_nid_count);
1914 CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
1915 for (i = 0; i < MTI_NIDS_MAX; i++)
1916 __swab64s(&mti->mti_nids[i]);
1919 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
1923 __swab64s(&entry->mne_version);
1924 __swab32s(&entry->mne_instance);
1925 __swab32s(&entry->mne_index);
1926 __swab32s(&entry->mne_length);
1928 /* mne_nid_(count|type) must be a single byte because we are going to
1929 * access them without swabbing. */
1930 CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
1931 CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
1933 /* Remove this assertion once IPv6 is supported. */
1934 LASSERT(entry->mne_nid_type == 0);
1935 for (i = 0; i < entry->mne_nid_count; i++) {
1936 CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
1937 __swab64s(&entry->u.nids[i]);
1940 EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
1942 void lustre_swab_mgs_config_body(struct mgs_config_body *body)
1944 __swab64s(&body->mcb_offset);
1945 __swab32s(&body->mcb_units);
1946 __swab16s(&body->mcb_type);
1949 void lustre_swab_mgs_config_res(struct mgs_config_res *body)
1951 __swab64s(&body->mcr_offset);
1952 __swab64s(&body->mcr_size);
1955 static void lustre_swab_obd_dqinfo (struct obd_dqinfo *i)
1957 __swab64s (&i->dqi_bgrace);
1958 __swab64s (&i->dqi_igrace);
1959 __swab32s (&i->dqi_flags);
1960 __swab32s (&i->dqi_valid);
1963 static void lustre_swab_obd_dqblk (struct obd_dqblk *b)
1965 __swab64s (&b->dqb_ihardlimit);
1966 __swab64s (&b->dqb_isoftlimit);
1967 __swab64s (&b->dqb_curinodes);
1968 __swab64s (&b->dqb_bhardlimit);
1969 __swab64s (&b->dqb_bsoftlimit);
1970 __swab64s (&b->dqb_curspace);
1971 __swab64s (&b->dqb_btime);
1972 __swab64s (&b->dqb_itime);
1973 __swab32s (&b->dqb_valid);
1974 CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
1977 void lustre_swab_obd_quotactl (struct obd_quotactl *q)
1979 __swab32s (&q->qc_cmd);
1980 __swab32s (&q->qc_type);
1981 __swab32s (&q->qc_id);
1982 __swab32s (&q->qc_stat);
1983 lustre_swab_obd_dqinfo (&q->qc_dqinfo);
1984 lustre_swab_obd_dqblk (&q->qc_dqblk);
1987 void lustre_swab_fid2path(struct getinfo_fid2path *gf)
1989 lustre_swab_lu_fid(&gf->gf_fid);
1990 __swab64s(&gf->gf_recno);
1991 __swab32s(&gf->gf_linkno);
1992 __swab32s(&gf->gf_pathlen);
1994 EXPORT_SYMBOL(lustre_swab_fid2path);
1996 static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
1998 __swab64s(&fm_extent->fe_logical);
1999 __swab64s(&fm_extent->fe_physical);
2000 __swab64s(&fm_extent->fe_length);
2001 __swab32s(&fm_extent->fe_flags);
2002 __swab32s(&fm_extent->fe_device);
2005 void lustre_swab_fiemap(struct fiemap *fiemap)
2009 __swab64s(&fiemap->fm_start);
2010 __swab64s(&fiemap->fm_length);
2011 __swab32s(&fiemap->fm_flags);
2012 __swab32s(&fiemap->fm_mapped_extents);
2013 __swab32s(&fiemap->fm_extent_count);
2014 __swab32s(&fiemap->fm_reserved);
2016 for (i = 0; i < fiemap->fm_mapped_extents; i++)
2017 lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
2020 void lustre_swab_idx_info(struct idx_info *ii)
2022 __swab32s(&ii->ii_magic);
2023 __swab32s(&ii->ii_flags);
2024 __swab16s(&ii->ii_count);
2025 __swab32s(&ii->ii_attrs);
2026 lustre_swab_lu_fid(&ii->ii_fid);
2027 __swab64s(&ii->ii_version);
2028 __swab64s(&ii->ii_hash_start);
2029 __swab64s(&ii->ii_hash_end);
2030 __swab16s(&ii->ii_keysize);
2031 __swab16s(&ii->ii_recsize);
2034 void lustre_swab_lip_header(struct lu_idxpage *lip)
2037 __swab32s(&lip->lip_magic);
2038 __swab16s(&lip->lip_flags);
2039 __swab16s(&lip->lip_nr);
2041 EXPORT_SYMBOL(lustre_swab_lip_header);
2043 void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
2045 __swab32s(&rr->rr_opcode);
2046 __swab32s(&rr->rr_cap);
2047 __swab32s(&rr->rr_fsuid);
2048 /* rr_fsuid_h is unused */
2049 __swab32s(&rr->rr_fsgid);
2050 /* rr_fsgid_h is unused */
2051 __swab32s(&rr->rr_suppgid1);
2052 /* rr_suppgid1_h is unused */
2053 __swab32s(&rr->rr_suppgid2);
2054 /* rr_suppgid2_h is unused */
2055 lustre_swab_lu_fid(&rr->rr_fid1);
2056 lustre_swab_lu_fid(&rr->rr_fid2);
2057 __swab64s(&rr->rr_mtime);
2058 __swab64s(&rr->rr_atime);
2059 __swab64s(&rr->rr_ctime);
2060 __swab64s(&rr->rr_size);
2061 __swab64s(&rr->rr_blocks);
2062 __swab32s(&rr->rr_bias);
2063 __swab32s(&rr->rr_mode);
2064 __swab32s(&rr->rr_flags);
2065 __swab32s(&rr->rr_flags_h);
2066 __swab32s(&rr->rr_umask);
2068 CLASSERT(offsetof(typeof(*rr), rr_padding_4) != 0);
2071 void lustre_swab_lov_desc (struct lov_desc *ld)
2073 __swab32s (&ld->ld_tgt_count);
2074 __swab32s (&ld->ld_active_tgt_count);
2075 __swab32s (&ld->ld_default_stripe_count);
2076 __swab32s (&ld->ld_pattern);
2077 __swab64s (&ld->ld_default_stripe_size);
2078 __swab64s (&ld->ld_default_stripe_offset);
2079 __swab32s (&ld->ld_qos_maxage);
2080 /* uuid endian insensitive */
2082 EXPORT_SYMBOL(lustre_swab_lov_desc);
2084 void lustre_swab_lmv_desc (struct lmv_desc *ld)
2086 __swab32s (&ld->ld_tgt_count);
2087 __swab32s (&ld->ld_active_tgt_count);
2088 __swab32s (&ld->ld_default_stripe_count);
2089 __swab32s (&ld->ld_pattern);
2090 __swab64s (&ld->ld_default_hash_size);
2091 __swab32s (&ld->ld_qos_maxage);
2092 /* uuid endian insensitive */
2095 /* This structure is always in little-endian */
2096 static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
2100 __swab32s(&lmm1->lmv_magic);
2101 __swab32s(&lmm1->lmv_stripe_count);
2102 __swab32s(&lmm1->lmv_master_mdt_index);
2103 __swab32s(&lmm1->lmv_hash_type);
2104 __swab32s(&lmm1->lmv_layout_version);
2105 for (i = 0; i < lmm1->lmv_stripe_count; i++)
2106 lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
2109 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
2111 switch (lmm->lmv_magic) {
2113 lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
2119 EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
2121 void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
2123 __swab32s(&lum->lum_magic);
2124 __swab32s(&lum->lum_stripe_count);
2125 __swab32s(&lum->lum_stripe_offset);
2126 __swab32s(&lum->lum_hash_type);
2127 __swab32s(&lum->lum_type);
2128 CLASSERT(offsetof(typeof(*lum), lum_padding1) != 0);
2130 EXPORT_SYMBOL(lustre_swab_lmv_user_md);
2132 static void lustre_print_v1v3(unsigned int lvl, struct lov_user_md *lum,
2135 CDEBUG(lvl, "%s lov_user_md %p:\n", msg, lum);
2136 CDEBUG(lvl, "\tlmm_magic: %#x\n", lum->lmm_magic);
2137 CDEBUG(lvl, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
2138 CDEBUG(lvl, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
2139 CDEBUG(lvl, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
2140 CDEBUG(lvl, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
2141 CDEBUG(lvl, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
2142 CDEBUG(lvl, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
2143 lum->lmm_stripe_offset);
2144 if (lum->lmm_magic == LOV_USER_MAGIC_V3) {
2145 struct lov_user_md_v3 *v3 = (void *)lum;
2146 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2148 if (lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2149 struct lov_user_md_v3 *v3 = (void *)lum;
2152 if (v3->lmm_pool_name[0] != '\0')
2153 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2155 CDEBUG(lvl, "\ttarget list:\n");
2156 for (i = 0; i < v3->lmm_stripe_count; i++)
2157 CDEBUG(lvl, "\t\t%u\n", v3->lmm_objects[i].l_ost_idx);
2161 void lustre_print_user_md(unsigned int lvl, struct lov_user_md *lum,
2164 struct lov_comp_md_v1 *comp_v1;
2167 if (likely(!cfs_cdebug_show(lvl, DEBUG_SUBSYSTEM)))
2170 if (lum->lmm_magic == LOV_USER_MAGIC_V1 ||
2171 lum->lmm_magic == LOV_USER_MAGIC_V3) {
2172 lustre_print_v1v3(lvl, lum, msg);
2176 if (lum->lmm_magic != LOV_USER_MAGIC_COMP_V1) {
2177 CDEBUG(lvl, "%s: bad magic: %x\n", msg, lum->lmm_magic);
2181 comp_v1 = (struct lov_comp_md_v1 *)lum;
2182 CDEBUG(lvl, "%s: lov_comp_md_v1 %p:\n", msg, lum);
2183 CDEBUG(lvl, "\tlcm_magic: %#x\n", comp_v1->lcm_magic);
2184 CDEBUG(lvl, "\tlcm_size: %#x\n", comp_v1->lcm_size);
2185 CDEBUG(lvl, "\tlcm_layout_gen: %#x\n", comp_v1->lcm_layout_gen);
2186 CDEBUG(lvl, "\tlcm_flags: %#x\n", comp_v1->lcm_flags);
2187 CDEBUG(lvl, "\tlcm_entry_count: %#x\n\n", comp_v1->lcm_entry_count);
2188 CDEBUG(lvl, "\tlcm_mirror_count: %#x\n\n", comp_v1->lcm_mirror_count);
2190 for (i = 0; i < comp_v1->lcm_entry_count; i++) {
2191 struct lov_comp_md_entry_v1 *ent = &comp_v1->lcm_entries[i];
2192 struct lov_user_md *v1;
2194 CDEBUG(lvl, "\tentry %d:\n", i);
2195 CDEBUG(lvl, "\tlcme_id: %#x\n", ent->lcme_id);
2196 CDEBUG(lvl, "\tlcme_flags: %#x\n", ent->lcme_flags);
2197 CDEBUG(lvl, "\tlcme_extent.e_start: %llu\n",
2198 ent->lcme_extent.e_start);
2199 CDEBUG(lvl, "\tlcme_extent.e_end: %llu\n",
2200 ent->lcme_extent.e_end);
2201 CDEBUG(lvl, "\tlcme_offset: %#x\n", ent->lcme_offset);
2202 CDEBUG(lvl, "\tlcme_size: %#x\n\n", ent->lcme_size);
2204 v1 = (struct lov_user_md *)((char *)comp_v1 +
2205 comp_v1->lcm_entries[i].lcme_offset);
2206 lustre_print_v1v3(lvl, v1, msg);
2209 EXPORT_SYMBOL(lustre_print_user_md);
2211 static void lustre_swab_lmm_oi(struct ost_id *oi)
2213 __swab64s(&oi->oi.oi_id);
2214 __swab64s(&oi->oi.oi_seq);
2217 static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
2220 __swab32s(&lum->lmm_magic);
2221 __swab32s(&lum->lmm_pattern);
2222 lustre_swab_lmm_oi(&lum->lmm_oi);
2223 __swab32s(&lum->lmm_stripe_size);
2224 __swab16s(&lum->lmm_stripe_count);
2225 __swab16s(&lum->lmm_stripe_offset);
2229 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
2232 CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
2233 lustre_swab_lov_user_md_common(lum);
2236 EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
2238 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
2241 CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
2242 lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
2243 /* lmm_pool_name is a char array and does not need swabbing */
2246 EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
void lustre_swab_lov_comp_md_v1(struct lov_comp_md_v1 *lum)
{
        struct lov_comp_md_entry_v1 *ent;
        struct lov_user_md_v1 *v1;
        struct lov_user_md_v3 *v3;
        int i;
        bool cpu_endian;
        __u32 off, size;
        __u16 ent_count, stripe_count;
        ENTRY;

        cpu_endian = lum->lcm_magic == LOV_USER_MAGIC_COMP_V1;
        ent_count = lum->lcm_entry_count;
        if (!cpu_endian)
                __swab16s(&ent_count);

        CDEBUG(D_IOCTL, "swabbing lov_user_comp_md v1\n");
        __swab32s(&lum->lcm_magic);
        __swab32s(&lum->lcm_size);
        __swab32s(&lum->lcm_layout_gen);
        __swab16s(&lum->lcm_flags);
        __swab16s(&lum->lcm_entry_count);
        __swab16s(&lum->lcm_mirror_count);
        CLASSERT(offsetof(typeof(*lum), lcm_padding1) != 0);
        CLASSERT(offsetof(typeof(*lum), lcm_padding2) != 0);

        for (i = 0; i < ent_count; i++) {
                ent = &lum->lcm_entries[i];
                off = ent->lcme_offset;
                size = ent->lcme_size;

                if (!cpu_endian) {
                        __swab32s(&off);
                        __swab32s(&size);
                }
                __swab32s(&ent->lcme_id);
                __swab32s(&ent->lcme_flags);
                __swab64s(&ent->lcme_extent.e_start);
                __swab64s(&ent->lcme_extent.e_end);
                __swab32s(&ent->lcme_offset);
                __swab32s(&ent->lcme_size);
                CLASSERT(offsetof(typeof(*ent), lcme_padding) != 0);

                v1 = (struct lov_user_md_v1 *)((char *)lum + off);
                stripe_count = v1->lmm_stripe_count;
                if (!cpu_endian)
                        __swab16s(&stripe_count);

                if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V1) ||
                    v1->lmm_magic == LOV_USER_MAGIC_V1) {
                        lustre_swab_lov_user_md_v1(v1);
                        if (size > sizeof(*v1))
                                lustre_swab_lov_user_md_objects(v1->lmm_objects,
                                                                stripe_count);
                } else if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V3) ||
                           v1->lmm_magic == LOV_USER_MAGIC_V3 ||
                           v1->lmm_magic == __swab32(LOV_USER_MAGIC_SPECIFIC) ||
                           v1->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
                        v3 = (struct lov_user_md_v3 *)v1;
                        lustre_swab_lov_user_md_v3(v3);
                        if (size > sizeof(*v3))
                                lustre_swab_lov_user_md_objects(v3->lmm_objects,
                                                                stripe_count);
                } else {
                        CERROR("Invalid magic %#x\n", v1->lmm_magic);
                }
        }
}
EXPORT_SYMBOL(lustre_swab_lov_comp_md_v1);

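/*
 * Note on lustre_swab_lov_comp_md_v1(): the layout it is given may be in
 * either byte order (converting a foreign buffer to host order, or a
 * host-order buffer for the wire).  The entry count, offsets and sizes
 * are therefore copied into locals and swabbed only when lcm_magic is
 * not already in CPU order, so the loop always walks the buffer with
 * host-order values whichever direction the conversion runs.
 */
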
void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
{
        ENTRY;
        CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
        __swab32s(&lmm->lmm_magic);
        __swab32s(&lmm->lmm_pattern);
        lustre_swab_lmm_oi(&lmm->lmm_oi);
        __swab32s(&lmm->lmm_stripe_size);
        __swab16s(&lmm->lmm_stripe_count);
        __swab16s(&lmm->lmm_layout_gen);
        EXIT;
}
EXPORT_SYMBOL(lustre_swab_lov_mds_md);

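/*
 * lov_mds_md is the layout as stored and sent by the MDS, while the
 * lov_user_md variants above are the user-visible forms; their leading
 * fields largely mirror each other, which is why the swabbing routines
 * look alike.
 */
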
void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
                                     int stripe_count)
{
        int i;

        for (i = 0; i < stripe_count; i++) {
                lustre_swab_ost_id(&lod[i].l_ost_oi);
                __swab32s(&lod[i].l_ost_gen);
                __swab32s(&lod[i].l_ost_idx);
        }
}
EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);

void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
{
        int i;

        for (i = 0; i < RES_NAME_SIZE; i++)
                __swab64s(&id->name[i]);
}

void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
{
        /* the lock data is a union and the first two fields are always an
         * extent, so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
         * data the same way.
         */
        __swab64s(&d->l_extent.start);
        __swab64s(&d->l_extent.end);
        __swab64s(&d->l_extent.gid);
        __swab64s(&d->l_flock.lfw_owner);
        __swab32s(&d->l_flock.lfw_pid);
}

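/*
 * Since the inodebits variant of the union overlays the same leading
 * 64-bit words, its bit masks end up swabbed by the extent fields above
 * as well.
 */
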
void lustre_swab_ldlm_intent(struct ldlm_intent *i)
{
        __swab64s(&i->opc);
}

void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
{
        __swab32s(&r->lr_type);
        CLASSERT(offsetof(typeof(*r), lr_pad) != 0);
        lustre_swab_ldlm_res_id(&r->lr_name);
}

void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
{
        lustre_swab_ldlm_resource_desc(&l->l_resource);
        __swab32s(&l->l_req_mode);
        __swab32s(&l->l_granted_mode);
        lustre_swab_ldlm_policy_data(&l->l_policy_data);
}

void lustre_swab_ldlm_request(struct ldlm_request *rq)
{
        __swab32s(&rq->lock_flags);
        lustre_swab_ldlm_lock_desc(&rq->lock_desc);
        __swab32s(&rq->lock_count);
        /* lock_handle[] opaque */
}

void lustre_swab_ldlm_reply(struct ldlm_reply *r)
{
        __swab32s(&r->lock_flags);
        CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
        lustre_swab_ldlm_lock_desc(&r->lock_desc);
        /* lock_handle opaque */
        __swab64s(&r->lock_policy_res1);
        __swab64s(&r->lock_policy_res2);
}

void lustre_swab_quota_body(struct quota_body *b)
{
        lustre_swab_lu_fid(&b->qb_fid);
        lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
        __swab32s(&b->qb_flags);
        __swab64s(&b->qb_count);
        __swab64s(&b->qb_usage);
        __swab64s(&b->qb_slv_ver);
}

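/*
 * qb_id is a union that is sized and laid out as a struct lu_fid, hence
 * the cast to lu_fid for swabbing above.
 */
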
/* Dump functions */
void dump_ioo(struct obd_ioobj *ioo)
{
        CDEBUG(D_RPCTRACE,
               "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
               "ioo_bufcnt=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
               ioo->ioo_bufcnt);
}

void dump_rniobuf(struct niobuf_remote *nb)
{
        CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
               nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
}

void dump_obdo(struct obdo *oa)
{
        u64 valid = oa->o_valid;

        CDEBUG(D_RPCTRACE, "obdo: o_valid = %#llx\n", valid);
        if (valid & OBD_MD_FLID)
                CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
        if (valid & OBD_MD_FLFID)
                CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
                       oa->o_parent_seq);
        if (valid & OBD_MD_FLSIZE)
                CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
        if (valid & OBD_MD_FLMTIME)
                CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
        if (valid & OBD_MD_FLATIME)
                CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
        if (valid & OBD_MD_FLCTIME)
                CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
        if (valid & OBD_MD_FLBLOCKS)    /* allocation of space */
                CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
        if (valid & OBD_MD_FLGRANT)
                CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
        if (valid & OBD_MD_FLBLKSZ)
                CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
        if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
                CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
                       oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
                                     (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
        if (valid & OBD_MD_FLUID)
                CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
        if (valid & OBD_MD_FLUID)
                CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
        if (valid & OBD_MD_FLGID)
                CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
        if (valid & OBD_MD_FLGID)
                CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
        if (valid & OBD_MD_FLFLAGS)
                CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
        if (valid & OBD_MD_FLNLINK)
                CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
        else if (valid & OBD_MD_FLCKSUM)
                CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
                       oa->o_nlink);
        if (valid & OBD_MD_FLGENER)
                CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
                       oa->o_parent_oid);
        if (valid & OBD_MD_FLEPOCH)
                CDEBUG(D_RPCTRACE, "obdo: o_ioepoch = %lld\n",
                       oa->o_ioepoch);
        if (valid & OBD_MD_FLFID) {
                CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
                       oa->o_stripe_idx);
                CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
                       oa->o_parent_ver);
        }
        if (valid & OBD_MD_FLHANDLE)
                CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
                       oa->o_handle.cookie);
}

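/*
 * The o_mode line above masks the printed value so that only the bits the
 * peer declared valid appear: the S_IFMT file-type bits when OBD_MD_FLTYPE
 * is set, and the permission bits when OBD_MD_FLMODE is set.
 */
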
void dump_ost_body(struct ost_body *ob)
{
        dump_obdo(&ob->oa);
}

void dump_rcs(__u32 *rc)
{
        CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
}

static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
{
        LASSERT(req->rq_reqmsg);

        switch (req->rq_reqmsg->lm_magic) {
        case LUSTRE_MSG_MAGIC_V2:
                return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
        default:
                CERROR("bad lustre msg magic: %#08X\n",
                       req->rq_reqmsg->lm_magic);
        }
        return 0;
}

static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
{
        if (unlikely(!req->rq_repmsg))
                return -1;

        switch (req->rq_repmsg->lm_magic) {
        case LUSTRE_MSG_MAGIC_V2:
                return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF);
        default:
                /* uninitialized yet */
                return -1;
        }
}

void _debug_req(struct ptlrpc_request *req,
                struct libcfs_debug_msg_data *msgdata, const char *fmt, ...)
{
        bool req_ok = req->rq_reqmsg != NULL;
        bool rep_ok = false;
        lnet_nid_t nid = LNET_NID_ANY;
        va_list args;
        int rep_flags = -1;
        int rep_status = -1;

        spin_lock(&req->rq_early_free_lock);
        if (req->rq_repmsg)
                rep_ok = true;

        if (ptlrpc_req_need_swab(req)) {
                req_ok = req_ok && req_ptlrpc_body_swabbed(req);
                rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
        }

        if (rep_ok) {
                rep_flags = lustre_msg_get_flags(req->rq_repmsg);
                rep_status = lustre_msg_get_status(req->rq_repmsg);
        }
        spin_unlock(&req->rq_early_free_lock);

        if (req->rq_import && req->rq_import->imp_connection)
                nid = req->rq_import->imp_connection->c_peer.nid;
        else if (req->rq_export && req->rq_export->exp_connection)
                nid = req->rq_export->exp_connection->c_peer.nid;

        va_start(args, fmt);
        libcfs_debug_vmsg2(msgdata, fmt, args,
                           " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
                           req, req->rq_xid, req->rq_transno,
                           req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
                           req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
                           req->rq_import ?
                           req->rq_import->imp_obd->obd_name :
                           req->rq_export ?
                           req->rq_export->exp_client_uuid.uuid :
                           "<?>",
                           libcfs_nid2str(nid),
                           req->rq_request_portal, req->rq_reply_portal,
                           req->rq_reqlen, req->rq_replen,
                           req->rq_early_count, (s64)req->rq_timedout,
                           (s64)req->rq_deadline,
                           atomic_read(&req->rq_refcount),
                           DEBUG_REQ_FLAGS(req),
                           req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
                           rep_flags, req->rq_status, rep_status);
        va_end(args);
}
EXPORT_SYMBOL(_debug_req);

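/*
 * _debug_req() is normally reached through the DEBUG_REQ() macro rather
 * than called directly, e.g.
 *
 *	DEBUG_REQ(D_ERROR, req, "unexpected reply flags");
 *
 * which builds the libcfs_debug_msg_data for the current file and line and
 * passes the format string and arguments through to this function.
 */
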
void lustre_swab_lustre_capa(struct lustre_capa *c)
{
        lustre_swab_lu_fid(&c->lc_fid);
        __swab64s(&c->lc_opc);
        __swab64s(&c->lc_uid);
        __swab64s(&c->lc_gid);
        __swab32s(&c->lc_flags);
        __swab32s(&c->lc_keyid);
        __swab32s(&c->lc_timeout);
        __swab32s(&c->lc_expiry);
}

void lustre_swab_lustre_capa_key(struct lustre_capa_key *k)
{
        __swab64s(&k->lk_seq);
        __swab32s(&k->lk_keyid);
        CLASSERT(offsetof(typeof(*k), lk_padding) != 0);
}

void lustre_swab_hsm_user_state(struct hsm_user_state *state)
{
        __swab32s(&state->hus_states);
        __swab32s(&state->hus_archive_id);
}

void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
{
        __swab32s(&hss->hss_valid);
        __swab64s(&hss->hss_setmask);
        __swab64s(&hss->hss_clearmask);
        __swab32s(&hss->hss_archive_id);
}

static void lustre_swab_hsm_extent(struct hsm_extent *extent)
{
        __swab64s(&extent->offset);
        __swab64s(&extent->length);
}

void lustre_swab_hsm_current_action(struct hsm_current_action *action)
{
        __swab32s(&action->hca_state);
        __swab32s(&action->hca_action);
        lustre_swab_hsm_extent(&action->hca_location);
}

void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
{
        lustre_swab_lu_fid(&hui->hui_fid);
        lustre_swab_hsm_extent(&hui->hui_extent);
}

void lustre_swab_lu_extent(struct lu_extent *le)
{
        __swab64s(&le->e_start);
        __swab64s(&le->e_end);
}

void lustre_swab_layout_intent(struct layout_intent *li)
{
        __swab32s(&li->li_opc);
        __swab32s(&li->li_flags);
        lustre_swab_lu_extent(&li->li_extent);
}

void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
{
        lustre_swab_lu_fid(&hpk->hpk_fid);
        __swab64s(&hpk->hpk_cookie);
        __swab64s(&hpk->hpk_extent.offset);
        __swab64s(&hpk->hpk_extent.length);
        __swab16s(&hpk->hpk_flags);
        __swab16s(&hpk->hpk_errval);
}

void lustre_swab_hsm_request(struct hsm_request *hr)
{
        __swab32s(&hr->hr_action);
        __swab32s(&hr->hr_archive_id);
        __swab64s(&hr->hr_flags);
        __swab32s(&hr->hr_itemcount);
        __swab32s(&hr->hr_data_len);
}

void lustre_swab_object_update(struct object_update *ou)
{
        struct object_update_param *param;
        size_t i;

        __swab16s(&ou->ou_type);
        __swab16s(&ou->ou_params_count);
        __swab32s(&ou->ou_result_size);
        __swab32s(&ou->ou_flags);
        __swab32s(&ou->ou_padding1);
        __swab64s(&ou->ou_batchid);
        lustre_swab_lu_fid(&ou->ou_fid);
        param = &ou->ou_params[0];
        for (i = 0; i < ou->ou_params_count; i++) {
                __swab16s(&param->oup_len);
                __swab16s(&param->oup_padding);
                __swab32s(&param->oup_padding2);
                param = (struct object_update_param *)((char *)param +
                                object_update_param_size(param));
        }
}

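/*
 * The parameter walk above advances by object_update_param_size(), which
 * reads oup_len after it has just been swabbed; the routine therefore
 * effectively assumes it is converting a foreign-order buffer to host
 * order (the receive path), as do the request/reply swabbers below.
 */
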
void lustre_swab_object_update_request(struct object_update_request *our)
{
        size_t i;

        __swab32s(&our->ourq_magic);
        __swab16s(&our->ourq_count);
        __swab16s(&our->ourq_padding);
        for (i = 0; i < our->ourq_count; i++) {
                struct object_update *ou;

                ou = object_update_request_get(our, i, NULL);
                if (ou == NULL)
                        return;
                lustre_swab_object_update(ou);
        }
}

void lustre_swab_object_update_result(struct object_update_result *our)
{
        __swab32s(&our->our_rc);
        __swab16s(&our->our_datalen);
        __swab16s(&our->our_padding);
}

void lustre_swab_object_update_reply(struct object_update_reply *our)
{
        size_t i;

        __swab32s(&our->ourp_magic);
        __swab16s(&our->ourp_count);
        __swab16s(&our->ourp_padding);
        for (i = 0; i < our->ourp_count; i++) {
                struct object_update_result *ourp;

                __swab16s(&our->ourp_lens[i]);
                ourp = object_update_result_get(our, i, NULL);
                if (ourp == NULL)
                        return;
                lustre_swab_object_update_result(ourp);
        }
}

void lustre_swab_out_update_header(struct out_update_header *ouh)
{
        __swab32s(&ouh->ouh_magic);
        __swab32s(&ouh->ouh_count);
        __swab32s(&ouh->ouh_inline_length);
        __swab32s(&ouh->ouh_reply_size);
}
EXPORT_SYMBOL(lustre_swab_out_update_header);

void lustre_swab_out_update_buffer(struct out_update_buffer *oub)
{
        __swab32s(&oub->oub_size);
        __swab32s(&oub->oub_padding);
}
EXPORT_SYMBOL(lustre_swab_out_update_buffer);

void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
{
        __swab64s(&msl->msl_flags);
}

void lustre_swab_close_data(struct close_data *cd)
{
        lustre_swab_lu_fid(&cd->cd_fid);
        __swab64s(&cd->cd_data_version);
}

void lustre_swab_lfsck_request(struct lfsck_request *lr)
{
        __swab32s(&lr->lr_event);
        __swab32s(&lr->lr_index);
        __swab32s(&lr->lr_flags);
        __swab32s(&lr->lr_valid);
        __swab32s(&lr->lr_speed);
        __swab16s(&lr->lr_version);
        __swab16s(&lr->lr_active);
        __swab16s(&lr->lr_param);
        __swab16s(&lr->lr_async_windows);
        __swab32s(&lr->lr_flags);
        lustre_swab_lu_fid(&lr->lr_fid);
        lustre_swab_lu_fid(&lr->lr_fid2);
        __swab32s(&lr->lr_comp_id);
        CLASSERT(offsetof(typeof(*lr), lr_padding_0) != 0);
        CLASSERT(offsetof(typeof(*lr), lr_padding_1) != 0);
        CLASSERT(offsetof(typeof(*lr), lr_padding_2) != 0);
        CLASSERT(offsetof(typeof(*lr), lr_padding_3) != 0);
}

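/*
 * The CLASSERT() lines above are compile-time checks that the padding
 * fields still exist in struct lfsck_request; the padding itself does not
 * need to be swabbed.
 */
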
void lustre_swab_lfsck_reply(struct lfsck_reply *lr)
{
        __swab32s(&lr->lr_status);
        CLASSERT(offsetof(typeof(*lr), lr_padding_1) != 0);
        __swab64s(&lr->lr_repaired);
}

static void lustre_swab_orphan_rec(struct lu_orphan_rec *rec)
{
        lustre_swab_lu_fid(&rec->lor_fid);
        __swab32s(&rec->lor_uid);
        __swab32s(&rec->lor_gid);
}

void lustre_swab_orphan_ent(struct lu_orphan_ent *ent)
{
        lustre_swab_lu_fid(&ent->loe_key);
        lustre_swab_orphan_rec(&ent->loe_rec);
}
EXPORT_SYMBOL(lustre_swab_orphan_ent);

void lustre_swab_orphan_ent_v2(struct lu_orphan_ent_v2 *ent)
{
        lustre_swab_lu_fid(&ent->loe_key);
        lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
        lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
        CLASSERT(offsetof(typeof(ent->loe_rec), lor_padding) != 0);
}
EXPORT_SYMBOL(lustre_swab_orphan_ent_v2);

void lustre_swab_ladvise(struct lu_ladvise *ladvise)
{
        __swab16s(&ladvise->lla_advice);
        __swab16s(&ladvise->lla_value1);
        __swab32s(&ladvise->lla_value2);
        __swab64s(&ladvise->lla_start);
        __swab64s(&ladvise->lla_end);
        __swab32s(&ladvise->lla_value3);
        __swab32s(&ladvise->lla_value4);
}
EXPORT_SYMBOL(lustre_swab_ladvise);

void lustre_swab_ladvise_hdr(struct ladvise_hdr *ladvise_hdr)
{
        __swab32s(&ladvise_hdr->lah_magic);
        __swab32s(&ladvise_hdr->lah_count);
        __swab64s(&ladvise_hdr->lah_flags);
        __swab32s(&ladvise_hdr->lah_value1);
        __swab32s(&ladvise_hdr->lah_value2);
        __swab64s(&ladvise_hdr->lah_value3);
}
EXPORT_SYMBOL(lustre_swab_ladvise_hdr);