4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/ptlrpc/pack_generic.c
34 * (Un)packing of OST requests
36 * Author: Peter J. Braam <braam@clusterfs.com>
37 * Author: Phil Schwan <phil@clusterfs.com>
38 * Author: Eric Barton <eeb@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_RPC
43 #include <libcfs/libcfs.h>
45 #include <llog_swab.h>
46 #include <lustre_net.h>
47 #include <lustre_swab.h>
48 #include <obd_cksum.h>
49 #include <obd_class.h>
50 #include <obd_support.h>
51 #include <obj_update.h>
53 #include "ptlrpc_internal.h"
55 static inline __u32 lustre_msg_hdr_size_v2(__u32 count)
57 return cfs_size_round(offsetof(struct lustre_msg_v2,
61 __u32 lustre_msg_hdr_size(__u32 magic, __u32 count)
66 case LUSTRE_MSG_MAGIC_V2:
67 return lustre_msg_hdr_size_v2(count);
69 LASSERTF(0, "incorrect message magic: %08x\n", magic);
74 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
78 lustre_set_req_swabbed(req, index);
80 lustre_set_rep_swabbed(req, index);
83 bool ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
87 return (ptlrpc_req_need_swab(req) &&
88 !lustre_req_swabbed(req, index));
90 return (ptlrpc_rep_need_swab(req) && !lustre_rep_swabbed(req, index));
93 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
94 enum lustre_msg_version version)
96 enum lustre_msg_version ver = lustre_msg_get_version(msg);
98 return (ver & LUSTRE_VERSION_MASK) != version;
101 int lustre_msg_check_version(struct lustre_msg *msg,
102 enum lustre_msg_version version)
104 #define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
105 switch (msg->lm_magic) {
106 case LUSTRE_MSG_MAGIC_V1:
107 CERROR("msg v1 not supported - please upgrade you system\n");
109 case LUSTRE_MSG_MAGIC_V2:
110 return lustre_msg_check_version_v2(msg, version);
112 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
115 #undef LUSTRE_MSG_MAGIC_V1
118 /* early reply size */
119 __u32 lustre_msg_early_size()
121 __u32 pblen = sizeof(struct ptlrpc_body);
123 return lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
125 EXPORT_SYMBOL(lustre_msg_early_size);
127 __u32 lustre_msg_size_v2(int count, __u32 *lengths)
133 size = lustre_msg_hdr_size_v2(count);
134 for (i = 0; i < count; i++)
135 size += cfs_size_round(lengths[i]);
139 EXPORT_SYMBOL(lustre_msg_size_v2);
141 /* This returns the size of the buffer that is required to hold a lustre_msg
142 * with the given sub-buffer lengths.
143 * NOTE: this should only be used for NEW requests, and should always be
144 * in the form of a v2 request. If this is a connection to a v1
145 * target then the first buffer will be stripped because the ptlrpc
146 * data is part of the lustre_msg_v1 header. b=14043 */
147 __u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
149 __u32 size[] = { sizeof(struct ptlrpc_body) };
157 LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
160 case LUSTRE_MSG_MAGIC_V2:
161 return lustre_msg_size_v2(count, lens);
163 LASSERTF(0, "incorrect message magic: %08x\n", magic);
168 /* This is used to determine the size of a buffer that was already packed
169 * and will correctly handle the different message formats. */
170 __u32 lustre_packed_msg_size(struct lustre_msg *msg)
172 switch (msg->lm_magic) {
173 case LUSTRE_MSG_MAGIC_V2:
174 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
176 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
180 EXPORT_SYMBOL(lustre_packed_msg_size);
182 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
190 msg->lm_bufcount = count;
191 /* XXX: lm_secflvr uninitialized here */
192 msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
194 for (i = 0; i < count; i++)
195 msg->lm_buflens[i] = lens[i];
200 ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
201 for (i = 0; i < count; i++) {
205 memcpy(ptr, tmp, lens[i]);
206 ptr += cfs_size_round(lens[i]);
209 EXPORT_SYMBOL(lustre_init_msg_v2);
211 static int lustre_pack_request_v2(struct ptlrpc_request *req,
212 int count, __u32 *lens, char **bufs)
216 reqlen = lustre_msg_size_v2(count, lens);
218 rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
222 req->rq_reqlen = reqlen;
224 lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
225 lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
229 int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
230 __u32 *lens, char **bufs)
232 __u32 size[] = { sizeof(struct ptlrpc_body) };
240 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
242 /* only use new format, we don't need to be compatible with 1.4 */
243 magic = LUSTRE_MSG_MAGIC_V2;
246 case LUSTRE_MSG_MAGIC_V2:
247 return lustre_pack_request_v2(req, count, lens, bufs);
249 LASSERTF(0, "incorrect message magic: %08x\n", magic);
/* Debug-only LRU of live reply states, protected by its own spinlock.
 * NOTE(review): the #if guard was lost in the mangled listing; upstream
 * gates this on RS_DEBUG -- verify against the tree. */
#if RS_DEBUG
struct list_head ptlrpc_rs_debug_lru =
	LIST_HEAD_INIT(ptlrpc_rs_debug_lru);
spinlock_t ptlrpc_rs_debug_lock;

#define PTLRPC_RS_DEBUG_LRU_ADD(rs)					\
do {									\
	spin_lock(&ptlrpc_rs_debug_lock);				\
	list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);	\
	spin_unlock(&ptlrpc_rs_debug_lock);				\
} while (0)

#define PTLRPC_RS_DEBUG_LRU_DEL(rs)					\
do {									\
	spin_lock(&ptlrpc_rs_debug_lock);				\
	list_del(&(rs)->rs_debug_list);					\
	spin_unlock(&ptlrpc_rs_debug_lock);				\
} while (0)
#else
# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0)
# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0)
#endif
277 struct ptlrpc_reply_state *
278 lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
280 struct ptlrpc_reply_state *rs = NULL;
282 spin_lock(&svcpt->scp_rep_lock);
284 /* See if we have anything in a pool, and wait if nothing */
285 while (list_empty(&svcpt->scp_rep_idle)) {
286 struct l_wait_info lwi;
289 spin_unlock(&svcpt->scp_rep_lock);
290 /* If we cannot get anything for some long time, we better
291 * bail out instead of waiting infinitely */
292 lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
293 rc = l_wait_event(svcpt->scp_rep_waitq,
294 !list_empty(&svcpt->scp_rep_idle), &lwi);
297 spin_lock(&svcpt->scp_rep_lock);
300 rs = list_entry(svcpt->scp_rep_idle.next,
301 struct ptlrpc_reply_state, rs_list);
302 list_del(&rs->rs_list);
304 spin_unlock(&svcpt->scp_rep_lock);
306 memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
307 rs->rs_size = svcpt->scp_service->srv_max_reply_size;
308 rs->rs_svcpt = svcpt;
314 void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
316 struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
318 spin_lock(&svcpt->scp_rep_lock);
319 list_add(&rs->rs_list, &svcpt->scp_rep_idle);
320 spin_unlock(&svcpt->scp_rep_lock);
321 wake_up(&svcpt->scp_rep_waitq);
324 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
325 __u32 *lens, char **bufs, int flags)
327 struct ptlrpc_reply_state *rs;
331 LASSERT(req->rq_reply_state == NULL);
334 if ((flags & LPRFL_EARLY_REPLY) == 0) {
335 spin_lock(&req->rq_lock);
336 req->rq_packed_final = 1;
337 spin_unlock(&req->rq_lock);
340 msg_len = lustre_msg_size_v2(count, lens);
341 rc = sptlrpc_svc_alloc_rs(req, msg_len);
345 rs = req->rq_reply_state;
346 atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
347 rs->rs_cb_id.cbid_fn = reply_out_callback;
348 rs->rs_cb_id.cbid_arg = rs;
349 rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
350 INIT_LIST_HEAD(&rs->rs_exp_list);
351 INIT_LIST_HEAD(&rs->rs_obd_list);
352 INIT_LIST_HEAD(&rs->rs_list);
353 spin_lock_init(&rs->rs_lock);
355 req->rq_replen = msg_len;
356 req->rq_reply_state = rs;
357 req->rq_repmsg = rs->rs_msg;
359 lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
360 lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
362 PTLRPC_RS_DEBUG_LRU_ADD(rs);
366 EXPORT_SYMBOL(lustre_pack_reply_v2);
368 int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
369 char **bufs, int flags)
372 __u32 size[] = { sizeof(struct ptlrpc_body) };
380 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
382 switch (req->rq_reqmsg->lm_magic) {
383 case LUSTRE_MSG_MAGIC_V2:
384 rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
387 LASSERTF(0, "incorrect message magic: %08x\n",
388 req->rq_reqmsg->lm_magic);
392 CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
393 lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
397 int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
400 return lustre_pack_reply_flags(req, count, lens, bufs, 0);
402 EXPORT_SYMBOL(lustre_pack_reply);
404 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size)
406 __u32 i, offset, buflen, bufcount;
409 LASSERT(m->lm_bufcount > 0);
411 bufcount = m->lm_bufcount;
412 if (unlikely(n >= bufcount)) {
413 CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
418 buflen = m->lm_buflens[n];
419 if (unlikely(buflen < min_size)) {
420 CERROR("msg %p buffer[%d] size %d too small "
421 "(required %d, opc=%d)\n", m, n, buflen, min_size,
422 n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
426 offset = lustre_msg_hdr_size_v2(bufcount);
427 for (i = 0; i < n; i++)
428 offset += cfs_size_round(m->lm_buflens[i]);
430 return (char *)m + offset;
433 void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 min_size)
435 switch (m->lm_magic) {
436 case LUSTRE_MSG_MAGIC_V2:
437 return lustre_msg_buf_v2(m, n, min_size);
439 LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n",
444 EXPORT_SYMBOL(lustre_msg_buf);
446 static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
447 unsigned int newlen, int move_data)
449 char *tail = NULL, *newpos;
453 LASSERT(msg->lm_bufcount > segment);
454 LASSERT(msg->lm_buflens[segment] >= newlen);
456 if (msg->lm_buflens[segment] == newlen)
459 if (move_data && msg->lm_bufcount > segment + 1) {
460 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
461 for (n = segment + 1; n < msg->lm_bufcount; n++)
462 tail_len += cfs_size_round(msg->lm_buflens[n]);
465 msg->lm_buflens[segment] = newlen;
467 if (tail && tail_len) {
468 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
469 LASSERT(newpos <= tail);
471 memmove(newpos, tail, tail_len);
474 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
478 * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
479 * we also move data forward from @segment + 1.
481 * if @newlen == 0, we remove the segment completely, but we still keep the
482 * totally bufcount the same to save possible data moving. this will leave a
483 * unused segment with size 0 at the tail, but that's ok.
485 * return new msg size after shrinking.
488 * + if any buffers higher than @segment has been filled in, must call shrink
489 * with non-zero @move_data.
490 * + caller should NOT keep pointers to msg buffers which higher than @segment
493 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
494 unsigned int newlen, int move_data)
496 switch (msg->lm_magic) {
497 case LUSTRE_MSG_MAGIC_V2:
498 return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
500 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
503 EXPORT_SYMBOL(lustre_shrink_msg);
505 void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
507 PTLRPC_RS_DEBUG_LRU_DEL(rs);
509 LASSERT(atomic_read(&rs->rs_refcount) == 0);
510 LASSERT(!rs->rs_difficult || rs->rs_handled);
511 LASSERT(!rs->rs_on_net);
512 LASSERT(!rs->rs_scheduled);
513 LASSERT(rs->rs_export == NULL);
514 LASSERT(rs->rs_nlocks == 0);
515 LASSERT(list_empty(&rs->rs_exp_list));
516 LASSERT(list_empty(&rs->rs_obd_list));
518 sptlrpc_svc_free_rs(rs);
521 static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
523 int swabbed, required_len, i, buflen;
525 /* Now we know the sender speaks my language. */
526 required_len = lustre_msg_hdr_size_v2(0);
527 if (len < required_len) {
528 /* can't even look inside the message */
529 CERROR("message length %d too small for lustre_msg\n", len);
533 swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
536 __swab32s(&m->lm_magic);
537 __swab32s(&m->lm_bufcount);
538 __swab32s(&m->lm_secflvr);
539 __swab32s(&m->lm_repsize);
540 __swab32s(&m->lm_cksum);
541 __swab32s(&m->lm_flags);
542 CLASSERT(offsetof(typeof(*m), lm_padding_2) != 0);
543 CLASSERT(offsetof(typeof(*m), lm_padding_3) != 0);
546 if (m->lm_bufcount == 0 || m->lm_bufcount > PTLRPC_MAX_BUFCOUNT) {
547 CERROR("message bufcount %d is not valid\n", m->lm_bufcount);
550 required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
551 if (len < required_len) {
552 /* didn't receive all the buffer lengths */
553 CERROR("message length %d too small for %d buflens\n",
554 len, m->lm_bufcount);
558 for (i = 0; i < m->lm_bufcount; i++) {
560 __swab32s(&m->lm_buflens[i]);
561 buflen = cfs_size_round(m->lm_buflens[i]);
562 if (buflen < 0 || buflen > PTLRPC_MAX_BUFLEN) {
563 CERROR("buffer %d length %d is not valid\n", i, buflen);
566 required_len += buflen;
568 if (len < required_len || required_len > PTLRPC_MAX_BUFLEN) {
569 CERROR("len: %d, required_len %d, bufcount: %d\n",
570 len, required_len, m->lm_bufcount);
571 for (i = 0; i < m->lm_bufcount; i++)
572 CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
579 int __lustre_unpack_msg(struct lustre_msg *m, int len)
581 int required_len, rc;
584 /* We can provide a slightly better error log, if we check the
585 * message magic and version first. In the future, struct
586 * lustre_msg may grow, and we'd like to log a version mismatch,
587 * rather than a short message.
590 required_len = offsetof(struct lustre_msg, lm_magic) +
592 if (len < required_len) {
593 /* can't even look inside the message */
594 CERROR("message length %d too small for magic/version check\n",
599 rc = lustre_unpack_msg_v2(m, len);
603 EXPORT_SYMBOL(__lustre_unpack_msg);
605 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
608 rc = __lustre_unpack_msg(req->rq_reqmsg, len);
610 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
616 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
619 rc = __lustre_unpack_msg(req->rq_repmsg, len);
621 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
627 static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
628 const int inout, int offset)
630 struct ptlrpc_body *pb;
631 struct lustre_msg_v2 *m = inout ? req->rq_reqmsg : req->rq_repmsg;
633 pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
635 CERROR("error unpacking ptlrpc body\n");
638 if (ptlrpc_buf_need_swab(req, inout, offset)) {
639 lustre_swab_ptlrpc_body(pb);
640 ptlrpc_buf_set_swabbed(req, inout, offset);
643 if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
644 CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
649 pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
654 int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
656 switch (req->rq_reqmsg->lm_magic) {
657 case LUSTRE_MSG_MAGIC_V2:
658 return lustre_unpack_ptlrpc_body_v2(req, 1, offset);
660 CERROR("bad lustre msg magic: %08x\n",
661 req->rq_reqmsg->lm_magic);
666 int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
668 switch (req->rq_repmsg->lm_magic) {
669 case LUSTRE_MSG_MAGIC_V2:
670 return lustre_unpack_ptlrpc_body_v2(req, 0, offset);
672 CERROR("bad lustre msg magic: %08x\n",
673 req->rq_repmsg->lm_magic);
678 static inline __u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, __u32 n)
680 if (n >= m->lm_bufcount)
683 return m->lm_buflens[n];
687 * lustre_msg_buflen - return the length of buffer \a n in message \a m
688 * \param m lustre_msg (request or reply) to look at
689 * \param n message index (base 0)
691 * returns zero for non-existent message indices
693 __u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n)
695 switch (m->lm_magic) {
696 case LUSTRE_MSG_MAGIC_V2:
697 return lustre_msg_buflen_v2(m, n);
699 CERROR("incorrect message magic: %08x\n", m->lm_magic);
703 EXPORT_SYMBOL(lustre_msg_buflen);
706 lustre_msg_set_buflen_v2(struct lustre_msg_v2 *m, __u32 n, __u32 len)
708 if (n >= m->lm_bufcount)
711 m->lm_buflens[n] = len;
714 void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len)
716 switch (m->lm_magic) {
717 case LUSTRE_MSG_MAGIC_V2:
718 lustre_msg_set_buflen_v2(m, n, len);
721 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
725 /* NB return the bufcount for lustre_msg_v2 format, so if message is packed
726 * in V1 format, the result is one bigger. (add struct ptlrpc_body). */
727 __u32 lustre_msg_bufcount(struct lustre_msg *m)
729 switch (m->lm_magic) {
730 case LUSTRE_MSG_MAGIC_V2:
731 return m->lm_bufcount;
733 CERROR("incorrect message magic: %08x\n", m->lm_magic);
738 char *lustre_msg_string(struct lustre_msg *m, __u32 index, __u32 max_len)
740 /* max_len == 0 means the string should fill the buffer */
744 switch (m->lm_magic) {
745 case LUSTRE_MSG_MAGIC_V2:
746 str = lustre_msg_buf_v2(m, index, 0);
747 blen = lustre_msg_buflen_v2(m, index);
750 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
754 CERROR ("can't unpack string in msg %p buffer[%d]\n", m, index);
758 slen = strnlen(str, blen);
760 if (slen == blen) { /* not NULL terminated */
761 CERROR("can't unpack non-NULL terminated string in "
762 "msg %p buffer[%d] len %d\n", m, index, blen);
765 if (blen > PTLRPC_MAX_BUFLEN) {
766 CERROR("buffer length of msg %p buffer[%d] is invalid(%d)\n",
772 if (slen != blen - 1) {
773 CERROR("can't unpack short string in msg %p "
774 "buffer[%d] len %d: strlen %d\n",
775 m, index, blen, slen);
778 } else if (slen > max_len) {
779 CERROR("can't unpack oversized string in msg %p "
780 "buffer[%d] len %d strlen %d: max %d expected\n",
781 m, index, blen, slen, max_len);
788 /* Wrap up the normal fixed length cases */
789 static inline void *__lustre_swab_buf(struct lustre_msg *msg, __u32 index,
790 __u32 min_size, void *swabber)
794 LASSERT(msg != NULL);
795 switch (msg->lm_magic) {
796 case LUSTRE_MSG_MAGIC_V2:
797 ptr = lustre_msg_buf_v2(msg, index, min_size);
800 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
803 if (ptr != NULL && swabber != NULL)
804 ((void (*)(void *))swabber)(ptr);
809 static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
811 return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
812 sizeof(struct ptlrpc_body_v2));
815 enum lustre_msghdr lustre_msghdr_get_flags(struct lustre_msg *msg)
817 switch (msg->lm_magic) {
818 case LUSTRE_MSG_MAGIC_V2:
819 /* already in host endian */
820 return msg->lm_flags;
822 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
826 EXPORT_SYMBOL(lustre_msghdr_get_flags);
828 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
830 switch (msg->lm_magic) {
831 case LUSTRE_MSG_MAGIC_V2:
832 msg->lm_flags = flags;
835 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
839 __u32 lustre_msg_get_flags(struct lustre_msg *msg)
841 switch (msg->lm_magic) {
842 case LUSTRE_MSG_MAGIC_V2: {
843 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
847 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
851 /* flags might be printed in debug code while message
856 EXPORT_SYMBOL(lustre_msg_get_flags);
858 void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags)
860 switch (msg->lm_magic) {
861 case LUSTRE_MSG_MAGIC_V2: {
862 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
863 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
864 pb->pb_flags |= flags;
868 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
871 EXPORT_SYMBOL(lustre_msg_add_flags);
873 void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags)
875 switch (msg->lm_magic) {
876 case LUSTRE_MSG_MAGIC_V2: {
877 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
878 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
879 pb->pb_flags = flags;
883 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
887 void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags)
889 switch (msg->lm_magic) {
890 case LUSTRE_MSG_MAGIC_V2: {
891 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
892 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
893 pb->pb_flags &= ~flags;
898 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
901 EXPORT_SYMBOL(lustre_msg_clear_flags);
903 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
905 switch (msg->lm_magic) {
906 case LUSTRE_MSG_MAGIC_V2: {
907 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
909 return pb->pb_op_flags;
911 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
919 void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags)
921 switch (msg->lm_magic) {
922 case LUSTRE_MSG_MAGIC_V2: {
923 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
924 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
925 pb->pb_op_flags |= flags;
929 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
932 EXPORT_SYMBOL(lustre_msg_add_op_flags);
934 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
936 switch (msg->lm_magic) {
937 case LUSTRE_MSG_MAGIC_V2: {
938 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
940 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
943 return &pb->pb_handle;
946 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
951 __u32 lustre_msg_get_type(struct lustre_msg *msg)
953 switch (msg->lm_magic) {
954 case LUSTRE_MSG_MAGIC_V2: {
955 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
957 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
958 return PTL_RPC_MSG_ERR;
963 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
964 return PTL_RPC_MSG_ERR;
967 EXPORT_SYMBOL(lustre_msg_get_type);
969 enum lustre_msg_version lustre_msg_get_version(struct lustre_msg *msg)
971 switch (msg->lm_magic) {
972 case LUSTRE_MSG_MAGIC_V2: {
973 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
975 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
978 return pb->pb_version;
981 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
986 void lustre_msg_add_version(struct lustre_msg *msg, __u32 version)
988 switch (msg->lm_magic) {
989 case LUSTRE_MSG_MAGIC_V2: {
990 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
991 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
992 pb->pb_version |= version;
996 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1000 __u32 lustre_msg_get_opc(struct lustre_msg *msg)
1002 switch (msg->lm_magic) {
1003 case LUSTRE_MSG_MAGIC_V2: {
1004 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1006 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1012 CERROR("incorrect message magic: %08x (msg:%p)\n",
1013 msg->lm_magic, msg);
1017 EXPORT_SYMBOL(lustre_msg_get_opc);
1019 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg)
1021 switch (msg->lm_magic) {
1022 case LUSTRE_MSG_MAGIC_V2: {
1023 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1025 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1028 return pb->pb_last_xid;
1031 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1035 EXPORT_SYMBOL(lustre_msg_get_last_xid);
1037 __u16 lustre_msg_get_tag(struct lustre_msg *msg)
1039 switch (msg->lm_magic) {
1040 case LUSTRE_MSG_MAGIC_V2: {
1041 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1043 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1049 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1053 EXPORT_SYMBOL(lustre_msg_get_tag);
1055 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
1057 switch (msg->lm_magic) {
1058 case LUSTRE_MSG_MAGIC_V2: {
1059 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1061 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1064 return pb->pb_last_committed;
1067 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1071 EXPORT_SYMBOL(lustre_msg_get_last_committed);
1073 __u64 *lustre_msg_get_versions(struct lustre_msg *msg)
1075 switch (msg->lm_magic) {
1076 case LUSTRE_MSG_MAGIC_V2: {
1077 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1079 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1082 return pb->pb_pre_versions;
1085 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1089 EXPORT_SYMBOL(lustre_msg_get_versions);
1091 __u64 lustre_msg_get_transno(struct lustre_msg *msg)
1093 switch (msg->lm_magic) {
1094 case LUSTRE_MSG_MAGIC_V2: {
1095 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1097 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1100 return pb->pb_transno;
1103 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1107 EXPORT_SYMBOL(lustre_msg_get_transno);
1109 int lustre_msg_get_status(struct lustre_msg *msg)
1111 switch (msg->lm_magic) {
1112 case LUSTRE_MSG_MAGIC_V2: {
1113 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1115 return pb->pb_status;
1116 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1120 /* status might be printed in debug code while message
1125 EXPORT_SYMBOL(lustre_msg_get_status);
1127 __u64 lustre_msg_get_slv(struct lustre_msg *msg)
1129 switch (msg->lm_magic) {
1130 case LUSTRE_MSG_MAGIC_V2: {
1131 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1133 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1139 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1145 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
1147 switch (msg->lm_magic) {
1148 case LUSTRE_MSG_MAGIC_V2: {
1149 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1151 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1158 CERROR("invalid msg magic %x\n", msg->lm_magic);
1163 __u32 lustre_msg_get_limit(struct lustre_msg *msg)
1165 switch (msg->lm_magic) {
1166 case LUSTRE_MSG_MAGIC_V2: {
1167 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1169 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1172 return pb->pb_limit;
1175 CERROR("invalid msg magic %x\n", msg->lm_magic);
1181 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
1183 switch (msg->lm_magic) {
1184 case LUSTRE_MSG_MAGIC_V2: {
1185 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1187 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1190 pb->pb_limit = limit;
1194 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1199 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
1201 switch (msg->lm_magic) {
1202 case LUSTRE_MSG_MAGIC_V2: {
1203 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1205 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1208 return pb->pb_conn_cnt;
1211 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1215 EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
1217 __u32 lustre_msg_get_magic(struct lustre_msg *msg)
1219 switch (msg->lm_magic) {
1220 case LUSTRE_MSG_MAGIC_V2:
1221 return msg->lm_magic;
1223 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1228 __u32 lustre_msg_get_timeout(struct lustre_msg *msg)
1230 switch (msg->lm_magic) {
1231 case LUSTRE_MSG_MAGIC_V2: {
1232 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1234 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1237 return pb->pb_timeout;
1240 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1245 __u32 lustre_msg_get_service_time(struct lustre_msg *msg)
1247 switch (msg->lm_magic) {
1248 case LUSTRE_MSG_MAGIC_V2: {
1249 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1251 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1254 return pb->pb_service_time;
1257 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1262 char *lustre_msg_get_jobid(struct lustre_msg *msg)
1264 switch (msg->lm_magic) {
1265 case LUSTRE_MSG_MAGIC_V2: {
1266 struct ptlrpc_body *pb;
1268 /* the old pltrpc_body_v2 is smaller; doesn't include jobid */
1269 if (msg->lm_buflens[MSG_PTLRPC_BODY_OFF] <
1270 sizeof(struct ptlrpc_body))
1273 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1274 sizeof(struct ptlrpc_body));
1278 return pb->pb_jobid;
1281 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1285 EXPORT_SYMBOL(lustre_msg_get_jobid);
1287 __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
1289 switch (msg->lm_magic) {
1290 case LUSTRE_MSG_MAGIC_V2:
1291 return msg->lm_cksum;
1293 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1298 __u64 lustre_msg_get_mbits(struct lustre_msg *msg)
1300 switch (msg->lm_magic) {
1301 case LUSTRE_MSG_MAGIC_V2: {
1302 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1304 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1307 return pb->pb_mbits;
1310 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1315 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
1317 switch (msg->lm_magic) {
1318 case LUSTRE_MSG_MAGIC_V2: {
1319 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1320 __u32 len = lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF);
1322 unsigned int hsize = 4;
1325 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1326 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
1327 len, NULL, 0, (unsigned char *)&crc,
1332 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1337 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
1339 switch (msg->lm_magic) {
1340 case LUSTRE_MSG_MAGIC_V2: {
1341 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1342 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1343 pb->pb_handle = *handle;
1347 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1351 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
1353 switch (msg->lm_magic) {
1354 case LUSTRE_MSG_MAGIC_V2: {
1355 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1356 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1361 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1365 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
1367 switch (msg->lm_magic) {
1368 case LUSTRE_MSG_MAGIC_V2: {
1369 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1370 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1375 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1379 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid)
1381 switch (msg->lm_magic) {
1382 case LUSTRE_MSG_MAGIC_V2: {
1383 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1384 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1385 pb->pb_last_xid = last_xid;
1389 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1392 EXPORT_SYMBOL(lustre_msg_set_last_xid);
1394 void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
1396 switch (msg->lm_magic) {
1397 case LUSTRE_MSG_MAGIC_V2: {
1398 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1399 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1404 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1407 EXPORT_SYMBOL(lustre_msg_set_tag);
1409 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed)
1411 switch (msg->lm_magic) {
1412 case LUSTRE_MSG_MAGIC_V2: {
1413 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1414 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1415 pb->pb_last_committed = last_committed;
1419 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1423 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
1425 switch (msg->lm_magic) {
1426 case LUSTRE_MSG_MAGIC_V2: {
1427 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1428 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1429 pb->pb_pre_versions[0] = versions[0];
1430 pb->pb_pre_versions[1] = versions[1];
1431 pb->pb_pre_versions[2] = versions[2];
1432 pb->pb_pre_versions[3] = versions[3];
1436 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1439 EXPORT_SYMBOL(lustre_msg_set_versions);
1441 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
1443 switch (msg->lm_magic) {
1444 case LUSTRE_MSG_MAGIC_V2: {
1445 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1446 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1447 pb->pb_transno = transno;
1451 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1454 EXPORT_SYMBOL(lustre_msg_set_transno);
1456 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
1458 switch (msg->lm_magic) {
1459 case LUSTRE_MSG_MAGIC_V2: {
1460 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1461 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1462 pb->pb_status = status;
1466 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1469 EXPORT_SYMBOL(lustre_msg_set_status);
1471 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
1473 switch (msg->lm_magic) {
1474 case LUSTRE_MSG_MAGIC_V2: {
1475 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1476 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1477 pb->pb_conn_cnt = conn_cnt;
1481 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1485 void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout)
1487 switch (msg->lm_magic) {
1488 case LUSTRE_MSG_MAGIC_V2: {
1489 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1490 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1491 pb->pb_timeout = timeout;
1495 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1499 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time)
1501 switch (msg->lm_magic) {
1502 case LUSTRE_MSG_MAGIC_V2: {
1503 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1504 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1505 pb->pb_service_time = service_time;
1509 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1513 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
1515 switch (msg->lm_magic) {
1516 case LUSTRE_MSG_MAGIC_V2: {
1517 __u32 opc = lustre_msg_get_opc(msg);
1518 struct ptlrpc_body *pb;
1520 /* Don't set jobid for ldlm ast RPCs, they've been shrinked.
1521 * See the comment in ptlrpc_request_pack(). */
1522 if (!opc || opc == LDLM_BL_CALLBACK ||
1523 opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
1526 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1527 sizeof(struct ptlrpc_body));
1528 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1531 memcpy(pb->pb_jobid, jobid, sizeof(pb->pb_jobid));
1532 else if (pb->pb_jobid[0] == '\0')
1533 lustre_get_jobid(pb->pb_jobid, sizeof(pb->pb_jobid));
1537 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1540 EXPORT_SYMBOL(lustre_msg_set_jobid);
1542 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
1544 switch (msg->lm_magic) {
1545 case LUSTRE_MSG_MAGIC_V2:
1546 msg->lm_cksum = cksum;
1549 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1553 void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
1555 switch (msg->lm_magic) {
1556 case LUSTRE_MSG_MAGIC_V2: {
1557 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1559 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1560 pb->pb_mbits = mbits;
1564 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1568 void ptlrpc_request_set_replen(struct ptlrpc_request *req)
1570 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
1572 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
1573 req->rq_pill.rc_area[RCL_SERVER]);
1574 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1575 req->rq_reqmsg->lm_repsize = req->rq_replen;
1577 EXPORT_SYMBOL(ptlrpc_request_set_replen);
1579 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *lens)
1581 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens);
1582 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1583 req->rq_reqmsg->lm_repsize = req->rq_replen;
1587 * Send a remote set_info_async.
1589 * This may go from client to server or server to client.
1591 int do_set_info_async(struct obd_import *imp,
1592 int opcode, int version,
1593 size_t keylen, void *key,
1594 size_t vallen, void *val,
1595 struct ptlrpc_request_set *set)
1597 struct ptlrpc_request *req;
1602 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
1606 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
1607 RCL_CLIENT, keylen);
1608 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
1609 RCL_CLIENT, vallen);
1610 rc = ptlrpc_request_pack(req, version, opcode);
1612 ptlrpc_request_free(req);
1616 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1617 memcpy(tmp, key, keylen);
1618 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1619 memcpy(tmp, val, vallen);
1621 ptlrpc_request_set_replen(req);
1624 ptlrpc_set_add_req(set, req);
1625 ptlrpc_check_set(NULL, set);
1627 rc = ptlrpc_queue_wait(req);
1628 ptlrpc_req_finished(req);
1633 EXPORT_SYMBOL(do_set_info_async);
1635 /* byte flipping routines for all wire types declared in
1636 * lustre_idl.h implemented here.
1638 void lustre_swab_ptlrpc_body(struct ptlrpc_body *body)
1640 __swab32s(&body->pb_type);
1641 __swab32s(&body->pb_version);
1642 __swab32s(&body->pb_opc);
1643 __swab32s(&body->pb_status);
1644 __swab64s(&body->pb_last_xid);
1645 __swab16s(&body->pb_tag);
1646 CLASSERT(offsetof(typeof(*body), pb_padding0) != 0);
1647 CLASSERT(offsetof(typeof(*body), pb_padding1) != 0);
1648 __swab64s(&body->pb_last_committed);
1649 __swab64s(&body->pb_transno);
1650 __swab32s(&body->pb_flags);
1651 __swab32s(&body->pb_op_flags);
1652 __swab32s(&body->pb_conn_cnt);
1653 __swab32s(&body->pb_timeout);
1654 __swab32s(&body->pb_service_time);
1655 __swab32s(&body->pb_limit);
1656 __swab64s(&body->pb_slv);
1657 __swab64s(&body->pb_pre_versions[0]);
1658 __swab64s(&body->pb_pre_versions[1]);
1659 __swab64s(&body->pb_pre_versions[2]);
1660 __swab64s(&body->pb_pre_versions[3]);
1661 __swab64s(&body->pb_mbits);
1662 CLASSERT(offsetof(typeof(*body), pb_padding64_0) != 0);
1663 CLASSERT(offsetof(typeof(*body), pb_padding64_1) != 0);
1664 CLASSERT(offsetof(typeof(*body), pb_padding64_2) != 0);
1665 /* While we need to maintain compatibility between
1666 * clients and servers without ptlrpc_body_v2 (< 2.3)
1667 * do not swab any fields beyond pb_jobid, as we are
1668 * using this swab function for both ptlrpc_body
1669 * and ptlrpc_body_v2. */
1670 /* pb_jobid is an ASCII string and should not be swabbed */
1671 CLASSERT(offsetof(typeof(*body), pb_jobid) != 0);
1674 void lustre_swab_connect(struct obd_connect_data *ocd)
1676 __swab64s(&ocd->ocd_connect_flags);
1677 __swab32s(&ocd->ocd_version);
1678 __swab32s(&ocd->ocd_grant);
1679 __swab64s(&ocd->ocd_ibits_known);
1680 __swab32s(&ocd->ocd_index);
1681 __swab32s(&ocd->ocd_brw_size);
1682 /* ocd_blocksize and ocd_inodespace don't need to be swabbed because
1683 * they are 8-byte values */
1684 __swab16s(&ocd->ocd_grant_tax_kb);
1685 __swab32s(&ocd->ocd_grant_max_blks);
1686 __swab64s(&ocd->ocd_transno);
1687 __swab32s(&ocd->ocd_group);
1688 __swab32s(&ocd->ocd_cksum_types);
1689 __swab32s(&ocd->ocd_instance);
1690 /* Fields after ocd_cksum_types are only accessible by the receiver
1691 * if the corresponding flag in ocd_connect_flags is set. Accessing
1692 * any field after ocd_maxbytes on the receiver without a valid flag
1693 * may result in out-of-bound memory access and kernel oops. */
1694 if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
1695 __swab32s(&ocd->ocd_max_easize);
1696 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
1697 __swab64s(&ocd->ocd_maxbytes);
1698 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1699 __swab16s(&ocd->ocd_maxmodrpcs);
1700 CLASSERT(offsetof(typeof(*ocd), padding0) != 0);
1701 CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
1702 if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
1703 __swab64s(&ocd->ocd_connect_flags2);
1704 CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
1705 CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
1706 CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
1707 CLASSERT(offsetof(typeof(*ocd), padding6) != 0);
1708 CLASSERT(offsetof(typeof(*ocd), padding7) != 0);
1709 CLASSERT(offsetof(typeof(*ocd), padding8) != 0);
1710 CLASSERT(offsetof(typeof(*ocd), padding9) != 0);
1711 CLASSERT(offsetof(typeof(*ocd), paddingA) != 0);
1712 CLASSERT(offsetof(typeof(*ocd), paddingB) != 0);
1713 CLASSERT(offsetof(typeof(*ocd), paddingC) != 0);
1714 CLASSERT(offsetof(typeof(*ocd), paddingD) != 0);
1715 CLASSERT(offsetof(typeof(*ocd), paddingE) != 0);
1716 CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
1719 static void lustre_swab_ost_layout(struct ost_layout *ol)
1721 __swab32s(&ol->ol_stripe_size);
1722 __swab32s(&ol->ol_stripe_count);
1723 __swab64s(&ol->ol_comp_start);
1724 __swab64s(&ol->ol_comp_end);
1725 __swab32s(&ol->ol_comp_id);
1728 void lustre_swab_obdo (struct obdo *o)
1730 __swab64s(&o->o_valid);
1731 lustre_swab_ost_id(&o->o_oi);
1732 __swab64s(&o->o_parent_seq);
1733 __swab64s(&o->o_size);
1734 __swab64s(&o->o_mtime);
1735 __swab64s(&o->o_atime);
1736 __swab64s(&o->o_ctime);
1737 __swab64s(&o->o_blocks);
1738 __swab64s(&o->o_grant);
1739 __swab32s(&o->o_blksize);
1740 __swab32s(&o->o_mode);
1741 __swab32s(&o->o_uid);
1742 __swab32s(&o->o_gid);
1743 __swab32s(&o->o_flags);
1744 __swab32s(&o->o_nlink);
1745 __swab32s(&o->o_parent_oid);
1746 __swab32s(&o->o_misc);
1747 __swab64s(&o->o_ioepoch);
1748 __swab32s(&o->o_stripe_idx);
1749 __swab32s(&o->o_parent_ver);
1750 lustre_swab_ost_layout(&o->o_layout);
1751 __swab32s(&o->o_layout_version);
1752 __swab32s(&o->o_uid_h);
1753 __swab32s(&o->o_gid_h);
1754 __swab64s(&o->o_data_version);
1755 __swab32s(&o->o_projid);
1756 CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
1757 CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
1758 CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
1761 EXPORT_SYMBOL(lustre_swab_obdo);
1763 void lustre_swab_obd_statfs (struct obd_statfs *os)
1765 __swab64s(&os->os_type);
1766 __swab64s(&os->os_blocks);
1767 __swab64s(&os->os_bfree);
1768 __swab64s(&os->os_bavail);
1769 __swab64s(&os->os_files);
1770 __swab64s(&os->os_ffree);
1771 /* no need to swab os_fsid */
1772 __swab32s(&os->os_bsize);
1773 __swab32s(&os->os_namelen);
1774 __swab64s(&os->os_maxbytes);
1775 __swab32s(&os->os_state);
1776 __swab32s(&os->os_fprecreated);
1777 __swab32s(&os->os_granted);
1778 CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
1779 CLASSERT(offsetof(typeof(*os), os_spare4) != 0);
1780 CLASSERT(offsetof(typeof(*os), os_spare5) != 0);
1781 CLASSERT(offsetof(typeof(*os), os_spare6) != 0);
1782 CLASSERT(offsetof(typeof(*os), os_spare7) != 0);
1783 CLASSERT(offsetof(typeof(*os), os_spare8) != 0);
1784 CLASSERT(offsetof(typeof(*os), os_spare9) != 0);
1787 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
1789 lustre_swab_ost_id(&ioo->ioo_oid);
1790 __swab32s(&ioo->ioo_max_brw);
1791 __swab32s(&ioo->ioo_bufcnt);
1794 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
1796 __swab64s(&nbr->rnb_offset);
1797 __swab32s(&nbr->rnb_len);
1798 __swab32s(&nbr->rnb_flags);
1801 void lustre_swab_ost_body (struct ost_body *b)
1803 lustre_swab_obdo (&b->oa);
1806 void lustre_swab_ost_last_id(u64 *id)
1811 void lustre_swab_generic_32s(__u32 *val)
1816 void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *desc)
1818 lustre_swab_lu_fid(&desc->gl_id.qid_fid);
1819 __swab64s(&desc->gl_flags);
1820 __swab64s(&desc->gl_ver);
1821 __swab64s(&desc->gl_hardlimit);
1822 __swab64s(&desc->gl_softlimit);
1823 __swab64s(&desc->gl_time);
1824 CLASSERT(offsetof(typeof(*desc), gl_pad2) != 0);
1826 EXPORT_SYMBOL(lustre_swab_gl_lquota_desc);
1828 void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *desc)
1830 __swab32s(&desc->lgbd_status);
1831 __swab32s(&desc->lgbd_timeout);
1832 CLASSERT(offsetof(typeof(*desc), lgbd_padding) != 0);
1834 EXPORT_SYMBOL(lustre_swab_gl_barrier_desc);
1836 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
1838 __swab64s(&lvb->lvb_size);
1839 __swab64s(&lvb->lvb_mtime);
1840 __swab64s(&lvb->lvb_atime);
1841 __swab64s(&lvb->lvb_ctime);
1842 __swab64s(&lvb->lvb_blocks);
1844 EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
1846 void lustre_swab_ost_lvb(struct ost_lvb *lvb)
1848 __swab64s(&lvb->lvb_size);
1849 __swab64s(&lvb->lvb_mtime);
1850 __swab64s(&lvb->lvb_atime);
1851 __swab64s(&lvb->lvb_ctime);
1852 __swab64s(&lvb->lvb_blocks);
1853 __swab32s(&lvb->lvb_mtime_ns);
1854 __swab32s(&lvb->lvb_atime_ns);
1855 __swab32s(&lvb->lvb_ctime_ns);
1856 __swab32s(&lvb->lvb_padding);
1858 EXPORT_SYMBOL(lustre_swab_ost_lvb);
1860 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
1862 __swab64s(&lvb->lvb_flags);
1863 __swab64s(&lvb->lvb_id_may_rel);
1864 __swab64s(&lvb->lvb_id_rel);
1865 __swab64s(&lvb->lvb_id_qunit);
1866 __swab64s(&lvb->lvb_pad1);
1868 EXPORT_SYMBOL(lustre_swab_lquota_lvb);
1870 void lustre_swab_barrier_lvb(struct barrier_lvb *lvb)
1872 __swab32s(&lvb->lvb_status);
1873 __swab32s(&lvb->lvb_index);
1874 CLASSERT(offsetof(typeof(*lvb), lvb_padding) != 0);
1876 EXPORT_SYMBOL(lustre_swab_barrier_lvb);
1878 void lustre_swab_mdt_body (struct mdt_body *b)
1880 lustre_swab_lu_fid(&b->mbo_fid1);
1881 lustre_swab_lu_fid(&b->mbo_fid2);
1882 /* handle is opaque */
1883 __swab64s(&b->mbo_valid);
1884 __swab64s(&b->mbo_size);
1885 __swab64s(&b->mbo_mtime);
1886 __swab64s(&b->mbo_atime);
1887 __swab64s(&b->mbo_ctime);
1888 __swab64s(&b->mbo_blocks);
1889 __swab64s(&b->mbo_version);
1890 __swab64s(&b->mbo_t_state);
1891 __swab32s(&b->mbo_fsuid);
1892 __swab32s(&b->mbo_fsgid);
1893 __swab32s(&b->mbo_capability);
1894 __swab32s(&b->mbo_mode);
1895 __swab32s(&b->mbo_uid);
1896 __swab32s(&b->mbo_gid);
1897 __swab32s(&b->mbo_flags);
1898 __swab32s(&b->mbo_rdev);
1899 __swab32s(&b->mbo_nlink);
1900 __swab32s(&b->mbo_layout_gen);
1901 __swab32s(&b->mbo_suppgid);
1902 __swab32s(&b->mbo_eadatasize);
1903 __swab32s(&b->mbo_aclsize);
1904 __swab32s(&b->mbo_max_mdsize);
1905 CLASSERT(offsetof(typeof(*b), mbo_unused3) != 0);
1906 __swab32s(&b->mbo_uid_h);
1907 __swab32s(&b->mbo_gid_h);
1908 __swab32s(&b->mbo_projid);
1909 __swab64s(&b->mbo_dom_size);
1910 __swab64s(&b->mbo_dom_blocks);
1911 CLASSERT(offsetof(typeof(*b), mbo_padding_8) != 0);
1912 CLASSERT(offsetof(typeof(*b), mbo_padding_9) != 0);
1913 CLASSERT(offsetof(typeof(*b), mbo_padding_10) != 0);
1916 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
1918 /* mio_open_handle is opaque */
1919 CLASSERT(offsetof(typeof(*b), mio_unused1) != 0);
1920 CLASSERT(offsetof(typeof(*b), mio_unused2) != 0);
1921 CLASSERT(offsetof(typeof(*b), mio_padding) != 0);
1924 void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
1928 __swab32s(&mti->mti_lustre_ver);
1929 __swab32s(&mti->mti_stripe_index);
1930 __swab32s(&mti->mti_config_ver);
1931 __swab32s(&mti->mti_flags);
1932 __swab32s(&mti->mti_instance);
1933 __swab32s(&mti->mti_nid_count);
1934 CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
1935 for (i = 0; i < MTI_NIDS_MAX; i++)
1936 __swab64s(&mti->mti_nids[i]);
1939 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
1943 __swab64s(&entry->mne_version);
1944 __swab32s(&entry->mne_instance);
1945 __swab32s(&entry->mne_index);
1946 __swab32s(&entry->mne_length);
1948 /* mne_nid_(count|type) must be one byte size because we're gonna
1949 * access it w/o swapping. */
1950 CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
1951 CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
1953 /* remove this assertion if ipv6 is supported. */
1954 LASSERT(entry->mne_nid_type == 0);
1955 for (i = 0; i < entry->mne_nid_count; i++) {
1956 CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
1957 __swab64s(&entry->u.nids[i]);
1960 EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
1962 void lustre_swab_mgs_config_body(struct mgs_config_body *body)
1964 __swab64s(&body->mcb_offset);
1965 __swab32s(&body->mcb_units);
1966 __swab16s(&body->mcb_type);
1969 void lustre_swab_mgs_config_res(struct mgs_config_res *body)
1971 __swab64s(&body->mcr_offset);
1972 __swab64s(&body->mcr_size);
1975 static void lustre_swab_obd_dqinfo (struct obd_dqinfo *i)
1977 __swab64s (&i->dqi_bgrace);
1978 __swab64s (&i->dqi_igrace);
1979 __swab32s (&i->dqi_flags);
1980 __swab32s (&i->dqi_valid);
1983 static void lustre_swab_obd_dqblk (struct obd_dqblk *b)
1985 __swab64s (&b->dqb_ihardlimit);
1986 __swab64s (&b->dqb_isoftlimit);
1987 __swab64s (&b->dqb_curinodes);
1988 __swab64s (&b->dqb_bhardlimit);
1989 __swab64s (&b->dqb_bsoftlimit);
1990 __swab64s (&b->dqb_curspace);
1991 __swab64s (&b->dqb_btime);
1992 __swab64s (&b->dqb_itime);
1993 __swab32s (&b->dqb_valid);
1994 CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
1997 void lustre_swab_obd_quotactl (struct obd_quotactl *q)
1999 __swab32s (&q->qc_cmd);
2000 __swab32s (&q->qc_type);
2001 __swab32s (&q->qc_id);
2002 __swab32s (&q->qc_stat);
2003 lustre_swab_obd_dqinfo (&q->qc_dqinfo);
2004 lustre_swab_obd_dqblk (&q->qc_dqblk);
2007 void lustre_swab_fid2path(struct getinfo_fid2path *gf)
2009 lustre_swab_lu_fid(&gf->gf_fid);
2010 __swab64s(&gf->gf_recno);
2011 __swab32s(&gf->gf_linkno);
2012 __swab32s(&gf->gf_pathlen);
2014 EXPORT_SYMBOL(lustre_swab_fid2path);
2016 static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
2018 __swab64s(&fm_extent->fe_logical);
2019 __swab64s(&fm_extent->fe_physical);
2020 __swab64s(&fm_extent->fe_length);
2021 __swab32s(&fm_extent->fe_flags);
2022 __swab32s(&fm_extent->fe_device);
2025 void lustre_swab_fiemap(struct fiemap *fiemap)
2029 __swab64s(&fiemap->fm_start);
2030 __swab64s(&fiemap->fm_length);
2031 __swab32s(&fiemap->fm_flags);
2032 __swab32s(&fiemap->fm_mapped_extents);
2033 __swab32s(&fiemap->fm_extent_count);
2034 __swab32s(&fiemap->fm_reserved);
2036 for (i = 0; i < fiemap->fm_mapped_extents; i++)
2037 lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
2040 void lustre_swab_idx_info(struct idx_info *ii)
2042 __swab32s(&ii->ii_magic);
2043 __swab32s(&ii->ii_flags);
2044 __swab16s(&ii->ii_count);
2045 __swab32s(&ii->ii_attrs);
2046 lustre_swab_lu_fid(&ii->ii_fid);
2047 __swab64s(&ii->ii_version);
2048 __swab64s(&ii->ii_hash_start);
2049 __swab64s(&ii->ii_hash_end);
2050 __swab16s(&ii->ii_keysize);
2051 __swab16s(&ii->ii_recsize);
2054 void lustre_swab_lip_header(struct lu_idxpage *lip)
2057 __swab32s(&lip->lip_magic);
2058 __swab16s(&lip->lip_flags);
2059 __swab16s(&lip->lip_nr);
2061 EXPORT_SYMBOL(lustre_swab_lip_header);
2063 void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
2065 __swab32s(&rr->rr_opcode);
2066 __swab32s(&rr->rr_cap);
2067 __swab32s(&rr->rr_fsuid);
2068 /* rr_fsuid_h is unused */
2069 __swab32s(&rr->rr_fsgid);
2070 /* rr_fsgid_h is unused */
2071 __swab32s(&rr->rr_suppgid1);
2072 /* rr_suppgid1_h is unused */
2073 __swab32s(&rr->rr_suppgid2);
2074 /* rr_suppgid2_h is unused */
2075 lustre_swab_lu_fid(&rr->rr_fid1);
2076 lustre_swab_lu_fid(&rr->rr_fid2);
2077 __swab64s(&rr->rr_mtime);
2078 __swab64s(&rr->rr_atime);
2079 __swab64s(&rr->rr_ctime);
2080 __swab64s(&rr->rr_size);
2081 __swab64s(&rr->rr_blocks);
2082 __swab32s(&rr->rr_bias);
2083 __swab32s(&rr->rr_mode);
2084 __swab32s(&rr->rr_flags);
2085 __swab32s(&rr->rr_flags_h);
2086 __swab32s(&rr->rr_umask);
2087 __swab16s(&rr->rr_mirror_id);
2089 CLASSERT(offsetof(typeof(*rr), rr_padding_4) != 0);
2092 void lustre_swab_lov_desc (struct lov_desc *ld)
2094 __swab32s (&ld->ld_tgt_count);
2095 __swab32s (&ld->ld_active_tgt_count);
2096 __swab32s (&ld->ld_default_stripe_count);
2097 __swab32s (&ld->ld_pattern);
2098 __swab64s (&ld->ld_default_stripe_size);
2099 __swab64s (&ld->ld_default_stripe_offset);
2100 __swab32s (&ld->ld_qos_maxage);
2101 /* uuid endian insensitive */
2103 EXPORT_SYMBOL(lustre_swab_lov_desc);
2105 void lustre_swab_lmv_desc (struct lmv_desc *ld)
2107 __swab32s (&ld->ld_tgt_count);
2108 __swab32s (&ld->ld_active_tgt_count);
2109 __swab32s (&ld->ld_default_stripe_count);
2110 __swab32s (&ld->ld_pattern);
2111 __swab64s (&ld->ld_default_hash_size);
2112 __swab32s (&ld->ld_qos_maxage);
2113 /* uuid endian insensitive */
2116 /* This structure is always in little-endian */
2117 static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
2121 __swab32s(&lmm1->lmv_magic);
2122 __swab32s(&lmm1->lmv_stripe_count);
2123 __swab32s(&lmm1->lmv_master_mdt_index);
2124 __swab32s(&lmm1->lmv_hash_type);
2125 __swab32s(&lmm1->lmv_layout_version);
2126 for (i = 0; i < lmm1->lmv_stripe_count; i++)
2127 lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
2130 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
2132 switch (lmm->lmv_magic) {
2134 lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
2140 EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
2142 void lustre_swab_lmv_user_md_objects(struct lmv_user_mds_data *lmd,
2147 for (i = 0; i < stripe_count; i++)
2148 __swab32s(&(lmd[i].lum_mds));
2150 EXPORT_SYMBOL(lustre_swab_lmv_user_md_objects);
2153 void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
2157 if (lum->lum_magic == LMV_MAGIC_FOREIGN) {
2158 __swab32s(&lum->lum_magic);
2159 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_length);
2160 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_type);
2161 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_flags);
2165 count = lum->lum_stripe_count;
2166 __swab32s(&lum->lum_magic);
2167 __swab32s(&lum->lum_stripe_count);
2168 __swab32s(&lum->lum_stripe_offset);
2169 __swab32s(&lum->lum_hash_type);
2170 __swab32s(&lum->lum_type);
2171 CLASSERT(offsetof(typeof(*lum), lum_padding1) != 0);
2172 switch (lum->lum_magic) {
2173 case LMV_USER_MAGIC_SPECIFIC:
2174 count = lum->lum_stripe_count;
2176 case __swab32(LMV_USER_MAGIC_SPECIFIC):
2177 lustre_swab_lmv_user_md_objects(lum->lum_objects, count);
2183 EXPORT_SYMBOL(lustre_swab_lmv_user_md);
2185 static void lustre_print_v1v3(unsigned int lvl, struct lov_user_md *lum,
2188 CDEBUG(lvl, "%s lov_user_md %p:\n", msg, lum);
2189 CDEBUG(lvl, "\tlmm_magic: %#x\n", lum->lmm_magic);
2190 CDEBUG(lvl, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
2191 CDEBUG(lvl, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
2192 CDEBUG(lvl, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
2193 CDEBUG(lvl, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
2194 CDEBUG(lvl, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
2195 CDEBUG(lvl, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
2196 lum->lmm_stripe_offset);
2197 if (lum->lmm_magic == LOV_USER_MAGIC_V3) {
2198 struct lov_user_md_v3 *v3 = (void *)lum;
2199 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2201 if (lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2202 struct lov_user_md_v3 *v3 = (void *)lum;
2205 if (v3->lmm_pool_name[0] != '\0')
2206 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2208 CDEBUG(lvl, "\ttarget list:\n");
2209 for (i = 0; i < v3->lmm_stripe_count; i++)
2210 CDEBUG(lvl, "\t\t%u\n", v3->lmm_objects[i].l_ost_idx);
2214 void lustre_print_user_md(unsigned int lvl, struct lov_user_md *lum,
2217 struct lov_comp_md_v1 *comp_v1;
2220 if (likely(!cfs_cdebug_show(lvl, DEBUG_SUBSYSTEM)))
2223 if (lum->lmm_magic == LOV_USER_MAGIC_V1 ||
2224 lum->lmm_magic == LOV_USER_MAGIC_V3) {
2225 lustre_print_v1v3(lvl, lum, msg);
2229 if (lum->lmm_magic != LOV_USER_MAGIC_COMP_V1) {
2230 CDEBUG(lvl, "%s: bad magic: %x\n", msg, lum->lmm_magic);
2234 comp_v1 = (struct lov_comp_md_v1 *)lum;
2235 CDEBUG(lvl, "%s: lov_comp_md_v1 %p:\n", msg, lum);
2236 CDEBUG(lvl, "\tlcm_magic: %#x\n", comp_v1->lcm_magic);
2237 CDEBUG(lvl, "\tlcm_size: %#x\n", comp_v1->lcm_size);
2238 CDEBUG(lvl, "\tlcm_layout_gen: %#x\n", comp_v1->lcm_layout_gen);
2239 CDEBUG(lvl, "\tlcm_flags: %#x\n", comp_v1->lcm_flags);
2240 CDEBUG(lvl, "\tlcm_entry_count: %#x\n\n", comp_v1->lcm_entry_count);
2241 CDEBUG(lvl, "\tlcm_mirror_count: %#x\n\n", comp_v1->lcm_mirror_count);
2243 for (i = 0; i < comp_v1->lcm_entry_count; i++) {
2244 struct lov_comp_md_entry_v1 *ent = &comp_v1->lcm_entries[i];
2245 struct lov_user_md *v1;
2247 CDEBUG(lvl, "\tentry %d:\n", i);
2248 CDEBUG(lvl, "\tlcme_id: %#x\n", ent->lcme_id);
2249 CDEBUG(lvl, "\tlcme_flags: %#x\n", ent->lcme_flags);
2250 if (ent->lcme_flags & LCME_FL_NOSYNC)
2251 CDEBUG(lvl, "\tlcme_timestamp: %llu\n",
2252 ent->lcme_timestamp);
2253 CDEBUG(lvl, "\tlcme_extent.e_start: %llu\n",
2254 ent->lcme_extent.e_start);
2255 CDEBUG(lvl, "\tlcme_extent.e_end: %llu\n",
2256 ent->lcme_extent.e_end);
2257 CDEBUG(lvl, "\tlcme_offset: %#x\n", ent->lcme_offset);
2258 CDEBUG(lvl, "\tlcme_size: %#x\n\n", ent->lcme_size);
2260 v1 = (struct lov_user_md *)((char *)comp_v1 +
2261 comp_v1->lcm_entries[i].lcme_offset);
2262 lustre_print_v1v3(lvl, v1, msg);
2265 EXPORT_SYMBOL(lustre_print_user_md);
2267 static void lustre_swab_lmm_oi(struct ost_id *oi)
2269 __swab64s(&oi->oi.oi_id);
2270 __swab64s(&oi->oi.oi_seq);
2273 static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
2276 __swab32s(&lum->lmm_magic);
2277 __swab32s(&lum->lmm_pattern);
2278 lustre_swab_lmm_oi(&lum->lmm_oi);
2279 __swab32s(&lum->lmm_stripe_size);
2280 __swab16s(&lum->lmm_stripe_count);
2281 __swab16s(&lum->lmm_stripe_offset);
2285 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
2288 CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
2289 lustre_swab_lov_user_md_common(lum);
2292 EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
2294 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
2297 CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
2298 lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
2299 /* lmm_pool_name nothing to do with char */
2302 EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
2304 void lustre_swab_lov_comp_md_v1(struct lov_comp_md_v1 *lum)
2306 struct lov_comp_md_entry_v1 *ent;
2307 struct lov_user_md_v1 *v1;
2308 struct lov_user_md_v3 *v3;
2312 __u16 ent_count, stripe_count;
2315 cpu_endian = lum->lcm_magic == LOV_USER_MAGIC_COMP_V1;
2316 ent_count = lum->lcm_entry_count;
2318 __swab16s(&ent_count);
2320 CDEBUG(D_IOCTL, "swabbing lov_user_comp_md v1\n");
2321 __swab32s(&lum->lcm_magic);
2322 __swab32s(&lum->lcm_size);
2323 __swab32s(&lum->lcm_layout_gen);
2324 __swab16s(&lum->lcm_flags);
2325 __swab16s(&lum->lcm_entry_count);
2326 __swab16s(&lum->lcm_mirror_count);
2327 CLASSERT(offsetof(typeof(*lum), lcm_padding1) != 0);
2328 CLASSERT(offsetof(typeof(*lum), lcm_padding2) != 0);
2330 for (i = 0; i < ent_count; i++) {
2331 ent = &lum->lcm_entries[i];
2332 off = ent->lcme_offset;
2333 size = ent->lcme_size;
2339 __swab32s(&ent->lcme_id);
2340 __swab32s(&ent->lcme_flags);
2341 __swab64s(&ent->lcme_timestamp);
2342 __swab64s(&ent->lcme_extent.e_start);
2343 __swab64s(&ent->lcme_extent.e_end);
2344 __swab32s(&ent->lcme_offset);
2345 __swab32s(&ent->lcme_size);
2346 __swab32s(&ent->lcme_layout_gen);
2347 CLASSERT(offsetof(typeof(*ent), lcme_padding_1) != 0);
2349 v1 = (struct lov_user_md_v1 *)((char *)lum + off);
2350 stripe_count = v1->lmm_stripe_count;
2352 __swab16s(&stripe_count);
2354 if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V1) ||
2355 v1->lmm_magic == LOV_USER_MAGIC_V1) {
2356 lustre_swab_lov_user_md_v1(v1);
2357 if (size > sizeof(*v1))
2358 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2360 } else if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V3) ||
2361 v1->lmm_magic == LOV_USER_MAGIC_V3 ||
2362 v1->lmm_magic == __swab32(LOV_USER_MAGIC_SPECIFIC) ||
2363 v1->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2364 v3 = (struct lov_user_md_v3 *)v1;
2365 lustre_swab_lov_user_md_v3(v3);
2366 if (size > sizeof(*v3))
2367 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2370 CERROR("Invalid magic %#x\n", v1->lmm_magic);
2374 EXPORT_SYMBOL(lustre_swab_lov_comp_md_v1);
2376 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
2381 for (i = 0; i < stripe_count; i++) {
2382 lustre_swab_ost_id(&(lod[i].l_ost_oi));
2383 __swab32s(&(lod[i].l_ost_gen));
2384 __swab32s(&(lod[i].l_ost_idx));
2388 EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
2390 void lustre_swab_lov_user_md(struct lov_user_md *lum, size_t size)
2392 struct lov_user_md_v1 *v1;
2393 struct lov_user_md_v3 *v3;
2394 struct lov_foreign_md *lfm;
2398 CDEBUG(D_IOCTL, "swabbing lov_user_md\n");
2399 switch (lum->lmm_magic) {
2400 case __swab32(LOV_MAGIC_V1):
2401 case LOV_USER_MAGIC_V1:
2403 v1 = (struct lov_user_md_v1 *)lum;
2404 stripe_count = v1->lmm_stripe_count;
2406 if (lum->lmm_magic != LOV_USER_MAGIC_V1)
2407 __swab16s(&stripe_count);
2409 lustre_swab_lov_user_md_v1(v1);
2410 if (size > sizeof(*v1))
2411 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2416 case __swab32(LOV_MAGIC_V3):
2417 case LOV_USER_MAGIC_V3:
2419 v3 = (struct lov_user_md_v3 *)lum;
2420 stripe_count = v3->lmm_stripe_count;
2422 if (lum->lmm_magic != LOV_USER_MAGIC_V3)
2423 __swab16s(&stripe_count);
2425 lustre_swab_lov_user_md_v3(v3);
2426 if (size > sizeof(*v3))
2427 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2431 case __swab32(LOV_USER_MAGIC_SPECIFIC):
2432 case LOV_USER_MAGIC_SPECIFIC:
2434 v3 = (struct lov_user_md_v3 *)lum;
2435 stripe_count = v3->lmm_stripe_count;
2437 if (lum->lmm_magic != LOV_USER_MAGIC_SPECIFIC)
2438 __swab16s(&stripe_count);
2440 lustre_swab_lov_user_md_v3(v3);
2441 lustre_swab_lov_user_md_objects(v3->lmm_objects, stripe_count);
2444 case __swab32(LOV_MAGIC_COMP_V1):
2445 case LOV_USER_MAGIC_COMP_V1:
2446 lustre_swab_lov_comp_md_v1((struct lov_comp_md_v1 *)lum);
2448 case __swab32(LOV_MAGIC_FOREIGN):
2449 case LOV_USER_MAGIC_FOREIGN:
2451 lfm = (struct lov_foreign_md *)lum;
2452 __swab32s(&lfm->lfm_magic);
2453 __swab32s(&lfm->lfm_length);
2454 __swab32s(&lfm->lfm_type);
2455 __swab32s(&lfm->lfm_flags);
2459 CDEBUG(D_IOCTL, "Invalid LOV magic %08x\n", lum->lmm_magic);
2462 EXPORT_SYMBOL(lustre_swab_lov_user_md);
2464 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
2467 CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
2468 __swab32s(&lmm->lmm_magic);
2469 __swab32s(&lmm->lmm_pattern);
2470 lustre_swab_lmm_oi(&lmm->lmm_oi);
2471 __swab32s(&lmm->lmm_stripe_size);
2472 __swab16s(&lmm->lmm_stripe_count);
2473 __swab16s(&lmm->lmm_layout_gen);
2476 EXPORT_SYMBOL(lustre_swab_lov_mds_md);
2478 void lustre_swab_ldlm_res_id (struct ldlm_res_id *id)
2482 for (i = 0; i < RES_NAME_SIZE; i++)
2483 __swab64s (&id->name[i]);
2486 void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
2488 /* the lock data is a union and the first two fields are always an
2489 * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
2490 * data the same way. */
2491 __swab64s(&d->l_extent.start);
2492 __swab64s(&d->l_extent.end);
2493 __swab64s(&d->l_extent.gid);
2494 __swab64s(&d->l_flock.lfw_owner);
2495 __swab32s(&d->l_flock.lfw_pid);
2498 void lustre_swab_ldlm_intent (struct ldlm_intent *i)
2503 void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
2505 __swab32s(&r->lr_type);
2506 CLASSERT(offsetof(typeof(*r), lr_pad) != 0);
2507 lustre_swab_ldlm_res_id(&r->lr_name);
2510 void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l)
2512 lustre_swab_ldlm_resource_desc (&l->l_resource);
2513 __swab32s (&l->l_req_mode);
2514 __swab32s (&l->l_granted_mode);
2515 lustre_swab_ldlm_policy_data (&l->l_policy_data);
2518 void lustre_swab_ldlm_request (struct ldlm_request *rq)
2520 __swab32s (&rq->lock_flags);
2521 lustre_swab_ldlm_lock_desc (&rq->lock_desc);
2522 __swab32s (&rq->lock_count);
2523 /* lock_handle[] opaque */
2526 void lustre_swab_ldlm_reply (struct ldlm_reply *r)
2528 __swab32s (&r->lock_flags);
2529 CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
2530 lustre_swab_ldlm_lock_desc (&r->lock_desc);
2531 /* lock_handle opaque */
2532 __swab64s (&r->lock_policy_res1);
2533 __swab64s (&r->lock_policy_res2);
2536 void lustre_swab_quota_body(struct quota_body *b)
2538 lustre_swab_lu_fid(&b->qb_fid);
2539 lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
2540 __swab32s(&b->qb_flags);
2541 __swab64s(&b->qb_count);
2542 __swab64s(&b->qb_usage);
2543 __swab64s(&b->qb_slv_ver);
2546 /* Dump functions */
/*
 * Dump an obd_ioobj (object id, max bulk RPC count, buffer count) to the
 * RPC trace log.  NOTE(review): the CDEBUG call's opening and final
 * argument lines are not visible in this view.
 */
void dump_ioo(struct obd_ioobj *ioo)
	     "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
	     "ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
2555 void dump_rniobuf(struct niobuf_remote *nb)
2557 CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
2558 nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
/*
 * Dump every valid field of an obdo to the RPC trace log; each field is
 * printed only when the corresponding OBD_MD_* bit is set in o_valid.
 * NOTE(review): several CDEBUG continuation lines are not visible in
 * this view; the code below is kept byte-identical.
 */
void dump_obdo(struct obdo *oa)
	u64 valid = oa->o_valid;

	CDEBUG(D_RPCTRACE, "obdo: o_valid = %#llx\n", valid);
	if (valid & OBD_MD_FLID)
		CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
	if (valid & OBD_MD_FLFID)
		CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
	if (valid & OBD_MD_FLSIZE)
		CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
	if (valid & OBD_MD_FLMTIME)
		CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
	if (valid & OBD_MD_FLATIME)
		CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
	if (valid & OBD_MD_FLCTIME)
		CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
	if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
		CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
	if (valid & OBD_MD_FLGRANT)
		CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
	if (valid & OBD_MD_FLBLKSZ)
		CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
	if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
		/* mask o_mode down to only the type/mode bits declared valid */
		CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
		       oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
				     (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
	if (valid & OBD_MD_FLUID)
		CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
	/* o_uid_h holds the high word of the uid, guarded by the same bit */
	if (valid & OBD_MD_FLUID)
		CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
	if (valid & OBD_MD_FLGID)
		CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
	if (valid & OBD_MD_FLGID)
		CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
	if (valid & OBD_MD_FLFLAGS)
		CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
	if (valid & OBD_MD_FLNLINK)
		CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
	/* o_nlink doubles as the checksum field on the wire */
	else if (valid & OBD_MD_FLCKSUM)
		CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
	if (valid & OBD_MD_FLPARENT)
		CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
	if (valid & OBD_MD_FLFID) {
		CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
		CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
	if (valid & OBD_MD_FLHANDLE)
		CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
		       oa->o_handle.cookie);
2618 void dump_ost_body(struct ost_body *ob)
2623 void dump_rcs(__u32 *rc)
2625 CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
/*
 * Return non-zero if the request message's ptlrpc body buffer has been
 * byte-swapped during unpacking (peer of opposite endianness).
 * NOTE(review): the default label and final return are not visible in
 * this view.
 */
static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
	LASSERT(req->rq_reqmsg);

	switch (req->rq_reqmsg->lm_magic) {
	case LUSTRE_MSG_MAGIC_V2:
		return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
		CERROR("bad lustre msg magic: %#08X\n",
		       req->rq_reqmsg->lm_magic);
/*
 * Return non-zero if the reply message's ptlrpc body buffer has been
 * byte-swapped during unpacking; the reply message may not exist yet.
 * NOTE(review): early-return and default-case lines are not visible in
 * this view.
 */
static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
	if (unlikely(!req->rq_repmsg))

	switch (req->rq_repmsg->lm_magic) {
	case LUSTRE_MSG_MAGIC_V2:
		return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF);
		/* uninitialized yet */
/*
 * Backend of the DEBUG_REQ() macro: format a one-line summary of an RPC
 * request (xid, transno, opcode, peer NID, portals, message lengths,
 * timers, flags, status, jobid) and emit it via libcfs_debug_msg().
 * Message bodies are only dereferenced when their ptlrpc body is known
 * to be in CPU byte order (req_ok / rep_ok).
 * NOTE(review): several lines of this function (rep_ok setup, va_format
 * wiring, parts of the argument list) are not visible in this view; the
 * code below is kept byte-identical.
 */
void _debug_req(struct ptlrpc_request *req,
		struct libcfs_debug_msg_data *msgdata, const char *fmt, ...)
	bool req_ok = req->rq_reqmsg != NULL;
	bool rep_ok = false;
	lnet_nid_t nid = LNET_NID_ANY;
	struct va_format vaf;
	int rep_status = -1;

	/* rq_early_free_lock protects rq_repmsg from early-reply freeing */
	spin_lock(&req->rq_early_free_lock);
	if (ptlrpc_req_need_swab(req)) {
		/* only trust buffers whose ptlrpc body was swabbed */
		req_ok = req_ok && req_ptlrpc_body_swabbed(req);
		rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
		rep_flags = lustre_msg_get_flags(req->rq_repmsg);
		rep_status = lustre_msg_get_status(req->rq_repmsg);
	spin_unlock(&req->rq_early_free_lock);

	/* prefer the import (client) connection, else the export (server) */
	if (req->rq_import && req->rq_import->imp_connection)
		nid = req->rq_import->imp_connection->c_peer.nid;
	else if (req->rq_export && req->rq_export->exp_connection)
		nid = req->rq_export->exp_connection->c_peer.nid;

	va_start(args, fmt);

	libcfs_debug_msg(msgdata,
			 "%pV req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d job:'%s'\n",
			 req, req->rq_xid, req->rq_transno,
			 req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
			 req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
			 req->rq_import->imp_obd->obd_name :
			 req->rq_export->exp_client_uuid.uuid :
			 libcfs_nid2str(nid),
			 req->rq_request_portal, req->rq_reply_portal,
			 req->rq_reqlen, req->rq_replen,
			 req->rq_early_count, (s64)req->rq_timedout,
			 (s64)req->rq_deadline,
			 atomic_read(&req->rq_refcount),
			 DEBUG_REQ_FLAGS(req),
			 req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
			 rep_flags, req->rq_status, rep_status,
			 req_ok ? lustre_msg_get_jobid(req->rq_reqmsg) ?: ""
EXPORT_SYMBOL(_debug_req);
2716 void lustre_swab_lustre_capa(struct lustre_capa *c)
2718 lustre_swab_lu_fid(&c->lc_fid);
2719 __swab64s (&c->lc_opc);
2720 __swab64s (&c->lc_uid);
2721 __swab64s (&c->lc_gid);
2722 __swab32s (&c->lc_flags);
2723 __swab32s (&c->lc_keyid);
2724 __swab32s (&c->lc_timeout);
2725 __swab32s (&c->lc_expiry);
2728 void lustre_swab_lustre_capa_key(struct lustre_capa_key *k)
2730 __swab64s (&k->lk_seq);
2731 __swab32s (&k->lk_keyid);
2732 CLASSERT(offsetof(typeof(*k), lk_padding) != 0);
2735 void lustre_swab_hsm_user_state(struct hsm_user_state *state)
2737 __swab32s(&state->hus_states);
2738 __swab32s(&state->hus_archive_id);
2741 void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
2743 __swab32s(&hss->hss_valid);
2744 __swab64s(&hss->hss_setmask);
2745 __swab64s(&hss->hss_clearmask);
2746 __swab32s(&hss->hss_archive_id);
2749 static void lustre_swab_hsm_extent(struct hsm_extent *extent)
2751 __swab64s(&extent->offset);
2752 __swab64s(&extent->length);
2755 void lustre_swab_hsm_current_action(struct hsm_current_action *action)
2757 __swab32s(&action->hca_state);
2758 __swab32s(&action->hca_action);
2759 lustre_swab_hsm_extent(&action->hca_location);
2762 void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
2764 lustre_swab_lu_fid(&hui->hui_fid);
2765 lustre_swab_hsm_extent(&hui->hui_extent);
2768 void lustre_swab_lu_extent(struct lu_extent *le)
2770 __swab64s(&le->e_start);
2771 __swab64s(&le->e_end);
2774 void lustre_swab_layout_intent(struct layout_intent *li)
2776 __swab32s(&li->li_opc);
2777 __swab32s(&li->li_flags);
2778 lustre_swab_lu_extent(&li->li_extent);
2781 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
2783 lustre_swab_lu_fid(&hpk->hpk_fid);
2784 __swab64s(&hpk->hpk_cookie);
2785 __swab64s(&hpk->hpk_extent.offset);
2786 __swab64s(&hpk->hpk_extent.length);
2787 __swab16s(&hpk->hpk_flags);
2788 __swab16s(&hpk->hpk_errval);
2791 void lustre_swab_hsm_request(struct hsm_request *hr)
2793 __swab32s(&hr->hr_action);
2794 __swab32s(&hr->hr_archive_id);
2795 __swab64s(&hr->hr_flags);
2796 __swab32s(&hr->hr_itemcount);
2797 __swab32s(&hr->hr_data_len);
2800 void lustre_swab_object_update(struct object_update *ou)
2802 struct object_update_param *param;
2805 __swab16s(&ou->ou_type);
2806 __swab16s(&ou->ou_params_count);
2807 __swab32s(&ou->ou_result_size);
2808 __swab32s(&ou->ou_flags);
2809 __swab32s(&ou->ou_padding1);
2810 __swab64s(&ou->ou_batchid);
2811 lustre_swab_lu_fid(&ou->ou_fid);
2812 param = &ou->ou_params[0];
2813 for (i = 0; i < ou->ou_params_count; i++) {
2814 __swab16s(¶m->oup_len);
2815 __swab16s(¶m->oup_padding);
2816 __swab32s(¶m->oup_padding2);
2817 param = (struct object_update_param *)((char *)param +
2818 object_update_param_size(param));
2822 void lustre_swab_object_update_request(struct object_update_request *our)
2825 __swab32s(&our->ourq_magic);
2826 __swab16s(&our->ourq_count);
2827 __swab16s(&our->ourq_padding);
2828 for (i = 0; i < our->ourq_count; i++) {
2829 struct object_update *ou;
2831 ou = object_update_request_get(our, i, NULL);
2834 lustre_swab_object_update(ou);
2838 void lustre_swab_object_update_result(struct object_update_result *our)
2840 __swab32s(&our->our_rc);
2841 __swab16s(&our->our_datalen);
2842 __swab16s(&our->our_padding);
2845 void lustre_swab_object_update_reply(struct object_update_reply *our)
2849 __swab32s(&our->ourp_magic);
2850 __swab16s(&our->ourp_count);
2851 __swab16s(&our->ourp_padding);
2852 for (i = 0; i < our->ourp_count; i++) {
2853 struct object_update_result *ourp;
2855 __swab16s(&our->ourp_lens[i]);
2856 ourp = object_update_result_get(our, i, NULL);
2859 lustre_swab_object_update_result(ourp);
2863 void lustre_swab_out_update_header(struct out_update_header *ouh)
2865 __swab32s(&ouh->ouh_magic);
2866 __swab32s(&ouh->ouh_count);
2867 __swab32s(&ouh->ouh_inline_length);
2868 __swab32s(&ouh->ouh_reply_size);
2870 EXPORT_SYMBOL(lustre_swab_out_update_header);
2872 void lustre_swab_out_update_buffer(struct out_update_buffer *oub)
2874 __swab32s(&oub->oub_size);
2875 __swab32s(&oub->oub_padding);
2877 EXPORT_SYMBOL(lustre_swab_out_update_buffer);
2879 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
2881 __swab64s(&msl->msl_flags);
2884 void lustre_swab_close_data(struct close_data *cd)
2886 lustre_swab_lu_fid(&cd->cd_fid);
2887 __swab64s(&cd->cd_data_version);
2890 void lustre_swab_close_data_resync_done(struct close_data_resync_done *resync)
2894 __swab32s(&resync->resync_count);
2895 /* after swab, resync_count must in CPU endian */
2896 if (resync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
2897 for (i = 0; i < resync->resync_count; i++)
2898 __swab32s(&resync->resync_ids_inline[i]);
2901 EXPORT_SYMBOL(lustre_swab_close_data_resync_done);
2903 void lustre_swab_lfsck_request(struct lfsck_request *lr)
2905 __swab32s(&lr->lr_event);
2906 __swab32s(&lr->lr_index);
2907 __swab32s(&lr->lr_flags);
2908 __swab32s(&lr->lr_valid);
2909 __swab32s(&lr->lr_speed);
2910 __swab16s(&lr->lr_version);
2911 __swab16s(&lr->lr_active);
2912 __swab16s(&lr->lr_param);
2913 __swab16s(&lr->lr_async_windows);
2914 __swab32s(&lr->lr_flags);
2915 lustre_swab_lu_fid(&lr->lr_fid);
2916 lustre_swab_lu_fid(&lr->lr_fid2);
2917 __swab32s(&lr->lr_comp_id);
2918 CLASSERT(offsetof(typeof(*lr), lr_padding_0) != 0);
2919 CLASSERT(offsetof(typeof(*lr), lr_padding_1) != 0);
2920 CLASSERT(offsetof(typeof(*lr), lr_padding_2) != 0);
2921 CLASSERT(offsetof(typeof(*lr), lr_padding_3) != 0);
2924 void lustre_swab_lfsck_reply(struct lfsck_reply *lr)
2926 __swab32s(&lr->lr_status);
2927 CLASSERT(offsetof(typeof(*lr), lr_padding_1) != 0);
2928 __swab64s(&lr->lr_repaired);
2931 static void lustre_swab_orphan_rec(struct lu_orphan_rec *rec)
2933 lustre_swab_lu_fid(&rec->lor_fid);
2934 __swab32s(&rec->lor_uid);
2935 __swab32s(&rec->lor_gid);
2938 void lustre_swab_orphan_ent(struct lu_orphan_ent *ent)
2940 lustre_swab_lu_fid(&ent->loe_key);
2941 lustre_swab_orphan_rec(&ent->loe_rec);
2943 EXPORT_SYMBOL(lustre_swab_orphan_ent);
2945 void lustre_swab_orphan_ent_v2(struct lu_orphan_ent_v2 *ent)
2947 lustre_swab_lu_fid(&ent->loe_key);
2948 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
2949 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
2950 CLASSERT(offsetof(typeof(ent->loe_rec), lor_padding) != 0);
2952 EXPORT_SYMBOL(lustre_swab_orphan_ent_v2);
2954 void lustre_swab_orphan_ent_v3(struct lu_orphan_ent_v3 *ent)
2956 lustre_swab_lu_fid(&ent->loe_key);
2957 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
2958 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
2959 __swab32s(&ent->loe_rec.lor_layout_version);
2960 __swab32s(&ent->loe_rec.lor_range);
2961 CLASSERT(offsetof(typeof(ent->loe_rec), lor_padding_1) != 0);
2962 CLASSERT(offsetof(typeof(ent->loe_rec), lor_padding_2) != 0);
2964 EXPORT_SYMBOL(lustre_swab_orphan_ent_v3);
2966 void lustre_swab_ladvise(struct lu_ladvise *ladvise)
2968 __swab16s(&ladvise->lla_advice);
2969 __swab16s(&ladvise->lla_value1);
2970 __swab32s(&ladvise->lla_value2);
2971 __swab64s(&ladvise->lla_start);
2972 __swab64s(&ladvise->lla_end);
2973 __swab32s(&ladvise->lla_value3);
2974 __swab32s(&ladvise->lla_value4);
2976 EXPORT_SYMBOL(lustre_swab_ladvise);
2978 void lustre_swab_ladvise_hdr(struct ladvise_hdr *ladvise_hdr)
2980 __swab32s(&ladvise_hdr->lah_magic);
2981 __swab32s(&ladvise_hdr->lah_count);
2982 __swab64s(&ladvise_hdr->lah_flags);
2983 __swab32s(&ladvise_hdr->lah_value1);
2984 __swab32s(&ladvise_hdr->lah_value2);
2985 __swab64s(&ladvise_hdr->lah_value3);
2987 EXPORT_SYMBOL(lustre_swab_ladvise_hdr);