4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/ptlrpc/pack_generic.c
34 * (Un)packing of OST requests
36 * Author: Peter J. Braam <braam@clusterfs.com>
37 * Author: Phil Schwan <phil@clusterfs.com>
38 * Author: Eric Barton <eeb@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_RPC
43 #ifndef CONFIG_CRYPTO_CRC32
44 #include <linux/crc32.h>
47 #include <libcfs/libcfs.h>
49 #include <llog_swab.h>
50 #include <lustre_net.h>
51 #include <lustre_swab.h>
52 #include <obd_cksum.h>
53 #include <obd_class.h>
54 #include <obd_support.h>
55 #include <obj_update.h>
57 #include "ptlrpc_internal.h"
59 static inline __u32 lustre_msg_hdr_size_v2(__u32 count)
61 return cfs_size_round(offsetof(struct lustre_msg_v2,
65 __u32 lustre_msg_hdr_size(__u32 magic, __u32 count)
70 case LUSTRE_MSG_MAGIC_V2:
71 return lustre_msg_hdr_size_v2(count);
73 LASSERTF(0, "incorrect message magic: %08x\n", magic);
78 void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
82 lustre_set_req_swabbed(req, index);
84 lustre_set_rep_swabbed(req, index);
87 bool ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
91 return (ptlrpc_req_need_swab(req) &&
92 !lustre_req_swabbed(req, index));
94 return (ptlrpc_rep_need_swab(req) && !lustre_rep_swabbed(req, index));
97 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
98 enum lustre_msg_version version)
100 enum lustre_msg_version ver = lustre_msg_get_version(msg);
102 return (ver & LUSTRE_VERSION_MASK) != version;
105 int lustre_msg_check_version(struct lustre_msg *msg,
106 enum lustre_msg_version version)
108 #define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
109 switch (msg->lm_magic) {
110 case LUSTRE_MSG_MAGIC_V1:
111 CERROR("msg v1 not supported - please upgrade you system\n");
113 case LUSTRE_MSG_MAGIC_V2:
114 return lustre_msg_check_version_v2(msg, version);
116 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
119 #undef LUSTRE_MSG_MAGIC_V1
122 /* early reply size */
123 __u32 lustre_msg_early_size()
125 __u32 pblen = sizeof(struct ptlrpc_body);
127 return lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
129 EXPORT_SYMBOL(lustre_msg_early_size);
131 __u32 lustre_msg_size_v2(int count, __u32 *lengths)
137 size = lustre_msg_hdr_size_v2(count);
138 for (i = 0; i < count; i++)
139 size += cfs_size_round(lengths[i]);
143 EXPORT_SYMBOL(lustre_msg_size_v2);
146 * This returns the size of the buffer that is required to hold a lustre_msg
147 * with the given sub-buffer lengths.
148 * NOTE: this should only be used for NEW requests, and should always be
149 * in the form of a v2 request. If this is a connection to a v1
150 * target then the first buffer will be stripped because the ptlrpc
151 * data is part of the lustre_msg_v1 header. b=14043
153 __u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
155 __u32 size[] = { sizeof(struct ptlrpc_body) };
163 LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
166 case LUSTRE_MSG_MAGIC_V2:
167 return lustre_msg_size_v2(count, lens);
169 LASSERTF(0, "incorrect message magic: %08x\n", magic);
175 * This is used to determine the size of a buffer that was already packed
176 * and will correctly handle the different message formats.
178 __u32 lustre_packed_msg_size(struct lustre_msg *msg)
180 switch (msg->lm_magic) {
181 case LUSTRE_MSG_MAGIC_V2:
182 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
184 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
188 EXPORT_SYMBOL(lustre_packed_msg_size);
190 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
198 msg->lm_bufcount = count;
199 /* XXX: lm_secflvr uninitialized here */
200 msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
202 for (i = 0; i < count; i++)
203 msg->lm_buflens[i] = lens[i];
208 ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
209 for (i = 0; i < count; i++) {
213 memcpy(ptr, tmp, lens[i]);
214 ptr += cfs_size_round(lens[i]);
217 EXPORT_SYMBOL(lustre_init_msg_v2);
219 static int lustre_pack_request_v2(struct ptlrpc_request *req,
220 int count, __u32 *lens, char **bufs)
224 reqlen = lustre_msg_size_v2(count, lens);
226 rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
230 req->rq_reqlen = reqlen;
232 lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
233 lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
237 int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
238 __u32 *lens, char **bufs)
240 __u32 size[] = { sizeof(struct ptlrpc_body) };
248 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
250 /* only use new format, we don't need to be compatible with 1.4 */
251 magic = LUSTRE_MSG_MAGIC_V2;
254 case LUSTRE_MSG_MAGIC_V2:
255 return lustre_pack_request_v2(req, count, lens, bufs);
257 LASSERTF(0, "incorrect message magic: %08x\n", magic);
/* Debug-only LRU of live reply states, protected by ptlrpc_rs_debug_lock.
 * NOTE(review): the #if/#else that selects the real vs. no-op macros is
 * not visible in this extraction — presumably a PTLRPC_RS_DEBUG guard;
 * confirm against the original file. */
263 struct list_head ptlrpc_rs_debug_lru =
264 LIST_HEAD_INIT(ptlrpc_rs_debug_lru);
265 spinlock_t ptlrpc_rs_debug_lock;
/* Add/remove a reply state on the debug LRU under the debug lock. */
267 #define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
269 spin_lock(&ptlrpc_rs_debug_lock); \
270 list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
271 spin_unlock(&ptlrpc_rs_debug_lock); \
274 #define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
276 spin_lock(&ptlrpc_rs_debug_lock); \
277 list_del(&(rs)->rs_debug_list); \
278 spin_unlock(&ptlrpc_rs_debug_lock); \
281 # define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
282 # define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while(0)
285 struct ptlrpc_reply_state *
286 lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
288 struct ptlrpc_reply_state *rs = NULL;
290 spin_lock(&svcpt->scp_rep_lock);
292 /* See if we have anything in a pool, and wait if nothing */
293 while (list_empty(&svcpt->scp_rep_idle)) {
296 spin_unlock(&svcpt->scp_rep_lock);
297 /* If we cannot get anything for some long time, we better
298 * bail out instead of waiting infinitely */
299 rc = wait_event_idle_timeout(svcpt->scp_rep_waitq,
300 !list_empty(&svcpt->scp_rep_idle),
301 cfs_time_seconds(10));
304 spin_lock(&svcpt->scp_rep_lock);
307 rs = list_entry(svcpt->scp_rep_idle.next,
308 struct ptlrpc_reply_state, rs_list);
309 list_del(&rs->rs_list);
311 spin_unlock(&svcpt->scp_rep_lock);
313 memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
314 rs->rs_size = svcpt->scp_service->srv_max_reply_size;
315 rs->rs_svcpt = svcpt;
321 void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
323 struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
325 spin_lock(&svcpt->scp_rep_lock);
326 list_add(&rs->rs_list, &svcpt->scp_rep_idle);
327 spin_unlock(&svcpt->scp_rep_lock);
328 wake_up(&svcpt->scp_rep_waitq);
331 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
332 __u32 *lens, char **bufs, int flags)
334 struct ptlrpc_reply_state *rs;
338 LASSERT(req->rq_reply_state == NULL);
341 if ((flags & LPRFL_EARLY_REPLY) == 0) {
342 spin_lock(&req->rq_lock);
343 req->rq_packed_final = 1;
344 spin_unlock(&req->rq_lock);
347 msg_len = lustre_msg_size_v2(count, lens);
348 rc = sptlrpc_svc_alloc_rs(req, msg_len);
352 rs = req->rq_reply_state;
353 atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
354 rs->rs_cb_id.cbid_fn = reply_out_callback;
355 rs->rs_cb_id.cbid_arg = rs;
356 rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
357 INIT_LIST_HEAD(&rs->rs_exp_list);
358 INIT_LIST_HEAD(&rs->rs_obd_list);
359 INIT_LIST_HEAD(&rs->rs_list);
360 spin_lock_init(&rs->rs_lock);
362 req->rq_replen = msg_len;
363 req->rq_reply_state = rs;
364 req->rq_repmsg = rs->rs_msg;
366 lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
367 lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
369 PTLRPC_RS_DEBUG_LRU_ADD(rs);
373 EXPORT_SYMBOL(lustre_pack_reply_v2);
375 int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
376 char **bufs, int flags)
379 __u32 size[] = { sizeof(struct ptlrpc_body) };
387 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
389 switch (req->rq_reqmsg->lm_magic) {
390 case LUSTRE_MSG_MAGIC_V2:
391 rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
394 LASSERTF(0, "incorrect message magic: %08x\n",
395 req->rq_reqmsg->lm_magic);
399 CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
400 lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
404 int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
407 return lustre_pack_reply_flags(req, count, lens, bufs, 0);
409 EXPORT_SYMBOL(lustre_pack_reply);
411 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size)
413 __u32 i, offset, buflen, bufcount;
416 LASSERT(m->lm_bufcount > 0);
418 bufcount = m->lm_bufcount;
419 if (unlikely(n >= bufcount)) {
420 CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
425 buflen = m->lm_buflens[n];
426 if (unlikely(buflen < min_size)) {
427 CERROR("msg %p buffer[%d] size %d too small "
428 "(required %d, opc=%d)\n", m, n, buflen, min_size,
429 n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
433 offset = lustre_msg_hdr_size_v2(bufcount);
434 for (i = 0; i < n; i++)
435 offset += cfs_size_round(m->lm_buflens[i]);
437 return (char *)m + offset;
440 void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 min_size)
442 switch (m->lm_magic) {
443 case LUSTRE_MSG_MAGIC_V2:
444 return lustre_msg_buf_v2(m, n, min_size);
446 LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n",
451 EXPORT_SYMBOL(lustre_msg_buf);
453 static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
454 unsigned int newlen, int move_data)
456 char *tail = NULL, *newpos;
460 LASSERT(msg->lm_bufcount > segment);
461 LASSERT(msg->lm_buflens[segment] >= newlen);
463 if (msg->lm_buflens[segment] == newlen)
466 if (move_data && msg->lm_bufcount > segment + 1) {
467 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
468 for (n = segment + 1; n < msg->lm_bufcount; n++)
469 tail_len += cfs_size_round(msg->lm_buflens[n]);
472 msg->lm_buflens[segment] = newlen;
474 if (tail && tail_len) {
475 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
476 LASSERT(newpos <= tail);
478 memmove(newpos, tail, tail_len);
481 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
485 * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
486 * we also move data forward from @segment + 1.
488 * if @newlen == 0, we remove the segment completely, but we still keep the
489 * totally bufcount the same to save possible data moving. this will leave a
490 * unused segment with size 0 at the tail, but that's ok.
492 * return new msg size after shrinking.
495 * + if any buffers higher than @segment has been filled in, must call shrink
496 * with non-zero @move_data.
497 * + caller should NOT keep pointers to msg buffers which higher than @segment
500 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
501 unsigned int newlen, int move_data)
503 switch (msg->lm_magic) {
504 case LUSTRE_MSG_MAGIC_V2:
505 return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
507 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
510 EXPORT_SYMBOL(lustre_shrink_msg);
512 static int lustre_grow_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
515 char *tail = NULL, *newpos;
519 LASSERT(msg->lm_bufcount > segment);
520 LASSERT(msg->lm_buflens[segment] <= newlen);
522 if (msg->lm_buflens[segment] == newlen)
525 if (msg->lm_bufcount > segment + 1) {
526 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
527 for (n = segment + 1; n < msg->lm_bufcount; n++)
528 tail_len += cfs_size_round(msg->lm_buflens[n]);
531 msg->lm_buflens[segment] = newlen;
533 if (tail && tail_len) {
534 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
535 memmove(newpos, tail, tail_len);
538 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
542 * for @msg, grow @segment to size @newlen.
543 * Always move higher buffer forward.
545 * return new msg size after growing.
548 * - caller must make sure there is enough space in allocated message buffer
549 * - caller should NOT keep pointers to msg buffers which higher than @segment
552 int lustre_grow_msg(struct lustre_msg *msg, int segment, unsigned int newlen)
554 switch (msg->lm_magic) {
555 case LUSTRE_MSG_MAGIC_V2:
556 return lustre_grow_msg_v2(msg, segment, newlen);
558 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
561 EXPORT_SYMBOL(lustre_grow_msg);
563 void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
565 PTLRPC_RS_DEBUG_LRU_DEL(rs);
567 LASSERT(atomic_read(&rs->rs_refcount) == 0);
568 LASSERT(!rs->rs_difficult || rs->rs_handled);
569 LASSERT(!rs->rs_on_net);
570 LASSERT(!rs->rs_scheduled);
571 LASSERT(rs->rs_export == NULL);
572 LASSERT(rs->rs_nlocks == 0);
573 LASSERT(list_empty(&rs->rs_exp_list));
574 LASSERT(list_empty(&rs->rs_obd_list));
576 sptlrpc_svc_free_rs(rs);
579 static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
581 int swabbed, required_len, i, buflen;
583 /* Now we know the sender speaks my language. */
584 required_len = lustre_msg_hdr_size_v2(0);
585 if (len < required_len) {
586 /* can't even look inside the message */
587 CERROR("message length %d too small for lustre_msg\n", len);
591 swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
594 __swab32s(&m->lm_magic);
595 __swab32s(&m->lm_bufcount);
596 __swab32s(&m->lm_secflvr);
597 __swab32s(&m->lm_repsize);
598 __swab32s(&m->lm_cksum);
599 __swab32s(&m->lm_flags);
600 BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_2) == 0);
601 BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_3) == 0);
604 if (m->lm_bufcount == 0 || m->lm_bufcount > PTLRPC_MAX_BUFCOUNT) {
605 CERROR("message bufcount %d is not valid\n", m->lm_bufcount);
608 required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
609 if (len < required_len) {
610 /* didn't receive all the buffer lengths */
611 CERROR("message length %d too small for %d buflens\n",
612 len, m->lm_bufcount);
616 for (i = 0; i < m->lm_bufcount; i++) {
618 __swab32s(&m->lm_buflens[i]);
619 buflen = cfs_size_round(m->lm_buflens[i]);
620 if (buflen < 0 || buflen > PTLRPC_MAX_BUFLEN) {
621 CERROR("buffer %d length %d is not valid\n", i, buflen);
624 required_len += buflen;
626 if (len < required_len || required_len > PTLRPC_MAX_BUFLEN) {
627 CERROR("len: %d, required_len %d, bufcount: %d\n",
628 len, required_len, m->lm_bufcount);
629 for (i = 0; i < m->lm_bufcount; i++)
630 CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
637 int __lustre_unpack_msg(struct lustre_msg *m, int len)
639 int required_len, rc;
643 * We can provide a slightly better error log, if we check the
644 * message magic and version first. In the future, struct
645 * lustre_msg may grow, and we'd like to log a version mismatch,
646 * rather than a short message.
648 required_len = offsetof(struct lustre_msg, lm_magic) +
650 if (len < required_len) {
651 /* can't even look inside the message */
652 CERROR("message length %d too small for magic/version check\n",
657 rc = lustre_unpack_msg_v2(m, len);
661 EXPORT_SYMBOL(__lustre_unpack_msg);
663 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
667 rc = __lustre_unpack_msg(req->rq_reqmsg, len);
669 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
675 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
679 rc = __lustre_unpack_msg(req->rq_repmsg, len);
681 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
687 static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
688 const int inout, int offset)
690 struct ptlrpc_body *pb;
691 struct lustre_msg_v2 *m = inout ? req->rq_reqmsg : req->rq_repmsg;
693 pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
695 CERROR("error unpacking ptlrpc body\n");
698 if (ptlrpc_buf_need_swab(req, inout, offset)) {
699 lustre_swab_ptlrpc_body(pb);
700 ptlrpc_buf_set_swabbed(req, inout, offset);
703 if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
704 CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
709 pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
714 int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
716 switch (req->rq_reqmsg->lm_magic) {
717 case LUSTRE_MSG_MAGIC_V2:
718 return lustre_unpack_ptlrpc_body_v2(req, 1, offset);
720 CERROR("bad lustre msg magic: %08x\n",
721 req->rq_reqmsg->lm_magic);
726 int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
728 switch (req->rq_repmsg->lm_magic) {
729 case LUSTRE_MSG_MAGIC_V2:
730 return lustre_unpack_ptlrpc_body_v2(req, 0, offset);
732 CERROR("bad lustre msg magic: %08x\n",
733 req->rq_repmsg->lm_magic);
738 static inline __u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, __u32 n)
740 if (n >= m->lm_bufcount)
743 return m->lm_buflens[n];
747 * lustre_msg_buflen - return the length of buffer \a n in message \a m
748 * \param m lustre_msg (request or reply) to look at
749 * \param n message index (base 0)
751 * returns zero for non-existent message indices
753 __u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n)
755 switch (m->lm_magic) {
756 case LUSTRE_MSG_MAGIC_V2:
757 return lustre_msg_buflen_v2(m, n);
759 CERROR("incorrect message magic: %08x\n", m->lm_magic);
763 EXPORT_SYMBOL(lustre_msg_buflen);
766 lustre_msg_set_buflen_v2(struct lustre_msg_v2 *m, __u32 n, __u32 len)
768 if (n >= m->lm_bufcount)
771 m->lm_buflens[n] = len;
774 void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len)
776 switch (m->lm_magic) {
777 case LUSTRE_MSG_MAGIC_V2:
778 lustre_msg_set_buflen_v2(m, n, len);
781 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
786 * NB return the bufcount for lustre_msg_v2 format, so if message is packed
787 * in V1 format, the result is one bigger. (add struct ptlrpc_body).
789 __u32 lustre_msg_bufcount(struct lustre_msg *m)
791 switch (m->lm_magic) {
792 case LUSTRE_MSG_MAGIC_V2:
793 return m->lm_bufcount;
795 CERROR("incorrect message magic: %08x\n", m->lm_magic);
800 char *lustre_msg_string(struct lustre_msg *m, __u32 index, __u32 max_len)
802 /* max_len == 0 means the string should fill the buffer */
806 switch (m->lm_magic) {
807 case LUSTRE_MSG_MAGIC_V2:
808 str = lustre_msg_buf_v2(m, index, 0);
809 blen = lustre_msg_buflen_v2(m, index);
812 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
816 CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
820 slen = strnlen(str, blen);
822 if (slen == blen) { /* not NULL terminated */
823 CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n",
827 if (blen > PTLRPC_MAX_BUFLEN) {
828 CERROR("buffer length of msg %p buffer[%d] is invalid(%d)\n",
834 if (slen != blen - 1) {
835 CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n",
836 m, index, blen, slen);
839 } else if (slen > max_len) {
840 CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n",
841 m, index, blen, slen, max_len);
848 /* Wrap up the normal fixed length cases */
849 static inline void *__lustre_swab_buf(struct lustre_msg *msg, __u32 index,
850 __u32 min_size, void *swabber)
854 LASSERT(msg != NULL);
855 switch (msg->lm_magic) {
856 case LUSTRE_MSG_MAGIC_V2:
857 ptr = lustre_msg_buf_v2(msg, index, min_size);
860 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
863 if (ptr != NULL && swabber != NULL)
864 ((void (*)(void *))swabber)(ptr);
869 static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
871 return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
872 sizeof(struct ptlrpc_body_v2));
875 enum lustre_msghdr lustre_msghdr_get_flags(struct lustre_msg *msg)
877 switch (msg->lm_magic) {
878 case LUSTRE_MSG_MAGIC_V2:
879 /* already in host endian */
880 return msg->lm_flags;
882 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
886 EXPORT_SYMBOL(lustre_msghdr_get_flags);
888 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
890 switch (msg->lm_magic) {
891 case LUSTRE_MSG_MAGIC_V2:
892 msg->lm_flags = flags;
895 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
/* Read pb_flags from the ptlrpc body; the visible lines suggest a
 * missing-body path that logs and returns 0.
 * NOTE(review): several lines (returns, braces, fallthrough comments)
 * are missing from this extraction — compare with upstream before use. */
899 __u32 lustre_msg_get_flags(struct lustre_msg *msg)
901 switch (msg->lm_magic) {
902 case LUSTRE_MSG_MAGIC_V2: {
903 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
907 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
912 * flags might be printed in debug code while message
918 EXPORT_SYMBOL(lustre_msg_get_flags);
/* OR \a flags into pb_flags; LASSERTs that the body is present. */
920 void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags)
922 switch (msg->lm_magic) {
923 case LUSTRE_MSG_MAGIC_V2: {
924 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
925 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
926 pb->pb_flags |= flags;
930 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
933 EXPORT_SYMBOL(lustre_msg_add_flags);
/* Overwrite pb_flags entirely. */
935 void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags)
937 switch (msg->lm_magic) {
938 case LUSTRE_MSG_MAGIC_V2: {
939 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
940 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
941 pb->pb_flags = flags;
945 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
/* Clear the given bits from pb_flags. */
949 void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags)
951 switch (msg->lm_magic) {
952 case LUSTRE_MSG_MAGIC_V2: {
953 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
954 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
955 pb->pb_flags &= ~flags;
960 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
963 EXPORT_SYMBOL(lustre_msg_clear_flags);
/* Read pb_op_flags; logs and (presumably) returns 0 when the ptlrpc
 * body is absent.  NOTE(review): truncated extraction — braces and
 * return statements are missing; compare with upstream. */
965 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
967 switch (msg->lm_magic) {
968 case LUSTRE_MSG_MAGIC_V2: {
969 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
971 return pb->pb_op_flags;
973 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
/* OR \a flags into pb_op_flags; asserts the body exists. */
981 void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags)
983 switch (msg->lm_magic) {
984 case LUSTRE_MSG_MAGIC_V2: {
985 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
986 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
987 pb->pb_op_flags |= flags;
991 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
994 EXPORT_SYMBOL(lustre_msg_add_op_flags);
/* Pointer to pb_handle inside the message; NULL when body is missing. */
996 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
998 switch (msg->lm_magic) {
999 case LUSTRE_MSG_MAGIC_V2: {
1000 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1002 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1005 return &pb->pb_handle;
1008 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* Message type (request/reply/err); PTL_RPC_MSG_ERR on any failure. */
1013 __u32 lustre_msg_get_type(struct lustre_msg *msg)
1015 switch (msg->lm_magic) {
1016 case LUSTRE_MSG_MAGIC_V2: {
1017 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1019 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1020 return PTL_RPC_MSG_ERR;
1025 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1026 return PTL_RPC_MSG_ERR;
1029 EXPORT_SYMBOL(lustre_msg_get_type);
/* Read pb_version from the ptlrpc body.
 * NOTE(review): truncated extraction — error-path returns and closing
 * braces are missing throughout this run; compare with upstream. */
1031 enum lustre_msg_version lustre_msg_get_version(struct lustre_msg *msg)
1033 switch (msg->lm_magic) {
1034 case LUSTRE_MSG_MAGIC_V2: {
1035 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1037 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1040 return pb->pb_version;
1043 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* OR \a version into pb_version; asserts the body exists. */
1048 void lustre_msg_add_version(struct lustre_msg *msg, __u32 version)
1050 switch (msg->lm_magic) {
1051 case LUSTRE_MSG_MAGIC_V2: {
1052 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1053 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1054 pb->pb_version |= version;
1058 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
/* RPC opcode from pb_opc. */
1062 __u32 lustre_msg_get_opc(struct lustre_msg *msg)
1064 switch (msg->lm_magic) {
1065 case LUSTRE_MSG_MAGIC_V2: {
1066 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1068 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1074 CERROR("incorrect message magic: %08x (msg:%p)\n",
1075 msg->lm_magic, msg);
1079 EXPORT_SYMBOL(lustre_msg_get_opc);
/* pb_last_xid accessor. */
1081 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg)
1083 switch (msg->lm_magic) {
1084 case LUSTRE_MSG_MAGIC_V2: {
1085 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1087 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1090 return pb->pb_last_xid;
1093 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1097 EXPORT_SYMBOL(lustre_msg_get_last_xid);
/* pb_tag accessor (multiple-modify-RPC slot tag). */
1099 __u16 lustre_msg_get_tag(struct lustre_msg *msg)
1101 switch (msg->lm_magic) {
1102 case LUSTRE_MSG_MAGIC_V2: {
1103 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1105 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1111 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1115 EXPORT_SYMBOL(lustre_msg_get_tag);
/* pb_last_committed accessor.
 * NOTE(review): truncated extraction — error-path returns and closing
 * braces are missing; compare with upstream. */
1117 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
1119 switch (msg->lm_magic) {
1120 case LUSTRE_MSG_MAGIC_V2: {
1121 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1123 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1126 return pb->pb_last_committed;
1129 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1133 EXPORT_SYMBOL(lustre_msg_get_last_committed);
/* Pointer to the pb_pre_versions array inside the body. */
1135 __u64 *lustre_msg_get_versions(struct lustre_msg *msg)
1137 switch (msg->lm_magic) {
1138 case LUSTRE_MSG_MAGIC_V2: {
1139 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1141 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1144 return pb->pb_pre_versions;
1147 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1151 EXPORT_SYMBOL(lustre_msg_get_versions);
/* pb_transno accessor. */
1153 __u64 lustre_msg_get_transno(struct lustre_msg *msg)
1155 switch (msg->lm_magic) {
1156 case LUSTRE_MSG_MAGIC_V2: {
1157 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1159 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1162 return pb->pb_transno;
1165 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1169 EXPORT_SYMBOL(lustre_msg_get_transno);
/* pb_status accessor; logs when the body is missing. */
1171 int lustre_msg_get_status(struct lustre_msg *msg)
1173 switch (msg->lm_magic) {
1174 case LUSTRE_MSG_MAGIC_V2: {
1175 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1177 return pb->pb_status;
1178 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1183 * status might be printed in debug code while message
1189 EXPORT_SYMBOL(lustre_msg_get_status);
/* pb_slv accessor (server lock volume, read from reply bodies).
 * NOTE(review): truncated extraction — error-path returns and closing
 * braces are missing; compare with upstream. */
1191 __u64 lustre_msg_get_slv(struct lustre_msg *msg)
1193 switch (msg->lm_magic) {
1194 case LUSTRE_MSG_MAGIC_V2: {
1195 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1197 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1203 CERROR("invalid msg magic %08x\n", msg->lm_magic);
/* pb_slv setter. */
1209 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
1211 switch (msg->lm_magic) {
1212 case LUSTRE_MSG_MAGIC_V2: {
1213 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1215 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1222 CERROR("invalid msg magic %x\n", msg->lm_magic);
/* pb_limit accessor. */
1227 __u32 lustre_msg_get_limit(struct lustre_msg *msg)
1229 switch (msg->lm_magic) {
1230 case LUSTRE_MSG_MAGIC_V2: {
1231 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1233 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1236 return pb->pb_limit;
1239 CERROR("invalid msg magic %x\n", msg->lm_magic);
/* pb_limit setter (note: takes a __u64, stored in a __u32 field). */
1245 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
1247 switch (msg->lm_magic) {
1248 case LUSTRE_MSG_MAGIC_V2: {
1249 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1251 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1254 pb->pb_limit = limit;
1258 CERROR("invalid msg magic %08x\n", msg->lm_magic);
/* pb_conn_cnt accessor. */
1263 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
1265 switch (msg->lm_magic) {
1266 case LUSTRE_MSG_MAGIC_V2: {
1267 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1269 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1272 return pb->pb_conn_cnt;
1275 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1279 EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
/* Returns lm_magic itself when recognized. */
1281 __u32 lustre_msg_get_magic(struct lustre_msg *msg)
1283 switch (msg->lm_magic) {
1284 case LUSTRE_MSG_MAGIC_V2:
1285 return msg->lm_magic;
1287 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* pb_timeout accessor.
 * NOTE(review): truncated extraction — error-path returns and closing
 * braces are missing; compare with upstream. */
1292 timeout_t lustre_msg_get_timeout(struct lustre_msg *msg)
1294 switch (msg->lm_magic) {
1295 case LUSTRE_MSG_MAGIC_V2: {
1296 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1299 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1302 return pb->pb_timeout;
1305 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* pb_service_time accessor. */
1310 timeout_t lustre_msg_get_service_timeout(struct lustre_msg *msg)
1312 switch (msg->lm_magic) {
1313 case LUSTRE_MSG_MAGIC_V2: {
1314 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1317 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1320 return pb->pb_service_time;
1323 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* pb_jobid accessor: requires the full (jobid-carrying) body size,
 * since the old ptlrpc_body_v2 layout has no jobid field. */
1328 char *lustre_msg_get_jobid(struct lustre_msg *msg)
1330 switch (msg->lm_magic) {
1331 case LUSTRE_MSG_MAGIC_V2: {
1332 struct ptlrpc_body *pb;
1334 /* the old pltrpc_body_v2 is smaller; doesn't include jobid */
1335 if (msg->lm_buflens[MSG_PTLRPC_BODY_OFF] <
1336 sizeof(struct ptlrpc_body)
1339 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1340 sizeof(struct ptlrpc_body)
1344 return pb->pb_jobid;
1347 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1351 EXPORT_SYMBOL(lustre_msg_get_jobid);
/* Header checksum (lm_cksum), stored outside the ptlrpc body. */
1353 __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
1355 switch (msg->lm_magic) {
1356 case LUSTRE_MSG_MAGIC_V2:
1357 return msg->lm_cksum;
1359 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* pb_mbits accessor (bulk matchbits). */
1364 __u64 lustre_msg_get_mbits(struct lustre_msg *msg)
1366 switch (msg->lm_magic) {
1367 case LUSTRE_MSG_MAGIC_V2: {
1368 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1370 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1373 return pb->pb_mbits;
1376 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* CRC32 of buffer \a buf, via the crypto API when CONFIG_CRYPTO_CRC32
 * is set, else the library crc32_le() (see the crc32.h include above). */
1381 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, __u32 buf)
1383 switch (msg->lm_magic) {
1384 case LUSTRE_MSG_MAGIC_V2: {
1385 struct ptlrpc_body *pb = lustre_msg_buf_v2(msg, buf, 0);
1386 __u32 len = lustre_msg_buflen(msg, buf);
1389 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1390 #ifdef CONFIG_CRYPTO_CRC32
1391 unsigned int hsize = 4;
1392 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
1393 len, NULL, 0, (unsigned char *)&crc,
1396 crc = crc32_le(~(__u32)0, (unsigned char *)pb, len);
1401 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
/* Copy \a handle into pb_handle; asserts the body is present.
 * NOTE(review): truncated extraction — closing braces and break
 * statements are missing throughout; compare with upstream. */
1406 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
1408 switch (msg->lm_magic) {
1409 case LUSTRE_MSG_MAGIC_V2: {
1410 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1411 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1412 pb->pb_handle = *handle;
1416 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
/* pb_type setter (assignment line missing from this extraction). */
1420 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
1422 switch (msg->lm_magic) {
1423 case LUSTRE_MSG_MAGIC_V2: {
1424 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1425 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1430 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
/* pb_opc setter (assignment line missing from this extraction). */
1434 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
1436 switch (msg->lm_magic) {
1437 case LUSTRE_MSG_MAGIC_V2: {
1438 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1439 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1444 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
/* pb_last_xid setter. */
1448 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid)
1450 switch (msg->lm_magic) {
1451 case LUSTRE_MSG_MAGIC_V2: {
1452 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1453 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1454 pb->pb_last_xid = last_xid;
1458 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1461 EXPORT_SYMBOL(lustre_msg_set_last_xid);
/* pb_tag setter (assignment line missing from this extraction). */
1463 void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
1465 switch (msg->lm_magic) {
1466 case LUSTRE_MSG_MAGIC_V2: {
1467 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1468 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1473 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1476 EXPORT_SYMBOL(lustre_msg_set_tag);
/* pb_last_committed setter. */
1478 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed)
1480 switch (msg->lm_magic) {
1481 case LUSTRE_MSG_MAGIC_V2: {
1482 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1483 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1484 pb->pb_last_committed = last_committed;
1488 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
/* Copy all four pre-operation versions into pb_pre_versions. */
1492 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
1494 switch (msg->lm_magic) {
1495 case LUSTRE_MSG_MAGIC_V2: {
1496 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1497 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1498 pb->pb_pre_versions[0] = versions[0];
1499 pb->pb_pre_versions[1] = versions[1];
1500 pb->pb_pre_versions[2] = versions[2];
1501 pb->pb_pre_versions[3] = versions[3];
1505 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1508 EXPORT_SYMBOL(lustre_msg_set_versions);
1510 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
1512 switch (msg->lm_magic) {
1513 case LUSTRE_MSG_MAGIC_V2: {
1514 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1515 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1516 pb->pb_transno = transno;
1520 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1523 EXPORT_SYMBOL(lustre_msg_set_transno);
1525 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
1527 switch (msg->lm_magic) {
1528 case LUSTRE_MSG_MAGIC_V2: {
1529 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1530 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1531 pb->pb_status = status;
1535 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1538 EXPORT_SYMBOL(lustre_msg_set_status);
1540 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
1542 switch (msg->lm_magic) {
1543 case LUSTRE_MSG_MAGIC_V2: {
1544 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1545 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1546 pb->pb_conn_cnt = conn_cnt;
1550 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1554 void lustre_msg_set_timeout(struct lustre_msg *msg, timeout_t timeout)
1556 switch (msg->lm_magic) {
1557 case LUSTRE_MSG_MAGIC_V2: {
1558 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1560 LASSERT(timeout >= 0);
1561 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1562 pb->pb_timeout = timeout;
1566 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1570 void lustre_msg_set_service_timeout(struct lustre_msg *msg,
1571 timeout_t service_timeout)
1573 switch (msg->lm_magic) {
1574 case LUSTRE_MSG_MAGIC_V2: {
1575 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1577 LASSERT(service_timeout >= 0);
1578 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1579 pb->pb_service_time = service_timeout;
1583 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1587 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
1589 switch (msg->lm_magic) {
1590 case LUSTRE_MSG_MAGIC_V2: {
1591 __u32 opc = lustre_msg_get_opc(msg);
1592 struct ptlrpc_body *pb;
1594 /* Don't set jobid for ldlm ast RPCs, they've been shrinked.
1595 * See the comment in ptlrpc_request_pack(). */
1596 if (!opc || opc == LDLM_BL_CALLBACK ||
1597 opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
1600 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1601 sizeof(struct ptlrpc_body));
1602 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1605 memcpy(pb->pb_jobid, jobid, sizeof(pb->pb_jobid));
1606 else if (pb->pb_jobid[0] == '\0')
1607 lustre_get_jobid(pb->pb_jobid, sizeof(pb->pb_jobid));
1611 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1614 EXPORT_SYMBOL(lustre_msg_set_jobid);
1616 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
1618 switch (msg->lm_magic) {
1619 case LUSTRE_MSG_MAGIC_V2:
1620 msg->lm_cksum = cksum;
1623 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1627 void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
1629 switch (msg->lm_magic) {
1630 case LUSTRE_MSG_MAGIC_V2: {
1631 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1633 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1634 pb->pb_mbits = mbits;
1638 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1642 void ptlrpc_request_set_replen(struct ptlrpc_request *req)
1644 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
1646 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
1647 req->rq_pill.rc_area[RCL_SERVER]);
1648 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1649 req->rq_reqmsg->lm_repsize = req->rq_replen;
1651 EXPORT_SYMBOL(ptlrpc_request_set_replen);
1653 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *lens)
1655 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens);
1656 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1657 req->rq_reqmsg->lm_repsize = req->rq_replen;
1661 * Send a remote set_info_async.
1663 * This may go from client to server or server to client.
1665 int do_set_info_async(struct obd_import *imp,
1666 int opcode, int version,
1667 size_t keylen, void *key,
1668 size_t vallen, void *val,
1669 struct ptlrpc_request_set *set)
1671 struct ptlrpc_request *req;
1677 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_CHANGELOG_CLEAR) ?
1683 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
1684 RCL_CLIENT, keylen);
1685 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
1686 RCL_CLIENT, vallen);
1687 rc = ptlrpc_request_pack(req, version, opcode);
1689 ptlrpc_request_free(req);
1693 if (KEY_IS(KEY_CHANGELOG_CLEAR))
1696 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1697 memcpy(tmp, key, keylen);
1698 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1699 memcpy(tmp, val, vallen);
1701 ptlrpc_request_set_replen(req);
1704 ptlrpc_set_add_req(set, req);
1705 ptlrpc_check_set(NULL, set);
1707 rc = ptlrpc_queue_wait(req);
1708 ptlrpc_req_finished(req);
1713 EXPORT_SYMBOL(do_set_info_async);
1715 /* byte flipping routines for all wire types declared in
1716 * lustre_idl.h implemented here.
1718 void lustre_swab_ptlrpc_body(struct ptlrpc_body *body)
1720 __swab32s(&body->pb_type);
1721 __swab32s(&body->pb_version);
1722 __swab32s(&body->pb_opc);
1723 __swab32s(&body->pb_status);
1724 __swab64s(&body->pb_last_xid);
1725 __swab16s(&body->pb_tag);
1726 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding0) == 0);
1727 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding1) == 0);
1728 __swab64s(&body->pb_last_committed);
1729 __swab64s(&body->pb_transno);
1730 __swab32s(&body->pb_flags);
1731 __swab32s(&body->pb_op_flags);
1732 __swab32s(&body->pb_conn_cnt);
1733 __swab32s(&body->pb_timeout);
1734 __swab32s(&body->pb_service_time);
1735 __swab32s(&body->pb_limit);
1736 __swab64s(&body->pb_slv);
1737 __swab64s(&body->pb_pre_versions[0]);
1738 __swab64s(&body->pb_pre_versions[1]);
1739 __swab64s(&body->pb_pre_versions[2]);
1740 __swab64s(&body->pb_pre_versions[3]);
1741 __swab64s(&body->pb_mbits);
1742 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_0) == 0);
1743 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_1) == 0);
1744 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_2) == 0);
1746 * While we need to maintain compatibility between
1747 * clients and servers without ptlrpc_body_v2 (< 2.3)
1748 * do not swab any fields beyond pb_jobid, as we are
1749 * using this swab function for both ptlrpc_body
1750 * and ptlrpc_body_v2.
1752 /* pb_jobid is an ASCII string and should not be swabbed */
1753 BUILD_BUG_ON(offsetof(typeof(*body), pb_jobid) == 0);
1756 void lustre_swab_connect(struct obd_connect_data *ocd)
1758 __swab64s(&ocd->ocd_connect_flags);
1759 __swab32s(&ocd->ocd_version);
1760 __swab32s(&ocd->ocd_grant);
1761 __swab64s(&ocd->ocd_ibits_known);
1762 __swab32s(&ocd->ocd_index);
1763 __swab32s(&ocd->ocd_brw_size);
1765 * ocd_blocksize and ocd_inodespace don't need to be swabbed because
1766 * they are 8-byte values
1768 __swab16s(&ocd->ocd_grant_tax_kb);
1769 __swab32s(&ocd->ocd_grant_max_blks);
1770 __swab64s(&ocd->ocd_transno);
1771 __swab32s(&ocd->ocd_group);
1772 __swab32s(&ocd->ocd_cksum_types);
1773 __swab32s(&ocd->ocd_instance);
1775 * Fields after ocd_cksum_types are only accessible by the receiver
1776 * if the corresponding flag in ocd_connect_flags is set. Accessing
1777 * any field after ocd_maxbytes on the receiver without a valid flag
1778 * may result in out-of-bound memory access and kernel oops.
1780 if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
1781 __swab32s(&ocd->ocd_max_easize);
1782 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
1783 __swab64s(&ocd->ocd_maxbytes);
1784 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1785 __swab16s(&ocd->ocd_maxmodrpcs);
1786 BUILD_BUG_ON(offsetof(typeof(*ocd), padding0) == 0);
1787 BUILD_BUG_ON(offsetof(typeof(*ocd), padding1) == 0);
1788 if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
1789 __swab64s(&ocd->ocd_connect_flags2);
1790 BUILD_BUG_ON(offsetof(typeof(*ocd), padding3) == 0);
1791 BUILD_BUG_ON(offsetof(typeof(*ocd), padding4) == 0);
1792 BUILD_BUG_ON(offsetof(typeof(*ocd), padding5) == 0);
1793 BUILD_BUG_ON(offsetof(typeof(*ocd), padding6) == 0);
1794 BUILD_BUG_ON(offsetof(typeof(*ocd), padding7) == 0);
1795 BUILD_BUG_ON(offsetof(typeof(*ocd), padding8) == 0);
1796 BUILD_BUG_ON(offsetof(typeof(*ocd), padding9) == 0);
1797 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingA) == 0);
1798 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingB) == 0);
1799 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingC) == 0);
1800 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingD) == 0);
1801 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingE) == 0);
1802 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingF) == 0);
1805 static void lustre_swab_ost_layout(struct ost_layout *ol)
1807 __swab32s(&ol->ol_stripe_size);
1808 __swab32s(&ol->ol_stripe_count);
1809 __swab64s(&ol->ol_comp_start);
1810 __swab64s(&ol->ol_comp_end);
1811 __swab32s(&ol->ol_comp_id);
1814 void lustre_swab_obdo(struct obdo *o)
1816 __swab64s(&o->o_valid);
1817 lustre_swab_ost_id(&o->o_oi);
1818 __swab64s(&o->o_parent_seq);
1819 __swab64s(&o->o_size);
1820 __swab64s(&o->o_mtime);
1821 __swab64s(&o->o_atime);
1822 __swab64s(&o->o_ctime);
1823 __swab64s(&o->o_blocks);
1824 __swab64s(&o->o_grant);
1825 __swab32s(&o->o_blksize);
1826 __swab32s(&o->o_mode);
1827 __swab32s(&o->o_uid);
1828 __swab32s(&o->o_gid);
1829 __swab32s(&o->o_flags);
1830 __swab32s(&o->o_nlink);
1831 __swab32s(&o->o_parent_oid);
1832 __swab32s(&o->o_misc);
1833 __swab64s(&o->o_ioepoch);
1834 __swab32s(&o->o_stripe_idx);
1835 __swab32s(&o->o_parent_ver);
1836 lustre_swab_ost_layout(&o->o_layout);
1837 __swab32s(&o->o_layout_version);
1838 __swab32s(&o->o_uid_h);
1839 __swab32s(&o->o_gid_h);
1840 __swab64s(&o->o_data_version);
1841 __swab32s(&o->o_projid);
1842 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_4) == 0);
1843 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_5) == 0);
1844 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_6) == 0);
1847 EXPORT_SYMBOL(lustre_swab_obdo);
1849 void lustre_swab_obd_statfs(struct obd_statfs *os)
1851 __swab64s(&os->os_type);
1852 __swab64s(&os->os_blocks);
1853 __swab64s(&os->os_bfree);
1854 __swab64s(&os->os_bavail);
1855 __swab64s(&os->os_files);
1856 __swab64s(&os->os_ffree);
1857 /* no need to swab os_fsid */
1858 __swab32s(&os->os_bsize);
1859 __swab32s(&os->os_namelen);
1860 __swab64s(&os->os_maxbytes);
1861 __swab32s(&os->os_state);
1862 __swab32s(&os->os_fprecreated);
1863 __swab32s(&os->os_granted);
1864 BUILD_BUG_ON(offsetof(typeof(*os), os_spare3) == 0);
1865 BUILD_BUG_ON(offsetof(typeof(*os), os_spare4) == 0);
1866 BUILD_BUG_ON(offsetof(typeof(*os), os_spare5) == 0);
1867 BUILD_BUG_ON(offsetof(typeof(*os), os_spare6) == 0);
1868 BUILD_BUG_ON(offsetof(typeof(*os), os_spare7) == 0);
1869 BUILD_BUG_ON(offsetof(typeof(*os), os_spare8) == 0);
1870 BUILD_BUG_ON(offsetof(typeof(*os), os_spare9) == 0);
1873 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
1875 lustre_swab_ost_id(&ioo->ioo_oid);
1876 __swab32s(&ioo->ioo_max_brw);
1877 __swab32s(&ioo->ioo_bufcnt);
1880 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
1882 __swab64s(&nbr->rnb_offset);
1883 __swab32s(&nbr->rnb_len);
1884 __swab32s(&nbr->rnb_flags);
1887 void lustre_swab_ost_body(struct ost_body *b)
1889 lustre_swab_obdo(&b->oa);
1892 void lustre_swab_ost_last_id(u64 *id)
1897 void lustre_swab_generic_32s(__u32 *val)
1902 void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *desc)
1904 lustre_swab_lu_fid(&desc->gl_id.qid_fid);
1905 __swab64s(&desc->gl_flags);
1906 __swab64s(&desc->gl_ver);
1907 __swab64s(&desc->gl_hardlimit);
1908 __swab64s(&desc->gl_softlimit);
1909 __swab64s(&desc->gl_time);
1910 BUILD_BUG_ON(offsetof(typeof(*desc), gl_pad2) == 0);
1912 EXPORT_SYMBOL(lustre_swab_gl_lquota_desc);
1914 void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *desc)
1916 __swab32s(&desc->lgbd_status);
1917 __swab32s(&desc->lgbd_timeout);
1918 BUILD_BUG_ON(offsetof(typeof(*desc), lgbd_padding) == 0);
1920 EXPORT_SYMBOL(lustre_swab_gl_barrier_desc);
1922 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
1924 __swab64s(&lvb->lvb_size);
1925 __swab64s(&lvb->lvb_mtime);
1926 __swab64s(&lvb->lvb_atime);
1927 __swab64s(&lvb->lvb_ctime);
1928 __swab64s(&lvb->lvb_blocks);
1930 EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
1932 void lustre_swab_ost_lvb(struct ost_lvb *lvb)
1934 __swab64s(&lvb->lvb_size);
1935 __swab64s(&lvb->lvb_mtime);
1936 __swab64s(&lvb->lvb_atime);
1937 __swab64s(&lvb->lvb_ctime);
1938 __swab64s(&lvb->lvb_blocks);
1939 __swab32s(&lvb->lvb_mtime_ns);
1940 __swab32s(&lvb->lvb_atime_ns);
1941 __swab32s(&lvb->lvb_ctime_ns);
1942 __swab32s(&lvb->lvb_padding);
1944 EXPORT_SYMBOL(lustre_swab_ost_lvb);
1946 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
1948 __swab64s(&lvb->lvb_flags);
1949 __swab64s(&lvb->lvb_id_may_rel);
1950 __swab64s(&lvb->lvb_id_rel);
1951 __swab64s(&lvb->lvb_id_qunit);
1952 __swab64s(&lvb->lvb_pad1);
1954 EXPORT_SYMBOL(lustre_swab_lquota_lvb);
1956 void lustre_swab_barrier_lvb(struct barrier_lvb *lvb)
1958 __swab32s(&lvb->lvb_status);
1959 __swab32s(&lvb->lvb_index);
1960 BUILD_BUG_ON(offsetof(typeof(*lvb), lvb_padding) == 0);
1962 EXPORT_SYMBOL(lustre_swab_barrier_lvb);
1964 void lustre_swab_mdt_body(struct mdt_body *b)
1966 lustre_swab_lu_fid(&b->mbo_fid1);
1967 lustre_swab_lu_fid(&b->mbo_fid2);
1968 /* handle is opaque */
1969 __swab64s(&b->mbo_valid);
1970 __swab64s(&b->mbo_size);
1971 __swab64s(&b->mbo_mtime);
1972 __swab64s(&b->mbo_atime);
1973 __swab64s(&b->mbo_ctime);
1974 __swab64s(&b->mbo_blocks);
1975 __swab64s(&b->mbo_version);
1976 __swab64s(&b->mbo_t_state);
1977 __swab32s(&b->mbo_fsuid);
1978 __swab32s(&b->mbo_fsgid);
1979 __swab32s(&b->mbo_capability);
1980 __swab32s(&b->mbo_mode);
1981 __swab32s(&b->mbo_uid);
1982 __swab32s(&b->mbo_gid);
1983 __swab32s(&b->mbo_flags);
1984 __swab32s(&b->mbo_rdev);
1985 __swab32s(&b->mbo_nlink);
1986 __swab32s(&b->mbo_layout_gen);
1987 __swab32s(&b->mbo_suppgid);
1988 __swab32s(&b->mbo_eadatasize);
1989 __swab32s(&b->mbo_aclsize);
1990 __swab32s(&b->mbo_max_mdsize);
1991 BUILD_BUG_ON(offsetof(typeof(*b), mbo_unused3) == 0);
1992 __swab32s(&b->mbo_uid_h);
1993 __swab32s(&b->mbo_gid_h);
1994 __swab32s(&b->mbo_projid);
1995 __swab64s(&b->mbo_dom_size);
1996 __swab64s(&b->mbo_dom_blocks);
1997 __swab64s(&b->mbo_btime);
1998 BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_9) == 0);
1999 BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_10) == 0);
2002 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
2004 /* mio_open_handle is opaque */
2005 BUILD_BUG_ON(offsetof(typeof(*b), mio_unused1) == 0);
2006 BUILD_BUG_ON(offsetof(typeof(*b), mio_unused2) == 0);
2007 BUILD_BUG_ON(offsetof(typeof(*b), mio_padding) == 0);
2010 void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
2014 __swab32s(&mti->mti_lustre_ver);
2015 __swab32s(&mti->mti_stripe_index);
2016 __swab32s(&mti->mti_config_ver);
2017 __swab32s(&mti->mti_flags);
2018 __swab32s(&mti->mti_instance);
2019 __swab32s(&mti->mti_nid_count);
2020 BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
2021 for (i = 0; i < MTI_NIDS_MAX; i++)
2022 __swab64s(&mti->mti_nids[i]);
2025 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
2029 __swab64s(&entry->mne_version);
2030 __swab32s(&entry->mne_instance);
2031 __swab32s(&entry->mne_index);
2032 __swab32s(&entry->mne_length);
2034 /* mne_nid_(count|type) must be one byte size because we're gonna
2035 * access it w/o swapping. */
2036 BUILD_BUG_ON(sizeof(entry->mne_nid_count) != sizeof(__u8));
2037 BUILD_BUG_ON(sizeof(entry->mne_nid_type) != sizeof(__u8));
2039 /* remove this assertion if ipv6 is supported. */
2040 LASSERT(entry->mne_nid_type == 0);
2041 for (i = 0; i < entry->mne_nid_count; i++) {
2042 BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
2043 __swab64s(&entry->u.nids[i]);
2046 EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
2048 void lustre_swab_mgs_config_body(struct mgs_config_body *body)
2050 __swab64s(&body->mcb_offset);
2051 __swab32s(&body->mcb_units);
2052 __swab16s(&body->mcb_type);
2055 void lustre_swab_mgs_config_res(struct mgs_config_res *body)
2057 __swab64s(&body->mcr_offset);
2058 __swab64s(&body->mcr_size);
2061 static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
2063 __swab64s(&i->dqi_bgrace);
2064 __swab64s(&i->dqi_igrace);
2065 __swab32s(&i->dqi_flags);
2066 __swab32s(&i->dqi_valid);
2069 static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
2071 __swab64s(&b->dqb_ihardlimit);
2072 __swab64s(&b->dqb_isoftlimit);
2073 __swab64s(&b->dqb_curinodes);
2074 __swab64s(&b->dqb_bhardlimit);
2075 __swab64s(&b->dqb_bsoftlimit);
2076 __swab64s(&b->dqb_curspace);
2077 __swab64s(&b->dqb_btime);
2078 __swab64s(&b->dqb_itime);
2079 __swab32s(&b->dqb_valid);
2080 BUILD_BUG_ON(offsetof(typeof(*b), dqb_padding) == 0);
2083 int lustre_swab_obd_quotactl(struct obd_quotactl *q, __u32 len)
2085 if (unlikely(len <= sizeof(struct obd_quotactl)))
2088 __swab32s(&q->qc_cmd);
2089 __swab32s(&q->qc_type);
2090 __swab32s(&q->qc_id);
2091 __swab32s(&q->qc_stat);
2092 lustre_swab_obd_dqinfo(&q->qc_dqinfo);
2093 lustre_swab_obd_dqblk(&q->qc_dqblk);
2098 void lustre_swab_fid2path(struct getinfo_fid2path *gf)
2100 lustre_swab_lu_fid(&gf->gf_fid);
2101 __swab64s(&gf->gf_recno);
2102 __swab32s(&gf->gf_linkno);
2103 __swab32s(&gf->gf_pathlen);
2105 EXPORT_SYMBOL(lustre_swab_fid2path);
2107 static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
2109 __swab64s(&fm_extent->fe_logical);
2110 __swab64s(&fm_extent->fe_physical);
2111 __swab64s(&fm_extent->fe_length);
2112 __swab32s(&fm_extent->fe_flags);
2113 __swab32s(&fm_extent->fe_device);
2116 static void lustre_swab_fiemap_hdr(struct fiemap *fiemap)
2118 __swab64s(&fiemap->fm_start);
2119 __swab64s(&fiemap->fm_length);
2120 __swab32s(&fiemap->fm_flags);
2121 __swab32s(&fiemap->fm_mapped_extents);
2122 __swab32s(&fiemap->fm_extent_count);
2123 __swab32s(&fiemap->fm_reserved);
2126 int lustre_swab_fiemap(struct fiemap *fiemap, __u32 len)
2128 __u32 i, size, count;
2130 lustre_swab_fiemap_hdr(fiemap);
2132 size = fiemap_count_to_size(fiemap->fm_mapped_extents);
2133 count = fiemap->fm_mapped_extents;
2134 if (unlikely(size > len)) {
2135 count = (len - sizeof(struct fiemap)) /
2136 sizeof(struct fiemap_extent);
2137 fiemap->fm_mapped_extents = count;
2140 /* still swab extents as we cannot yet pass rc to callers */
2141 for (i = 0; i < count; i++)
2142 lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
2147 void lustre_swab_fiemap_info_key(struct ll_fiemap_info_key *fiemap_info)
2149 lustre_swab_obdo(&fiemap_info->lfik_oa);
2150 lustre_swab_fiemap_hdr(&fiemap_info->lfik_fiemap);
2153 void lustre_swab_idx_info(struct idx_info *ii)
2155 __swab32s(&ii->ii_magic);
2156 __swab32s(&ii->ii_flags);
2157 __swab16s(&ii->ii_count);
2158 __swab32s(&ii->ii_attrs);
2159 lustre_swab_lu_fid(&ii->ii_fid);
2160 __swab64s(&ii->ii_version);
2161 __swab64s(&ii->ii_hash_start);
2162 __swab64s(&ii->ii_hash_end);
2163 __swab16s(&ii->ii_keysize);
2164 __swab16s(&ii->ii_recsize);
2167 void lustre_swab_lip_header(struct lu_idxpage *lip)
2170 __swab32s(&lip->lip_magic);
2171 __swab16s(&lip->lip_flags);
2172 __swab16s(&lip->lip_nr);
2174 EXPORT_SYMBOL(lustre_swab_lip_header);
2176 void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
2178 __swab32s(&rr->rr_opcode);
2179 __swab32s(&rr->rr_cap);
2180 __swab32s(&rr->rr_fsuid);
2181 /* rr_fsuid_h is unused */
2182 __swab32s(&rr->rr_fsgid);
2183 /* rr_fsgid_h is unused */
2184 __swab32s(&rr->rr_suppgid1);
2185 /* rr_suppgid1_h is unused */
2186 __swab32s(&rr->rr_suppgid2);
2187 /* rr_suppgid2_h is unused */
2188 lustre_swab_lu_fid(&rr->rr_fid1);
2189 lustre_swab_lu_fid(&rr->rr_fid2);
2190 __swab64s(&rr->rr_mtime);
2191 __swab64s(&rr->rr_atime);
2192 __swab64s(&rr->rr_ctime);
2193 __swab64s(&rr->rr_size);
2194 __swab64s(&rr->rr_blocks);
2195 __swab32s(&rr->rr_bias);
2196 __swab32s(&rr->rr_mode);
2197 __swab32s(&rr->rr_flags);
2198 __swab32s(&rr->rr_flags_h);
2199 __swab32s(&rr->rr_umask);
2200 __swab16s(&rr->rr_mirror_id);
2202 BUILD_BUG_ON(offsetof(typeof(*rr), rr_padding_4) == 0);
2205 void lustre_swab_lov_desc(struct lov_desc *ld)
2207 __swab32s(&ld->ld_tgt_count);
2208 __swab32s(&ld->ld_active_tgt_count);
2209 __swab32s(&ld->ld_default_stripe_count);
2210 __swab32s(&ld->ld_pattern);
2211 __swab64s(&ld->ld_default_stripe_size);
2212 __swab64s(&ld->ld_default_stripe_offset);
2213 __swab32s(&ld->ld_qos_maxage);
2214 /* uuid endian insensitive */
2216 EXPORT_SYMBOL(lustre_swab_lov_desc);
2218 void lustre_swab_lmv_desc(struct lmv_desc *ld)
2220 __swab32s(&ld->ld_tgt_count);
2221 __swab32s(&ld->ld_active_tgt_count);
2222 __swab32s(&ld->ld_default_stripe_count);
2223 __swab32s(&ld->ld_pattern);
2224 __swab64s(&ld->ld_default_hash_size);
2225 __swab32s(&ld->ld_qos_maxage);
2226 /* uuid endian insensitive */
2229 /* This structure is always in little-endian */
2230 static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
2234 __swab32s(&lmm1->lmv_magic);
2235 __swab32s(&lmm1->lmv_stripe_count);
2236 __swab32s(&lmm1->lmv_master_mdt_index);
2237 __swab32s(&lmm1->lmv_hash_type);
2238 __swab32s(&lmm1->lmv_layout_version);
2239 for (i = 0; i < lmm1->lmv_stripe_count; i++)
2240 lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
2243 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
2245 switch (lmm->lmv_magic) {
2247 lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
2253 EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
2255 void lustre_swab_lmv_user_md_objects(struct lmv_user_mds_data *lmd,
2260 for (i = 0; i < stripe_count; i++)
2261 __swab32s(&(lmd[i].lum_mds));
2263 EXPORT_SYMBOL(lustre_swab_lmv_user_md_objects);
2266 void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
2270 if (lum->lum_magic == LMV_MAGIC_FOREIGN) {
2271 __swab32s(&lum->lum_magic);
2272 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_length);
2273 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_type);
2274 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_flags);
2278 count = lum->lum_stripe_count;
2279 __swab32s(&lum->lum_magic);
2280 __swab32s(&lum->lum_stripe_count);
2281 __swab32s(&lum->lum_stripe_offset);
2282 __swab32s(&lum->lum_hash_type);
2283 __swab32s(&lum->lum_type);
2284 BUILD_BUG_ON(offsetof(typeof(*lum), lum_padding1) == 0);
2285 switch (lum->lum_magic) {
2286 case LMV_USER_MAGIC_SPECIFIC:
2287 count = lum->lum_stripe_count;
2289 case __swab32(LMV_USER_MAGIC_SPECIFIC):
2290 lustre_swab_lmv_user_md_objects(lum->lum_objects, count);
2296 EXPORT_SYMBOL(lustre_swab_lmv_user_md);
2298 static void lustre_print_v1v3(unsigned int lvl, struct lov_user_md *lum,
2301 CDEBUG(lvl, "%s lov_user_md %p:\n", msg, lum);
2302 CDEBUG(lvl, "\tlmm_magic: %#x\n", lum->lmm_magic);
2303 CDEBUG(lvl, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
2304 CDEBUG(lvl, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
2305 CDEBUG(lvl, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
2306 CDEBUG(lvl, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
2307 CDEBUG(lvl, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
2308 CDEBUG(lvl, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
2309 lum->lmm_stripe_offset);
2310 if (lum->lmm_magic == LOV_USER_MAGIC_V3) {
2311 struct lov_user_md_v3 *v3 = (void *)lum;
2312 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2314 if (lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2315 struct lov_user_md_v3 *v3 = (void *)lum;
2318 if (v3->lmm_pool_name[0] != '\0')
2319 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2321 CDEBUG(lvl, "\ttarget list:\n");
2322 for (i = 0; i < v3->lmm_stripe_count; i++)
2323 CDEBUG(lvl, "\t\t%u\n", v3->lmm_objects[i].l_ost_idx);
2327 void lustre_print_user_md(unsigned int lvl, struct lov_user_md *lum,
2330 struct lov_comp_md_v1 *comp_v1;
2333 if (likely(!cfs_cdebug_show(lvl, DEBUG_SUBSYSTEM)))
2336 if (lum->lmm_magic == LOV_USER_MAGIC_V1 ||
2337 lum->lmm_magic == LOV_USER_MAGIC_V3) {
2338 lustre_print_v1v3(lvl, lum, msg);
2342 if (lum->lmm_magic != LOV_USER_MAGIC_COMP_V1) {
2343 CDEBUG(lvl, "%s: bad magic: %x\n", msg, lum->lmm_magic);
2347 comp_v1 = (struct lov_comp_md_v1 *)lum;
2348 CDEBUG(lvl, "%s: lov_comp_md_v1 %p:\n", msg, lum);
2349 CDEBUG(lvl, "\tlcm_magic: %#x\n", comp_v1->lcm_magic);
2350 CDEBUG(lvl, "\tlcm_size: %#x\n", comp_v1->lcm_size);
2351 CDEBUG(lvl, "\tlcm_layout_gen: %#x\n", comp_v1->lcm_layout_gen);
2352 CDEBUG(lvl, "\tlcm_flags: %#x\n", comp_v1->lcm_flags);
2353 CDEBUG(lvl, "\tlcm_entry_count: %#x\n\n", comp_v1->lcm_entry_count);
2354 CDEBUG(lvl, "\tlcm_mirror_count: %#x\n\n", comp_v1->lcm_mirror_count);
2356 for (i = 0; i < comp_v1->lcm_entry_count; i++) {
2357 struct lov_comp_md_entry_v1 *ent = &comp_v1->lcm_entries[i];
2358 struct lov_user_md *v1;
2360 CDEBUG(lvl, "\tentry %d:\n", i);
2361 CDEBUG(lvl, "\tlcme_id: %#x\n", ent->lcme_id);
2362 CDEBUG(lvl, "\tlcme_flags: %#x\n", ent->lcme_flags);
2363 if (ent->lcme_flags & LCME_FL_NOSYNC)
2364 CDEBUG(lvl, "\tlcme_timestamp: %llu\n",
2365 ent->lcme_timestamp);
2366 CDEBUG(lvl, "\tlcme_extent.e_start: %llu\n",
2367 ent->lcme_extent.e_start);
2368 CDEBUG(lvl, "\tlcme_extent.e_end: %llu\n",
2369 ent->lcme_extent.e_end);
2370 CDEBUG(lvl, "\tlcme_offset: %#x\n", ent->lcme_offset);
2371 CDEBUG(lvl, "\tlcme_size: %#x\n\n", ent->lcme_size);
2373 v1 = (struct lov_user_md *)((char *)comp_v1 +
2374 comp_v1->lcm_entries[i].lcme_offset);
2375 lustre_print_v1v3(lvl, v1, msg);
2378 EXPORT_SYMBOL(lustre_print_user_md);
2380 static void lustre_swab_lmm_oi(struct ost_id *oi)
2382 __swab64s(&oi->oi.oi_id);
2383 __swab64s(&oi->oi.oi_seq);
2386 static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
2389 __swab32s(&lum->lmm_magic);
2390 __swab32s(&lum->lmm_pattern);
2391 lustre_swab_lmm_oi(&lum->lmm_oi);
2392 __swab32s(&lum->lmm_stripe_size);
2393 __swab16s(&lum->lmm_stripe_count);
2394 __swab16s(&lum->lmm_stripe_offset);
2398 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
2401 CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
2402 lustre_swab_lov_user_md_common(lum);
2405 EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
2407 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
2410 CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
2411 lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
2412 /* lmm_pool_name nothing to do with char */
2415 EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
2417 void lustre_swab_lov_comp_md_v1(struct lov_comp_md_v1 *lum)
2419 struct lov_comp_md_entry_v1 *ent;
2420 struct lov_user_md_v1 *v1;
2421 struct lov_user_md_v3 *v3;
2425 __u16 ent_count, stripe_count;
2428 cpu_endian = lum->lcm_magic == LOV_USER_MAGIC_COMP_V1;
2429 ent_count = lum->lcm_entry_count;
2431 __swab16s(&ent_count);
2433 CDEBUG(D_IOCTL, "swabbing lov_user_comp_md v1\n");
2434 __swab32s(&lum->lcm_magic);
2435 __swab32s(&lum->lcm_size);
2436 __swab32s(&lum->lcm_layout_gen);
2437 __swab16s(&lum->lcm_flags);
2438 __swab16s(&lum->lcm_entry_count);
2439 __swab16s(&lum->lcm_mirror_count);
2440 BUILD_BUG_ON(offsetof(typeof(*lum), lcm_padding1) == 0);
2441 BUILD_BUG_ON(offsetof(typeof(*lum), lcm_padding2) == 0);
2443 for (i = 0; i < ent_count; i++) {
2444 ent = &lum->lcm_entries[i];
2445 off = ent->lcme_offset;
2446 size = ent->lcme_size;
2452 __swab32s(&ent->lcme_id);
2453 __swab32s(&ent->lcme_flags);
2454 __swab64s(&ent->lcme_timestamp);
2455 __swab64s(&ent->lcme_extent.e_start);
2456 __swab64s(&ent->lcme_extent.e_end);
2457 __swab32s(&ent->lcme_offset);
2458 __swab32s(&ent->lcme_size);
2459 __swab32s(&ent->lcme_layout_gen);
2460 BUILD_BUG_ON(offsetof(typeof(*ent), lcme_padding_1) == 0);
2462 v1 = (struct lov_user_md_v1 *)((char *)lum + off);
2463 stripe_count = v1->lmm_stripe_count;
2465 __swab16s(&stripe_count);
2467 if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V1) ||
2468 v1->lmm_magic == LOV_USER_MAGIC_V1) {
2469 lustre_swab_lov_user_md_v1(v1);
2470 if (size > sizeof(*v1))
2471 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2473 } else if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V3) ||
2474 v1->lmm_magic == LOV_USER_MAGIC_V3 ||
2475 v1->lmm_magic == __swab32(LOV_USER_MAGIC_SPECIFIC) ||
2476 v1->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2477 v3 = (struct lov_user_md_v3 *)v1;
2478 lustre_swab_lov_user_md_v3(v3);
2479 if (size > sizeof(*v3))
2480 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2483 CERROR("Invalid magic %#x\n", v1->lmm_magic);
2487 EXPORT_SYMBOL(lustre_swab_lov_comp_md_v1);
2489 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
2495 for (i = 0; i < stripe_count; i++) {
2496 lustre_swab_ost_id(&(lod[i].l_ost_oi));
2497 __swab32s(&(lod[i].l_ost_gen));
2498 __swab32s(&(lod[i].l_ost_idx));
2502 EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
2504 void lustre_swab_lov_user_md(struct lov_user_md *lum, size_t size)
2506 struct lov_user_md_v1 *v1;
2507 struct lov_user_md_v3 *v3;
2508 struct lov_foreign_md *lfm;
2512 CDEBUG(D_IOCTL, "swabbing lov_user_md\n");
2513 switch (lum->lmm_magic) {
2514 case __swab32(LOV_MAGIC_V1):
2515 case LOV_USER_MAGIC_V1:
2517 v1 = (struct lov_user_md_v1 *)lum;
2518 stripe_count = v1->lmm_stripe_count;
2520 if (lum->lmm_magic != LOV_USER_MAGIC_V1)
2521 __swab16s(&stripe_count);
2523 lustre_swab_lov_user_md_v1(v1);
2524 if (size > sizeof(*v1))
2525 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2530 case __swab32(LOV_MAGIC_V3):
2531 case LOV_USER_MAGIC_V3:
2533 v3 = (struct lov_user_md_v3 *)lum;
2534 stripe_count = v3->lmm_stripe_count;
2536 if (lum->lmm_magic != LOV_USER_MAGIC_V3)
2537 __swab16s(&stripe_count);
2539 lustre_swab_lov_user_md_v3(v3);
2540 if (size > sizeof(*v3))
2541 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2545 case __swab32(LOV_USER_MAGIC_SPECIFIC):
2546 case LOV_USER_MAGIC_SPECIFIC:
2548 v3 = (struct lov_user_md_v3 *)lum;
2549 stripe_count = v3->lmm_stripe_count;
2551 if (lum->lmm_magic != LOV_USER_MAGIC_SPECIFIC)
2552 __swab16s(&stripe_count);
2554 lustre_swab_lov_user_md_v3(v3);
2555 lustre_swab_lov_user_md_objects(v3->lmm_objects, stripe_count);
2558 case __swab32(LOV_MAGIC_COMP_V1):
2559 case LOV_USER_MAGIC_COMP_V1:
2560 lustre_swab_lov_comp_md_v1((struct lov_comp_md_v1 *)lum);
2562 case __swab32(LOV_MAGIC_FOREIGN):
2563 case LOV_USER_MAGIC_FOREIGN:
2565 lfm = (struct lov_foreign_md *)lum;
2566 __swab32s(&lfm->lfm_magic);
2567 __swab32s(&lfm->lfm_length);
2568 __swab32s(&lfm->lfm_type);
2569 __swab32s(&lfm->lfm_flags);
2573 CDEBUG(D_IOCTL, "Invalid LOV magic %08x\n", lum->lmm_magic);
2576 EXPORT_SYMBOL(lustre_swab_lov_user_md);
2578 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
2581 CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
2582 __swab32s(&lmm->lmm_magic);
2583 __swab32s(&lmm->lmm_pattern);
2584 lustre_swab_lmm_oi(&lmm->lmm_oi);
2585 __swab32s(&lmm->lmm_stripe_size);
2586 __swab16s(&lmm->lmm_stripe_count);
2587 __swab16s(&lmm->lmm_layout_gen);
2590 EXPORT_SYMBOL(lustre_swab_lov_mds_md);
2592 void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
2596 for (i = 0; i < RES_NAME_SIZE; i++)
2597 __swab64s(&id->name[i]);
2600 void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
2602 /* the lock data is a union and the first two fields are always an
2603 * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
2604 * data the same way.
2606 __swab64s(&d->l_extent.start);
2607 __swab64s(&d->l_extent.end);
2608 __swab64s(&d->l_extent.gid);
2609 __swab64s(&d->l_flock.lfw_owner);
2610 __swab32s(&d->l_flock.lfw_pid);
2613 void lustre_swab_ldlm_intent(struct ldlm_intent *i)
2618 void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
2620 __swab32s(&r->lr_type);
2621 BUILD_BUG_ON(offsetof(typeof(*r), lr_pad) == 0);
2622 lustre_swab_ldlm_res_id(&r->lr_name);
2625 void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
2627 lustre_swab_ldlm_resource_desc(&l->l_resource);
2628 __swab32s(&l->l_req_mode);
2629 __swab32s(&l->l_granted_mode);
2630 lustre_swab_ldlm_policy_data(&l->l_policy_data);
2633 void lustre_swab_ldlm_request(struct ldlm_request *rq)
2635 __swab32s(&rq->lock_flags);
2636 lustre_swab_ldlm_lock_desc(&rq->lock_desc);
2637 __swab32s(&rq->lock_count);
2638 /* lock_handle[] opaque */
2641 void lustre_swab_ldlm_reply(struct ldlm_reply *r)
2643 __swab32s(&r->lock_flags);
2644 BUILD_BUG_ON(offsetof(typeof(*r), lock_padding) == 0);
2645 lustre_swab_ldlm_lock_desc(&r->lock_desc);
2646 /* lock_handle opaque */
2647 __swab64s(&r->lock_policy_res1);
2648 __swab64s(&r->lock_policy_res2);
2651 void lustre_swab_quota_body(struct quota_body *b)
2653 lustre_swab_lu_fid(&b->qb_fid);
2654 lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
2655 __swab32s(&b->qb_flags);
2656 __swab64s(&b->qb_count);
2657 __swab64s(&b->qb_usage);
2658 __swab64s(&b->qb_slv_ver);
2661 /* Dump functions */
2662 void dump_ioo(struct obd_ioobj *ioo)
2665 "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
2666 "ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
2670 void dump_rniobuf(struct niobuf_remote *nb)
2672 CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
2673 nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
2676 void dump_obdo(struct obdo *oa)
2678 u64 valid = oa->o_valid;
2680 CDEBUG(D_RPCTRACE, "obdo: o_valid = %#llx\n", valid);
2681 if (valid & OBD_MD_FLID)
2682 CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
2683 if (valid & OBD_MD_FLFID)
2684 CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
2686 if (valid & OBD_MD_FLSIZE)
2687 CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
2688 if (valid & OBD_MD_FLMTIME)
2689 CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
2690 if (valid & OBD_MD_FLATIME)
2691 CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
2692 if (valid & OBD_MD_FLCTIME)
2693 CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
2694 if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
2695 CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
2696 if (valid & OBD_MD_FLGRANT)
2697 CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
2698 if (valid & OBD_MD_FLBLKSZ)
2699 CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
2700 if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
2701 CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
2702 oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
2703 (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
2704 if (valid & OBD_MD_FLUID)
2705 CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
2706 if (valid & OBD_MD_FLUID)
2707 CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
2708 if (valid & OBD_MD_FLGID)
2709 CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
2710 if (valid & OBD_MD_FLGID)
2711 CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
2712 if (valid & OBD_MD_FLFLAGS)
2713 CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
2714 if (valid & OBD_MD_FLNLINK)
2715 CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
2716 else if (valid & OBD_MD_FLCKSUM)
2717 CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
2719 if (valid & OBD_MD_FLPARENT)
2720 CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
2722 if (valid & OBD_MD_FLFID) {
2723 CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
2725 CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
2728 if (valid & OBD_MD_FLHANDLE)
2729 CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
2730 oa->o_handle.cookie);
2733 void dump_ost_body(struct ost_body *ob)
2738 void dump_rcs(__u32 *rc)
2740 CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
2743 static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2745 LASSERT(req->rq_reqmsg);
2747 switch (req->rq_reqmsg->lm_magic) {
2748 case LUSTRE_MSG_MAGIC_V2:
2749 return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
2751 CERROR("bad lustre msg magic: %#08X\n",
2752 req->rq_reqmsg->lm_magic);
2757 static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2759 if (unlikely(!req->rq_repmsg))
2762 switch (req->rq_repmsg->lm_magic) {
2763 case LUSTRE_MSG_MAGIC_V2:
2764 return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF);
2766 /* uninitialized yet */
2771 void _debug_req(struct ptlrpc_request *req,
2772 struct libcfs_debug_msg_data *msgdata, const char *fmt, ...)
2774 bool req_ok = req->rq_reqmsg != NULL;
2775 bool rep_ok = false;
2776 lnet_nid_t nid = LNET_NID_ANY;
2777 struct va_format vaf;
2780 int rep_status = -1;
2782 spin_lock(&req->rq_early_free_lock);
2786 if (ptlrpc_req_need_swab(req)) {
2787 req_ok = req_ok && req_ptlrpc_body_swabbed(req);
2788 rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
2792 rep_flags = lustre_msg_get_flags(req->rq_repmsg);
2793 rep_status = lustre_msg_get_status(req->rq_repmsg);
2795 spin_unlock(&req->rq_early_free_lock);
2797 if (req->rq_import && req->rq_import->imp_connection)
2798 nid = req->rq_import->imp_connection->c_peer.nid;
2799 else if (req->rq_export && req->rq_export->exp_connection)
2800 nid = req->rq_export->exp_connection->c_peer.nid;
2802 va_start(args, fmt);
2805 libcfs_debug_msg(msgdata,
2806 "%pV req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d job:'%s'\n",
2808 req, req->rq_xid, req->rq_transno,
2809 req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
2810 req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
2812 req->rq_import->imp_obd->obd_name :
2814 req->rq_export->exp_client_uuid.uuid :
2816 libcfs_nid2str(nid),
2817 req->rq_request_portal, req->rq_reply_portal,
2818 req->rq_reqlen, req->rq_replen,
2819 req->rq_early_count, (s64)req->rq_timedout,
2820 (s64)req->rq_deadline,
2821 atomic_read(&req->rq_refcount),
2822 DEBUG_REQ_FLAGS(req),
2823 req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
2824 rep_flags, req->rq_status, rep_status,
2825 req_ok ? lustre_msg_get_jobid(req->rq_reqmsg) ?: ""
2829 EXPORT_SYMBOL(_debug_req);
2831 void lustre_swab_hsm_user_state(struct hsm_user_state *state)
2833 __swab32s(&state->hus_states);
2834 __swab32s(&state->hus_archive_id);
2837 void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
2839 __swab32s(&hss->hss_valid);
2840 __swab64s(&hss->hss_setmask);
2841 __swab64s(&hss->hss_clearmask);
2842 __swab32s(&hss->hss_archive_id);
2845 static void lustre_swab_hsm_extent(struct hsm_extent *extent)
2847 __swab64s(&extent->offset);
2848 __swab64s(&extent->length);
2851 void lustre_swab_hsm_current_action(struct hsm_current_action *action)
2853 __swab32s(&action->hca_state);
2854 __swab32s(&action->hca_action);
2855 lustre_swab_hsm_extent(&action->hca_location);
2858 void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
2860 lustre_swab_lu_fid(&hui->hui_fid);
2861 lustre_swab_hsm_extent(&hui->hui_extent);
2864 void lustre_swab_lu_extent(struct lu_extent *le)
2866 __swab64s(&le->e_start);
2867 __swab64s(&le->e_end);
2870 void lustre_swab_layout_intent(struct layout_intent *li)
2872 __swab32s(&li->li_opc);
2873 __swab32s(&li->li_flags);
2874 lustre_swab_lu_extent(&li->li_extent);
2877 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
2879 lustre_swab_lu_fid(&hpk->hpk_fid);
2880 __swab64s(&hpk->hpk_cookie);
2881 __swab64s(&hpk->hpk_extent.offset);
2882 __swab64s(&hpk->hpk_extent.length);
2883 __swab16s(&hpk->hpk_flags);
2884 __swab16s(&hpk->hpk_errval);
2887 void lustre_swab_hsm_request(struct hsm_request *hr)
2889 __swab32s(&hr->hr_action);
2890 __swab32s(&hr->hr_archive_id);
2891 __swab64s(&hr->hr_flags);
2892 __swab32s(&hr->hr_itemcount);
2893 __swab32s(&hr->hr_data_len);
2896 void lustre_swab_object_update(struct object_update *ou)
2898 struct object_update_param *param;
2901 __swab16s(&ou->ou_type);
2902 __swab16s(&ou->ou_params_count);
2903 __swab32s(&ou->ou_result_size);
2904 __swab32s(&ou->ou_flags);
2905 __swab32s(&ou->ou_padding1);
2906 __swab64s(&ou->ou_batchid);
2907 lustre_swab_lu_fid(&ou->ou_fid);
2908 param = &ou->ou_params[0];
2909 for (i = 0; i < ou->ou_params_count; i++) {
2910 __swab16s(¶m->oup_len);
2911 __swab16s(¶m->oup_padding);
2912 __swab32s(¶m->oup_padding2);
2913 param = (struct object_update_param *)((char *)param +
2914 object_update_param_size(param));
2918 int lustre_swab_object_update_request(struct object_update_request *our,
2922 struct object_update *ou;
2924 __swab32s(&our->ourq_magic);
2925 __swab16s(&our->ourq_count);
2926 __swab16s(&our->ourq_padding);
2928 /* Don't need to calculate request size if len is 0. */
2930 size = sizeof(struct object_update_request);
2931 for (i = 0; i < our->ourq_count; i++) {
2932 ou = object_update_request_get(our, i, NULL);
2935 size += sizeof(struct object_update) +
2936 ou->ou_params_count *
2937 sizeof(struct object_update_param);
2939 if (unlikely(size > len))
2943 for (i = 0; i < our->ourq_count; i++) {
2944 ou = object_update_request_get(our, i, NULL);
2945 lustre_swab_object_update(ou);
2951 void lustre_swab_object_update_result(struct object_update_result *our)
2953 __swab32s(&our->our_rc);
2954 __swab16s(&our->our_datalen);
2955 __swab16s(&our->our_padding);
2958 int lustre_swab_object_update_reply(struct object_update_reply *our, __u32 len)
2962 __swab32s(&our->ourp_magic);
2963 __swab16s(&our->ourp_count);
2964 __swab16s(&our->ourp_padding);
2966 size = sizeof(struct object_update_reply) + our->ourp_count *
2967 (sizeof(__u16) + sizeof(struct object_update_result));
2968 if (unlikely(size > len))
2971 for (i = 0; i < our->ourp_count; i++) {
2972 struct object_update_result *ourp;
2974 __swab16s(&our->ourp_lens[i]);
2975 ourp = object_update_result_get(our, i, NULL);
2978 lustre_swab_object_update_result(ourp);
2984 void lustre_swab_out_update_header(struct out_update_header *ouh)
2986 __swab32s(&ouh->ouh_magic);
2987 __swab32s(&ouh->ouh_count);
2988 __swab32s(&ouh->ouh_inline_length);
2989 __swab32s(&ouh->ouh_reply_size);
2991 EXPORT_SYMBOL(lustre_swab_out_update_header);
2993 void lustre_swab_out_update_buffer(struct out_update_buffer *oub)
2995 __swab32s(&oub->oub_size);
2996 __swab32s(&oub->oub_padding);
2998 EXPORT_SYMBOL(lustre_swab_out_update_buffer);
3000 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
3002 __swab64s(&msl->msl_flags);
3005 void lustre_swab_close_data(struct close_data *cd)
3007 lustre_swab_lu_fid(&cd->cd_fid);
3008 __swab64s(&cd->cd_data_version);
3011 void lustre_swab_close_data_resync_done(struct close_data_resync_done *resync)
3015 __swab32s(&resync->resync_count);
3016 /* after swab, resync_count must in CPU endian */
3017 if (resync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
3018 for (i = 0; i < resync->resync_count; i++)
3019 __swab32s(&resync->resync_ids_inline[i]);
3022 EXPORT_SYMBOL(lustre_swab_close_data_resync_done);
3024 void lustre_swab_lfsck_request(struct lfsck_request *lr)
3026 __swab32s(&lr->lr_event);
3027 __swab32s(&lr->lr_index);
3028 __swab32s(&lr->lr_flags);
3029 __swab32s(&lr->lr_valid);
3030 __swab32s(&lr->lr_speed);
3031 __swab16s(&lr->lr_version);
3032 __swab16s(&lr->lr_active);
3033 __swab16s(&lr->lr_param);
3034 __swab16s(&lr->lr_async_windows);
3035 __swab32s(&lr->lr_flags);
3036 lustre_swab_lu_fid(&lr->lr_fid);
3037 lustre_swab_lu_fid(&lr->lr_fid2);
3038 __swab32s(&lr->lr_comp_id);
3039 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_0) == 0);
3040 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_1) == 0);
3041 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_2) == 0);
3042 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_3) == 0);
3045 void lustre_swab_lfsck_reply(struct lfsck_reply *lr)
3047 __swab32s(&lr->lr_status);
3048 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_1) == 0);
3049 __swab64s(&lr->lr_repaired);
3052 static void lustre_swab_orphan_rec(struct lu_orphan_rec *rec)
3054 lustre_swab_lu_fid(&rec->lor_fid);
3055 __swab32s(&rec->lor_uid);
3056 __swab32s(&rec->lor_gid);
3059 void lustre_swab_orphan_ent(struct lu_orphan_ent *ent)
3061 lustre_swab_lu_fid(&ent->loe_key);
3062 lustre_swab_orphan_rec(&ent->loe_rec);
3064 EXPORT_SYMBOL(lustre_swab_orphan_ent);
3066 void lustre_swab_orphan_ent_v2(struct lu_orphan_ent_v2 *ent)
3068 lustre_swab_lu_fid(&ent->loe_key);
3069 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
3070 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
3071 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding) == 0);
3073 EXPORT_SYMBOL(lustre_swab_orphan_ent_v2);
3075 void lustre_swab_orphan_ent_v3(struct lu_orphan_ent_v3 *ent)
3077 lustre_swab_lu_fid(&ent->loe_key);
3078 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
3079 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
3080 __swab32s(&ent->loe_rec.lor_layout_version);
3081 __swab32s(&ent->loe_rec.lor_range);
3082 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding_1) == 0);
3083 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding_2) == 0);
3085 EXPORT_SYMBOL(lustre_swab_orphan_ent_v3);
3087 void lustre_swab_ladvise(struct lu_ladvise *ladvise)
3089 __swab16s(&ladvise->lla_advice);
3090 __swab16s(&ladvise->lla_value1);
3091 __swab32s(&ladvise->lla_value2);
3092 __swab64s(&ladvise->lla_start);
3093 __swab64s(&ladvise->lla_end);
3094 __swab32s(&ladvise->lla_value3);
3095 __swab32s(&ladvise->lla_value4);
3097 EXPORT_SYMBOL(lustre_swab_ladvise);
3099 void lustre_swab_ladvise_hdr(struct ladvise_hdr *ladvise_hdr)
3101 __swab32s(&ladvise_hdr->lah_magic);
3102 __swab32s(&ladvise_hdr->lah_count);
3103 __swab64s(&ladvise_hdr->lah_flags);
3104 __swab32s(&ladvise_hdr->lah_value1);
3105 __swab32s(&ladvise_hdr->lah_value2);
3106 __swab64s(&ladvise_hdr->lah_value3);
3108 EXPORT_SYMBOL(lustre_swab_ladvise_hdr);