4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/ptlrpc/pack_generic.c
33 * (Un)packing of OST requests
35 * Author: Peter J. Braam <braam@clusterfs.com>
36 * Author: Phil Schwan <phil@clusterfs.com>
37 * Author: Eric Barton <eeb@clusterfs.com>
40 #define DEBUG_SUBSYSTEM S_RPC
42 #include <linux/crc32.h>
44 #include <libcfs/libcfs.h>
46 #include <llog_swab.h>
47 #include <lustre_net.h>
48 #include <lustre_swab.h>
49 #include <obd_cksum.h>
50 #include <obd_class.h>
51 #include <obd_support.h>
52 #include "ptlrpc_internal.h"
54 static inline __u32 lustre_msg_hdr_size_v2(__u32 count)
56 return cfs_size_round(offsetof(struct lustre_msg_v2,
60 __u32 lustre_msg_hdr_size(__u32 magic, __u32 count)
65 case LUSTRE_MSG_MAGIC_V2:
66 return lustre_msg_hdr_size_v2(count);
68 LASSERTF(0, "incorrect message magic: %08x\n", magic);
73 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
74 enum lustre_msg_version version)
76 enum lustre_msg_version ver = lustre_msg_get_version(msg);
78 return (ver & LUSTRE_VERSION_MASK) != version;
81 int lustre_msg_check_version(struct lustre_msg *msg,
82 enum lustre_msg_version version)
84 #define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
85 switch (msg->lm_magic) {
86 case LUSTRE_MSG_MAGIC_V1:
87 CERROR("msg v1 not supported - please upgrade your system\n");
89 case LUSTRE_MSG_MAGIC_V2:
90 return lustre_msg_check_version_v2(msg, version);
92 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
95 #undef LUSTRE_MSG_MAGIC_V1
98 /* early reply size */
99 __u32 lustre_msg_early_size()
101 __u32 pblen = sizeof(struct ptlrpc_body);
103 return lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
105 EXPORT_SYMBOL(lustre_msg_early_size);
107 __u32 lustre_msg_size_v2(int count, __u32 *lengths)
113 size = lustre_msg_hdr_size_v2(count);
114 for (i = 0; i < count; i++)
115 size += cfs_size_round(lengths[i]);
119 EXPORT_SYMBOL(lustre_msg_size_v2);
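/*
 * Illustrative sketch (hypothetical values): the on-wire size of a
 * two-buffer message whose second buffer holds a 13-byte string would be
 * computed as
 *
 *	__u32 lens[2] = { sizeof(struct ptlrpc_body), 13 };
 *	__u32 size = lustre_msg_size_v2(2, lens);
 *
 * i.e. lustre_msg_hdr_size_v2(2) plus each length rounded up by
 * cfs_size_round(), so the 13-byte buffer occupies 16 bytes on the wire.
 */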
122 * This returns the size of the buffer that is required to hold a lustre_msg
123 * with the given sub-buffer lengths.
124 * NOTE: this should only be used for NEW requests, and should always be
125 * in the form of a v2 request. If this is a connection to a v1
126 * target then the first buffer will be stripped because the ptlrpc
127 * data is part of the lustre_msg_v1 header. b=14043
129 __u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
131 __u32 size[] = { sizeof(struct ptlrpc_body) };
139 LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
142 case LUSTRE_MSG_MAGIC_V2:
143 return lustre_msg_size_v2(count, lens);
145 LASSERTF(0, "incorrect message magic: %08x\n", magic);
151 * This is used to determine the size of a buffer that was already packed
152 * and will correctly handle the different message formats.
154 __u32 lustre_packed_msg_size(struct lustre_msg *msg)
156 switch (msg->lm_magic) {
157 case LUSTRE_MSG_MAGIC_V2:
158 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
160 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
164 EXPORT_SYMBOL(lustre_packed_msg_size);
166 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
174 msg->lm_bufcount = count;
175 /* XXX: lm_secflvr uninitialized here */
176 msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
178 for (i = 0; i < count; i++)
179 msg->lm_buflens[i] = lens[i];
184 ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
185 for (i = 0; i < count; i++) {
189 memcpy(ptr, tmp, lens[i]);
190 ptr += cfs_size_round(lens[i]);
193 EXPORT_SYMBOL(lustre_init_msg_v2);
195 static int lustre_pack_request_v2(struct ptlrpc_request *req,
196 int count, __u32 *lens, char **bufs)
200 reqlen = lustre_msg_size_v2(count, lens);
202 rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
206 req->rq_reqlen = reqlen;
208 lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
209 lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
213 int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
214 __u32 *lens, char **bufs)
216 __u32 size[] = { sizeof(struct ptlrpc_body) };
224 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
226 /* only use the new format; we don't need to be compatible with 1.4 */
227 magic = LUSTRE_MSG_MAGIC_V2;
230 case LUSTRE_MSG_MAGIC_V2:
231 return lustre_pack_request_v2(req, count, lens, bufs);
233 LASSERTF(0, "incorrect message magic: %08x\n", magic);
239 struct list_head ptlrpc_rs_debug_lru =
240 LIST_HEAD_INIT(ptlrpc_rs_debug_lru);
241 spinlock_t ptlrpc_rs_debug_lock;
243 #define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
245 spin_lock(&ptlrpc_rs_debug_lock); \
246 list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
247 spin_unlock(&ptlrpc_rs_debug_lock); \
250 #define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
252 spin_lock(&ptlrpc_rs_debug_lock); \
253 list_del(&(rs)->rs_debug_list); \
254 spin_unlock(&ptlrpc_rs_debug_lock); \
257 # define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0)
258 # define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0)
261 struct ptlrpc_reply_state *
262 lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
264 struct ptlrpc_reply_state *rs = NULL;
266 spin_lock(&svcpt->scp_rep_lock);
268 /* See if we have anything in a pool, and wait if nothing */
269 while (list_empty(&svcpt->scp_rep_idle)) {
272 spin_unlock(&svcpt->scp_rep_lock);
273 /* If we cannot get anything for a long time, we had better
274 * bail out instead of waiting indefinitely */
275 rc = wait_event_idle_timeout(svcpt->scp_rep_waitq,
276 !list_empty(&svcpt->scp_rep_idle),
277 cfs_time_seconds(10));
280 spin_lock(&svcpt->scp_rep_lock);
283 rs = list_first_entry(&svcpt->scp_rep_idle,
284 struct ptlrpc_reply_state, rs_list);
285 list_del(&rs->rs_list);
287 spin_unlock(&svcpt->scp_rep_lock);
289 memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
290 rs->rs_size = svcpt->scp_service->srv_max_reply_size;
291 rs->rs_svcpt = svcpt;
297 void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
299 struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
301 spin_lock(&svcpt->scp_rep_lock);
302 list_add(&rs->rs_list, &svcpt->scp_rep_idle);
303 spin_unlock(&svcpt->scp_rep_lock);
304 wake_up(&svcpt->scp_rep_waitq);
307 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
308 __u32 *lens, char **bufs, int flags)
310 struct ptlrpc_reply_state *rs;
314 LASSERT(req->rq_reply_state == NULL);
317 if ((flags & LPRFL_EARLY_REPLY) == 0) {
318 spin_lock(&req->rq_lock);
319 req->rq_packed_final = 1;
320 spin_unlock(&req->rq_lock);
323 msg_len = lustre_msg_size_v2(count, lens);
324 rc = sptlrpc_svc_alloc_rs(req, msg_len);
328 rs = req->rq_reply_state;
329 atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
330 rs->rs_cb_id.cbid_fn = reply_out_callback;
331 rs->rs_cb_id.cbid_arg = rs;
332 rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
333 INIT_LIST_HEAD(&rs->rs_exp_list);
334 INIT_LIST_HEAD(&rs->rs_obd_list);
335 INIT_LIST_HEAD(&rs->rs_list);
336 spin_lock_init(&rs->rs_lock);
338 req->rq_replen = msg_len;
339 req->rq_reply_state = rs;
340 req->rq_repmsg = rs->rs_msg;
342 lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
343 lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
345 PTLRPC_RS_DEBUG_LRU_ADD(rs);
349 EXPORT_SYMBOL(lustre_pack_reply_v2);
351 int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
352 char **bufs, int flags)
355 __u32 size[] = { sizeof(struct ptlrpc_body) };
363 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
365 switch (req->rq_reqmsg->lm_magic) {
366 case LUSTRE_MSG_MAGIC_V2:
367 rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
370 LASSERTF(0, "incorrect message magic: %08x\n",
371 req->rq_reqmsg->lm_magic);
375 CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
376 lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
380 int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
383 return lustre_pack_reply_flags(req, count, lens, bufs, 0);
385 EXPORT_SYMBOL(lustre_pack_reply);
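/*
 * Illustrative sketch (hypothetical server-side handler; payload_len and
 * buf are placeholders): replying with a ptlrpc_body plus one payload
 * buffer would typically look like
 *
 *	__u32 lens[2] = { sizeof(struct ptlrpc_body), payload_len };
 *
 *	rc = lustre_pack_reply(req, 2, lens, NULL);
 *	if (rc)
 *		return rc;
 *	buf = lustre_msg_buf(req->rq_repmsg, 1, payload_len);
 */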
387 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size)
389 __u32 i, offset, buflen, bufcount;
392 LASSERT(m->lm_bufcount > 0);
394 bufcount = m->lm_bufcount;
395 if (unlikely(n >= bufcount)) {
396 CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
401 buflen = m->lm_buflens[n];
402 if (unlikely(buflen < min_size)) {
403 CERROR("msg %p buffer[%d] size %d too small "
404 "(required %d, opc=%d)\n", m, n, buflen, min_size,
405 n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
409 offset = lustre_msg_hdr_size_v2(bufcount);
410 for (i = 0; i < n; i++)
411 offset += cfs_size_round(m->lm_buflens[i]);
413 return (char *)m + offset;
416 void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 min_size)
418 switch (m->lm_magic) {
419 case LUSTRE_MSG_MAGIC_V2:
420 return lustre_msg_buf_v2(m, n, min_size);
422 LASSERTF(0, "incorrect message magic: %08x (msg:%p)\n",
427 EXPORT_SYMBOL(lustre_msg_buf);
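/*
 * Illustrative sketch (hypothetical caller): fetching sub-buffer 1 of a
 * request while requiring at least sizeof(struct obd_statfs) bytes; NULL
 * is returned if the buffer is absent or too small:
 *
 *	struct obd_statfs *osfs;
 *
 *	osfs = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*osfs));
 *	if (osfs == NULL)
 *		return -EPROTO;
 */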
429 static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
430 unsigned int newlen, int move_data)
432 char *tail = NULL, *newpos;
436 LASSERT(msg->lm_bufcount > segment);
437 LASSERT(msg->lm_buflens[segment] >= newlen);
439 if (msg->lm_buflens[segment] == newlen)
442 if (move_data && msg->lm_bufcount > segment + 1) {
443 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
444 for (n = segment + 1; n < msg->lm_bufcount; n++)
445 tail_len += cfs_size_round(msg->lm_buflens[n]);
448 msg->lm_buflens[segment] = newlen;
450 if (tail && tail_len) {
451 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
452 LASSERT(newpos <= tail);
454 memmove(newpos, tail, tail_len);
457 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
461 * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
462 * we also move data forward from @segment + 1.
464 * if @newlen == 0, we remove the segment completely, but we still keep the
465 * total bufcount the same to save possible data moving. this will leave an
466 * unused segment with size 0 at the tail, but that's ok.
468 * return new msg size after shrinking.
471 * + if any buffers higher than @segment have been filled in, must call shrink
472 * with non-zero @move_data.
473 * + caller should NOT keep pointers to msg buffers which are higher than @segment
476 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
477 unsigned int newlen, int move_data)
479 switch (msg->lm_magic) {
480 case LUSTRE_MSG_MAGIC_V2:
481 return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
483 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
486 EXPORT_SYMBOL(lustre_shrink_msg);
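/*
 * Illustrative sketch (hypothetical caller; `used` is a placeholder): a
 * handler that reserved a maximally sized buffer 2 but filled only `used`
 * bytes, with buffer 3 already filled in, shrinks with move_data set so
 * the tail is pulled forward and rq_replen reflects the new packed size:
 *
 *	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 2, used, 1);
 */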
488 static int lustre_grow_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
491 char *tail = NULL, *newpos;
495 LASSERT(msg->lm_bufcount > segment);
496 LASSERT(msg->lm_buflens[segment] <= newlen);
498 if (msg->lm_buflens[segment] == newlen)
501 if (msg->lm_bufcount > segment + 1) {
502 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
503 for (n = segment + 1; n < msg->lm_bufcount; n++)
504 tail_len += cfs_size_round(msg->lm_buflens[n]);
507 msg->lm_buflens[segment] = newlen;
509 if (tail && tail_len) {
510 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
511 memmove(newpos, tail, tail_len);
514 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
518 * for @msg, grow @segment to size @newlen.
519 * Always move higher buffer forward.
521 * return new msg size after growing.
524 * - caller must make sure there is enough space in allocated message buffer
525 * - caller should NOT keep pointers to msg buffers which are higher than @segment
528 int lustre_grow_msg(struct lustre_msg *msg, int segment, unsigned int newlen)
530 switch (msg->lm_magic) {
531 case LUSTRE_MSG_MAGIC_V2:
532 return lustre_grow_msg_v2(msg, segment, newlen);
534 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
537 EXPORT_SYMBOL(lustre_grow_msg);
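/*
 * Illustrative sketch (hypothetical caller; `newlen` is a placeholder):
 * growing reply buffer 1 to `newlen` bytes, assuming the reply buffer was
 * allocated large enough to hold the grown message:
 *
 *	req->rq_replen = lustre_grow_msg(req->rq_repmsg, 1, newlen);
 */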
539 void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
541 PTLRPC_RS_DEBUG_LRU_DEL(rs);
543 LASSERT(atomic_read(&rs->rs_refcount) == 0);
544 LASSERT(!rs->rs_difficult || rs->rs_handled);
545 LASSERT(!rs->rs_on_net);
546 LASSERT(!rs->rs_scheduled);
547 LASSERT(rs->rs_export == NULL);
548 LASSERT(rs->rs_nlocks == 0);
549 LASSERT(list_empty(&rs->rs_exp_list));
550 LASSERT(list_empty(&rs->rs_obd_list));
552 sptlrpc_svc_free_rs(rs);
555 static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
557 int swabbed, required_len, i, buflen;
559 /* Now we know the sender speaks my language. */
560 required_len = lustre_msg_hdr_size_v2(0);
561 if (len < required_len) {
562 /* can't even look inside the message */
563 CERROR("message length %d too small for lustre_msg\n", len);
567 swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
570 __swab32s(&m->lm_magic);
571 __swab32s(&m->lm_bufcount);
572 __swab32s(&m->lm_secflvr);
573 __swab32s(&m->lm_repsize);
574 __swab32s(&m->lm_cksum);
575 __swab32s(&m->lm_flags);
576 BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_2) == 0);
577 BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_3) == 0);
580 if (m->lm_bufcount == 0 || m->lm_bufcount > PTLRPC_MAX_BUFCOUNT) {
581 CERROR("message bufcount %d is not valid\n", m->lm_bufcount);
584 required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
585 if (len < required_len) {
586 /* didn't receive all the buffer lengths */
587 CERROR("message length %d too small for %d buflens\n",
588 len, m->lm_bufcount);
592 for (i = 0; i < m->lm_bufcount; i++) {
594 __swab32s(&m->lm_buflens[i]);
595 buflen = cfs_size_round(m->lm_buflens[i]);
596 if (buflen < 0 || buflen > PTLRPC_MAX_BUFLEN) {
597 CERROR("buffer %d length %d is not valid\n", i, buflen);
600 required_len += buflen;
602 if (len < required_len || required_len > PTLRPC_MAX_BUFLEN) {
603 CERROR("len: %d, required_len %d, bufcount: %d\n",
604 len, required_len, m->lm_bufcount);
605 for (i = 0; i < m->lm_bufcount; i++)
606 CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
613 int __lustre_unpack_msg(struct lustre_msg *m, int len)
615 int required_len, rc;
619 * We can provide a slightly better error log, if we check the
620 * message magic and version first. In the future, struct
621 * lustre_msg may grow, and we'd like to log a version mismatch,
622 * rather than a short message.
624 required_len = offsetof(struct lustre_msg, lm_magic) +
626 if (len < required_len) {
627 /* can't even look inside the message */
628 CERROR("message length %d too small for magic/version check\n",
633 rc = lustre_unpack_msg_v2(m, len);
637 EXPORT_SYMBOL(__lustre_unpack_msg);
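/*
 * Illustrative sketch (assumed server-side flow): an incoming request
 * buffer of `len` bytes is normally unpacked in two steps, using the
 * helpers defined below:
 *
 *	rc = ptlrpc_unpack_req_msg(req, len);
 *	if (rc == 0)
 *		rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
 */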
639 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
643 rc = __lustre_unpack_msg(req->rq_reqmsg, len);
645 req_capsule_set_req_swabbed(&req->rq_pill,
646 MSG_PTLRPC_HEADER_OFF);
652 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
656 rc = __lustre_unpack_msg(req->rq_repmsg, len);
658 req_capsule_set_rep_swabbed(&req->rq_pill,
659 MSG_PTLRPC_HEADER_OFF);
666 lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
667 enum req_location loc, int offset)
669 struct ptlrpc_body *pb;
670 struct lustre_msg_v2 *m;
672 m = loc == RCL_CLIENT ? req->rq_reqmsg : req->rq_repmsg;
674 pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
676 CERROR("error unpacking ptlrpc body\n");
679 if (req_capsule_need_swab(&req->rq_pill, loc, offset)) {
680 lustre_swab_ptlrpc_body(pb);
681 req_capsule_set_swabbed(&req->rq_pill, loc, offset);
684 if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
685 CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
689 if (loc == RCL_SERVER)
690 pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
695 int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
697 switch (req->rq_reqmsg->lm_magic) {
698 case LUSTRE_MSG_MAGIC_V2:
699 return lustre_unpack_ptlrpc_body_v2(req, RCL_CLIENT, offset);
701 CERROR("bad lustre msg magic: %08x\n",
702 req->rq_reqmsg->lm_magic);
707 int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
709 switch (req->rq_repmsg->lm_magic) {
710 case LUSTRE_MSG_MAGIC_V2:
711 return lustre_unpack_ptlrpc_body_v2(req, RCL_SERVER, offset);
713 CERROR("bad lustre msg magic: %08x\n",
714 req->rq_repmsg->lm_magic);
719 static inline __u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, __u32 n)
721 if (n >= m->lm_bufcount)
724 return m->lm_buflens[n];
728 * lustre_msg_buflen - return the length of buffer \a n in message \a m
729 * \param m lustre_msg (request or reply) to look at
730 * \param n message index (base 0)
732 * returns zero for non-existent message indices
734 __u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n)
736 switch (m->lm_magic) {
737 case LUSTRE_MSG_MAGIC_V2:
738 return lustre_msg_buflen_v2(m, n);
740 CERROR("incorrect message magic: %08x\n", m->lm_magic);
744 EXPORT_SYMBOL(lustre_msg_buflen);
747 lustre_msg_set_buflen_v2(struct lustre_msg_v2 *m, __u32 n, __u32 len)
749 if (n >= m->lm_bufcount)
752 m->lm_buflens[n] = len;
755 void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len)
757 switch (m->lm_magic) {
758 case LUSTRE_MSG_MAGIC_V2:
759 lustre_msg_set_buflen_v2(m, n, len);
762 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
767 * NB: returns the bufcount for the lustre_msg_v2 format, so if the message is
768 * packed in V1 format the result is one greater (struct ptlrpc_body is added).
770 __u32 lustre_msg_bufcount(struct lustre_msg *m)
772 switch (m->lm_magic) {
773 case LUSTRE_MSG_MAGIC_V2:
774 return m->lm_bufcount;
776 CERROR("incorrect message magic: %08x\n", m->lm_magic);
781 char *lustre_msg_string(struct lustre_msg *m, __u32 index, __u32 max_len)
783 /* max_len == 0 means the string should fill the buffer */
787 switch (m->lm_magic) {
788 case LUSTRE_MSG_MAGIC_V2:
789 str = lustre_msg_buf_v2(m, index, 0);
790 blen = lustre_msg_buflen_v2(m, index);
793 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
797 CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
801 slen = strnlen(str, blen);
803 if (slen == blen) { /* not NULL terminated */
804 CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n",
808 if (blen > PTLRPC_MAX_BUFLEN) {
809 CERROR("buffer length of msg %p buffer[%d] is invalid(%d)\n",
815 if (slen != blen - 1) {
816 CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n",
817 m, index, blen, slen);
820 } else if (slen > max_len) {
821 CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n",
822 m, index, blen, slen, max_len);
829 /* Wrap up the normal fixed length cases */
830 static inline void *__lustre_swab_buf(struct lustre_msg *msg, __u32 index,
831 __u32 min_size, void *swabber)
835 LASSERT(msg != NULL);
836 switch (msg->lm_magic) {
837 case LUSTRE_MSG_MAGIC_V2:
838 ptr = lustre_msg_buf_v2(msg, index, min_size);
841 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
844 if (ptr != NULL && swabber != NULL)
845 ((void (*)(void *))swabber)(ptr);
850 static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
852 return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
853 sizeof(struct ptlrpc_body_v2));
856 enum lustre_msghdr lustre_msghdr_get_flags(struct lustre_msg *msg)
858 switch (msg->lm_magic) {
859 case LUSTRE_MSG_MAGIC_V2:
860 /* already in host endian */
861 return msg->lm_flags;
863 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
867 EXPORT_SYMBOL(lustre_msghdr_get_flags);
869 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
871 switch (msg->lm_magic) {
872 case LUSTRE_MSG_MAGIC_V2:
873 msg->lm_flags = flags;
876 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
880 __u32 lustre_msg_get_flags(struct lustre_msg *msg)
882 switch (msg->lm_magic) {
883 case LUSTRE_MSG_MAGIC_V2: {
884 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
888 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
893 * flags might be printed in debug code while message
899 EXPORT_SYMBOL(lustre_msg_get_flags);
901 void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags)
903 switch (msg->lm_magic) {
904 case LUSTRE_MSG_MAGIC_V2: {
905 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
906 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
907 pb->pb_flags |= flags;
911 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
914 EXPORT_SYMBOL(lustre_msg_add_flags);
916 void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags)
918 switch (msg->lm_magic) {
919 case LUSTRE_MSG_MAGIC_V2: {
920 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
921 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
922 pb->pb_flags = flags;
926 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
930 void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags)
932 switch (msg->lm_magic) {
933 case LUSTRE_MSG_MAGIC_V2: {
934 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
935 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
936 pb->pb_flags &= ~flags;
941 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
944 EXPORT_SYMBOL(lustre_msg_clear_flags);
946 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
948 switch (msg->lm_magic) {
949 case LUSTRE_MSG_MAGIC_V2: {
950 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
952 return pb->pb_op_flags;
954 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
962 void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags)
964 switch (msg->lm_magic) {
965 case LUSTRE_MSG_MAGIC_V2: {
966 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
967 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
968 pb->pb_op_flags |= flags;
972 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
975 EXPORT_SYMBOL(lustre_msg_add_op_flags);
977 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
979 switch (msg->lm_magic) {
980 case LUSTRE_MSG_MAGIC_V2: {
981 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
983 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
986 return &pb->pb_handle;
989 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
994 __u32 lustre_msg_get_type(struct lustre_msg *msg)
996 switch (msg->lm_magic) {
997 case LUSTRE_MSG_MAGIC_V2: {
998 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1000 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1001 return PTL_RPC_MSG_ERR;
1006 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1007 return PTL_RPC_MSG_ERR;
1010 EXPORT_SYMBOL(lustre_msg_get_type);
1012 enum lustre_msg_version lustre_msg_get_version(struct lustre_msg *msg)
1014 switch (msg->lm_magic) {
1015 case LUSTRE_MSG_MAGIC_V2: {
1016 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1018 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1021 return pb->pb_version;
1024 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1029 void lustre_msg_add_version(struct lustre_msg *msg, __u32 version)
1031 switch (msg->lm_magic) {
1032 case LUSTRE_MSG_MAGIC_V2: {
1033 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1034 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1035 pb->pb_version |= version;
1039 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1043 __u32 lustre_msg_get_opc(struct lustre_msg *msg)
1045 switch (msg->lm_magic) {
1046 case LUSTRE_MSG_MAGIC_V2: {
1047 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1049 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1055 CERROR("incorrect message magic: %08x (msg:%p)\n",
1056 msg->lm_magic, msg);
1060 EXPORT_SYMBOL(lustre_msg_get_opc);
1062 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg)
1064 switch (msg->lm_magic) {
1065 case LUSTRE_MSG_MAGIC_V2: {
1066 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1068 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1071 return pb->pb_last_xid;
1074 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1078 EXPORT_SYMBOL(lustre_msg_get_last_xid);
1080 __u16 lustre_msg_get_tag(struct lustre_msg *msg)
1082 switch (msg->lm_magic) {
1083 case LUSTRE_MSG_MAGIC_V2: {
1084 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1086 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1092 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1096 EXPORT_SYMBOL(lustre_msg_get_tag);
1098 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
1100 switch (msg->lm_magic) {
1101 case LUSTRE_MSG_MAGIC_V2: {
1102 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1104 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1107 return pb->pb_last_committed;
1110 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1114 EXPORT_SYMBOL(lustre_msg_get_last_committed);
1116 __u64 *lustre_msg_get_versions(struct lustre_msg *msg)
1118 switch (msg->lm_magic) {
1119 case LUSTRE_MSG_MAGIC_V2: {
1120 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1122 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1125 return pb->pb_pre_versions;
1128 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1132 EXPORT_SYMBOL(lustre_msg_get_versions);
1134 __u64 lustre_msg_get_transno(struct lustre_msg *msg)
1136 switch (msg->lm_magic) {
1137 case LUSTRE_MSG_MAGIC_V2: {
1138 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1140 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1143 return pb->pb_transno;
1146 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1150 EXPORT_SYMBOL(lustre_msg_get_transno);
1152 int lustre_msg_get_status(struct lustre_msg *msg)
1154 switch (msg->lm_magic) {
1155 case LUSTRE_MSG_MAGIC_V2: {
1156 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1158 return pb->pb_status;
1159 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1164 * status might be printed in debug code while message
1170 EXPORT_SYMBOL(lustre_msg_get_status);
1172 __u64 lustre_msg_get_slv(struct lustre_msg *msg)
1174 switch (msg->lm_magic) {
1175 case LUSTRE_MSG_MAGIC_V2: {
1176 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1178 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1184 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1190 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
1192 switch (msg->lm_magic) {
1193 case LUSTRE_MSG_MAGIC_V2: {
1194 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1196 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1203 CERROR("invalid msg magic %x\n", msg->lm_magic);
1208 __u32 lustre_msg_get_limit(struct lustre_msg *msg)
1210 switch (msg->lm_magic) {
1211 case LUSTRE_MSG_MAGIC_V2: {
1212 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1214 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1217 return pb->pb_limit;
1220 CERROR("invalid msg magic %x\n", msg->lm_magic);
1226 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
1228 switch (msg->lm_magic) {
1229 case LUSTRE_MSG_MAGIC_V2: {
1230 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1232 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1235 pb->pb_limit = limit;
1239 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1244 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
1246 switch (msg->lm_magic) {
1247 case LUSTRE_MSG_MAGIC_V2: {
1248 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1250 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1253 return pb->pb_conn_cnt;
1256 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1260 EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
1262 __u32 lustre_msg_get_magic(struct lustre_msg *msg)
1264 switch (msg->lm_magic) {
1265 case LUSTRE_MSG_MAGIC_V2:
1266 return msg->lm_magic;
1268 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1273 timeout_t lustre_msg_get_timeout(struct lustre_msg *msg)
1275 switch (msg->lm_magic) {
1276 case LUSTRE_MSG_MAGIC_V2: {
1277 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1280 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1283 return pb->pb_timeout;
1286 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1291 timeout_t lustre_msg_get_service_timeout(struct lustre_msg *msg)
1293 switch (msg->lm_magic) {
1294 case LUSTRE_MSG_MAGIC_V2: {
1295 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1298 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1301 return pb->pb_service_time;
1304 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1309 char *lustre_msg_get_jobid(struct lustre_msg *msg)
1311 switch (msg->lm_magic) {
1312 case LUSTRE_MSG_MAGIC_V2: {
1313 struct ptlrpc_body *pb;
1315 /* the old ptlrpc_body_v2 is smaller; doesn't include jobid */
1316 if (msg->lm_buflens[MSG_PTLRPC_BODY_OFF] <
1317 sizeof(struct ptlrpc_body))
1320 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1321 sizeof(struct ptlrpc_body));
1325 return pb->pb_jobid;
1328 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1332 EXPORT_SYMBOL(lustre_msg_get_jobid);
1334 __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
1336 switch (msg->lm_magic) {
1337 case LUSTRE_MSG_MAGIC_V2:
1338 return msg->lm_cksum;
1340 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1345 __u64 lustre_msg_get_mbits(struct lustre_msg *msg)
1347 switch (msg->lm_magic) {
1348 case LUSTRE_MSG_MAGIC_V2: {
1349 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1351 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1354 return pb->pb_mbits;
1357 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1362 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, __u32 buf)
1364 switch (msg->lm_magic) {
1365 case LUSTRE_MSG_MAGIC_V2: {
1366 struct ptlrpc_body *pb = lustre_msg_buf_v2(msg, buf, 0);
1367 __u32 len = lustre_msg_buflen(msg, buf);
1370 #if IS_ENABLED(CONFIG_CRC32)
1371 /* about 10x faster than crypto_hash for small buffers */
1372 crc = crc32_le(~(__u32)0, (unsigned char *)pb, len);
1373 #elif IS_ENABLED(CONFIG_CRYPTO_CRC32)
1374 unsigned int hsize = 4;
1376 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
1377 len, NULL, 0, (unsigned char *)&crc,
1380 #error "need either CONFIG_CRC32 or CONFIG_CRYPTO_CRC32 enabled in the kernel"
1385 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1390 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
1392 switch (msg->lm_magic) {
1393 case LUSTRE_MSG_MAGIC_V2: {
1394 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1395 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1396 pb->pb_handle = *handle;
1400 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1404 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
1406 switch (msg->lm_magic) {
1407 case LUSTRE_MSG_MAGIC_V2: {
1408 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1409 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1414 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1418 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
1420 switch (msg->lm_magic) {
1421 case LUSTRE_MSG_MAGIC_V2: {
1422 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1423 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1428 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1432 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid)
1434 switch (msg->lm_magic) {
1435 case LUSTRE_MSG_MAGIC_V2: {
1436 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1437 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1438 pb->pb_last_xid = last_xid;
1442 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1445 EXPORT_SYMBOL(lustre_msg_set_last_xid);
1447 void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
1449 switch (msg->lm_magic) {
1450 case LUSTRE_MSG_MAGIC_V2: {
1451 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1452 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1457 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1460 EXPORT_SYMBOL(lustre_msg_set_tag);
1462 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed)
1464 switch (msg->lm_magic) {
1465 case LUSTRE_MSG_MAGIC_V2: {
1466 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1467 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1468 pb->pb_last_committed = last_committed;
1472 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1476 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
1478 switch (msg->lm_magic) {
1479 case LUSTRE_MSG_MAGIC_V2: {
1480 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1481 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1482 pb->pb_pre_versions[0] = versions[0];
1483 pb->pb_pre_versions[1] = versions[1];
1484 pb->pb_pre_versions[2] = versions[2];
1485 pb->pb_pre_versions[3] = versions[3];
1489 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1492 EXPORT_SYMBOL(lustre_msg_set_versions);
1494 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
1496 switch (msg->lm_magic) {
1497 case LUSTRE_MSG_MAGIC_V2: {
1498 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1499 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1500 pb->pb_transno = transno;
1504 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1507 EXPORT_SYMBOL(lustre_msg_set_transno);
1509 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
1511 switch (msg->lm_magic) {
1512 case LUSTRE_MSG_MAGIC_V2: {
1513 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1514 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1515 pb->pb_status = status;
1519 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1522 EXPORT_SYMBOL(lustre_msg_set_status);
1524 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
1526 switch (msg->lm_magic) {
1527 case LUSTRE_MSG_MAGIC_V2: {
1528 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1529 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1530 pb->pb_conn_cnt = conn_cnt;
1534 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1538 void lustre_msg_set_timeout(struct lustre_msg *msg, timeout_t timeout)
1540 switch (msg->lm_magic) {
1541 case LUSTRE_MSG_MAGIC_V2: {
1542 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1544 LASSERT(timeout >= 0);
1545 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1546 pb->pb_timeout = timeout;
1550 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1554 void lustre_msg_set_service_timeout(struct lustre_msg *msg,
1555 timeout_t service_timeout)
1557 switch (msg->lm_magic) {
1558 case LUSTRE_MSG_MAGIC_V2: {
1559 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1561 LASSERT(service_timeout >= 0);
1562 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1563 pb->pb_service_time = service_timeout;
1567 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1571 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
1573 switch (msg->lm_magic) {
1574 case LUSTRE_MSG_MAGIC_V2: {
1575 __u32 opc = lustre_msg_get_opc(msg);
1576 struct ptlrpc_body *pb;
1578 /* Don't set the jobid for LDLM AST RPCs, they have been shrunk.
1579 * See the comment in ptlrpc_request_pack(). */
1580 if (!opc || opc == LDLM_BL_CALLBACK ||
1581 opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
1584 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1585 sizeof(struct ptlrpc_body));
1586 LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
1589 memcpy(pb->pb_jobid, jobid, sizeof(pb->pb_jobid));
1590 else if (pb->pb_jobid[0] == '\0')
1591 lustre_get_jobid(pb->pb_jobid, sizeof(pb->pb_jobid));
1595 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1598 EXPORT_SYMBOL(lustre_msg_set_jobid);
1600 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
1602 switch (msg->lm_magic) {
1603 case LUSTRE_MSG_MAGIC_V2:
1604 msg->lm_cksum = cksum;
1607 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1611 void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
1613 switch (msg->lm_magic) {
1614 case LUSTRE_MSG_MAGIC_V2: {
1615 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1617 LASSERTF(pb != NULL, "invalid msg %p: no ptlrpc body!\n", msg);
1618 pb->pb_mbits = mbits;
1622 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1626 void ptlrpc_request_set_replen(struct ptlrpc_request *req)
1628 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
1630 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
1631 req->rq_pill.rc_area[RCL_SERVER]);
1632 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1633 req->rq_reqmsg->lm_repsize = req->rq_replen;
1635 EXPORT_SYMBOL(ptlrpc_request_set_replen);
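/*
 * Illustrative sketch (hypothetical client; the field and `easize` are
 * placeholders): a client that has set the expected reply buffer sizes in
 * the request capsule finishes request setup with
 *
 *	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize);
 *	ptlrpc_request_set_replen(req);
 */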
1637 void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *lens)
1639 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens);
1640 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1641 req->rq_reqmsg->lm_repsize = req->rq_replen;
1645 * Send a remote set_info_async.
1647 * This may go from client to server or server to client.
1649 int do_set_info_async(struct obd_import *imp,
1650 int opcode, int version,
1651 size_t keylen, void *key,
1652 size_t vallen, void *val,
1653 struct ptlrpc_request_set *set)
1655 struct ptlrpc_request *req;
1661 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_CHANGELOG_CLEAR) ?
1667 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
1668 RCL_CLIENT, keylen);
1669 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
1670 RCL_CLIENT, vallen);
1671 rc = ptlrpc_request_pack(req, version, opcode);
1673 ptlrpc_request_free(req);
1677 if (KEY_IS(KEY_CHANGELOG_CLEAR))
1680 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1681 memcpy(tmp, key, keylen);
1682 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1683 memcpy(tmp, val, vallen);
1685 ptlrpc_request_set_replen(req);
1688 ptlrpc_set_add_req(set, req);
1689 ptlrpc_check_set(NULL, set);
1691 rc = ptlrpc_queue_wait(req);
1692 ptlrpc_req_finished(req);
1697 EXPORT_SYMBOL(do_set_info_async);
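/*
 * Illustrative sketch (hypothetical synchronous caller, set == NULL; the
 * opcode/version pair and the key/val buffers are placeholders):
 *
 *	rc = do_set_info_async(imp, OST_SET_INFO, LUSTRE_OST_VERSION,
 *			       strlen(key) + 1, key, vallen, val, NULL);
 */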
1699 /* byte flipping routines for all wire types declared in
1700 * lustre_idl.h are implemented here.
1702 void lustre_swab_ptlrpc_body(struct ptlrpc_body *body)
1704 __swab32s(&body->pb_type);
1705 __swab32s(&body->pb_version);
1706 __swab32s(&body->pb_opc);
1707 __swab32s(&body->pb_status);
1708 __swab64s(&body->pb_last_xid);
1709 __swab16s(&body->pb_tag);
1710 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding0) == 0);
1711 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding1) == 0);
1712 __swab64s(&body->pb_last_committed);
1713 __swab64s(&body->pb_transno);
1714 __swab32s(&body->pb_flags);
1715 __swab32s(&body->pb_op_flags);
1716 __swab32s(&body->pb_conn_cnt);
1717 __swab32s(&body->pb_timeout);
1718 __swab32s(&body->pb_service_time);
1719 __swab32s(&body->pb_limit);
1720 __swab64s(&body->pb_slv);
1721 __swab64s(&body->pb_pre_versions[0]);
1722 __swab64s(&body->pb_pre_versions[1]);
1723 __swab64s(&body->pb_pre_versions[2]);
1724 __swab64s(&body->pb_pre_versions[3]);
1725 __swab64s(&body->pb_mbits);
1726 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_0) == 0);
1727 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_1) == 0);
1728 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_2) == 0);
1730 * While we need to maintain compatibility with
1731 * clients and servers without ptlrpc_body_v2 (< 2.3),
1732 * do not swab any fields beyond pb_jobid, as we are
1733 * using this swab function for both ptlrpc_body
1734 * and ptlrpc_body_v2.
1736 /* pb_jobid is an ASCII string and should not be swabbed */
1737 BUILD_BUG_ON(offsetof(typeof(*body), pb_jobid) == 0);
1740 void lustre_swab_connect(struct obd_connect_data *ocd)
1742 __swab64s(&ocd->ocd_connect_flags);
1743 __swab32s(&ocd->ocd_version);
1744 __swab32s(&ocd->ocd_grant);
1745 __swab64s(&ocd->ocd_ibits_known);
1746 __swab32s(&ocd->ocd_index);
1747 __swab32s(&ocd->ocd_brw_size);
1749 * ocd_blocksize and ocd_inodespace don't need to be swabbed because
1750 * they are single-byte (__u8) values
1752 __swab16s(&ocd->ocd_grant_tax_kb);
1753 __swab32s(&ocd->ocd_grant_max_blks);
1754 __swab64s(&ocd->ocd_transno);
1755 __swab32s(&ocd->ocd_group);
1756 __swab32s(&ocd->ocd_cksum_types);
1757 __swab32s(&ocd->ocd_instance);
1759 * Fields after ocd_cksum_types are only accessible by the receiver
1760 * if the corresponding flag in ocd_connect_flags is set. Accessing
1761 * any field after ocd_maxbytes on the receiver without a valid flag
1762 * may result in an out-of-bounds memory access and a kernel oops.
1764 if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
1765 __swab32s(&ocd->ocd_max_easize);
1766 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
1767 __swab64s(&ocd->ocd_maxbytes);
1768 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1769 __swab16s(&ocd->ocd_maxmodrpcs);
1770 BUILD_BUG_ON(offsetof(typeof(*ocd), padding0) == 0);
1771 BUILD_BUG_ON(offsetof(typeof(*ocd), padding1) == 0);
1772 if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
1773 __swab64s(&ocd->ocd_connect_flags2);
1774 BUILD_BUG_ON(offsetof(typeof(*ocd), padding3) == 0);
1775 BUILD_BUG_ON(offsetof(typeof(*ocd), padding4) == 0);
1776 BUILD_BUG_ON(offsetof(typeof(*ocd), padding5) == 0);
1777 BUILD_BUG_ON(offsetof(typeof(*ocd), padding6) == 0);
1778 BUILD_BUG_ON(offsetof(typeof(*ocd), padding7) == 0);
1779 BUILD_BUG_ON(offsetof(typeof(*ocd), padding8) == 0);
1780 BUILD_BUG_ON(offsetof(typeof(*ocd), padding9) == 0);
1781 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingA) == 0);
1782 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingB) == 0);
1783 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingC) == 0);
1784 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingD) == 0);
1785 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingE) == 0);
1786 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingF) == 0);
1789 static void lustre_swab_ost_layout(struct ost_layout *ol)
1791 __swab32s(&ol->ol_stripe_size);
1792 __swab32s(&ol->ol_stripe_count);
1793 __swab64s(&ol->ol_comp_start);
1794 __swab64s(&ol->ol_comp_end);
1795 __swab32s(&ol->ol_comp_id);
1798 void lustre_swab_obdo(struct obdo *o)
1800 __swab64s(&o->o_valid);
1801 lustre_swab_ost_id(&o->o_oi);
1802 __swab64s(&o->o_parent_seq);
1803 __swab64s(&o->o_size);
1804 __swab64s(&o->o_mtime);
1805 __swab64s(&o->o_atime);
1806 __swab64s(&o->o_ctime);
1807 __swab64s(&o->o_blocks);
1808 __swab64s(&o->o_grant);
1809 __swab32s(&o->o_blksize);
1810 __swab32s(&o->o_mode);
1811 __swab32s(&o->o_uid);
1812 __swab32s(&o->o_gid);
1813 __swab32s(&o->o_flags);
1814 __swab32s(&o->o_nlink);
1815 __swab32s(&o->o_parent_oid);
1816 __swab32s(&o->o_misc);
1817 __swab64s(&o->o_ioepoch);
1818 __swab32s(&o->o_stripe_idx);
1819 __swab32s(&o->o_parent_ver);
1820 lustre_swab_ost_layout(&o->o_layout);
1821 __swab32s(&o->o_layout_version);
1822 __swab32s(&o->o_uid_h);
1823 __swab32s(&o->o_gid_h);
1824 __swab64s(&o->o_data_version);
1825 __swab32s(&o->o_projid);
1826 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_4) == 0);
1827 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_5) == 0);
1828 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_6) == 0);
1831 EXPORT_SYMBOL(lustre_swab_obdo);
1833 void lustre_swab_obd_statfs(struct obd_statfs *os)
1835 __swab64s(&os->os_type);
1836 __swab64s(&os->os_blocks);
1837 __swab64s(&os->os_bfree);
1838 __swab64s(&os->os_bavail);
1839 __swab64s(&os->os_files);
1840 __swab64s(&os->os_ffree);
1841 /* no need to swab os_fsid */
1842 __swab32s(&os->os_bsize);
1843 __swab32s(&os->os_namelen);
1844 __swab64s(&os->os_maxbytes);
1845 __swab32s(&os->os_state);
1846 __swab32s(&os->os_fprecreated);
1847 __swab32s(&os->os_granted);
1848 BUILD_BUG_ON(offsetof(typeof(*os), os_spare3) == 0);
1849 BUILD_BUG_ON(offsetof(typeof(*os), os_spare4) == 0);
1850 BUILD_BUG_ON(offsetof(typeof(*os), os_spare5) == 0);
1851 BUILD_BUG_ON(offsetof(typeof(*os), os_spare6) == 0);
1852 BUILD_BUG_ON(offsetof(typeof(*os), os_spare7) == 0);
1853 BUILD_BUG_ON(offsetof(typeof(*os), os_spare8) == 0);
1854 BUILD_BUG_ON(offsetof(typeof(*os), os_spare9) == 0);
1857 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
1859 lustre_swab_ost_id(&ioo->ioo_oid);
1860 __swab32s(&ioo->ioo_max_brw);
1861 __swab32s(&ioo->ioo_bufcnt);
1864 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
1866 __swab64s(&nbr->rnb_offset);
1867 __swab32s(&nbr->rnb_len);
1868 __swab32s(&nbr->rnb_flags);
1871 void lustre_swab_ost_body(struct ost_body *b)
1873 lustre_swab_obdo(&b->oa);
1876 void lustre_swab_ost_last_id(u64 *id)
1881 void lustre_swab_generic_32s(__u32 *val)
1886 void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *desc)
1888 lustre_swab_lu_fid(&desc->gl_id.qid_fid);
1889 __swab64s(&desc->gl_flags);
1890 __swab64s(&desc->gl_ver);
1891 __swab64s(&desc->gl_hardlimit);
1892 __swab64s(&desc->gl_softlimit);
1893 __swab64s(&desc->gl_time);
1894 BUILD_BUG_ON(offsetof(typeof(*desc), gl_pad2) == 0);
1896 EXPORT_SYMBOL(lustre_swab_gl_lquota_desc);
1898 void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *desc)
1900 __swab32s(&desc->lgbd_status);
1901 __swab32s(&desc->lgbd_timeout);
1902 BUILD_BUG_ON(offsetof(typeof(*desc), lgbd_padding) == 0);
1904 EXPORT_SYMBOL(lustre_swab_gl_barrier_desc);
1906 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
1908 __swab64s(&lvb->lvb_size);
1909 __swab64s(&lvb->lvb_mtime);
1910 __swab64s(&lvb->lvb_atime);
1911 __swab64s(&lvb->lvb_ctime);
1912 __swab64s(&lvb->lvb_blocks);
1914 EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
1916 void lustre_swab_ost_lvb(struct ost_lvb *lvb)
1918 __swab64s(&lvb->lvb_size);
1919 __swab64s(&lvb->lvb_mtime);
1920 __swab64s(&lvb->lvb_atime);
1921 __swab64s(&lvb->lvb_ctime);
1922 __swab64s(&lvb->lvb_blocks);
1923 __swab32s(&lvb->lvb_mtime_ns);
1924 __swab32s(&lvb->lvb_atime_ns);
1925 __swab32s(&lvb->lvb_ctime_ns);
1926 __swab32s(&lvb->lvb_padding);
1928 EXPORT_SYMBOL(lustre_swab_ost_lvb);
1930 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
1932 __swab64s(&lvb->lvb_flags);
1933 __swab64s(&lvb->lvb_id_may_rel);
1934 __swab64s(&lvb->lvb_id_rel);
1935 __swab64s(&lvb->lvb_id_qunit);
1936 __swab64s(&lvb->lvb_pad1);
1938 EXPORT_SYMBOL(lustre_swab_lquota_lvb);
1940 void lustre_swab_barrier_lvb(struct barrier_lvb *lvb)
1942 __swab32s(&lvb->lvb_status);
1943 __swab32s(&lvb->lvb_index);
1944 BUILD_BUG_ON(offsetof(typeof(*lvb), lvb_padding) == 0);
1946 EXPORT_SYMBOL(lustre_swab_barrier_lvb);
1948 void lustre_swab_mdt_body(struct mdt_body *b)
1950 lustre_swab_lu_fid(&b->mbo_fid1);
1951 lustre_swab_lu_fid(&b->mbo_fid2);
1952 /* handle is opaque */
1953 __swab64s(&b->mbo_valid);
1954 __swab64s(&b->mbo_size);
1955 __swab64s(&b->mbo_mtime);
1956 __swab64s(&b->mbo_atime);
1957 __swab64s(&b->mbo_ctime);
1958 __swab64s(&b->mbo_blocks);
1959 __swab64s(&b->mbo_version);
1960 __swab64s(&b->mbo_t_state);
1961 __swab32s(&b->mbo_fsuid);
1962 __swab32s(&b->mbo_fsgid);
1963 __swab32s(&b->mbo_capability);
1964 __swab32s(&b->mbo_mode);
1965 __swab32s(&b->mbo_uid);
1966 __swab32s(&b->mbo_gid);
1967 __swab32s(&b->mbo_flags);
1968 __swab32s(&b->mbo_rdev);
1969 __swab32s(&b->mbo_nlink);
1970 __swab32s(&b->mbo_layout_gen);
1971 __swab32s(&b->mbo_suppgid);
1972 __swab32s(&b->mbo_eadatasize);
1973 __swab32s(&b->mbo_aclsize);
1974 __swab32s(&b->mbo_max_mdsize);
1975 BUILD_BUG_ON(offsetof(typeof(*b), mbo_unused3) == 0);
1976 __swab32s(&b->mbo_uid_h);
1977 __swab32s(&b->mbo_gid_h);
1978 __swab32s(&b->mbo_projid);
1979 __swab64s(&b->mbo_dom_size);
1980 __swab64s(&b->mbo_dom_blocks);
1981 __swab64s(&b->mbo_btime);
1982 BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_9) == 0);
1983 BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_10) == 0);
1986 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
1988 /* mio_open_handle is opaque */
1989 BUILD_BUG_ON(offsetof(typeof(*b), mio_unused1) == 0);
1990 BUILD_BUG_ON(offsetof(typeof(*b), mio_unused2) == 0);
1991 BUILD_BUG_ON(offsetof(typeof(*b), mio_padding) == 0);
1994 void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
1998 __swab32s(&mti->mti_lustre_ver);
1999 __swab32s(&mti->mti_stripe_index);
2000 __swab32s(&mti->mti_config_ver);
2001 __swab32s(&mti->mti_flags);
2002 __swab32s(&mti->mti_instance);
2003 __swab32s(&mti->mti_nid_count);
2004 BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
2005 for (i = 0; i < MTI_NIDS_MAX; i++)
2006 __swab64s(&mti->mti_nids[i]);
2009 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
2013 __swab64s(&entry->mne_version);
2014 __swab32s(&entry->mne_instance);
2015 __swab32s(&entry->mne_index);
2016 __swab32s(&entry->mne_length);
2018 /* mne_nid_(count|type) must be one byte in size because we are going
2019 * to access them without swabbing. */
2020 BUILD_BUG_ON(sizeof(entry->mne_nid_count) != sizeof(__u8));
2021 BUILD_BUG_ON(sizeof(entry->mne_nid_type) != sizeof(__u8));
2023 /* remove this assertion if IPv6 is supported. */
2024 LASSERT(entry->mne_nid_type == 0);
2025 for (i = 0; i < entry->mne_nid_count; i++) {
2026 BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
2027 __swab64s(&entry->u.nids[i]);
2030 EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
2032 void lustre_swab_mgs_config_body(struct mgs_config_body *body)
2034 __swab64s(&body->mcb_offset);
2035 __swab32s(&body->mcb_units);
2036 __swab16s(&body->mcb_type);
2039 void lustre_swab_mgs_config_res(struct mgs_config_res *body)
2041 __swab64s(&body->mcr_offset);
2042 __swab64s(&body->mcr_size);
2045 static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
2047 __swab64s(&i->dqi_bgrace);
2048 __swab64s(&i->dqi_igrace);
2049 __swab32s(&i->dqi_flags);
2050 __swab32s(&i->dqi_valid);
2053 static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
2055 __swab64s(&b->dqb_ihardlimit);
2056 __swab64s(&b->dqb_isoftlimit);
2057 __swab64s(&b->dqb_curinodes);
2058 __swab64s(&b->dqb_bhardlimit);
2059 __swab64s(&b->dqb_bsoftlimit);
2060 __swab64s(&b->dqb_curspace);
2061 __swab64s(&b->dqb_btime);
2062 __swab64s(&b->dqb_itime);
2063 __swab32s(&b->dqb_valid);
2064 BUILD_BUG_ON(offsetof(typeof(*b), dqb_padding) == 0);
2067 int lustre_swab_obd_quotactl(struct obd_quotactl *q, __u32 len)
2069 if (unlikely(len <= sizeof(struct obd_quotactl)))
2072 __swab32s(&q->qc_cmd);
2073 __swab32s(&q->qc_type);
2074 __swab32s(&q->qc_id);
2075 __swab32s(&q->qc_stat);
2076 lustre_swab_obd_dqinfo(&q->qc_dqinfo);
2077 lustre_swab_obd_dqblk(&q->qc_dqblk);
2082 void lustre_swab_fid2path(struct getinfo_fid2path *gf)
2084 lustre_swab_lu_fid(&gf->gf_fid);
2085 __swab64s(&gf->gf_recno);
2086 __swab32s(&gf->gf_linkno);
2087 __swab32s(&gf->gf_pathlen);
2089 EXPORT_SYMBOL(lustre_swab_fid2path);
2091 static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
2093 __swab64s(&fm_extent->fe_logical);
2094 __swab64s(&fm_extent->fe_physical);
2095 __swab64s(&fm_extent->fe_length);
2096 __swab32s(&fm_extent->fe_flags);
2097 __swab32s(&fm_extent->fe_device);
2100 static void lustre_swab_fiemap_hdr(struct fiemap *fiemap)
2102 __swab64s(&fiemap->fm_start);
2103 __swab64s(&fiemap->fm_length);
2104 __swab32s(&fiemap->fm_flags);
2105 __swab32s(&fiemap->fm_mapped_extents);
2106 __swab32s(&fiemap->fm_extent_count);
2107 __swab32s(&fiemap->fm_reserved);
2110 int lustre_swab_fiemap(struct fiemap *fiemap, __u32 len)
2112 __u32 i, size, count;
2114 lustre_swab_fiemap_hdr(fiemap);
2116 size = fiemap_count_to_size(fiemap->fm_mapped_extents);
2117 count = fiemap->fm_mapped_extents;
2118 if (unlikely(size > len)) {
2119 count = (len - sizeof(struct fiemap)) /
2120 sizeof(struct fiemap_extent);
2121 fiemap->fm_mapped_extents = count;
2124 /* still swab extents as we cannot yet pass rc to callers */
2125 for (i = 0; i < count; i++)
2126 lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
2131 void lustre_swab_fiemap_info_key(struct ll_fiemap_info_key *fiemap_info)
2133 lustre_swab_obdo(&fiemap_info->lfik_oa);
2134 lustre_swab_fiemap_hdr(&fiemap_info->lfik_fiemap);
2137 void lustre_swab_idx_info(struct idx_info *ii)
2139 __swab32s(&ii->ii_magic);
2140 __swab32s(&ii->ii_flags);
2141 __swab16s(&ii->ii_count);
2142 __swab32s(&ii->ii_attrs);
2143 lustre_swab_lu_fid(&ii->ii_fid);
2144 __swab64s(&ii->ii_version);
2145 __swab64s(&ii->ii_hash_start);
2146 __swab64s(&ii->ii_hash_end);
2147 __swab16s(&ii->ii_keysize);
2148 __swab16s(&ii->ii_recsize);
2151 void lustre_swab_lip_header(struct lu_idxpage *lip)
2154 __swab32s(&lip->lip_magic);
2155 __swab16s(&lip->lip_flags);
2156 __swab16s(&lip->lip_nr);
2158 EXPORT_SYMBOL(lustre_swab_lip_header);
2160 void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
2162 __swab32s(&rr->rr_opcode);
2163 __swab32s(&rr->rr_cap);
2164 __swab32s(&rr->rr_fsuid);
2165 /* rr_fsuid_h is unused */
2166 __swab32s(&rr->rr_fsgid);
2167 /* rr_fsgid_h is unused */
2168 __swab32s(&rr->rr_suppgid1);
2169 /* rr_suppgid1_h is unused */
2170 __swab32s(&rr->rr_suppgid2);
2171 /* rr_suppgid2_h is unused */
2172 lustre_swab_lu_fid(&rr->rr_fid1);
2173 lustre_swab_lu_fid(&rr->rr_fid2);
2174 __swab64s(&rr->rr_mtime);
2175 __swab64s(&rr->rr_atime);
2176 __swab64s(&rr->rr_ctime);
2177 __swab64s(&rr->rr_size);
2178 __swab64s(&rr->rr_blocks);
2179 __swab32s(&rr->rr_bias);
2180 __swab32s(&rr->rr_mode);
2181 __swab32s(&rr->rr_flags);
2182 __swab32s(&rr->rr_flags_h);
2183 __swab32s(&rr->rr_umask);
2184 __swab16s(&rr->rr_mirror_id);
2186 BUILD_BUG_ON(offsetof(typeof(*rr), rr_padding_4) == 0);
2189 void lustre_swab_lov_desc(struct lov_desc *ld)
2191 __swab32s(&ld->ld_tgt_count);
2192 __swab32s(&ld->ld_active_tgt_count);
2193 __swab32s(&ld->ld_default_stripe_count);
2194 __swab32s(&ld->ld_pattern);
2195 __swab64s(&ld->ld_default_stripe_size);
2196 __swab64s(&ld->ld_default_stripe_offset);
2197 __swab32s(&ld->ld_qos_maxage);
2198 /* uuid endian insensitive */
2200 EXPORT_SYMBOL(lustre_swab_lov_desc);
2202 void lustre_swab_lmv_desc(struct lmv_desc *ld)
2204 __swab32s(&ld->ld_tgt_count);
2205 __swab32s(&ld->ld_active_tgt_count);
2206 __swab32s(&ld->ld_default_stripe_count);
2207 __swab32s(&ld->ld_pattern);
2208 __swab64s(&ld->ld_default_hash_size);
2209 __swab32s(&ld->ld_qos_maxage);
2210 /* uuid endian insensitive */
2213 /* This structure is always in little-endian */
2214 static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
2218 __swab32s(&lmm1->lmv_magic);
2219 __swab32s(&lmm1->lmv_stripe_count);
2220 __swab32s(&lmm1->lmv_master_mdt_index);
2221 __swab32s(&lmm1->lmv_hash_type);
2222 __swab32s(&lmm1->lmv_layout_version);
2223 for (i = 0; i < lmm1->lmv_stripe_count; i++)
2224 lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
2227 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
2229 switch (lmm->lmv_magic) {
2231 lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
2237 EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
2239 void lustre_swab_lmv_user_md_objects(struct lmv_user_mds_data *lmd,
2244 for (i = 0; i < stripe_count; i++)
2245 __swab32s(&(lmd[i].lum_mds));
2247 EXPORT_SYMBOL(lustre_swab_lmv_user_md_objects);
2250 void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
2254 if (lum->lum_magic == LMV_MAGIC_FOREIGN) {
2255 __swab32s(&lum->lum_magic);
2256 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_length);
2257 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_type);
2258 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_flags);
2262 count = lum->lum_stripe_count;
2263 __swab32s(&lum->lum_magic);
2264 __swab32s(&lum->lum_stripe_count);
2265 __swab32s(&lum->lum_stripe_offset);
2266 __swab32s(&lum->lum_hash_type);
2267 __swab32s(&lum->lum_type);
2268 /* lum_max_inherit and lum_max_inherit_rr do not need to be swabbed */
2269 BUILD_BUG_ON(offsetof(typeof(*lum), lum_padding1) == 0);
2270 BUILD_BUG_ON(offsetof(typeof(*lum), lum_padding2) == 0);
2271 BUILD_BUG_ON(offsetof(typeof(*lum), lum_padding3) == 0);
2272 switch (lum->lum_magic) {
2273 case LMV_USER_MAGIC_SPECIFIC:
2274 count = lum->lum_stripe_count;
2276 case __swab32(LMV_USER_MAGIC_SPECIFIC):
2277 lustre_swab_lmv_user_md_objects(lum->lum_objects, count);
2283 EXPORT_SYMBOL(lustre_swab_lmv_user_md);
2285 static void lustre_print_v1v3(unsigned int lvl, struct lov_user_md *lum,
2288 CDEBUG(lvl, "%s lov_user_md %p:\n", msg, lum);
2289 CDEBUG(lvl, "\tlmm_magic: %#x\n", lum->lmm_magic);
2290 CDEBUG(lvl, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
2291 CDEBUG(lvl, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
2292 CDEBUG(lvl, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
2293 CDEBUG(lvl, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
2294 CDEBUG(lvl, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
2295 CDEBUG(lvl, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
2296 lum->lmm_stripe_offset);
2297 if (lum->lmm_magic == LOV_USER_MAGIC_V3) {
2298 struct lov_user_md_v3 *v3 = (void *)lum;
2299 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2301 if (lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2302 struct lov_user_md_v3 *v3 = (void *)lum;
2305 if (v3->lmm_pool_name[0] != '\0')
2306 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2308 CDEBUG(lvl, "\ttarget list:\n");
2309 for (i = 0; i < v3->lmm_stripe_count; i++)
2310 CDEBUG(lvl, "\t\t%u\n", v3->lmm_objects[i].l_ost_idx);
2314 void lustre_print_user_md(unsigned int lvl, struct lov_user_md *lum,
2317 struct lov_comp_md_v1 *comp_v1;
2320 if (likely(!cfs_cdebug_show(lvl, DEBUG_SUBSYSTEM)))
2323 if (lum->lmm_magic == LOV_USER_MAGIC_V1 ||
2324 lum->lmm_magic == LOV_USER_MAGIC_V3) {
2325 lustre_print_v1v3(lvl, lum, msg);
2329 if (lum->lmm_magic != LOV_USER_MAGIC_COMP_V1) {
2330 CDEBUG(lvl, "%s: bad magic: %x\n", msg, lum->lmm_magic);
2334 comp_v1 = (struct lov_comp_md_v1 *)lum;
2335 CDEBUG(lvl, "%s: lov_comp_md_v1 %p:\n", msg, lum);
2336 CDEBUG(lvl, "\tlcm_magic: %#x\n", comp_v1->lcm_magic);
2337 CDEBUG(lvl, "\tlcm_size: %#x\n", comp_v1->lcm_size);
2338 CDEBUG(lvl, "\tlcm_layout_gen: %#x\n", comp_v1->lcm_layout_gen);
2339 CDEBUG(lvl, "\tlcm_flags: %#x\n", comp_v1->lcm_flags);
2340 CDEBUG(lvl, "\tlcm_entry_count: %#x\n\n", comp_v1->lcm_entry_count);
2341 CDEBUG(lvl, "\tlcm_mirror_count: %#x\n\n", comp_v1->lcm_mirror_count);
2343 for (i = 0; i < comp_v1->lcm_entry_count; i++) {
2344 struct lov_comp_md_entry_v1 *ent = &comp_v1->lcm_entries[i];
2345 struct lov_user_md *v1;
2347 CDEBUG(lvl, "\tentry %d:\n", i);
2348 CDEBUG(lvl, "\tlcme_id: %#x\n", ent->lcme_id);
2349 CDEBUG(lvl, "\tlcme_flags: %#x\n", ent->lcme_flags);
2350 if (ent->lcme_flags & LCME_FL_NOSYNC)
2351 CDEBUG(lvl, "\tlcme_timestamp: %llu\n",
2352 ent->lcme_timestamp);
2353 CDEBUG(lvl, "\tlcme_extent.e_start: %llu\n",
2354 ent->lcme_extent.e_start);
2355 CDEBUG(lvl, "\tlcme_extent.e_end: %llu\n",
2356 ent->lcme_extent.e_end);
2357 CDEBUG(lvl, "\tlcme_offset: %#x\n", ent->lcme_offset);
2358 CDEBUG(lvl, "\tlcme_size: %#x\n\n", ent->lcme_size);
2360 v1 = (struct lov_user_md *)((char *)comp_v1 +
2361 comp_v1->lcm_entries[i].lcme_offset);
2362 lustre_print_v1v3(lvl, v1, msg);
2365 EXPORT_SYMBOL(lustre_print_user_md);
2367 static void lustre_swab_lmm_oi(struct ost_id *oi)
2369 __swab64s(&oi->oi.oi_id);
2370 __swab64s(&oi->oi.oi_seq);
2373 static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
2376 __swab32s(&lum->lmm_magic);
2377 __swab32s(&lum->lmm_pattern);
2378 lustre_swab_lmm_oi(&lum->lmm_oi);
2379 __swab32s(&lum->lmm_stripe_size);
2380 __swab16s(&lum->lmm_stripe_count);
2381 __swab16s(&lum->lmm_stripe_offset);
2385 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
2388 CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
2389 lustre_swab_lov_user_md_common(lum);
2392 EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
2394 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
2397 CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
2398 lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
2399 /* lmm_pool_name is a char array; it needs no byte swapping */
2402 EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
2404 void lustre_swab_lov_comp_md_v1(struct lov_comp_md_v1 *lum)
2406 struct lov_comp_md_entry_v1 *ent;
2407 struct lov_user_md_v1 *v1;
2408 struct lov_user_md_v3 *v3;
2412 __u16 ent_count, stripe_count;
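/*
 * lcm_entry_count (and, per entry, lmm_stripe_count) is copied into a
 * local and byte-swapped there when the buffer is not already in CPU
 * order, so the structure fields themselves are swabbed exactly once
 * while the loop bounds stay usable in CPU byte order whichever
 * direction the swab runs.
 */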
2415 cpu_endian = lum->lcm_magic == LOV_USER_MAGIC_COMP_V1;
2416 ent_count = lum->lcm_entry_count;
2418 __swab16s(&ent_count);
2420 CDEBUG(D_IOCTL, "swabbing lov_user_comp_md v1\n");
2421 __swab32s(&lum->lcm_magic);
2422 __swab32s(&lum->lcm_size);
2423 __swab32s(&lum->lcm_layout_gen);
2424 __swab16s(&lum->lcm_flags);
2425 __swab16s(&lum->lcm_entry_count);
2426 __swab16s(&lum->lcm_mirror_count);
2427 BUILD_BUG_ON(offsetof(typeof(*lum), lcm_padding1) == 0);
2428 BUILD_BUG_ON(offsetof(typeof(*lum), lcm_padding2) == 0);
2430 for (i = 0; i < ent_count; i++) {
2431 ent = &lum->lcm_entries[i];
2432 off = ent->lcme_offset;
2433 size = ent->lcme_size;
2439 __swab32s(&ent->lcme_id);
2440 __swab32s(&ent->lcme_flags);
2441 __swab64s(&ent->lcme_timestamp);
2442 __swab64s(&ent->lcme_extent.e_start);
2443 __swab64s(&ent->lcme_extent.e_end);
2444 __swab32s(&ent->lcme_offset);
2445 __swab32s(&ent->lcme_size);
2446 __swab32s(&ent->lcme_layout_gen);
2447 BUILD_BUG_ON(offsetof(typeof(*ent), lcme_padding_1) == 0);
2449 v1 = (struct lov_user_md_v1 *)((char *)lum + off);
2450 stripe_count = v1->lmm_stripe_count;
2452 __swab16s(&stripe_count);
2454 if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V1) ||
2455 v1->lmm_magic == LOV_USER_MAGIC_V1) {
2456 lustre_swab_lov_user_md_v1(v1);
2457 if (size > sizeof(*v1))
2458 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2460 } else if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V3) ||
2461 v1->lmm_magic == LOV_USER_MAGIC_V3 ||
2462 v1->lmm_magic == __swab32(LOV_USER_MAGIC_SPECIFIC) ||
2463 v1->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2464 v3 = (struct lov_user_md_v3 *)v1;
2465 lustre_swab_lov_user_md_v3(v3);
2466 if (size > sizeof(*v3))
2467 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2470 CERROR("Invalid magic %#x\n", v1->lmm_magic);
2474 EXPORT_SYMBOL(lustre_swab_lov_comp_md_v1);
2476 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
2482 for (i = 0; i < stripe_count; i++) {
2483 lustre_swab_ost_id(&(lod[i].l_ost_oi));
2484 __swab32s(&(lod[i].l_ost_gen));
2485 __swab32s(&(lod[i].l_ost_idx));
2489 EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
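/*
 * lustre_swab_lov_user_md() below matches both the native and the
 * byte-swapped form of each magic, so one routine covers swabbing in
 * either direction.  The stripe count is only swabbed in a local copy
 * when the magic shows the buffer is not in CPU order, and the
 * lmm_objects[] array is touched only when the supplied size indicates
 * that the objects actually follow the header.
 */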
2491 void lustre_swab_lov_user_md(struct lov_user_md *lum, size_t size)
2493 struct lov_user_md_v1 *v1;
2494 struct lov_user_md_v3 *v3;
2495 struct lov_foreign_md *lfm;
2499 CDEBUG(D_IOCTL, "swabbing lov_user_md\n");
2500 switch (lum->lmm_magic) {
2501 case __swab32(LOV_MAGIC_V1):
2502 case LOV_USER_MAGIC_V1:
2504 v1 = (struct lov_user_md_v1 *)lum;
2505 stripe_count = v1->lmm_stripe_count;
2507 if (lum->lmm_magic != LOV_USER_MAGIC_V1)
2508 __swab16s(&stripe_count);
2510 lustre_swab_lov_user_md_v1(v1);
2511 if (size > sizeof(*v1))
2512 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2517 case __swab32(LOV_MAGIC_V3):
2518 case LOV_USER_MAGIC_V3:
2520 v3 = (struct lov_user_md_v3 *)lum;
2521 stripe_count = v3->lmm_stripe_count;
2523 if (lum->lmm_magic != LOV_USER_MAGIC_V3)
2524 __swab16s(&stripe_count);
2526 lustre_swab_lov_user_md_v3(v3);
2527 if (size > sizeof(*v3))
2528 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2532 case __swab32(LOV_USER_MAGIC_SPECIFIC):
2533 case LOV_USER_MAGIC_SPECIFIC:
2535 v3 = (struct lov_user_md_v3 *)lum;
2536 stripe_count = v3->lmm_stripe_count;
2538 if (lum->lmm_magic != LOV_USER_MAGIC_SPECIFIC)
2539 __swab16s(&stripe_count);
2541 lustre_swab_lov_user_md_v3(v3);
2542 lustre_swab_lov_user_md_objects(v3->lmm_objects, stripe_count);
2545 case __swab32(LOV_MAGIC_COMP_V1):
2546 case LOV_USER_MAGIC_COMP_V1:
2547 lustre_swab_lov_comp_md_v1((struct lov_comp_md_v1 *)lum);
2549 case __swab32(LOV_MAGIC_FOREIGN):
2550 case LOV_USER_MAGIC_FOREIGN:
2552 lfm = (struct lov_foreign_md *)lum;
2553 __swab32s(&lfm->lfm_magic);
2554 __swab32s(&lfm->lfm_length);
2555 __swab32s(&lfm->lfm_type);
2556 __swab32s(&lfm->lfm_flags);
2560 CDEBUG(D_IOCTL, "Invalid LOV magic %08x\n", lum->lmm_magic);
2563 EXPORT_SYMBOL(lustre_swab_lov_user_md);
2565 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
2568 CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
2569 __swab32s(&lmm->lmm_magic);
2570 __swab32s(&lmm->lmm_pattern);
2571 lustre_swab_lmm_oi(&lmm->lmm_oi);
2572 __swab32s(&lmm->lmm_stripe_size);
2573 __swab16s(&lmm->lmm_stripe_count);
2574 __swab16s(&lmm->lmm_layout_gen);
2577 EXPORT_SYMBOL(lustre_swab_lov_mds_md);
2579 void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
2583 for (i = 0; i < RES_NAME_SIZE; i++)
2584 __swab64s(&id->name[i]);
2587 void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
2589 /* The lock data is a union whose first two fields are always an
2590  * extent, so it is safe to process LDLM_EXTENT and LDLM_FLOCK lock
2591  * data the same way.
2593 __swab64s(&d->l_extent.start);
2594 __swab64s(&d->l_extent.end);
2595 __swab64s(&d->l_extent.gid);
2596 __swab64s(&d->l_flock.lfw_owner);
2597 __swab32s(&d->l_flock.lfw_pid);
2600 void lustre_swab_ldlm_intent(struct ldlm_intent *i)
2605 void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
2607 __swab32s(&r->lr_type);
2608 BUILD_BUG_ON(offsetof(typeof(*r), lr_pad) == 0);
2609 lustre_swab_ldlm_res_id(&r->lr_name);
2612 void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
2614 lustre_swab_ldlm_resource_desc(&l->l_resource);
2615 __swab32s(&l->l_req_mode);
2616 __swab32s(&l->l_granted_mode);
2617 lustre_swab_ldlm_policy_data(&l->l_policy_data);
2620 void lustre_swab_ldlm_request(struct ldlm_request *rq)
2622 __swab32s(&rq->lock_flags);
2623 lustre_swab_ldlm_lock_desc(&rq->lock_desc);
2624 __swab32s(&rq->lock_count);
2625 /* lock_handle[] opaque */
2628 void lustre_swab_ldlm_reply(struct ldlm_reply *r)
2630 __swab32s(&r->lock_flags);
2631 BUILD_BUG_ON(offsetof(typeof(*r), lock_padding) == 0);
2632 lustre_swab_ldlm_lock_desc(&r->lock_desc);
2633 /* lock_handle opaque */
2634 __swab64s(&r->lock_policy_res1);
2635 __swab64s(&r->lock_policy_res2);
2638 void lustre_swab_quota_body(struct quota_body *b)
2640 lustre_swab_lu_fid(&b->qb_fid);
2641 lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
2642 __swab32s(&b->qb_flags);
2643 __swab64s(&b->qb_count);
2644 __swab64s(&b->qb_usage);
2645 __swab64s(&b->qb_slv_ver);
2648 /* Dump functions */
2649 void dump_ioo(struct obd_ioobj *ioo)
2652 "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
2653 "ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
2657 void dump_rniobuf(struct niobuf_remote *nb)
2659 CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
2660 nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
2663 void dump_obdo(struct obdo *oa)
2665 u64 valid = oa->o_valid;
2667 CDEBUG(D_RPCTRACE, "obdo: o_valid = %#llx\n", valid);
2668 if (valid & OBD_MD_FLID)
2669 CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
2670 if (valid & OBD_MD_FLFID)
2671 CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
2673 if (valid & OBD_MD_FLSIZE)
2674 CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
2675 if (valid & OBD_MD_FLMTIME)
2676 CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
2677 if (valid & OBD_MD_FLATIME)
2678 CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
2679 if (valid & OBD_MD_FLCTIME)
2680 CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
2681 if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
2682 CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
2683 if (valid & OBD_MD_FLGRANT)
2684 CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
2685 if (valid & OBD_MD_FLBLKSZ)
2686 CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
2687 if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
2688 CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
2689 oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
2690 (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
2691 if (valid & OBD_MD_FLUID)
2692 CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
2693 if (valid & OBD_MD_FLUID)
2694 CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
2695 if (valid & OBD_MD_FLGID)
2696 CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
2697 if (valid & OBD_MD_FLGID)
2698 CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
2699 if (valid & OBD_MD_FLFLAGS)
2700 CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
2701 if (valid & OBD_MD_FLNLINK)
2702 CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
2703 else if (valid & OBD_MD_FLCKSUM)
2704 CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
2706 if (valid & OBD_MD_FLPARENT)
2707 CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
2709 if (valid & OBD_MD_FLFID) {
2710 CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
2712 CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
2715 if (valid & OBD_MD_FLHANDLE)
2716 CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
2717 oa->o_handle.cookie);
2720 void dump_ost_body(struct ost_body *ob)
2725 void dump_rcs(__u32 *rc)
2727 CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
2730 static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2732 LASSERT(req->rq_reqmsg);
2734 switch (req->rq_reqmsg->lm_magic) {
2735 case LUSTRE_MSG_MAGIC_V2:
2736 return req_capsule_req_swabbed(&req->rq_pill,
2737 MSG_PTLRPC_BODY_OFF);
2739 CERROR("bad lustre msg magic: %#08X\n",
2740 req->rq_reqmsg->lm_magic);
2745 static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2747 if (unlikely(!req->rq_repmsg))
2750 switch (req->rq_repmsg->lm_magic) {
2751 case LUSTRE_MSG_MAGIC_V2:
2752 return req_capsule_rep_swabbed(&req->rq_pill,
2753 MSG_PTLRPC_BODY_OFF);
2755 /* uninitialized yet */
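/*
 * _debug_req() samples the request and reply messages under
 * rq_early_free_lock; this presumably keeps an early reply from freeing
 * rq_repmsg while its flags and status are read for the debug line.
 */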
2760 void _debug_req(struct ptlrpc_request *req,
2761 struct libcfs_debug_msg_data *msgdata, const char *fmt, ...)
2763 bool req_ok = req->rq_reqmsg != NULL;
2764 bool rep_ok = false;
2765 lnet_nid_t nid = LNET_NID_ANY;
2766 struct va_format vaf;
2769 int rep_status = -1;
2771 spin_lock(&req->rq_early_free_lock);
2775 if (req_capsule_req_need_swab(&req->rq_pill)) {
2776 req_ok = req_ok && req_ptlrpc_body_swabbed(req);
2777 rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
2781 rep_flags = lustre_msg_get_flags(req->rq_repmsg);
2782 rep_status = lustre_msg_get_status(req->rq_repmsg);
2784 spin_unlock(&req->rq_early_free_lock);
2786 if (req->rq_import && req->rq_import->imp_connection)
2787 nid = req->rq_import->imp_connection->c_peer.nid;
2788 else if (req->rq_export && req->rq_export->exp_connection)
2789 nid = req->rq_export->exp_connection->c_peer.nid;
2791 va_start(args, fmt);
2794 libcfs_debug_msg(msgdata,
2795 "%pV req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d job:'%s'\n",
2797 req, req->rq_xid, req->rq_transno,
2798 req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
2799 req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
2801 req->rq_import->imp_obd->obd_name :
2803 req->rq_export->exp_client_uuid.uuid :
2805 libcfs_nid2str(nid),
2806 req->rq_request_portal, req->rq_reply_portal,
2807 req->rq_reqlen, req->rq_replen,
2808 req->rq_early_count, (s64)req->rq_timedout,
2809 (s64)req->rq_deadline,
2810 atomic_read(&req->rq_refcount),
2811 DEBUG_REQ_FLAGS(req),
2812 req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
2813 rep_flags, req->rq_status, rep_status,
2814 req_ok ? lustre_msg_get_jobid(req->rq_reqmsg) ?: ""
2818 EXPORT_SYMBOL(_debug_req);
2820 void lustre_swab_hsm_user_state(struct hsm_user_state *state)
2822 __swab32s(&state->hus_states);
2823 __swab32s(&state->hus_archive_id);
2826 void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
2828 __swab32s(&hss->hss_valid);
2829 __swab64s(&hss->hss_setmask);
2830 __swab64s(&hss->hss_clearmask);
2831 __swab32s(&hss->hss_archive_id);
2834 static void lustre_swab_hsm_extent(struct hsm_extent *extent)
2836 __swab64s(&extent->offset);
2837 __swab64s(&extent->length);
2840 void lustre_swab_hsm_current_action(struct hsm_current_action *action)
2842 __swab32s(&action->hca_state);
2843 __swab32s(&action->hca_action);
2844 lustre_swab_hsm_extent(&action->hca_location);
2847 void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
2849 lustre_swab_lu_fid(&hui->hui_fid);
2850 lustre_swab_hsm_extent(&hui->hui_extent);
2853 void lustre_swab_lu_extent(struct lu_extent *le)
2855 __swab64s(&le->e_start);
2856 __swab64s(&le->e_end);
2859 void lustre_swab_layout_intent(struct layout_intent *li)
2861 __swab32s(&li->li_opc);
2862 __swab32s(&li->li_flags);
2863 lustre_swab_lu_extent(&li->li_extent);
2866 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
2868 lustre_swab_lu_fid(&hpk->hpk_fid);
2869 __swab64s(&hpk->hpk_cookie);
2870 __swab64s(&hpk->hpk_extent.offset);
2871 __swab64s(&hpk->hpk_extent.length);
2872 __swab16s(&hpk->hpk_flags);
2873 __swab16s(&hpk->hpk_errval);
2876 void lustre_swab_hsm_request(struct hsm_request *hr)
2878 __swab32s(&hr->hr_action);
2879 __swab32s(&hr->hr_archive_id);
2880 __swab64s(&hr->hr_flags);
2881 __swab32s(&hr->hr_itemcount);
2882 __swab32s(&hr->hr_data_len);
2885 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
2887 __swab64s(&msl->msl_flags);
2890 void lustre_swab_close_data(struct close_data *cd)
2892 lustre_swab_lu_fid(&cd->cd_fid);
2893 __swab64s(&cd->cd_data_version);
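/*
 * Resync ids are swabbed in place only when resync_count fits in the
 * inline array; larger id lists presumably live in a separate request
 * buffer and are byte-swapped elsewhere.
 */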
2896 void lustre_swab_close_data_resync_done(struct close_data_resync_done *resync)
2900 __swab32s(&resync->resync_count);
2901 /* after the swab, resync_count is in CPU byte order */
2902 if (resync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
2903 for (i = 0; i < resync->resync_count; i++)
2904 __swab32s(&resync->resync_ids_inline[i]);
2907 EXPORT_SYMBOL(lustre_swab_close_data_resync_done);
2909 void lustre_swab_lfsck_request(struct lfsck_request *lr)
2911 __swab32s(&lr->lr_event);
2912 __swab32s(&lr->lr_index);
2913 __swab32s(&lr->lr_flags);
2914 __swab32s(&lr->lr_valid);
2915 __swab32s(&lr->lr_speed);
2916 __swab16s(&lr->lr_version);
2917 __swab16s(&lr->lr_active);
2918 __swab16s(&lr->lr_param);
2919 __swab16s(&lr->lr_async_windows);
2920 __swab32s(&lr->lr_flags);
2921 lustre_swab_lu_fid(&lr->lr_fid);
2922 lustre_swab_lu_fid(&lr->lr_fid2);
2923 __swab32s(&lr->lr_comp_id);
2924 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_0) == 0);
2925 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_1) == 0);
2926 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_2) == 0);
2927 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_3) == 0);
2930 void lustre_swab_lfsck_reply(struct lfsck_reply *lr)
2932 __swab32s(&lr->lr_status);
2933 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_1) == 0);
2934 __swab64s(&lr->lr_repaired);
2937 static void lustre_swab_orphan_rec(struct lu_orphan_rec *rec)
2939 lustre_swab_lu_fid(&rec->lor_fid);
2940 __swab32s(&rec->lor_uid);
2941 __swab32s(&rec->lor_gid);
2944 void lustre_swab_orphan_ent(struct lu_orphan_ent *ent)
2946 lustre_swab_lu_fid(&ent->loe_key);
2947 lustre_swab_orphan_rec(&ent->loe_rec);
2949 EXPORT_SYMBOL(lustre_swab_orphan_ent);
2951 void lustre_swab_orphan_ent_v2(struct lu_orphan_ent_v2 *ent)
2953 lustre_swab_lu_fid(&ent->loe_key);
2954 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
2955 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
2956 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding) == 0);
2958 EXPORT_SYMBOL(lustre_swab_orphan_ent_v2);
2960 void lustre_swab_orphan_ent_v3(struct lu_orphan_ent_v3 *ent)
2962 lustre_swab_lu_fid(&ent->loe_key);
2963 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
2964 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
2965 __swab32s(&ent->loe_rec.lor_layout_version);
2966 __swab32s(&ent->loe_rec.lor_range);
2967 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding_1) == 0);
2968 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding_2) == 0);
2970 EXPORT_SYMBOL(lustre_swab_orphan_ent_v3);
2972 void lustre_swab_ladvise(struct lu_ladvise *ladvise)
2974 __swab16s(&ladvise->lla_advice);
2975 __swab16s(&ladvise->lla_value1);
2976 __swab32s(&ladvise->lla_value2);
2977 __swab64s(&ladvise->lla_start);
2978 __swab64s(&ladvise->lla_end);
2979 __swab32s(&ladvise->lla_value3);
2980 __swab32s(&ladvise->lla_value4);
2982 EXPORT_SYMBOL(lustre_swab_ladvise);
2984 void lustre_swab_ladvise_hdr(struct ladvise_hdr *ladvise_hdr)
2986 __swab32s(&ladvise_hdr->lah_magic);
2987 __swab32s(&ladvise_hdr->lah_count);
2988 __swab64s(&ladvise_hdr->lah_flags);
2989 __swab32s(&ladvise_hdr->lah_value1);
2990 __swab32s(&ladvise_hdr->lah_value2);
2991 __swab64s(&ladvise_hdr->lah_value3);
2993 EXPORT_SYMBOL(lustre_swab_ladvise_hdr);