4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/ptlrpc/pack_generic.c
33 * (Un)packing of OST requests
35 * Author: Peter J. Braam <braam@clusterfs.com>
36 * Author: Phil Schwan <phil@clusterfs.com>
37 * Author: Eric Barton <eeb@clusterfs.com>
40 #define DEBUG_SUBSYSTEM S_RPC
42 #include <linux/crc32.h>
44 #include <libcfs/libcfs.h>
46 #include <llog_swab.h>
47 #include <lustre_disk.h>
48 #include <lustre_net.h>
49 #include <lustre_swab.h>
50 #include <obd_cksum.h>
51 #include <obd_class.h>
52 #include <obd_support.h>
53 #include "ptlrpc_internal.h"
55 static inline __u32 lustre_msg_hdr_size_v2(__u32 count)
57 return round_up(offsetof(struct lustre_msg_v2, lm_buflens[count]), 8);
60 __u32 lustre_msg_hdr_size(__u32 magic, __u32 count)
65 case LUSTRE_MSG_MAGIC_V2:
66 return lustre_msg_hdr_size_v2(count);
68 LASSERTF(0, "incorrect message magic: %08x\n", magic);
73 static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
74 enum lustre_msg_version version)
76 enum lustre_msg_version ver = lustre_msg_get_version(msg);
78 return (ver & LUSTRE_VERSION_MASK) != version;
81 int lustre_msg_check_version(struct lustre_msg *msg,
82 enum lustre_msg_version version)
84 #define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
85 switch (msg->lm_magic) {
86 case LUSTRE_MSG_MAGIC_V1:
87 CERROR("msg v1 not supported - please upgrade you system\n");
89 case LUSTRE_MSG_MAGIC_V2:
90 return lustre_msg_check_version_v2(msg, version);
92 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
95 #undef LUSTRE_MSG_MAGIC_V1
98 __u32 lustre_msg_early_size;
99 EXPORT_SYMBOL(lustre_msg_early_size);
101 /* early reply size */
102 void lustre_msg_early_size_init(void)
104 __u32 pblen = sizeof(struct ptlrpc_body);
106 lustre_msg_early_size = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
109 __u32 lustre_msg_size_v2(int count, __u32 *lengths)
115 size = lustre_msg_hdr_size_v2(count);
116 for (i = 0; i < count; i++)
117 size += round_up(lengths[i], 8);
121 EXPORT_SYMBOL(lustre_msg_size_v2);
124 * This returns the size of the buffer that is required to hold a lustre_msg
125 * with the given sub-buffer lengths.
126 * NOTE: this should only be used for NEW requests, and should always be
127 * in the form of a v2 request. If this is a connection to a v1
128 * target then the first buffer will be stripped because the ptlrpc
129 * data is part of the lustre_msg_v1 header. b=14043
131 __u32 lustre_msg_size(__u32 magic, int count, __u32 *lens)
133 __u32 size[] = { sizeof(struct ptlrpc_body) };
141 LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
144 case LUSTRE_MSG_MAGIC_V2:
145 return lustre_msg_size_v2(count, lens);
147 LASSERTF(0, "incorrect message magic: %08x\n", magic);
153 * This is used to determine the size of a buffer that was already packed
154 * and will correctly handle the different message formats.
156 __u32 lustre_packed_msg_size(struct lustre_msg *msg)
158 switch (msg->lm_magic) {
159 case LUSTRE_MSG_MAGIC_V2:
160 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
162 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
166 EXPORT_SYMBOL(lustre_packed_msg_size);
168 void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
176 msg->lm_bufcount = count;
177 /* XXX: lm_secflvr uninitialized here */
178 msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
180 for (i = 0; i < count; i++)
181 msg->lm_buflens[i] = lens[i];
186 ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
187 for (i = 0; i < count; i++) {
191 memcpy(ptr, tmp, lens[i]);
192 ptr += round_up(lens[i], 8);
195 EXPORT_SYMBOL(lustre_init_msg_v2);
197 static int lustre_pack_request_v2(struct ptlrpc_request *req,
198 int count, __u32 *lens, char **bufs)
202 reqlen = lustre_msg_size_v2(count, lens);
204 rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
208 req->rq_reqlen = reqlen;
210 lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
211 lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
215 int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
216 __u32 *lens, char **bufs)
218 __u32 size[] = { sizeof(struct ptlrpc_body) };
226 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
228 /* only use new format, we don't need to be compatible with 1.4 */
229 magic = LUSTRE_MSG_MAGIC_V2;
232 case LUSTRE_MSG_MAGIC_V2:
233 return lustre_pack_request_v2(req, count, lens, bufs);
235 LASSERTF(0, "incorrect message magic: %08x\n", magic);
#if RS_DEBUG
/* Debug-only LRU of all live reply states, protected by its own lock. */
struct list_head ptlrpc_rs_debug_lru =
	LIST_HEAD_INIT(ptlrpc_rs_debug_lru);
spinlock_t ptlrpc_rs_debug_lock;

#define PTLRPC_RS_DEBUG_LRU_ADD(rs)					\
do {									\
	spin_lock(&ptlrpc_rs_debug_lock);				\
	list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);	\
	spin_unlock(&ptlrpc_rs_debug_lock);				\
} while (0)

#define PTLRPC_RS_DEBUG_LRU_DEL(rs)					\
do {									\
	spin_lock(&ptlrpc_rs_debug_lock);				\
	list_del(&(rs)->rs_debug_list);					\
	spin_unlock(&ptlrpc_rs_debug_lock);				\
} while (0)
#else
# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while (0)
# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while (0)
#endif
263 struct ptlrpc_reply_state *
264 lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
266 struct ptlrpc_reply_state *rs = NULL;
268 spin_lock(&svcpt->scp_rep_lock);
270 /* See if we have anything in a pool, and wait if nothing */
271 while (list_empty(&svcpt->scp_rep_idle)) {
274 spin_unlock(&svcpt->scp_rep_lock);
275 /* If we cannot get anything for some long time, we better
276 * bail out instead of waiting infinitely */
277 rc = wait_event_idle_timeout(svcpt->scp_rep_waitq,
278 !list_empty(&svcpt->scp_rep_idle),
279 cfs_time_seconds(10));
282 spin_lock(&svcpt->scp_rep_lock);
285 rs = list_first_entry(&svcpt->scp_rep_idle,
286 struct ptlrpc_reply_state, rs_list);
287 list_del(&rs->rs_list);
289 spin_unlock(&svcpt->scp_rep_lock);
291 memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
292 rs->rs_size = svcpt->scp_service->srv_max_reply_size;
293 rs->rs_svcpt = svcpt;
299 void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
301 struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
303 spin_lock(&svcpt->scp_rep_lock);
304 list_add(&rs->rs_list, &svcpt->scp_rep_idle);
305 spin_unlock(&svcpt->scp_rep_lock);
306 wake_up(&svcpt->scp_rep_waitq);
309 int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
310 __u32 *lens, char **bufs, int flags)
312 struct ptlrpc_reply_state *rs;
316 LASSERT(req->rq_reply_state == NULL);
319 if ((flags & LPRFL_EARLY_REPLY) == 0) {
320 spin_lock(&req->rq_lock);
321 req->rq_packed_final = 1;
322 spin_unlock(&req->rq_lock);
325 msg_len = lustre_msg_size_v2(count, lens);
326 rc = sptlrpc_svc_alloc_rs(req, msg_len);
330 rs = req->rq_reply_state;
331 atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
332 rs->rs_cb_id.cbid_fn = reply_out_callback;
333 rs->rs_cb_id.cbid_arg = rs;
334 rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
335 INIT_LIST_HEAD(&rs->rs_exp_list);
336 INIT_LIST_HEAD(&rs->rs_obd_list);
337 INIT_LIST_HEAD(&rs->rs_list);
338 spin_lock_init(&rs->rs_lock);
340 req->rq_replen = msg_len;
341 req->rq_reply_state = rs;
342 req->rq_repmsg = rs->rs_msg;
344 lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
345 lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
347 PTLRPC_RS_DEBUG_LRU_ADD(rs);
351 EXPORT_SYMBOL(lustre_pack_reply_v2);
353 int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
354 char **bufs, int flags)
357 __u32 size[] = { sizeof(struct ptlrpc_body) };
365 LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
367 switch (req->rq_reqmsg->lm_magic) {
368 case LUSTRE_MSG_MAGIC_V2:
369 rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
372 LASSERTF(0, "incorrect message magic: %08x\n",
373 req->rq_reqmsg->lm_magic);
377 CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
378 lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
382 int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
385 return lustre_pack_reply_flags(req, count, lens, bufs, 0);
387 EXPORT_SYMBOL(lustre_pack_reply);
389 void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, __u32 n, __u32 min_size)
391 __u32 i, offset, buflen, bufcount;
394 LASSERT(m->lm_bufcount > 0);
396 bufcount = m->lm_bufcount;
397 if (unlikely(n >= bufcount)) {
398 CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
403 buflen = m->lm_buflens[n];
404 if (unlikely(buflen < min_size)) {
405 CERROR("msg %p buffer[%d] size %d too small "
406 "(required %d, opc=%d)\n", m, n, buflen, min_size,
407 n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
411 offset = lustre_msg_hdr_size_v2(bufcount);
412 for (i = 0; i < n; i++)
413 offset += round_up(m->lm_buflens[i], 8);
415 return (char *)m + offset;
418 void *lustre_msg_buf(struct lustre_msg *m, __u32 n, __u32 min_size)
420 switch (m->lm_magic) {
421 case LUSTRE_MSG_MAGIC_V2:
422 return lustre_msg_buf_v2(m, n, min_size);
424 LASSERTF(0, "incorrect message magic: %08x (msg:%px)\n",
429 EXPORT_SYMBOL(lustre_msg_buf);
431 static int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
432 unsigned int newlen, int move_data)
434 char *tail = NULL, *newpos;
438 LASSERT(msg->lm_bufcount > segment);
439 LASSERT(msg->lm_buflens[segment] >= newlen);
441 if (msg->lm_buflens[segment] == newlen)
444 if (move_data && msg->lm_bufcount > segment + 1) {
445 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
446 for (n = segment + 1; n < msg->lm_bufcount; n++)
447 tail_len += round_up(msg->lm_buflens[n], 8);
450 msg->lm_buflens[segment] = newlen;
452 if (tail && tail_len) {
453 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
454 LASSERT(newpos <= tail);
456 memmove(newpos, tail, tail_len);
459 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
463 * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
464 * we also move data forward from @segment + 1.
466 * if @newlen == 0, we remove the segment completely, but we still keep the
467 * totally bufcount the same to save possible data moving. this will leave a
468 * unused segment with size 0 at the tail, but that's ok.
470 * return new msg size after shrinking.
473 * + if any buffers higher than @segment has been filled in, must call shrink
474 * with non-zero @move_data.
475 * + caller should NOT keep pointers to msg buffers which higher than @segment
478 int lustre_shrink_msg(struct lustre_msg *msg, int segment,
479 unsigned int newlen, int move_data)
481 switch (msg->lm_magic) {
482 case LUSTRE_MSG_MAGIC_V2:
483 return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
485 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
488 EXPORT_SYMBOL(lustre_shrink_msg);
490 static int lustre_grow_msg_v2(struct lustre_msg_v2 *msg, __u32 segment,
493 char *tail = NULL, *newpos;
497 LASSERT(msg->lm_bufcount > segment);
498 LASSERT(msg->lm_buflens[segment] <= newlen);
500 if (msg->lm_buflens[segment] == newlen)
503 if (msg->lm_bufcount > segment + 1) {
504 tail = lustre_msg_buf_v2(msg, segment + 1, 0);
505 for (n = segment + 1; n < msg->lm_bufcount; n++)
506 tail_len += round_up(msg->lm_buflens[n], 8);
509 msg->lm_buflens[segment] = newlen;
511 if (tail && tail_len) {
512 newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
513 memmove(newpos, tail, tail_len);
516 return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
520 * for @msg, grow @segment to size @newlen.
521 * Always move higher buffer forward.
523 * return new msg size after growing.
526 * - caller must make sure there is enough space in allocated message buffer
527 * - caller should NOT keep pointers to msg buffers which higher than @segment
530 int lustre_grow_msg(struct lustre_msg *msg, int segment, unsigned int newlen)
532 switch (msg->lm_magic) {
533 case LUSTRE_MSG_MAGIC_V2:
534 return lustre_grow_msg_v2(msg, segment, newlen);
536 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
539 EXPORT_SYMBOL(lustre_grow_msg);
541 void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
543 PTLRPC_RS_DEBUG_LRU_DEL(rs);
545 LASSERT(atomic_read(&rs->rs_refcount) == 0);
546 LASSERT(!rs->rs_difficult || rs->rs_handled);
547 LASSERT(!rs->rs_difficult || rs->rs_unlinked);
548 LASSERT(!rs->rs_scheduled);
549 LASSERT(rs->rs_export == NULL);
550 LASSERT(rs->rs_nlocks == 0);
551 LASSERT(list_empty(&rs->rs_exp_list));
552 LASSERT(list_empty(&rs->rs_obd_list));
554 sptlrpc_svc_free_rs(rs);
557 static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
559 int swabbed, required_len, i, buflen;
561 /* Now we know the sender speaks my language. */
562 required_len = lustre_msg_hdr_size_v2(0);
563 if (len < required_len) {
564 /* can't even look inside the message */
565 CERROR("message length %d too small for lustre_msg\n", len);
569 swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
572 __swab32s(&m->lm_magic);
573 __swab32s(&m->lm_bufcount);
574 __swab32s(&m->lm_secflvr);
575 __swab32s(&m->lm_repsize);
576 __swab32s(&m->lm_cksum);
577 __swab32s(&m->lm_flags);
578 __swab32s(&m->lm_opc);
579 BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_3) == 0);
582 if (m->lm_bufcount == 0 || m->lm_bufcount > PTLRPC_MAX_BUFCOUNT) {
583 CERROR("message bufcount %d is not valid\n", m->lm_bufcount);
586 required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
587 if (len < required_len) {
588 /* didn't receive all the buffer lengths */
589 CERROR("message length %d too small for %d buflens\n",
590 len, m->lm_bufcount);
594 for (i = 0; i < m->lm_bufcount; i++) {
596 __swab32s(&m->lm_buflens[i]);
597 buflen = round_up(m->lm_buflens[i], 8);
598 if (buflen < 0 || buflen > PTLRPC_MAX_BUFLEN) {
599 CERROR("buffer %d length %d is not valid\n", i, buflen);
602 required_len += buflen;
604 if (len < required_len || required_len > PTLRPC_MAX_BUFLEN) {
605 CERROR("len: %d, required_len %d, bufcount: %d\n",
606 len, required_len, m->lm_bufcount);
607 for (i = 0; i < m->lm_bufcount; i++)
608 CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
615 int __lustre_unpack_msg(struct lustre_msg *m, int len)
617 int required_len, rc;
621 * We can provide a slightly better error log, if we check the
622 * message magic and version first. In the future, struct
623 * lustre_msg may grow, and we'd like to log a version mismatch,
624 * rather than a short message.
626 required_len = offsetof(struct lustre_msg, lm_magic) +
628 if (len < required_len) {
629 /* can't even look inside the message */
630 CERROR("message length %d too small for magic/version check\n",
635 rc = lustre_unpack_msg_v2(m, len);
639 EXPORT_SYMBOL(__lustre_unpack_msg);
641 int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
645 rc = __lustre_unpack_msg(req->rq_reqmsg, len);
647 req_capsule_set_req_swabbed(&req->rq_pill,
648 MSG_PTLRPC_HEADER_OFF);
654 int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
658 rc = __lustre_unpack_msg(req->rq_repmsg, len);
660 req_capsule_set_rep_swabbed(&req->rq_pill,
661 MSG_PTLRPC_HEADER_OFF);
668 lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
669 enum req_location loc, int offset)
671 struct ptlrpc_body *pb;
672 struct lustre_msg_v2 *m;
674 m = loc == RCL_CLIENT ? req->rq_reqmsg : req->rq_repmsg;
676 pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
678 CERROR("error unpacking ptlrpc body\n");
681 if (req_capsule_need_swab(&req->rq_pill, loc, offset)) {
682 lustre_swab_ptlrpc_body(pb);
683 req_capsule_set_swabbed(&req->rq_pill, loc, offset);
686 if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
687 CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
691 if (loc == RCL_SERVER)
692 pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
697 int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
699 switch (req->rq_reqmsg->lm_magic) {
700 case LUSTRE_MSG_MAGIC_V2:
701 return lustre_unpack_ptlrpc_body_v2(req, RCL_CLIENT, offset);
703 CERROR("bad lustre msg magic: %08x\n",
704 req->rq_reqmsg->lm_magic);
709 int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
711 switch (req->rq_repmsg->lm_magic) {
712 case LUSTRE_MSG_MAGIC_V2:
713 return lustre_unpack_ptlrpc_body_v2(req, RCL_SERVER, offset);
715 CERROR("bad lustre msg magic: %08x\n",
716 req->rq_repmsg->lm_magic);
721 static inline __u32 lustre_msg_buflen_v2(struct lustre_msg_v2 *m, __u32 n)
723 if (n >= m->lm_bufcount)
726 return m->lm_buflens[n];
730 * lustre_msg_buflen - return the length of buffer \a n in message \a m
731 * \param m lustre_msg (request or reply) to look at
732 * \param n message index (base 0)
734 * returns zero for non-existent message indices
736 __u32 lustre_msg_buflen(struct lustre_msg *m, __u32 n)
738 switch (m->lm_magic) {
739 case LUSTRE_MSG_MAGIC_V2:
740 return lustre_msg_buflen_v2(m, n);
742 CERROR("incorrect message magic: %08x\n", m->lm_magic);
746 EXPORT_SYMBOL(lustre_msg_buflen);
749 lustre_msg_set_buflen_v2(struct lustre_msg_v2 *m, __u32 n, __u32 len)
751 if (n >= m->lm_bufcount)
754 m->lm_buflens[n] = len;
757 void lustre_msg_set_buflen(struct lustre_msg *m, __u32 n, __u32 len)
759 switch (m->lm_magic) {
760 case LUSTRE_MSG_MAGIC_V2:
761 lustre_msg_set_buflen_v2(m, n, len);
764 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
769 * NB return the bufcount for lustre_msg_v2 format, so if message is packed
770 * in V1 format, the result is one bigger. (add struct ptlrpc_body).
772 __u32 lustre_msg_bufcount(struct lustre_msg *m)
774 switch (m->lm_magic) {
775 case LUSTRE_MSG_MAGIC_V2:
776 return m->lm_bufcount;
778 CERROR("incorrect message magic: %08x\n", m->lm_magic);
783 char *lustre_msg_string(struct lustre_msg *m, __u32 index, __u32 max_len)
785 /* max_len == 0 means the string should fill the buffer */
789 switch (m->lm_magic) {
790 case LUSTRE_MSG_MAGIC_V2:
791 str = lustre_msg_buf_v2(m, index, 0);
792 blen = lustre_msg_buflen_v2(m, index);
795 LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
799 CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
803 slen = strnlen(str, blen);
805 if (slen == blen) { /* not NULL terminated */
806 CERROR("can't unpack non-NULL terminated string in msg %p buffer[%d] len %d\n",
810 if (blen > PTLRPC_MAX_BUFLEN) {
811 CERROR("buffer length of msg %p buffer[%d] is invalid(%d)\n",
817 if (slen != blen - 1) {
818 CERROR("can't unpack short string in msg %p buffer[%d] len %d: strlen %d\n",
819 m, index, blen, slen);
822 } else if (slen > max_len) {
823 CERROR("can't unpack oversized string in msg %p buffer[%d] len %d strlen %d: max %d expected\n",
824 m, index, blen, slen, max_len);
831 /* Wrap up the normal fixed length cases */
832 static inline void *__lustre_swab_buf(struct lustre_msg *msg, __u32 index,
833 __u32 min_size, void *swabber)
837 LASSERT(msg != NULL);
838 switch (msg->lm_magic) {
839 case LUSTRE_MSG_MAGIC_V2:
840 ptr = lustre_msg_buf_v2(msg, index, min_size);
843 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
846 if (ptr != NULL && swabber != NULL)
847 ((void (*)(void *))swabber)(ptr);
852 static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
854 return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
855 sizeof(struct ptlrpc_body_v2));
858 enum lustre_msghdr lustre_msghdr_get_flags(struct lustre_msg *msg)
860 switch (msg->lm_magic) {
861 case LUSTRE_MSG_MAGIC_V2:
862 /* already in host endian */
863 return msg->lm_flags;
865 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
869 EXPORT_SYMBOL(lustre_msghdr_get_flags);
871 void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
873 switch (msg->lm_magic) {
874 case LUSTRE_MSG_MAGIC_V2:
875 msg->lm_flags = flags;
878 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
882 __u32 lustre_msg_get_flags(struct lustre_msg *msg)
884 switch (msg->lm_magic) {
885 case LUSTRE_MSG_MAGIC_V2: {
886 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
890 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
895 * flags might be printed in debug code while message
901 EXPORT_SYMBOL(lustre_msg_get_flags);
903 void lustre_msg_add_flags(struct lustre_msg *msg, __u32 flags)
905 switch (msg->lm_magic) {
906 case LUSTRE_MSG_MAGIC_V2: {
907 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
908 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
909 pb->pb_flags |= flags;
913 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
916 EXPORT_SYMBOL(lustre_msg_add_flags);
918 void lustre_msg_set_flags(struct lustre_msg *msg, __u32 flags)
920 switch (msg->lm_magic) {
921 case LUSTRE_MSG_MAGIC_V2: {
922 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
923 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
924 pb->pb_flags = flags;
928 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
932 void lustre_msg_clear_flags(struct lustre_msg *msg, __u32 flags)
934 switch (msg->lm_magic) {
935 case LUSTRE_MSG_MAGIC_V2: {
936 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
937 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
938 pb->pb_flags &= ~flags;
943 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
946 EXPORT_SYMBOL(lustre_msg_clear_flags);
948 __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
950 switch (msg->lm_magic) {
951 case LUSTRE_MSG_MAGIC_V2: {
952 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
954 return pb->pb_op_flags;
956 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
964 void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags)
966 switch (msg->lm_magic) {
967 case LUSTRE_MSG_MAGIC_V2: {
968 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
969 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
970 pb->pb_op_flags |= flags;
974 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
977 EXPORT_SYMBOL(lustre_msg_add_op_flags);
979 struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
981 switch (msg->lm_magic) {
982 case LUSTRE_MSG_MAGIC_V2: {
983 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
985 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
988 return &pb->pb_handle;
991 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
996 __u32 lustre_msg_get_type(struct lustre_msg *msg)
998 switch (msg->lm_magic) {
999 case LUSTRE_MSG_MAGIC_V2: {
1000 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1002 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1003 return PTL_RPC_MSG_ERR;
1008 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1009 return PTL_RPC_MSG_ERR;
1012 EXPORT_SYMBOL(lustre_msg_get_type);
1014 enum lustre_msg_version lustre_msg_get_version(struct lustre_msg *msg)
1016 switch (msg->lm_magic) {
1017 case LUSTRE_MSG_MAGIC_V2: {
1018 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1020 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1023 return pb->pb_version;
1026 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1031 void lustre_msg_add_version(struct lustre_msg *msg, __u32 version)
1033 switch (msg->lm_magic) {
1034 case LUSTRE_MSG_MAGIC_V2: {
1035 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1036 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1037 pb->pb_version |= version;
1041 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1045 __u32 lustre_msg_get_opc(struct lustre_msg *msg)
1047 switch (msg->lm_magic) {
1048 case LUSTRE_MSG_MAGIC_V2: {
1049 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1051 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1057 CERROR("incorrect message magic: %08x (msg:%p)\n",
1058 msg->lm_magic, msg);
1062 EXPORT_SYMBOL(lustre_msg_get_opc);
1064 __u64 lustre_msg_get_last_xid(struct lustre_msg *msg)
1066 switch (msg->lm_magic) {
1067 case LUSTRE_MSG_MAGIC_V2: {
1068 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1070 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1073 return pb->pb_last_xid;
1076 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1080 EXPORT_SYMBOL(lustre_msg_get_last_xid);
1082 __u16 lustre_msg_get_tag(struct lustre_msg *msg)
1084 switch (msg->lm_magic) {
1085 case LUSTRE_MSG_MAGIC_V2: {
1086 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1088 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1094 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1098 EXPORT_SYMBOL(lustre_msg_get_tag);
1100 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
1102 switch (msg->lm_magic) {
1103 case LUSTRE_MSG_MAGIC_V2: {
1104 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1106 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1109 return pb->pb_last_committed;
1112 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1116 EXPORT_SYMBOL(lustre_msg_get_last_committed);
1118 __u64 *lustre_msg_get_versions(struct lustre_msg *msg)
1120 switch (msg->lm_magic) {
1121 case LUSTRE_MSG_MAGIC_V2: {
1122 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1124 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1127 return pb->pb_pre_versions;
1130 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1134 EXPORT_SYMBOL(lustre_msg_get_versions);
1136 __u64 lustre_msg_get_transno(struct lustre_msg *msg)
1138 switch (msg->lm_magic) {
1139 case LUSTRE_MSG_MAGIC_V2: {
1140 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1142 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1145 return pb->pb_transno;
1148 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1152 EXPORT_SYMBOL(lustre_msg_get_transno);
1154 int lustre_msg_get_status(struct lustre_msg *msg)
1156 switch (msg->lm_magic) {
1157 case LUSTRE_MSG_MAGIC_V2: {
1158 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1160 return pb->pb_status;
1161 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1166 * status might be printed in debug code while message
1172 EXPORT_SYMBOL(lustre_msg_get_status);
1174 __u64 lustre_msg_get_slv(struct lustre_msg *msg)
1176 switch (msg->lm_magic) {
1177 case LUSTRE_MSG_MAGIC_V2: {
1178 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1180 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1186 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1192 void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
1194 switch (msg->lm_magic) {
1195 case LUSTRE_MSG_MAGIC_V2: {
1196 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1198 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1205 CERROR("invalid msg magic %x\n", msg->lm_magic);
1210 __u32 lustre_msg_get_limit(struct lustre_msg *msg)
1212 switch (msg->lm_magic) {
1213 case LUSTRE_MSG_MAGIC_V2: {
1214 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1216 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1219 return pb->pb_limit;
1222 CERROR("invalid msg magic %x\n", msg->lm_magic);
1228 void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
1230 switch (msg->lm_magic) {
1231 case LUSTRE_MSG_MAGIC_V2: {
1232 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1234 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1237 pb->pb_limit = limit;
1241 CERROR("invalid msg magic %08x\n", msg->lm_magic);
1246 __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
1248 switch (msg->lm_magic) {
1249 case LUSTRE_MSG_MAGIC_V2: {
1250 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1252 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1255 return pb->pb_conn_cnt;
1258 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1262 EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
1264 __u32 lustre_msg_get_magic(struct lustre_msg *msg)
1266 switch (msg->lm_magic) {
1267 case LUSTRE_MSG_MAGIC_V2:
1268 return msg->lm_magic;
1270 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1275 timeout_t lustre_msg_get_timeout(struct lustre_msg *msg)
1277 switch (msg->lm_magic) {
1278 case LUSTRE_MSG_MAGIC_V2: {
1279 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1282 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1285 return pb->pb_timeout;
1288 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1293 timeout_t lustre_msg_get_service_timeout(struct lustre_msg *msg)
1295 switch (msg->lm_magic) {
1296 case LUSTRE_MSG_MAGIC_V2: {
1297 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1300 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1303 return pb->pb_service_time;
1306 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1311 int lustre_msg_get_uid_gid(struct lustre_msg *msg, __u32 *uid, __u32 *gid)
1313 switch (msg->lm_magic) {
1314 case LUSTRE_MSG_MAGIC_V2: {
1315 struct ptlrpc_body *pb;
1317 /* the old pltrpc_body_v2 is smaller; doesn't include uid/gid */
1318 if (msg->lm_buflens[MSG_PTLRPC_BODY_OFF] <
1319 sizeof(struct ptlrpc_body))
1322 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1323 sizeof(struct ptlrpc_body));
1325 if (!pb || !(pb->pb_flags & MSG_PACK_UID_GID))
1336 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1340 EXPORT_SYMBOL(lustre_msg_get_uid_gid);
1342 char *lustre_msg_get_jobid(struct lustre_msg *msg)
1344 switch (msg->lm_magic) {
1345 case LUSTRE_MSG_MAGIC_V2: {
1346 struct ptlrpc_body *pb;
1348 /* the old pltrpc_body_v2 is smaller; doesn't include jobid */
1349 if (msg->lm_buflens[MSG_PTLRPC_BODY_OFF] <
1350 sizeof(struct ptlrpc_body))
1353 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1354 sizeof(struct ptlrpc_body));
1358 /* If clients send unterminated jobids, terminate them here
1359 * so that there is no chance of string overflow later.
1361 if (unlikely(pb->pb_jobid[LUSTRE_JOBID_SIZE - 1] != '\0'))
1362 pb->pb_jobid[LUSTRE_JOBID_SIZE - 1] = '\0';
1364 return pb->pb_jobid;
1367 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1371 EXPORT_SYMBOL(lustre_msg_get_jobid);
1373 __u32 lustre_msg_get_cksum(struct lustre_msg *msg)
1375 switch (msg->lm_magic) {
1376 case LUSTRE_MSG_MAGIC_V2:
1377 return msg->lm_cksum;
1379 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1384 __u64 lustre_msg_get_mbits(struct lustre_msg *msg)
1386 switch (msg->lm_magic) {
1387 case LUSTRE_MSG_MAGIC_V2: {
1388 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1390 CERROR("invalid msg %p: no ptlrpc body!\n", msg);
1393 return pb->pb_mbits;
1396 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1401 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg, __u32 buf)
1403 switch (msg->lm_magic) {
1404 case LUSTRE_MSG_MAGIC_V2: {
1405 struct ptlrpc_body *pb = lustre_msg_buf_v2(msg, buf, 0);
1406 __u32 len = lustre_msg_buflen(msg, buf);
1409 #if IS_ENABLED(CONFIG_CRC32)
1410 /* about 10x faster than crypto_hash for small buffers */
1411 crc = crc32_le(~(__u32)0, (unsigned char *)pb, len);
1412 #elif IS_ENABLED(CONFIG_CRYPTO_CRC32)
1413 unsigned int hsize = 4;
1415 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
1416 len, NULL, 0, (unsigned char *)&crc,
1419 #error "need either CONFIG_CRC32 or CONFIG_CRYPTO_CRC32 enabled in the kernel"
1424 CERROR("incorrect message magic: %08x\n", msg->lm_magic);
1429 void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
1431 switch (msg->lm_magic) {
1432 case LUSTRE_MSG_MAGIC_V2: {
1433 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1434 LASSERTF(pb, "invalid msg %px: no ptlrpc body!\n", msg);
1435 pb->pb_handle = *handle;
1439 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1443 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
1445 switch (msg->lm_magic) {
1446 case LUSTRE_MSG_MAGIC_V2: {
1447 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1448 LASSERTF(pb, "invalid msg %px: no ptlrpc body!\n", msg);
1453 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1457 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
1459 switch (msg->lm_magic) {
1460 case LUSTRE_MSG_MAGIC_V2: {
1461 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1462 LASSERTF(pb, "invalid msg %px: no ptlrpc body!\n", msg);
1467 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1471 void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid)
1473 switch (msg->lm_magic) {
1474 case LUSTRE_MSG_MAGIC_V2: {
1475 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1476 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1477 pb->pb_last_xid = last_xid;
1481 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1484 EXPORT_SYMBOL(lustre_msg_set_last_xid);
1486 void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
1488 switch (msg->lm_magic) {
1489 case LUSTRE_MSG_MAGIC_V2: {
1490 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1491 LASSERTF(pb, "invalid msg %px: no ptlrpc body!\n", msg);
1496 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1499 EXPORT_SYMBOL(lustre_msg_set_tag);
1501 void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed)
1503 switch (msg->lm_magic) {
1504 case LUSTRE_MSG_MAGIC_V2: {
1505 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1506 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1507 pb->pb_last_committed = last_committed;
1511 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1515 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
1517 switch (msg->lm_magic) {
1518 case LUSTRE_MSG_MAGIC_V2: {
1519 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1520 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1521 pb->pb_pre_versions[0] = versions[0];
1522 pb->pb_pre_versions[1] = versions[1];
1523 pb->pb_pre_versions[2] = versions[2];
1524 pb->pb_pre_versions[3] = versions[3];
1528 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1531 EXPORT_SYMBOL(lustre_msg_set_versions);
1533 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
1535 switch (msg->lm_magic) {
1536 case LUSTRE_MSG_MAGIC_V2: {
1537 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1538 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1539 pb->pb_transno = transno;
1543 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1546 EXPORT_SYMBOL(lustre_msg_set_transno);
1548 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
1550 switch (msg->lm_magic) {
1551 case LUSTRE_MSG_MAGIC_V2: {
1552 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1553 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1554 pb->pb_status = status;
1558 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1561 EXPORT_SYMBOL(lustre_msg_set_status);
1563 void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
1565 switch (msg->lm_magic) {
1566 case LUSTRE_MSG_MAGIC_V2: {
1567 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1568 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1569 pb->pb_conn_cnt = conn_cnt;
1573 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1577 void lustre_msg_set_timeout(struct lustre_msg *msg, timeout_t timeout)
1579 switch (msg->lm_magic) {
1580 case LUSTRE_MSG_MAGIC_V2: {
1581 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1583 LASSERT(timeout >= 0);
1584 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1585 pb->pb_timeout = timeout;
1589 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1593 void lustre_msg_set_service_timeout(struct lustre_msg *msg,
1594 timeout_t service_timeout)
1596 switch (msg->lm_magic) {
1597 case LUSTRE_MSG_MAGIC_V2: {
1598 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1600 LASSERT(service_timeout >= 0);
1601 LASSERTF(pb, "invalid msg %px: no ptlrpc body!\n", msg);
1602 pb->pb_service_time = service_timeout;
1606 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1610 void lustre_msg_set_uid_gid(struct lustre_msg *msg, __u32 *uid, __u32 *gid)
1612 switch (msg->lm_magic) {
1613 case LUSTRE_MSG_MAGIC_V2: {
1614 __u32 opc = lustre_msg_get_opc(msg);
1615 struct ptlrpc_body *pb;
1617 /* Don't set uid/gid for ldlm ast RPCs */
1618 if (!opc || opc == LDLM_BL_CALLBACK ||
1619 opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
1622 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1623 sizeof(struct ptlrpc_body));
1624 LASSERTF(pb, "invalid msg %px: no ptlrpc body!\n", msg);
1629 pb->pb_flags |= MSG_PACK_UID_GID;
1630 } else if (!(pb->pb_flags & MSG_PACK_UID_GID)) {
1631 pb->pb_uid = from_kuid(&init_user_ns, current_uid());
1632 pb->pb_gid = from_kgid(&init_user_ns, current_gid());
1633 pb->pb_flags |= MSG_PACK_UID_GID;
1639 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1642 EXPORT_SYMBOL(lustre_msg_set_uid_gid);
1644 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
1646 switch (msg->lm_magic) {
1647 case LUSTRE_MSG_MAGIC_V2: {
1648 __u32 opc = lustre_msg_get_opc(msg);
1649 struct ptlrpc_body *pb;
1651 /* Don't set jobid for ldlm ast RPCs, they've been shrinked.
1652 * See the comment in ptlrpc_request_pack(). */
1653 if (!opc || opc == LDLM_BL_CALLBACK ||
1654 opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
1657 pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
1658 sizeof(struct ptlrpc_body));
1659 LASSERTF(pb, "invalid msg %px: no ptlrpc body!\n", msg);
1662 memcpy(pb->pb_jobid, jobid, sizeof(pb->pb_jobid));
1663 else if (pb->pb_jobid[0] == '\0')
1664 lustre_get_jobid(pb->pb_jobid, sizeof(pb->pb_jobid));
1668 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1671 EXPORT_SYMBOL(lustre_msg_set_jobid);
1673 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
1675 switch (msg->lm_magic) {
1676 case LUSTRE_MSG_MAGIC_V2:
1677 msg->lm_cksum = cksum;
1680 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1684 void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
1686 switch (msg->lm_magic) {
1687 case LUSTRE_MSG_MAGIC_V2: {
1688 struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
1690 LASSERTF(pb != NULL, "invalid msg %px: no ptlrpc body!\n", msg);
1691 pb->pb_mbits = mbits;
1695 LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
1699 void ptlrpc_request_set_replen(struct ptlrpc_request *req)
1701 int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
1703 req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
1704 req->rq_pill.rc_area[RCL_SERVER]);
1705 if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1706 req->rq_reqmsg->lm_repsize = req->rq_replen;
1708 EXPORT_SYMBOL(ptlrpc_request_set_replen);
1711 * Send a remote set_info_async.
1713 * This may go from client to server or server to client.
1715 int do_set_info_async(struct obd_import *imp,
1716 int opcode, int version,
1717 size_t keylen, void *key,
1718 size_t vallen, void *val,
1719 struct ptlrpc_request_set *set)
1721 struct ptlrpc_request *req;
1727 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_CHANGELOG_CLEAR) ?
1733 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
1734 RCL_CLIENT, keylen);
1735 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
1736 RCL_CLIENT, vallen);
1737 rc = ptlrpc_request_pack(req, version, opcode);
1739 ptlrpc_request_free(req);
1743 if (KEY_IS(KEY_CHANGELOG_CLEAR))
1746 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
1747 memcpy(tmp, key, keylen);
1748 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
1749 memcpy(tmp, val, vallen);
1751 ptlrpc_request_set_replen(req);
1754 ptlrpc_set_add_req(set, req);
1755 ptlrpc_check_set(NULL, set);
1757 rc = ptlrpc_queue_wait(req);
1758 ptlrpc_req_finished(req);
1763 EXPORT_SYMBOL(do_set_info_async);
1765 /* byte flipping routines for all wire types declared in
1766 * lustre_idl.h implemented here.
1768 void lustre_swab_ptlrpc_body(struct ptlrpc_body *body)
1770 __swab32s(&body->pb_type);
1771 __swab32s(&body->pb_version);
1772 __swab32s(&body->pb_opc);
1773 __swab32s(&body->pb_status);
1774 __swab64s(&body->pb_last_xid);
1775 __swab16s(&body->pb_tag);
1776 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding0) == 0);
1777 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding1) == 0);
1778 __swab64s(&body->pb_last_committed);
1779 __swab64s(&body->pb_transno);
1780 __swab32s(&body->pb_flags);
1781 __swab32s(&body->pb_op_flags);
1782 __swab32s(&body->pb_conn_cnt);
1783 __swab32s(&body->pb_timeout);
1784 __swab32s(&body->pb_service_time);
1785 __swab32s(&body->pb_limit);
1786 __swab64s(&body->pb_slv);
1787 __swab64s(&body->pb_pre_versions[0]);
1788 __swab64s(&body->pb_pre_versions[1]);
1789 __swab64s(&body->pb_pre_versions[2]);
1790 __swab64s(&body->pb_pre_versions[3]);
1791 __swab64s(&body->pb_mbits);
1792 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_0) == 0);
1793 BUILD_BUG_ON(offsetof(typeof(*body), pb_padding64_1) == 0);
1794 __swab32s(&body->pb_uid);
1795 __swab32s(&body->pb_gid);
1797 * While we need to maintain compatibility between
1798 * clients and servers without ptlrpc_body_v2 (< 2.3)
1799 * do not swab any fields beyond pb_jobid, as we are
1800 * using this swab function for both ptlrpc_body
1801 * and ptlrpc_body_v2.
1803 /* pb_jobid is an ASCII string and should not be swabbed */
1804 BUILD_BUG_ON(offsetof(typeof(*body), pb_jobid) == 0);
1807 void lustre_swab_connect(struct obd_connect_data *ocd)
1809 __swab64s(&ocd->ocd_connect_flags);
1810 __swab32s(&ocd->ocd_version);
1811 __swab32s(&ocd->ocd_grant);
1812 __swab64s(&ocd->ocd_ibits_known);
1813 __swab32s(&ocd->ocd_index);
1814 __swab32s(&ocd->ocd_brw_size);
1816 * ocd_blocksize and ocd_inodespace don't need to be swabbed because
1817 * they are 8-byte values
1819 __swab16s(&ocd->ocd_grant_tax_kb);
1820 __swab32s(&ocd->ocd_grant_max_blks);
1821 __swab64s(&ocd->ocd_transno);
1822 __swab32s(&ocd->ocd_group);
1823 __swab32s(&ocd->ocd_cksum_types);
1824 __swab32s(&ocd->ocd_instance);
1826 * Fields after ocd_cksum_types are only accessible by the receiver
1827 * if the corresponding flag in ocd_connect_flags is set. Accessing
1828 * any field after ocd_maxbytes on the receiver without a valid flag
1829 * may result in out-of-bound memory access and kernel oops.
1831 if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
1832 __swab32s(&ocd->ocd_max_easize);
1833 if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
1834 __swab64s(&ocd->ocd_maxbytes);
1835 if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
1836 __swab16s(&ocd->ocd_maxmodrpcs);
1837 BUILD_BUG_ON(offsetof(typeof(*ocd), padding0) == 0);
1838 BUILD_BUG_ON(offsetof(typeof(*ocd), padding1) == 0);
1839 if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2) {
1840 __swab64s(&ocd->ocd_connect_flags2);
1841 if (ocd->ocd_connect_flags2 & OBD_CONNECT2_COMPRESS)
1842 __swab64s(&ocd->ocd_compr_type);
1844 BUILD_BUG_ON(offsetof(typeof(*ocd), padding4) == 0);
1845 BUILD_BUG_ON(offsetof(typeof(*ocd), padding5) == 0);
1846 BUILD_BUG_ON(offsetof(typeof(*ocd), padding6) == 0);
1847 BUILD_BUG_ON(offsetof(typeof(*ocd), padding7) == 0);
1848 BUILD_BUG_ON(offsetof(typeof(*ocd), padding8) == 0);
1849 BUILD_BUG_ON(offsetof(typeof(*ocd), padding9) == 0);
1850 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingA) == 0);
1851 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingB) == 0);
1852 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingC) == 0);
1853 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingD) == 0);
1854 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingE) == 0);
1855 BUILD_BUG_ON(offsetof(typeof(*ocd), paddingF) == 0);
1858 static void lustre_swab_ost_layout(struct ost_layout *ol)
1860 __swab32s(&ol->ol_stripe_size);
1861 __swab32s(&ol->ol_stripe_count);
1862 __swab64s(&ol->ol_comp_start);
1863 __swab64s(&ol->ol_comp_end);
1864 __swab32s(&ol->ol_comp_id);
1867 void lustre_swab_obdo(struct obdo *o)
1869 __swab64s(&o->o_valid);
1870 lustre_swab_ost_id(&o->o_oi);
1871 __swab64s(&o->o_parent_seq);
1872 __swab64s(&o->o_size);
1873 __swab64s(&o->o_mtime);
1874 __swab64s(&o->o_atime);
1875 __swab64s(&o->o_ctime);
1876 __swab64s(&o->o_blocks);
1877 __swab64s(&o->o_grant);
1878 __swab32s(&o->o_blksize);
1879 __swab32s(&o->o_mode);
1880 __swab32s(&o->o_uid);
1881 __swab32s(&o->o_gid);
1882 __swab32s(&o->o_flags);
1883 __swab32s(&o->o_nlink);
1884 __swab32s(&o->o_parent_oid);
1885 __swab32s(&o->o_misc);
1886 __swab64s(&o->o_ioepoch);
1887 __swab32s(&o->o_stripe_idx);
1888 __swab32s(&o->o_parent_ver);
1889 lustre_swab_ost_layout(&o->o_layout);
1890 __swab32s(&o->o_layout_version);
1891 __swab32s(&o->o_uid_h);
1892 __swab32s(&o->o_gid_h);
1893 __swab64s(&o->o_data_version);
1894 __swab32s(&o->o_projid);
1895 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_4) == 0);
1896 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_5) == 0);
1897 BUILD_BUG_ON(offsetof(typeof(*o), o_padding_6) == 0);
1900 EXPORT_SYMBOL(lustre_swab_obdo);
1902 void lustre_swab_obd_statfs(struct obd_statfs *os)
1904 __swab64s(&os->os_type);
1905 __swab64s(&os->os_blocks);
1906 __swab64s(&os->os_bfree);
1907 __swab64s(&os->os_bavail);
1908 __swab64s(&os->os_files);
1909 __swab64s(&os->os_ffree);
1910 /* no need to swab os_fsid */
1911 __swab32s(&os->os_bsize);
1912 __swab32s(&os->os_namelen);
1913 __swab64s(&os->os_maxbytes);
1914 __swab32s(&os->os_state);
1915 __swab32s(&os->os_fprecreated);
1916 __swab32s(&os->os_granted);
1917 BUILD_BUG_ON(offsetof(typeof(*os), os_spare3) == 0);
1918 BUILD_BUG_ON(offsetof(typeof(*os), os_spare4) == 0);
1919 BUILD_BUG_ON(offsetof(typeof(*os), os_spare5) == 0);
1920 BUILD_BUG_ON(offsetof(typeof(*os), os_spare6) == 0);
1921 BUILD_BUG_ON(offsetof(typeof(*os), os_spare7) == 0);
1922 BUILD_BUG_ON(offsetof(typeof(*os), os_spare8) == 0);
1923 BUILD_BUG_ON(offsetof(typeof(*os), os_spare9) == 0);
1926 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
1928 lustre_swab_ost_id(&ioo->ioo_oid);
1929 __swab32s(&ioo->ioo_max_brw);
1930 __swab32s(&ioo->ioo_bufcnt);
1933 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr)
1935 __swab64s(&nbr->rnb_offset);
1936 __swab32s(&nbr->rnb_len);
1937 __swab32s(&nbr->rnb_flags);
1940 void lustre_swab_ost_body(struct ost_body *b)
1942 lustre_swab_obdo(&b->oa);
1945 void lustre_swab_ost_last_id(u64 *id)
1950 void lustre_swab_generic_32s(__u32 *val)
1955 void lustre_swab_gl_lquota_desc(struct ldlm_gl_lquota_desc *desc)
1957 lustre_swab_lu_fid(&desc->gl_id.qid_fid);
1958 __swab64s(&desc->gl_flags);
1959 __swab64s(&desc->gl_ver);
1960 __swab64s(&desc->gl_hardlimit);
1961 __swab64s(&desc->gl_softlimit);
1962 __swab64s(&desc->gl_time);
1963 BUILD_BUG_ON(offsetof(typeof(*desc), gl_pad2) == 0);
1965 EXPORT_SYMBOL(lustre_swab_gl_lquota_desc);
1967 void lustre_swab_gl_barrier_desc(struct ldlm_gl_barrier_desc *desc)
1969 __swab32s(&desc->lgbd_status);
1970 __swab32s(&desc->lgbd_timeout);
1971 BUILD_BUG_ON(offsetof(typeof(*desc), lgbd_padding) == 0);
1973 EXPORT_SYMBOL(lustre_swab_gl_barrier_desc);
1975 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
1977 __swab64s(&lvb->lvb_size);
1978 __swab64s(&lvb->lvb_mtime);
1979 __swab64s(&lvb->lvb_atime);
1980 __swab64s(&lvb->lvb_ctime);
1981 __swab64s(&lvb->lvb_blocks);
1983 EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
1985 void lustre_swab_ost_lvb(struct ost_lvb *lvb)
1987 __swab64s(&lvb->lvb_size);
1988 __swab64s(&lvb->lvb_mtime);
1989 __swab64s(&lvb->lvb_atime);
1990 __swab64s(&lvb->lvb_ctime);
1991 __swab64s(&lvb->lvb_blocks);
1992 __swab32s(&lvb->lvb_mtime_ns);
1993 __swab32s(&lvb->lvb_atime_ns);
1994 __swab32s(&lvb->lvb_ctime_ns);
1995 __swab32s(&lvb->lvb_padding);
1997 EXPORT_SYMBOL(lustre_swab_ost_lvb);
1999 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
2001 __swab64s(&lvb->lvb_flags);
2002 __swab64s(&lvb->lvb_id_may_rel);
2003 __swab64s(&lvb->lvb_id_rel);
2004 __swab64s(&lvb->lvb_id_qunit);
2005 __swab64s(&lvb->lvb_pad1);
2007 EXPORT_SYMBOL(lustre_swab_lquota_lvb);
2009 void lustre_swab_barrier_lvb(struct barrier_lvb *lvb)
2011 __swab32s(&lvb->lvb_status);
2012 __swab32s(&lvb->lvb_index);
2013 BUILD_BUG_ON(offsetof(typeof(*lvb), lvb_padding) == 0);
2015 EXPORT_SYMBOL(lustre_swab_barrier_lvb);
2017 void lustre_swab_mdt_body(struct mdt_body *b)
2019 lustre_swab_lu_fid(&b->mbo_fid1);
2020 lustre_swab_lu_fid(&b->mbo_fid2);
2021 /* handle is opaque */
2022 __swab64s(&b->mbo_valid);
2023 __swab64s(&b->mbo_size);
2024 __swab64s(&b->mbo_mtime);
2025 __swab64s(&b->mbo_atime);
2026 __swab64s(&b->mbo_ctime);
2027 __swab64s(&b->mbo_blocks);
2028 __swab64s(&b->mbo_version);
2029 __swab64s(&b->mbo_t_state);
2030 __swab32s(&b->mbo_fsuid);
2031 __swab32s(&b->mbo_fsgid);
2032 __swab32s(&b->mbo_capability);
2033 __swab32s(&b->mbo_mode);
2034 __swab32s(&b->mbo_uid);
2035 __swab32s(&b->mbo_gid);
2036 __swab32s(&b->mbo_flags);
2037 __swab32s(&b->mbo_rdev);
2038 __swab32s(&b->mbo_nlink);
2039 __swab32s(&b->mbo_layout_gen);
2040 __swab32s(&b->mbo_suppgid);
2041 __swab32s(&b->mbo_eadatasize);
2042 __swab32s(&b->mbo_aclsize);
2043 __swab32s(&b->mbo_max_mdsize);
2044 BUILD_BUG_ON(offsetof(typeof(*b), mbo_unused3) == 0);
2045 __swab32s(&b->mbo_uid_h);
2046 __swab32s(&b->mbo_gid_h);
2047 __swab32s(&b->mbo_projid);
2048 __swab64s(&b->mbo_dom_size);
2049 __swab64s(&b->mbo_dom_blocks);
2050 __swab64s(&b->mbo_btime);
2051 BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_9) == 0);
2052 BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_10) == 0);
2055 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
2057 /* mio_open_handle is opaque */
2058 BUILD_BUG_ON(offsetof(typeof(*b), mio_unused1) == 0);
2059 BUILD_BUG_ON(offsetof(typeof(*b), mio_unused2) == 0);
2060 BUILD_BUG_ON(offsetof(typeof(*b), mio_padding) == 0);
2063 void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
2067 __swab32s(&mti->mti_lustre_ver);
2068 __swab32s(&mti->mti_stripe_index);
2069 __swab32s(&mti->mti_config_ver);
2070 __swab32s(&mti->mti_flags);
2071 __swab32s(&mti->mti_instance);
2072 __swab32s(&mti->mti_nid_count);
2073 BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(u64));
2075 /* For NID string we never need to swab */
2076 if (target_supports_large_nid(mti))
2079 for (i = 0; i < MTI_NIDS_MAX; i++)
2080 __swab64s(&mti->mti_nids[i]);
2083 void lustre_swab_mgs_nidtbl_entry_header(struct mgs_nidtbl_entry *entry)
2085 __swab64s(&entry->mne_version);
2086 __swab32s(&entry->mne_instance);
2087 __swab32s(&entry->mne_index);
2088 __swab32s(&entry->mne_length);
2090 /* mne_nid_(count|type) must be one byte size because we're going to
2091 * access it w/o swapping.
2093 BUILD_BUG_ON(sizeof(entry->mne_nid_count) != sizeof(u8));
2094 BUILD_BUG_ON(sizeof(entry->mne_nid_type) != sizeof(u8));
2097 EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry_header);
2099 void lustre_swab_mgs_nidtbl_entry_content(struct mgs_nidtbl_entry *entry)
2103 /* Large NIDs are always big endian so we don't need swapping */
2104 if (entry->mne_nid_type)
2107 for (i = 0; i < entry->mne_nid_count; i++)
2108 __swab64s(&entry->u.nids[i]);
2110 EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry_content);
2112 void lustre_swab_mgs_config_body(struct mgs_config_body *body)
2114 __swab64s(&body->mcb_offset);
2115 __swab32s(&body->mcb_units);
2116 __swab16s(&body->mcb_type);
2119 void lustre_swab_mgs_config_res(struct mgs_config_res *body)
2121 __swab64s(&body->mcr_offset);
2122 __swab64s(&body->mcr_size);
2125 static void lustre_swab_obd_dqinfo(struct obd_dqinfo *i)
2127 __swab64s(&i->dqi_bgrace);
2128 __swab64s(&i->dqi_igrace);
2129 __swab32s(&i->dqi_flags);
2130 __swab32s(&i->dqi_valid);
2133 static void lustre_swab_obd_dqblk(struct obd_dqblk *b)
2135 __swab64s(&b->dqb_ihardlimit);
2136 __swab64s(&b->dqb_isoftlimit);
2137 __swab64s(&b->dqb_curinodes);
2138 __swab64s(&b->dqb_bhardlimit);
2139 __swab64s(&b->dqb_bsoftlimit);
2140 __swab64s(&b->dqb_curspace);
2141 __swab64s(&b->dqb_btime);
2142 __swab64s(&b->dqb_itime);
2143 __swab32s(&b->dqb_valid);
2144 BUILD_BUG_ON(offsetof(typeof(*b), dqb_padding) == 0);
2147 int lustre_swab_obd_quotactl(struct obd_quotactl *q, __u32 len)
2149 if (unlikely(len <= sizeof(struct obd_quotactl)))
2152 __swab32s(&q->qc_cmd);
2153 __swab32s(&q->qc_type);
2154 __swab32s(&q->qc_id);
2155 __swab32s(&q->qc_stat);
2156 lustre_swab_obd_dqinfo(&q->qc_dqinfo);
2157 lustre_swab_obd_dqblk(&q->qc_dqblk);
2162 void lustre_swab_fid2path(struct getinfo_fid2path *gf)
2164 lustre_swab_lu_fid(&gf->gf_fid);
2165 __swab64s(&gf->gf_recno);
2166 __swab32s(&gf->gf_linkno);
2167 __swab32s(&gf->gf_pathlen);
2169 EXPORT_SYMBOL(lustre_swab_fid2path);
2171 static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
2173 __swab64s(&fm_extent->fe_logical);
2174 __swab64s(&fm_extent->fe_physical);
2175 __swab64s(&fm_extent->fe_length);
2176 __swab32s(&fm_extent->fe_flags);
2177 __swab32s(&fm_extent->fe_device);
2180 static void lustre_swab_fiemap_hdr(struct fiemap *fiemap)
2182 __swab64s(&fiemap->fm_start);
2183 __swab64s(&fiemap->fm_length);
2184 __swab32s(&fiemap->fm_flags);
2185 __swab32s(&fiemap->fm_mapped_extents);
2186 __swab32s(&fiemap->fm_extent_count);
2187 __swab32s(&fiemap->fm_reserved);
2190 int lustre_swab_fiemap(struct fiemap *fiemap, __u32 len)
2192 __u32 i, size, count;
2194 lustre_swab_fiemap_hdr(fiemap);
2196 size = fiemap_count_to_size(fiemap->fm_mapped_extents);
2197 count = fiemap->fm_mapped_extents;
2198 if (unlikely(size > len)) {
2199 count = (len - sizeof(struct fiemap)) /
2200 sizeof(struct fiemap_extent);
2201 fiemap->fm_mapped_extents = count;
2204 /* still swab extents as we cannot yet pass rc to callers */
2205 for (i = 0; i < count; i++)
2206 lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
2211 void lustre_swab_fiemap_info_key(struct ll_fiemap_info_key *fiemap_info)
2213 lustre_swab_obdo(&fiemap_info->lfik_oa);
2214 lustre_swab_fiemap_hdr(&fiemap_info->lfik_fiemap);
2217 void lustre_swab_idx_info(struct idx_info *ii)
2219 __swab32s(&ii->ii_magic);
2220 __swab32s(&ii->ii_flags);
2221 __swab16s(&ii->ii_count);
2222 __swab32s(&ii->ii_attrs);
2223 lustre_swab_lu_fid(&ii->ii_fid);
2224 __swab64s(&ii->ii_version);
2225 __swab64s(&ii->ii_hash_start);
2226 __swab64s(&ii->ii_hash_end);
2227 __swab16s(&ii->ii_keysize);
2228 __swab16s(&ii->ii_recsize);
2231 void lustre_swab_lip_header(struct lu_idxpage *lip)
2234 __swab32s(&lip->lip_magic);
2235 __swab16s(&lip->lip_flags);
2236 __swab16s(&lip->lip_nr);
2238 EXPORT_SYMBOL(lustre_swab_lip_header);
2240 void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
2242 __swab32s(&rr->rr_opcode);
2243 __swab32s(&rr->rr_cap);
2244 __swab32s(&rr->rr_fsuid);
2245 /* rr_fsuid_h is unused */
2246 __swab32s(&rr->rr_fsgid);
2247 /* rr_fsgid_h is unused */
2248 __swab32s(&rr->rr_suppgid1);
2249 /* rr_suppgid1_h is unused */
2250 __swab32s(&rr->rr_suppgid2);
2251 /* rr_suppgid2_h is unused */
2252 lustre_swab_lu_fid(&rr->rr_fid1);
2253 lustre_swab_lu_fid(&rr->rr_fid2);
2254 __swab64s(&rr->rr_mtime);
2255 __swab64s(&rr->rr_atime);
2256 __swab64s(&rr->rr_ctime);
2257 __swab64s(&rr->rr_size);
2258 __swab64s(&rr->rr_blocks);
2259 __swab32s(&rr->rr_bias);
2260 __swab32s(&rr->rr_mode);
2261 __swab32s(&rr->rr_flags);
2262 __swab32s(&rr->rr_flags_h);
2263 __swab32s(&rr->rr_umask);
2264 __swab16s(&rr->rr_mirror_id);
2266 BUILD_BUG_ON(offsetof(typeof(*rr), rr_padding_4) == 0);
2269 void lustre_swab_lov_desc(struct lov_desc *ld)
2271 __swab32s(&ld->ld_tgt_count);
2272 __swab32s(&ld->ld_active_tgt_count);
2273 __swab32s(&ld->ld_default_stripe_count);
2274 __swab32s(&ld->ld_pattern);
2275 __swab64s(&ld->ld_default_stripe_size);
2276 __swab64s(&ld->ld_default_stripe_offset);
2277 __swab32s(&ld->ld_qos_maxage);
2278 /* uuid endian insensitive */
2280 EXPORT_SYMBOL(lustre_swab_lov_desc);
2282 void lustre_swab_lmv_desc(struct lmv_desc *ld)
2284 __swab32s(&ld->ld_tgt_count);
2285 __swab32s(&ld->ld_active_tgt_count);
2286 __swab32s(&ld->ld_default_stripe_count);
2287 __swab32s(&ld->ld_pattern);
2288 __swab64s(&ld->ld_default_hash_size);
2289 __swab32s(&ld->ld_qos_maxage);
2290 /* uuid endian insensitive */
2293 /* This structure is always in little-endian */
2294 static void lustre_swab_lmv_mds_md_v1(struct lmv_mds_md_v1 *lmm1)
2298 __swab32s(&lmm1->lmv_magic);
2299 __swab32s(&lmm1->lmv_stripe_count);
2300 __swab32s(&lmm1->lmv_master_mdt_index);
2301 __swab32s(&lmm1->lmv_hash_type);
2302 __swab32s(&lmm1->lmv_layout_version);
2303 for (i = 0; i < lmm1->lmv_stripe_count; i++)
2304 lustre_swab_lu_fid(&lmm1->lmv_stripe_fids[i]);
2307 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm)
2309 switch (lmm->lmv_magic) {
2311 lustre_swab_lmv_mds_md_v1(&lmm->lmv_md_v1);
2317 EXPORT_SYMBOL(lustre_swab_lmv_mds_md);
2319 void lustre_swab_lmv_user_md_objects(struct lmv_user_mds_data *lmd,
2324 for (i = 0; i < stripe_count; i++)
2325 __swab32s(&(lmd[i].lum_mds));
2327 EXPORT_SYMBOL(lustre_swab_lmv_user_md_objects);
2330 void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
2334 if (lum->lum_magic == LMV_MAGIC_FOREIGN) {
2335 __swab32s(&lum->lum_magic);
2336 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_length);
2337 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_type);
2338 __swab32s(&((struct lmv_foreign_md *)lum)->lfm_flags);
2342 count = lum->lum_stripe_count;
2343 __swab32s(&lum->lum_magic);
2344 __swab32s(&lum->lum_stripe_count);
2345 __swab32s(&lum->lum_stripe_offset);
2346 __swab32s(&lum->lum_hash_type);
2347 __swab32s(&lum->lum_type);
2348 /* lum_max_inherit and lum_max_inherit_rr do not need to be swabbed */
2349 BUILD_BUG_ON(offsetof(typeof(*lum), lum_padding1) == 0);
2350 BUILD_BUG_ON(offsetof(typeof(*lum), lum_padding2) == 0);
2351 BUILD_BUG_ON(offsetof(typeof(*lum), lum_padding3) == 0);
2352 switch (lum->lum_magic) {
2353 case LMV_USER_MAGIC_SPECIFIC:
2354 count = lum->lum_stripe_count;
2356 case __swab32(LMV_USER_MAGIC_SPECIFIC):
2357 lustre_swab_lmv_user_md_objects(lum->lum_objects, count);
2363 EXPORT_SYMBOL(lustre_swab_lmv_user_md);
2365 static void lustre_print_v1v3(unsigned int lvl, struct lov_user_md *lum,
2368 CDEBUG(lvl, "%s lov_user_md %p:\n", msg, lum);
2369 CDEBUG(lvl, "\tlmm_magic: %#x\n", lum->lmm_magic);
2370 CDEBUG(lvl, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
2371 CDEBUG(lvl, "\tlmm_object_id: %llu\n", lmm_oi_id(&lum->lmm_oi));
2372 CDEBUG(lvl, "\tlmm_object_gr: %llu\n", lmm_oi_seq(&lum->lmm_oi));
2373 CDEBUG(lvl, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
2374 CDEBUG(lvl, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
2375 CDEBUG(lvl, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
2376 lum->lmm_stripe_offset);
2377 if (lum->lmm_magic == LOV_USER_MAGIC_V3) {
2378 struct lov_user_md_v3 *v3 = (void *)lum;
2379 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2381 if (lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2382 struct lov_user_md_v3 *v3 = (void *)lum;
2385 if (v3->lmm_pool_name[0] != '\0')
2386 CDEBUG(lvl, "\tlmm_pool_name: %s\n", v3->lmm_pool_name);
2388 CDEBUG(lvl, "\ttarget list:\n");
2389 for (i = 0; i < v3->lmm_stripe_count; i++)
2390 CDEBUG(lvl, "\t\t%u\n", v3->lmm_objects[i].l_ost_idx);
2394 static void lustre_print_foreign(unsigned int lvl, struct lov_foreign_md *lfm,
2397 CDEBUG(lvl, "%s lov_foreign_md %p:\n", msg, lfm);
2398 CDEBUG(lvl, "\tlfm_magic: %#X\n", lfm->lfm_magic);
2399 CDEBUG(lvl, "\tlfm_length: %u\n", lfm->lfm_length);
2400 CDEBUG(lvl, "\tlfm_type: %#X\n", lfm->lfm_type);
2401 CDEBUG(lvl, "\tlfm_flags: %#X\n", lfm->lfm_flags);
2404 void lustre_print_user_md(unsigned int lvl, struct lov_user_md *lum,
2407 struct lov_comp_md_v1 *comp_v1;
2410 if (likely(!cfs_cdebug_show(lvl, DEBUG_SUBSYSTEM)))
2413 if (lum->lmm_magic == LOV_USER_MAGIC_V1 ||
2414 lum->lmm_magic == LOV_USER_MAGIC_V3) {
2415 lustre_print_v1v3(lvl, lum, msg);
2419 if (lum->lmm_magic != LOV_USER_MAGIC_COMP_V1) {
2420 CDEBUG(lvl, "%s: bad magic: %x\n", msg, lum->lmm_magic);
2424 comp_v1 = (struct lov_comp_md_v1 *)lum;
2425 CDEBUG(lvl, "%s: lov_comp_md_v1 %p:\n", msg, lum);
2426 CDEBUG(lvl, "\tlcm_magic: %#x\n", comp_v1->lcm_magic);
2427 CDEBUG(lvl, "\tlcm_size: %#x\n", comp_v1->lcm_size);
2428 CDEBUG(lvl, "\tlcm_layout_gen: %#x\n", comp_v1->lcm_layout_gen);
2429 CDEBUG(lvl, "\tlcm_flags: %#x\n", comp_v1->lcm_flags);
2430 CDEBUG(lvl, "\tlcm_entry_count: %#x\n\n", comp_v1->lcm_entry_count);
2431 CDEBUG(lvl, "\tlcm_mirror_count: %#x\n\n", comp_v1->lcm_mirror_count);
2433 for (i = 0; i < comp_v1->lcm_entry_count; i++) {
2434 struct lov_comp_md_entry_v1 *ent = &comp_v1->lcm_entries[i];
2435 struct lov_user_md *v1;
2437 CDEBUG(lvl, "\tentry %d:\n", i);
2438 CDEBUG(lvl, "\tlcme_id: %#x\n", ent->lcme_id);
2439 CDEBUG(lvl, "\tlcme_flags: %#x\n", ent->lcme_flags);
2440 if (ent->lcme_flags & LCME_FL_NOSYNC)
2441 CDEBUG(lvl, "\tlcme_timestamp: %llu\n",
2442 ent->lcme_timestamp);
2443 CDEBUG(lvl, "\tlcme_extent.e_start: %llu\n",
2444 ent->lcme_extent.e_start);
2445 CDEBUG(lvl, "\tlcme_extent.e_end: %llu\n",
2446 ent->lcme_extent.e_end);
2447 CDEBUG(lvl, "\tlcme_offset: %#x\n", ent->lcme_offset);
2448 CDEBUG(lvl, "\tlcme_size: %#x\n\n", ent->lcme_size);
2450 v1 = (struct lov_user_md *)((char *)comp_v1 +
2451 comp_v1->lcm_entries[i].lcme_offset);
2452 if (v1->lmm_magic == LOV_MAGIC_FOREIGN)
2453 lustre_print_foreign(lvl, (struct lov_foreign_md *)v1,
2456 lustre_print_v1v3(lvl, v1, msg);
2459 EXPORT_SYMBOL(lustre_print_user_md);
2461 static void lustre_swab_lmm_oi(struct ost_id *oi)
2463 __swab64s(&oi->oi.oi_id);
2464 __swab64s(&oi->oi.oi_seq);
2467 static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
2470 __swab32s(&lum->lmm_magic);
2471 __swab32s(&lum->lmm_pattern);
2472 lustre_swab_lmm_oi(&lum->lmm_oi);
2473 __swab32s(&lum->lmm_stripe_size);
2474 __swab16s(&lum->lmm_stripe_count);
2475 __swab16s(&lum->lmm_stripe_offset);
2479 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
2482 CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
2483 lustre_swab_lov_user_md_common(lum);
2486 EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
2488 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
2491 CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
2492 lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
2493 /* lmm_pool_name nothing to do with char */
2496 EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
2498 static void lustre_swab_lov_hsm_md(struct lov_hsm_md *lhm)
2501 CDEBUG(D_IOCTL, "swabbing lov_hsm_md\n");
2502 __swab32s(&lhm->lhm_magic);
2503 __swab32s(&lhm->lhm_length);
2504 __swab32s(&lhm->lhm_type);
2505 __swab32s(&lhm->lhm_flags);
2507 if (lov_hsm_type_supported(lhm->lhm_type)) {
2508 __swab64s(&lhm->lhm_archive_id);
2509 __swab64s(&lhm->lhm_archive_ver);
2514 void lustre_swab_lov_comp_md_v1(struct lov_comp_md_v1 *lum)
2516 struct lov_comp_md_entry_v1 *ent;
2517 struct lov_user_md_v1 *v1;
2518 struct lov_user_md_v3 *v3;
2522 __u16 ent_count, stripe_count;
2525 cpu_endian = lum->lcm_magic == LOV_USER_MAGIC_COMP_V1;
2526 ent_count = lum->lcm_entry_count;
2528 __swab16s(&ent_count);
2530 CDEBUG(D_IOCTL, "swabbing lov_user_comp_md v1\n");
2531 __swab32s(&lum->lcm_magic);
2532 __swab32s(&lum->lcm_size);
2533 __swab32s(&lum->lcm_layout_gen);
2534 __swab16s(&lum->lcm_flags);
2535 __swab16s(&lum->lcm_entry_count);
2536 __swab16s(&lum->lcm_mirror_count);
2537 /* no need to swab lcm_ec_count */
2538 BUILD_BUG_ON(offsetof(typeof(*lum), lcm_padding1) == 0);
2539 BUILD_BUG_ON(offsetof(typeof(*lum), lcm_padding2) == 0);
2540 BUILD_BUG_ON(offsetof(typeof(*lum), lcm_padding3) == 0);
2542 for (i = 0; i < ent_count; i++) {
2543 ent = &lum->lcm_entries[i];
2544 off = ent->lcme_offset;
2545 size = ent->lcme_size;
2551 __swab32s(&ent->lcme_id);
2552 __swab32s(&ent->lcme_flags);
2553 __swab64s(&ent->lcme_timestamp);
2554 __swab64s(&ent->lcme_extent.e_start);
2555 __swab64s(&ent->lcme_extent.e_end);
2556 __swab32s(&ent->lcme_offset);
2557 __swab32s(&ent->lcme_size);
2558 __swab32s(&ent->lcme_layout_gen);
2559 /* no need to swab lcme_dstripe_count */
2560 /* no need to swab lcme_cstripe_count */
2562 v1 = (struct lov_user_md_v1 *)((char *)lum + off);
2563 if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_FOREIGN) ||
2564 v1->lmm_magic == LOV_USER_MAGIC_FOREIGN) {
2565 lustre_swab_lov_hsm_md((struct lov_hsm_md *)v1);
2569 stripe_count = v1->lmm_stripe_count;
2571 __swab16s(&stripe_count);
2573 if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V1) ||
2574 v1->lmm_magic == LOV_USER_MAGIC_V1) {
2575 lustre_swab_lov_user_md_v1(v1);
2576 if (size > sizeof(*v1))
2577 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2579 } else if (v1->lmm_magic == __swab32(LOV_USER_MAGIC_V3) ||
2580 v1->lmm_magic == LOV_USER_MAGIC_V3 ||
2581 v1->lmm_magic == __swab32(LOV_USER_MAGIC_SPECIFIC) ||
2582 v1->lmm_magic == LOV_USER_MAGIC_SPECIFIC) {
2583 v3 = (struct lov_user_md_v3 *)v1;
2584 lustre_swab_lov_user_md_v3(v3);
2585 if (size > sizeof(*v3))
2586 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2589 CERROR("Invalid magic %#x\n", v1->lmm_magic);
2593 EXPORT_SYMBOL(lustre_swab_lov_comp_md_v1);
2595 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
2601 for (i = 0; i < stripe_count; i++) {
2602 lustre_swab_ost_id(&(lod[i].l_ost_oi));
2603 __swab32s(&(lod[i].l_ost_gen));
2604 __swab32s(&(lod[i].l_ost_idx));
2608 EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
2610 void lustre_swab_lov_user_md(struct lov_user_md *lum, size_t size)
2612 struct lov_user_md_v1 *v1;
2613 struct lov_user_md_v3 *v3;
2614 struct lov_foreign_md *lfm;
2618 CDEBUG(D_IOCTL, "swabbing lov_user_md\n");
2619 switch (lum->lmm_magic) {
2620 case __swab32(LOV_MAGIC_V1):
2621 case LOV_USER_MAGIC_V1:
2623 v1 = (struct lov_user_md_v1 *)lum;
2624 stripe_count = v1->lmm_stripe_count;
2626 if (lum->lmm_magic != LOV_USER_MAGIC_V1)
2627 __swab16s(&stripe_count);
2629 lustre_swab_lov_user_md_v1(v1);
2630 if (size > sizeof(*v1))
2631 lustre_swab_lov_user_md_objects(v1->lmm_objects,
2636 case __swab32(LOV_MAGIC_V3):
2637 case LOV_USER_MAGIC_V3:
2639 v3 = (struct lov_user_md_v3 *)lum;
2640 stripe_count = v3->lmm_stripe_count;
2642 if (lum->lmm_magic != LOV_USER_MAGIC_V3)
2643 __swab16s(&stripe_count);
2645 lustre_swab_lov_user_md_v3(v3);
2646 if (size > sizeof(*v3))
2647 lustre_swab_lov_user_md_objects(v3->lmm_objects,
2651 case __swab32(LOV_USER_MAGIC_SPECIFIC):
2652 case LOV_USER_MAGIC_SPECIFIC:
2654 v3 = (struct lov_user_md_v3 *)lum;
2655 stripe_count = v3->lmm_stripe_count;
2657 if (lum->lmm_magic != LOV_USER_MAGIC_SPECIFIC)
2658 __swab16s(&stripe_count);
2660 lustre_swab_lov_user_md_v3(v3);
2661 lustre_swab_lov_user_md_objects(v3->lmm_objects, stripe_count);
2664 case __swab32(LOV_MAGIC_COMP_V1):
2665 case LOV_USER_MAGIC_COMP_V1:
2666 lustre_swab_lov_comp_md_v1((struct lov_comp_md_v1 *)lum);
2668 case __swab32(LOV_MAGIC_FOREIGN):
2669 case LOV_USER_MAGIC_FOREIGN:
2671 lfm = (struct lov_foreign_md *)lum;
2672 __swab32s(&lfm->lfm_magic);
2673 __swab32s(&lfm->lfm_length);
2674 __swab32s(&lfm->lfm_type);
2675 __swab32s(&lfm->lfm_flags);
2679 CDEBUG(D_IOCTL, "Invalid LOV magic %08x\n", lum->lmm_magic);
2682 EXPORT_SYMBOL(lustre_swab_lov_user_md);
2684 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
2687 CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
2688 __swab32s(&lmm->lmm_magic);
2689 __swab32s(&lmm->lmm_pattern);
2690 lustre_swab_lmm_oi(&lmm->lmm_oi);
2691 __swab32s(&lmm->lmm_stripe_size);
2692 __swab16s(&lmm->lmm_stripe_count);
2693 __swab16s(&lmm->lmm_layout_gen);
2696 EXPORT_SYMBOL(lustre_swab_lov_mds_md);
2698 void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
2702 for (i = 0; i < RES_NAME_SIZE; i++)
2703 __swab64s(&id->name[i]);
2706 void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
2708 /* the lock data is a union and the first two fields are always an
2709 * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
2710 * data the same way.
2712 __swab64s(&d->l_extent.start);
2713 __swab64s(&d->l_extent.end);
2714 __swab64s(&d->l_extent.gid);
2715 __swab32s(&d->l_flock.lfw_padding);
2716 __swab32s(&d->l_flock.lfw_pid);
2719 void lustre_swab_ldlm_intent(struct ldlm_intent *i)
2724 void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r)
2726 __swab32s(&r->lr_type);
2727 BUILD_BUG_ON(offsetof(typeof(*r), lr_pad) == 0);
2728 lustre_swab_ldlm_res_id(&r->lr_name);
2731 void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l)
2733 lustre_swab_ldlm_resource_desc(&l->l_resource);
2734 __swab32s(&l->l_req_mode);
2735 __swab32s(&l->l_granted_mode);
2736 lustre_swab_ldlm_policy_data(&l->l_policy_data);
2739 void lustre_swab_ldlm_request(struct ldlm_request *rq)
2741 __swab32s(&rq->lock_flags);
2742 lustre_swab_ldlm_lock_desc(&rq->lock_desc);
2743 __swab32s(&rq->lock_count);
2744 /* lock_handle[] opaque */
2747 void lustre_swab_ldlm_reply(struct ldlm_reply *r)
2749 __swab32s(&r->lock_flags);
2750 BUILD_BUG_ON(offsetof(typeof(*r), lock_padding) == 0);
2751 lustre_swab_ldlm_lock_desc(&r->lock_desc);
2752 /* lock_handle opaque */
2753 __swab64s(&r->lock_policy_res1);
2754 __swab64s(&r->lock_policy_res2);
2757 void lustre_swab_quota_body(struct quota_body *b)
2759 lustre_swab_lu_fid(&b->qb_fid);
2760 lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
2761 __swab32s(&b->qb_flags);
2762 __swab64s(&b->qb_count);
2763 __swab64s(&b->qb_usage);
2764 __swab64s(&b->qb_slv_ver);
2767 /* Dump functions */
2768 void dump_ioo(struct obd_ioobj *ioo)
2771 "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
2772 "ioo_bufct=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
2776 void dump_rniobuf(struct niobuf_remote *nb)
2778 CDEBUG(D_RPCTRACE, "niobuf_remote: offset=%llu, len=%d, flags=%x\n",
2779 nb->rnb_offset, nb->rnb_len, nb->rnb_flags);
2782 static void dump_obdo(struct obdo *oa)
2784 u64 valid = oa->o_valid;
2786 CDEBUG(D_RPCTRACE, "obdo: o_valid = %#llx\n", valid);
2787 if (valid & OBD_MD_FLID)
2788 CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
2789 if (valid & OBD_MD_FLFID)
2790 CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
2792 if (valid & OBD_MD_FLSIZE)
2793 CDEBUG(D_RPCTRACE, "obdo: o_size = %lld\n", oa->o_size);
2794 if (valid & OBD_MD_FLMTIME)
2795 CDEBUG(D_RPCTRACE, "obdo: o_mtime = %lld\n", oa->o_mtime);
2796 if (valid & OBD_MD_FLATIME)
2797 CDEBUG(D_RPCTRACE, "obdo: o_atime = %lld\n", oa->o_atime);
2798 if (valid & OBD_MD_FLCTIME)
2799 CDEBUG(D_RPCTRACE, "obdo: o_ctime = %lld\n", oa->o_ctime);
2800 if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
2801 CDEBUG(D_RPCTRACE, "obdo: o_blocks = %lld\n", oa->o_blocks);
2802 if (valid & OBD_MD_FLGRANT)
2803 CDEBUG(D_RPCTRACE, "obdo: o_grant = %lld\n", oa->o_grant);
2804 if (valid & OBD_MD_FLBLKSZ)
2805 CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
2806 if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
2807 CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
2808 oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
2809 (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
2810 if (valid & OBD_MD_FLUID)
2811 CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
2812 if (valid & OBD_MD_FLUID)
2813 CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
2814 if (valid & OBD_MD_FLGID)
2815 CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
2816 if (valid & OBD_MD_FLGID)
2817 CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
2818 if (valid & OBD_MD_FLFLAGS)
2819 CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
2820 if (valid & OBD_MD_FLNLINK)
2821 CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
2822 else if (valid & OBD_MD_FLCKSUM)
2823 CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
2825 if (valid & OBD_MD_FLPARENT)
2826 CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
2828 if (valid & OBD_MD_FLFID) {
2829 CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
2831 CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
2834 if (valid & OBD_MD_FLHANDLE)
2835 CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
2836 oa->o_handle.cookie);
2839 void dump_ost_body(struct ost_body *ob)
2844 void dump_rcs(__u32 *rc)
2846 CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
2849 static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2851 LASSERT(req->rq_reqmsg);
2853 switch (req->rq_reqmsg->lm_magic) {
2854 case LUSTRE_MSG_MAGIC_V2:
2855 return req_capsule_req_swabbed(&req->rq_pill,
2856 MSG_PTLRPC_BODY_OFF);
2858 CERROR("bad lustre msg magic: %#08X\n",
2859 req->rq_reqmsg->lm_magic);
2864 static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
2866 if (unlikely(!req->rq_repmsg))
2869 switch (req->rq_repmsg->lm_magic) {
2870 case LUSTRE_MSG_MAGIC_V2:
2871 return req_capsule_rep_swabbed(&req->rq_pill,
2872 MSG_PTLRPC_BODY_OFF);
2874 /* uninitialized yet */
/*
 * Emit one formatted debug line describing @req — xid/transno, opcode,
 * peer target and NID, portals, message lengths, timers, flags and
 * status — through libcfs_debug_msg(), prefixed by the caller-supplied
 * printf-style message.  This is the backend of the DEBUG_REQ() macro.
 * NOTE(review): several lines of this function (va_list/rep_flags
 * declarations, brace lines, part of the argument list) appear to have
 * been lost in extraction; comments below describe only what is visible.
 */
void _debug_req(struct ptlrpc_request *req,
struct libcfs_debug_msg_data *msgdata, const char *fmt, ...)
/* Sentinel defaults are printed when the request/reply message is
 * absent or its ptlrpc body has not been byte-swabbed yet. */
bool req_ok = req->rq_reqmsg != NULL;
bool rep_ok = false;
struct lnet_nid *nid = NULL;
struct va_format vaf;
int rep_status = -1;
__u64 req_transno = 0;
__u32 req_flags = (__u32) -1;
__u32 req_uid = (__u32) -1;
__u32 req_gid = (__u32) -1;
char *req_jobid = NULL;
/* rq_repmsg can be freed early; hold the lock while inspecting it */
spin_lock(&req->rq_early_free_lock);
/* message contents are only trustworthy once the ptlrpc body is swabbed */
if (req_capsule_req_need_swab(&req->rq_pill)) {
req_ok = req_ok && req_ptlrpc_body_swabbed(req);
rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
/* NOTE(review): rep_flags is assigned here but its declaration is not
 * visible in this hunk — presumably "int rep_flags = -1;" above. */
rep_flags = lustre_msg_get_flags(req->rq_repmsg);
rep_status = lustre_msg_get_status(req->rq_repmsg);
spin_unlock(&req->rq_early_free_lock);
/* prefer the import (client side) connection, else the export's */
if (req->rq_import && req->rq_import->imp_connection)
nid = &req->rq_import->imp_connection->c_peer.nid;
else if (req->rq_export && req->rq_export->exp_connection)
nid = &req->rq_export->exp_connection->c_peer.nid;
req_transno = lustre_msg_get_transno(req->rq_reqmsg);
req_opc = lustre_msg_get_opc(req->rq_reqmsg);
req_jobid = lustre_msg_get_jobid(req->rq_reqmsg);
lustre_msg_get_uid_gid(req->rq_reqmsg, &req_uid, &req_gid);
req_flags = lustre_msg_get_flags(req->rq_reqmsg);
va_start(args, fmt);
libcfs_debug_msg(msgdata,
"%pV req@%p x%llu/t%lld(%llu) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d job:'%s' uid:%u gid:%u\n",
req, req->rq_xid, req->rq_transno, req_transno,
req->rq_import->imp_obd->obd_name :
req->rq_export->exp_client_uuid.uuid :
nid ? libcfs_nidstr(nid) : "<unknown>",
req->rq_request_portal, req->rq_reply_portal,
req->rq_reqlen, req->rq_replen,
req->rq_early_count, (s64)req->rq_timedout,
(s64)req->rq_deadline,
atomic_read(&req->rq_refcount),
DEBUG_REQ_FLAGS(req), req_flags, rep_flags,
req->rq_status, rep_status,
req_jobid ?: "", req_uid, req_gid);
2950 void lustre_swab_hsm_user_state(struct hsm_user_state *state)
2952 __swab32s(&state->hus_states);
2953 __swab32s(&state->hus_archive_id);
2956 void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
2958 __swab32s(&hss->hss_valid);
2959 __swab64s(&hss->hss_setmask);
2960 __swab64s(&hss->hss_clearmask);
2961 __swab32s(&hss->hss_archive_id);
2964 static void lustre_swab_hsm_extent(struct hsm_extent *extent)
2966 __swab64s(&extent->offset);
2967 __swab64s(&extent->length);
2970 void lustre_swab_hsm_current_action(struct hsm_current_action *action)
2972 __swab32s(&action->hca_state);
2973 __swab32s(&action->hca_action);
2974 lustre_swab_hsm_extent(&action->hca_location);
2977 void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
2979 lustre_swab_lu_fid(&hui->hui_fid);
2980 lustre_swab_hsm_extent(&hui->hui_extent);
2983 static void lustre_swab_lu_extent(struct lu_extent *le)
2985 __swab64s(&le->e_start);
2986 __swab64s(&le->e_end);
2989 void lustre_swab_layout_intent(struct layout_intent *li)
2991 __swab32s(&li->lai_opc);
2992 __swab32s(&li->lai_flags);
2993 lustre_swab_lu_extent(&li->lai_extent);
2996 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
2998 lustre_swab_lu_fid(&hpk->hpk_fid);
2999 __swab64s(&hpk->hpk_cookie);
3000 __swab64s(&hpk->hpk_extent.offset);
3001 __swab64s(&hpk->hpk_extent.length);
3002 __swab16s(&hpk->hpk_flags);
3003 __swab16s(&hpk->hpk_errval);
3006 void lustre_swab_hsm_request(struct hsm_request *hr)
3008 __swab32s(&hr->hr_action);
3009 __swab32s(&hr->hr_archive_id);
3010 __swab64s(&hr->hr_flags);
3011 __swab32s(&hr->hr_itemcount);
3012 __swab32s(&hr->hr_data_len);
3015 /* TODO: swab each sub request message */
3016 void lustre_swab_batch_update_request(struct batch_update_request *bur)
3018 __swab32s(&bur->burq_magic);
3019 __swab16s(&bur->burq_count);
3020 __swab16s(&bur->burq_padding);
3023 /* TODO: swab each sub reply message. */
3024 void lustre_swab_batch_update_reply(struct batch_update_reply *bur)
3026 __swab32s(&bur->burp_magic);
3027 __swab16s(&bur->burp_count);
3028 __swab16s(&bur->burp_padding);
3031 void lustre_swab_but_update_header(struct but_update_header *buh)
3033 __swab32s(&buh->buh_magic);
3034 __swab32s(&buh->buh_count);
3035 __swab32s(&buh->buh_inline_length);
3036 __swab32s(&buh->buh_reply_size);
3037 __swab32s(&buh->buh_update_count);
3039 EXPORT_SYMBOL(lustre_swab_but_update_header);
3041 void lustre_swab_but_update_buffer(struct but_update_buffer *bub)
3043 __swab32s(&bub->bub_size);
3044 __swab32s(&bub->bub_padding);
3046 EXPORT_SYMBOL(lustre_swab_but_update_buffer);
3048 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
3050 __swab64s(&msl->msl_flags);
3053 void lustre_swab_close_data(struct close_data *cd)
3056 __swab64s(&cd->cd_handle.cookie);
3057 lustre_swab_lu_fid(&cd->cd_fid);
3058 __swab64s(&cd->cd_data_version);
3061 void lustre_swab_close_data_resync_done(struct close_data_resync_done *resync)
3065 __swab32s(&resync->resync_count);
3066 /* after swab, resync_count must in CPU endian */
3067 if (resync->resync_count <= INLINE_RESYNC_ARRAY_SIZE) {
3068 for (i = 0; i < resync->resync_count; i++)
3069 __swab32s(&resync->resync_ids_inline[i]);
3073 void lustre_swab_close_data_special(struct close_data *cd, enum mds_op_bias b)
3075 if (b & MDS_CLOSE_RESYNC_DONE)
3076 lustre_swab_close_data_resync_done(&cd->cd_resync);
3077 else if (b & MDS_CLOSE_LAYOUT_SPLIT)
3078 __swab16s(&cd->cd_mirror_id);
3079 else if (b & MDS_PCC_ATTACH)
3080 swab32s(&cd->cd_archive_id);
3081 else if (b & MDS_CLOSE_LAYOUT_SWAP)
3082 swab64s(&cd->cd_data_version2);
3084 EXPORT_SYMBOL(lustre_swab_close_data_special);
3086 void lustre_swab_lfsck_request(struct lfsck_request *lr)
3088 __swab32s(&lr->lr_event);
3089 __swab32s(&lr->lr_index);
3090 __swab32s(&lr->lr_flags);
3091 __swab32s(&lr->lr_valid);
3092 __swab32s(&lr->lr_speed);
3093 __swab16s(&lr->lr_version);
3094 __swab16s(&lr->lr_active);
3095 __swab16s(&lr->lr_param);
3096 __swab16s(&lr->lr_async_windows);
3097 __swab32s(&lr->lr_flags);
3098 lustre_swab_lu_fid(&lr->lr_fid);
3099 lustre_swab_lu_fid(&lr->lr_fid2);
3100 __swab32s(&lr->lr_comp_id);
3101 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_0) == 0);
3102 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_1) == 0);
3103 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_2) == 0);
3104 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_3) == 0);
3107 void lustre_swab_lfsck_reply(struct lfsck_reply *lr)
3109 __swab32s(&lr->lr_status);
3110 BUILD_BUG_ON(offsetof(typeof(*lr), lr_padding_1) == 0);
3111 __swab64s(&lr->lr_repaired);
3114 static void lustre_swab_orphan_rec(struct lu_orphan_rec *rec)
3116 lustre_swab_lu_fid(&rec->lor_fid);
3117 __swab32s(&rec->lor_uid);
3118 __swab32s(&rec->lor_gid);
3121 void lustre_swab_orphan_ent(struct lu_orphan_ent *ent)
3123 lustre_swab_lu_fid(&ent->loe_key);
3124 lustre_swab_orphan_rec(&ent->loe_rec);
3126 EXPORT_SYMBOL(lustre_swab_orphan_ent);
3128 void lustre_swab_orphan_ent_v2(struct lu_orphan_ent_v2 *ent)
3130 lustre_swab_lu_fid(&ent->loe_key);
3131 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
3132 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
3133 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding) == 0);
3135 EXPORT_SYMBOL(lustre_swab_orphan_ent_v2);
3137 void lustre_swab_orphan_ent_v3(struct lu_orphan_ent_v3 *ent)
3139 lustre_swab_lu_fid(&ent->loe_key);
3140 lustre_swab_orphan_rec(&ent->loe_rec.lor_rec);
3141 lustre_swab_ost_layout(&ent->loe_rec.lor_layout);
3142 __swab32s(&ent->loe_rec.lor_layout_version);
3143 __swab32s(&ent->loe_rec.lor_range);
3144 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding_1) == 0);
3145 BUILD_BUG_ON(offsetof(typeof(ent->loe_rec), lor_padding_2) == 0);
3147 EXPORT_SYMBOL(lustre_swab_orphan_ent_v3);
3149 void lustre_swab_ladvise(struct lu_ladvise *ladvise)
3151 __swab16s(&ladvise->lla_advice);
3152 __swab16s(&ladvise->lla_value1);
3153 __swab32s(&ladvise->lla_value2);
3154 __swab64s(&ladvise->lla_start);
3155 __swab64s(&ladvise->lla_end);
3156 __swab32s(&ladvise->lla_value3);
3157 __swab32s(&ladvise->lla_value4);
3159 EXPORT_SYMBOL(lustre_swab_ladvise);
3161 void lustre_swab_ladvise_hdr(struct ladvise_hdr *ladvise_hdr)
3163 __swab32s(&ladvise_hdr->lah_magic);
3164 __swab32s(&ladvise_hdr->lah_count);
3165 __swab64s(&ladvise_hdr->lah_flags);
3166 __swab32s(&ladvise_hdr->lah_value1);
3167 __swab32s(&ladvise_hdr->lah_value2);
3168 __swab64s(&ladvise_hdr->lah_value3);
3170 EXPORT_SYMBOL(lustre_swab_ladvise_hdr);