4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2014, Intel Corporation.
28 * Copyright 2015 Cray Inc, all rights reserved.
31 * Define ost_id associated functions
34 #ifndef _LUSTRE_OSTID_H_
35 #define _LUSTRE_OSTID_H_
37 #include <libcfs/libcfs.h>
38 #include <lustre/lustre_fid.h>
39 #include <lustre/lustre_idl.h>
41 static inline __u64 lmm_oi_id(const struct ost_id *oi)
46 static inline __u64 lmm_oi_seq(const struct ost_id *oi)
51 static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
56 static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
61 static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
62 const struct ost_id *src_oi)
64 dst_oi->oi.oi_id = __le64_to_cpu(src_oi->oi.oi_id);
65 dst_oi->oi.oi_seq = __le64_to_cpu(src_oi->oi.oi_seq);
68 static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
69 const struct ost_id *src_oi)
71 dst_oi->oi.oi_id = __cpu_to_le64(src_oi->oi.oi_id);
72 dst_oi->oi.oi_seq = __cpu_to_le64(src_oi->oi.oi_seq);
75 /* extract OST sequence (group) from a wire ost_id (id/seq) pair */
76 static inline __u64 ostid_seq(const struct ost_id *ostid)
78 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
79 return FID_SEQ_OST_MDT0;
81 if (fid_seq_is_default(ostid->oi.oi_seq))
82 return FID_SEQ_LOV_DEFAULT;
84 if (fid_is_idif(&ostid->oi_fid))
85 return FID_SEQ_OST_MDT0;
87 return fid_seq(&ostid->oi_fid);
90 /* extract OST objid from a wire ost_id (id/seq) pair */
91 static inline __u64 ostid_id(const struct ost_id *ostid)
93 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
94 return ostid->oi.oi_id & IDIF_OID_MASK;
96 if (fid_seq_is_default(ostid->oi.oi_seq))
97 return ostid->oi.oi_id;
99 if (fid_is_idif(&ostid->oi_fid))
100 return fid_idif_id(fid_seq(&ostid->oi_fid),
101 fid_oid(&ostid->oi_fid), 0);
103 return fid_oid(&ostid->oi_fid);
106 static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
108 if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
111 oi->oi_fid.f_seq = seq;
113 * Note: if f_oid + f_ver is zero, we need init it
114 * to be 1, otherwise, ostid_seq will treat this
115 * as old ostid (oi_seq == 0)
117 if (!oi->oi_fid.f_oid && !oi->oi_fid.f_ver)
118 oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
122 static inline void ostid_set_seq_mdt0(struct ost_id *oi)
124 ostid_set_seq(oi, FID_SEQ_OST_MDT0);
127 static inline void ostid_set_seq_echo(struct ost_id *oi)
129 ostid_set_seq(oi, FID_SEQ_ECHO);
132 static inline void ostid_set_seq_llog(struct ost_id *oi)
134 ostid_set_seq(oi, FID_SEQ_LLOG);
138 * Note: we need check oi_seq to decide where to set oi_id,
139 * so oi_seq should always be set ahead of oi_id.
141 static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
143 if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
144 if (oid >= IDIF_MAX_OID) {
145 CERROR("Bad %llu to set "DOSTID"\n",
146 (unsigned long long)oid, POSTID(oi));
150 } else if (fid_is_idif(&oi->oi_fid)) {
151 if (oid >= IDIF_MAX_OID) {
152 CERROR("Bad %llu to set "DOSTID"\n",
153 (unsigned long long)oid, POSTID(oi));
156 oi->oi_fid.f_seq = fid_idif_seq(oid,
157 fid_idif_ost_idx(&oi->oi_fid));
158 oi->oi_fid.f_oid = oid;
159 oi->oi_fid.f_ver = oid >> 48;
161 if (oid > OBIF_MAX_OID) {
162 CERROR("Bad %llu to set "DOSTID"\n",
163 (unsigned long long)oid, POSTID(oi));
166 oi->oi_fid.f_oid = oid;
170 static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
171 struct ost_id *dst_oi)
173 if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
174 dst_oi->oi.oi_id = __cpu_to_le64(src_oi->oi.oi_id);
175 dst_oi->oi.oi_seq = __cpu_to_le64(src_oi->oi.oi_seq);
177 fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
181 static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
182 struct ost_id *dst_oi)
184 if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
185 dst_oi->oi.oi_id = __le64_to_cpu(src_oi->oi.oi_id);
186 dst_oi->oi.oi_seq = __le64_to_cpu(src_oi->oi.oi_seq);
188 fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
192 /* pack any OST FID into an ostid (id/seq) for the wire/disk */
193 static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
195 if (fid_seq_is_igif(fid->f_seq)) {
196 CERROR("bad IGIF, "DFID"\n", PFID(fid));
200 if (fid_is_idif(fid)) {
201 ostid_set_seq_mdt0(ostid);
202 ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
205 ostid->oi_fid = *fid;
/*
 * Sigh, because pre-2.4 uses
 * struct lov_mds_md_v1 {
 *	........
 *	__u64 lmm_object_id;
 *	__u64 lmm_object_seq;
 *	......
 * }
 * to identify the LOV(MDT) object, and lmm_object_seq will
 * be normal_fid, which makes it hard to combine these conversions
 * into ostid_to_fid(), so we do the lmm_oi/fid conversion separately.
 *
 * We can tell the lmm_oi by this way,
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 *	lmm_oi.f_ver = 0
 *
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from LMA, besides this multiple-case check might
 * make swab more complicated. So we will keep using id/seq for lmm_oi.
 */
235 static inline void fid_to_lmm_oi(const struct lu_fid *fid,
238 oi->oi.oi_id = fid_oid(fid);
239 oi->oi.oi_seq = fid_seq(fid);
243 * Unpack an OST object id/seq (group) into a FID. This is needed for
244 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
245 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
246 * be passed through unchanged. Only legacy OST objects in "group 0"
247 * will be mapped into the IDIF namespace so that they can fit into the
248 * struct lu_fid fields without loss.
250 static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid,
253 __u64 seq = ostid_seq(ostid);
255 if (ost_idx > 0xffff) {
256 CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
261 if (fid_seq_is_mdt0(seq)) {
262 __u64 oid = ostid_id(ostid);
264 /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
265 * that we map into the IDIF namespace. It allows up to 2^48
266 * objects per OST, as this is the object namespace that has
267 * been in production for years. This can handle create rates
268 * of 1M objects/s/OST for 9 years, or combinations thereof.
270 if (oid >= IDIF_MAX_OID) {
271 CERROR("bad MDT0 id(1), "DOSTID" ost_idx:%u\n",
272 POSTID(ostid), ost_idx);
275 fid->f_seq = fid_idif_seq(oid, ost_idx);
276 /* truncate to 32 bits by assignment */
278 /* in theory, not currently used */
279 fid->f_ver = oid >> 48;
280 } else if (!fid_seq_is_default(seq)) {
281 /* This is either an IDIF object, which identifies objects
282 * across all OSTs, or a regular FID. The IDIF namespace
283 * maps legacy OST objects into the FID namespace. In both
284 * cases, we just pass the FID through, no conversion needed.
286 if (ostid->oi_fid.f_ver) {
287 CERROR("bad MDT0 id(2), "DOSTID" ost_idx:%u\n",
288 POSTID(ostid), ost_idx);
291 *fid = ostid->oi_fid;