4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
22 * Copyright 2020, DataDirect Networks Storage.
24 * This file is part of Lustre, http://www.lustre.org/
26 * Author: John L. Hammond <jhammond@whamcloud.com>
28 * lustre/utils/ofd_access_batch.c
30 * Access log entry batching for ofd_access_log_reader.
36 #include <linux/lustre/lustre_access_log.h>
37 #include <linux/lustre/lustre_fid.h>
38 #include <linux/lustre/lustre_idl.h>
39 #include <libcfs/util/hash.h>
40 #include <libcfs/util/list.h>
42 #include "ofd_access_batch.h"
44 /* XXX Weird param order to be consistent with list_replace_init(). */
/*
 * Replace @old_node with @new_node in its hash chain: link @new_node in
 * immediately before @old_node, then unlink @old_node, leaving it in the
 * "unhashed" (self-pointing) state so hlist_unhashed() is true for it.
 */
45 static inline void hlist_replace_init(struct hlist_node *old_node,
46 struct hlist_node *new_node)
48 hlist_add_before(new_node, old_node);
49 hlist_del_init(old_node);
/*
 * A node in the FID-keyed open-chained hash table. Embedded in a larger
 * struct (e.g. alr_entry) and keyed by fhn_fid; fhn_node links it into a
 * bucket's hlist chain.
 */
52 struct fid_hash_node {
/* Chain linkage; "unhashed" (self-pointing) when not in any bucket. */
53 struct hlist_node fhn_node;
/* The FID this node is keyed by. */
54 struct lu_fid fhn_fid;
/* Return true iff the two FIDs are identical in sequence, OID, and version. */
57 static inline bool fid_eq(const struct lu_fid *f1, const struct lu_fid *f2)
59 return f1->f_seq == f2->f_seq && f1->f_oid == f2->f_oid &&
60 f1->f_ver == f2->f_ver;
/*
 * Flatten a FID into a single 64-bit value (an inode-number-like key used
 * for hashing). IGIF FIDs map directly through lu_igif_ino(); otherwise
 * the sequence and OID are mixed together. NOTE(review): the declarations
 * and assignment of 'ino'/'seq' are not visible in this view — presumably
 * seq = fid_seq(fid); confirm against the full file.
 */
63 static inline __u64 fid_flatten(const struct lu_fid *fid)
68 if (fid_is_igif(fid)) {
69 ino = lu_igif_ino(fid);
/* Mix seq and oid so distinct FIDs rarely collide in 64 bits. */
75 ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
/* 0 is not a valid inode number, so fall back to the raw OID. */
77 return ino != 0 ? ino : fid_oid(fid);
81 * map fid to 32 bit value for ino on 32bit systems.
/*
 * 32-bit variant of fid_flatten(): squeeze a FID into a 32-bit
 * inode-number-like key. IGIF FIDs map through lu_igif_ino(); otherwise
 * bits of SEQ and OID are interleaved per the comment below. NOTE(review):
 * the 'ino'/'seq' declarations are not visible in this view.
 */
83 static inline __u32 fid_flatten32(const struct lu_fid *fid)
88 if (fid_is_igif(fid)) {
89 ino = lu_igif_ino(fid);
/* Rebase seq so the first "normal" sequence maps near zero. */
93 seq = fid_seq(fid) - FID_SEQ_START;
95 /* Map the high bits of the OID into higher bits of the inode number so
96 * that inodes generated at about the same time have a reduced chance
97 * of collisions. This will give a period of 2^12 = 1024 unique clients
98 * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
99 * (from OID), or up to 128M inodes without collisions for new files.
101 ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
102 (seq >> (64 - (40-8)) & 0xffffff00) +
103 (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
/* 0 is not a valid inode number, so fall back to the raw OID. */
105 return ino != 0 ? ino : fid_oid(fid);
/*
 * Hash a FID into a bucket index in [0, 2^shift). Flattens the FID to a
 * word-sized integer first, choosing the flatten variant that matches the
 * native word size, then applies hash_long().
 */
108 static unsigned long fid_hash(const struct lu_fid *f, unsigned int shift)
110 #if __BITS_PER_LONG == 32
111 return hash_long(fid_flatten32(f), shift);
112 #elif __BITS_PER_LONG == 64
113 return hash_long(fid_flatten(f), shift);
/* Unsupported word size: fail the build rather than miscompute hashes. */
115 # error "Wordsize not 32 or 64"
/*
 * Initialize a hash node: mark it unhashed and record its key FID.
 * NOTE(review): the fhn_fid assignment is not visible in this view —
 * presumably fhn->fhn_fid = *fid; confirm against the full file.
 */
119 static void fhn_init(struct fid_hash_node *fhn, const struct lu_fid *fid)
121 INIT_HLIST_NODE(&fhn->fhn_node);
/* Return true if the node is currently linked into a hash bucket. */
125 static bool fhn_is_hashed(const struct fid_hash_node *fhn)
127 return !hlist_unhashed(&fhn->fhn_node);
/*
 * Unlink the node from its bucket (if hashed) and reset it to the
 * unhashed state. Safe to call on an already-unhashed node.
 */
130 static void fhn_del_init(struct fid_hash_node *fhn)
132 if (fhn_is_hashed(fhn))
133 hlist_del_init(&fhn->fhn_node);
/*
 * Swap @new_fhn into @old_fhn's place in the hash chain: insert @new_fhn
 * before @old_fhn, then unlink @old_fhn and leave it unhashed. Same
 * operation as hlist_replace_init() at the fid_hash_node level.
 */
136 static inline void fhn_replace_init(struct fid_hash_node *old_fhn,
137 struct fid_hash_node *new_fhn)
139 hlist_add_before(&new_fhn->fhn_node, &old_fhn->fhn_node);
140 hlist_del_init(&old_fhn->fhn_node);
/*
 * Add @fhn to the hash table @head (an array of 2^shift buckets), at the
 * head of the bucket chosen by hashing fhn_fid. The node must not already
 * be hashed; no duplicate check is performed here (see fid_hash_insert()).
 */
143 void fid_hash_add(struct hlist_head *head, unsigned int shift,
144 struct fid_hash_node *fhn)
146 assert(!fhn_is_hashed(fhn));
148 hlist_add_head(&fhn->fhn_node, &head[fid_hash(&fhn->fhn_fid, shift)]);
/*
 * Look up @fid in the hash table: walk the bucket selected by fid_hash()
 * and compare each node's key with fid_eq(). Returns the matching node,
 * or NULL if none — NOTE(review): the return statements fall in lines not
 * visible in this view; confirm against the full file.
 */
151 struct fid_hash_node *
152 fid_hash_find(struct hlist_head *head, unsigned int shift, const struct lu_fid *fid)
154 struct hlist_head *hash_list;
155 struct hlist_node *node, *next;
156 struct fid_hash_node *fhn;
158 hash_list = &head[fid_hash(fid, shift)];
159 hlist_for_each_entry_safe(fhn, node, next, hash_list, fhn_node) {
160 assert(fhn_is_hashed(fhn));
162 if (fid_eq(fid, &fhn->fhn_fid))
/*
 * Insert @new_fhn into the hash table unless a node with the same FID is
 * already present. Scans the target bucket first; on a key match the
 * existing node is presumably returned and @new_fhn is not inserted,
 * otherwise @new_fhn is linked at the bucket head. NOTE(review): the
 * return statements are in lines not visible in this view — confirm the
 * exact old-vs-NULL return convention against the full file and callers
 * (alr_batch_add() treats the returned pointer as the pre-existing entry).
 */
169 struct fid_hash_node *
170 fid_hash_insert(struct hlist_head *head, unsigned int shift, struct fid_hash_node *new_fhn)
172 struct hlist_head *list;
173 struct hlist_node *node, *next;
174 struct fid_hash_node *old_fhn;
176 list = &head[fid_hash(&new_fhn->fhn_fid, shift)];
177 hlist_for_each_entry_safe(old_fhn, node, next, list, fhn_node) {
178 assert(fhn_is_hashed(old_fhn));
180 if (fid_eq(&old_fhn->fhn_fid, &new_fhn->fhn_fid))
184 hlist_add_head(&new_fhn->fhn_node, list);
/*
 * Allocate and initialize a hash table of 2^shift empty buckets, storing
 * the table and shift through @phead/@pshift. Returns 0 on success;
 * NOTE(review): the failure return and the *phead/*pshift stores are in
 * lines not visible in this view — confirm against the full file.
 */
189 int fid_hash_init(struct hlist_head **phead, unsigned int *pshift, unsigned int shift)
191 struct hlist_head *new_head;
/* One hlist_head per bucket; 2^shift buckets total. */
194 new_head = malloc(sizeof(*new_head) << shift);
195 if (new_head == NULL)
198 for (i = 0; i < (1 << shift); i++)
199 INIT_HLIST_HEAD(&new_head[i]);
/*
 * Resize the hash table at *phead/*pshift to 2^new_shift buckets: allocate
 * a new table via fid_hash_init(), then rehash every node from the old
 * table into the new one. A no-op when the shift is unchanged.
 * NOTE(review): the unlinking of each node before fid_hash_add(), the old
 * table's free, and the final stores/return are in lines not visible in
 * this view — confirm against the full file.
 */
207 int fid_hash_resize(struct hlist_head **phead, unsigned int *pshift, unsigned int new_shift)
209 struct hlist_head *new_head;
/* Already the requested size: nothing to do. */
213 if (*pshift == new_shift)
216 rc = fid_hash_init(&new_head, &new_shift, new_shift);
/* Move every node from each old bucket into the new table. */
220 for (i = 0; i < (1 << *pshift); i++) {
221 struct hlist_head *list = &(*phead)[i];
222 struct hlist_node *node, *next;
223 struct fid_hash_node *fhn;
225 hlist_for_each_entry_safe(fhn, node, next, list, fhn_node) {
227 fid_hash_add(new_head, new_shift, fhn);
243 /* Entry in the batching hash. */
/*
 * One batched access-log record, keyed by (FID via the embedded hash
 * node). The [2] arrays hold per-direction statistics, indexed by
 * ALR_READ/ALR_WRITE (see alre_update()).
 */
/* Hash table linkage and key FID. */
245 struct fid_hash_node alre_fid_hash_node;
246 time_t alre_time[2]; /* Not strictly needed. */
/* Accumulated segment counts per direction. */
250 __u64 alre_segment_count[2];
/* Flexible array member: OBD device name, NUL-terminated; storage is
 * sized at allocation time in alr_batch_add(). */
252 char alre_obd_name[];
/* Initial batch hash size is 2^10 buckets; growth is capped at 2^30. */
256 ALR_BATCH_HASH_SHIFT_DEFAULT = 10,
257 ALR_BATCH_HASH_SHIFT_MAX = 30,
/* Bucket array of the batching hash (2^alrb_hash_shift entries). */
261 struct hlist_head *alrb_hash;
/* log2 of the bucket count. */
262 unsigned int alrb_hash_shift;
/* Number of distinct entries batched since the last print/reset;
 * drives the resize heuristic in alr_batch_print(). */
263 unsigned int alrb_count;
/* Unlink an entry from the batching hash (safe if already unhashed). */
266 static void alre_del_init(struct alr_entry *alre)
268 fhn_del_init(&alre->alre_fid_hash_node);
/*
 * Fold one access record into an existing batch entry. The direction
 * (read vs. write) is chosen from @flags; the per-direction stats keep
 * the latest time, the widest [begin, end) extent, and running totals of
 * size, segment count, and access count.
 */
271 static void alre_update(struct alr_entry *alre, time_t time, __u64 begin,
272 __u64 end, __u32 size, __u32 segment_count, __u32 flags)
/* Any record without OFD_ACCESS_READ is accounted as a write. */
274 unsigned int d = (flags & OFD_ACCESS_READ) ? ALR_READ : ALR_WRITE;
276 alre->alre_time[d] = max_t(time_t, alre->alre_time[d], time);
277 alre->alre_begin[d] = min_t(__u64, alre->alre_begin[d], begin);
278 alre->alre_end[d] = max_t(__u64, alre->alre_end[d], end);
279 alre->alre_size[d] += size;
280 alre->alre_segment_count[d] += segment_count;
281 alre->alre_count[d] += 1;
/*
 * Record one access in the batch. Uses a stack-allocated sentinel node to
 * probe the hash: fid_hash_insert() either links the sentinel (FID not yet
 * batched) or returns the pre-existing node. In the first case a real
 * alr_entry is allocated (sized for the flexible alre_obd_name member) and
 * spliced in place of the sentinel; in the second the existing entry is
 * reused. Either way the new access is folded in via alre_update().
 * NOTE(review): NULL-argument checks, the calloc-failure path, count
 * bookkeeping, and the return value are in lines not visible in this view.
 */
284 int alr_batch_add(struct alr_batch *alrb, const char *obd_name,
285 const struct lu_fid *pfid, time_t time, __u64 begin, __u64 end,
286 __u32 size, __u32 segment_count, __u32 flags)
288 struct fid_hash_node fhn, *p;
289 struct alr_entry *alre;
/* alre_time[] is printed as a 64-bit value; guard the assumption. */
295 assert(sizeof(time_t) == sizeof(__u64));
297 fhn_init(&fhn, pfid);
299 /* Find old or insert sentinel (fhn). Replace sentinel if returned. */
300 p = fid_hash_insert(alrb->alrb_hash, alrb->alrb_hash_shift, &fhn);
/* +1 for the NUL of the flexible-array obd name. */
302 size_t alre_size = sizeof(*alre) + strlen(obd_name) + 1;
304 alre = calloc(1, alre_size);
310 fhn_init(&alre->alre_fid_hash_node, pfid);
311 strcpy(alre->alre_obd_name, obd_name);
/* Swap the heap entry in where the stack sentinel sits. */
312 fhn_replace_init(&fhn, &alre->alre_fid_hash_node);
/* Existing entry found: recover it from its embedded hash node. */
315 alre = container_of(p, struct alr_entry, alre_fid_hash_node);
318 alre_update(alre, time, begin, end, size, segment_count, flags);
326 /* Print, clear, and resize the batch. */
/*
 * Emit every batched entry to @file — one line per (entry, direction)
 * with a nonzero access count — then reset the batch. Afterwards the hash
 * is grown (up to ALR_BATCH_HASH_SHIFT_MAX) so the bucket count covers the
 * entry count seen in this interval, and alrb_count is zeroed.
 * NOTE(review): the fprintf call head, per-entry teardown, and the
 * error/return handling are in lines not visible in this view.
 */
327 int alr_batch_print(struct alr_batch *alrb, FILE *file)
330 unsigned int new_hash_shift;
/* Walk every bucket of the batching hash. */
336 for (i = 0; i < (1 << alrb->alrb_hash_shift); i++) {
337 struct hlist_head *list = &alrb->alrb_hash[i];
338 struct hlist_node *node, *next;
339 struct alr_entry *alre;
341 hlist_for_each_entry_safe(alre, node, next, list,
342 alre_fid_hash_node.fhn_node) {
/* d indexes ALR_READ / ALR_WRITE. */
345 for (d = 0; d < 2; d++) {
/* Skip directions with no accesses this interval. */
348 if (alre->alre_count[d] == 0)
351 /* stdio stream error state is sticky. */
353 "%s "DFID" %lld %llu %llu %llu %llu %llu %c\n",
355 PFID(&alre->alre_fid_hash_node.fhn_fid),
356 (long long)alre->alre_time[d],
357 (unsigned long long)alre->alre_begin[d],
358 (unsigned long long)alre->alre_end[d],
359 (unsigned long long)alre->alre_size[d],
360 (unsigned long long)alre->alre_segment_count[d],
361 (unsigned long long)alre->alre_count[d],
362 (d == ALR_READ) ? 'r' : 'w');
372 /* Resize hash based on previous count. */
373 new_hash_shift = alrb->alrb_hash_shift;
/* Grow until the bucket count covers the entries seen, capped at MAX. */
375 while (new_hash_shift < ALR_BATCH_HASH_SHIFT_MAX &&
376 (1 << new_hash_shift) < alrb->alrb_count)
379 fid_hash_resize(&alrb->alrb_hash, &alrb->alrb_hash_shift,
/* Start counting afresh for the next batching interval. */
382 alrb->alrb_count = 0;
/*
 * Allocate a new (empty) batch whose hash has 2^shift buckets; a
 * presumably out-of-range shift falls back to the default. Returns the
 * batch, or NULL on allocation failure — NOTE(review): the shift
 * validation condition, calloc NULL check, fid_hash_init failure path,
 * and the return are in lines not visible in this view.
 */
387 struct alr_batch *alr_batch_create(unsigned int shift)
389 struct alr_batch *alrb;
393 shift = ALR_BATCH_HASH_SHIFT_DEFAULT;
395 alrb = calloc(1, sizeof(*alrb));
399 rc = fid_hash_init(&alrb->alrb_hash, &alrb->alrb_hash_shift, shift);
/*
 * Free a batch and everything in it: walk every bucket, tear down each
 * entry (the safe iterator permits unlinking/freeing mid-walk), then free
 * the bucket array itself. NOTE(review): the per-entry unlink/free and
 * the free(alrb) / NULL-argument handling are in lines not visible in
 * this view — confirm against the full file.
 */
408 void alr_batch_destroy(struct alr_batch *alrb)
415 for (i = 0; i < (1 << alrb->alrb_hash_shift); i++) {
416 struct hlist_head *list = &alrb->alrb_hash[i];
417 struct hlist_node *node, *next;
418 struct alr_entry *alre;
420 hlist_for_each_entry_safe(alre, node, next, list, alre_fid_hash_node.fhn_node) {
426 free(alrb->alrb_hash);