/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/fld/fld_index.c
 *
 * Author: WangDi <wangdi@clusterfs.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */
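/*
 * Server-side FLD (FIDs Location Database) index.  Each record maps a
 * range of FID sequences [lsr_start, lsr_end) to the target (lsr_index,
 * lsr_flags) that serves it.  The records are stored persistently in the
 * local "fld" index object and mirrored in the in-memory cache
 * fld->lsf_cache.
 */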
#define DEBUG_SUBSYSTEM S_FLD

#include <libcfs/libcfs.h>
#include <linux/module.h>
#include <obd_support.h>
#include <dt_object.h>
#include <lustre_fid.h>
#include <lustre_fld.h>
#include "fld_internal.h"

static const char fld_index_name[] = "fld";
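/*
 * Well-known ranges seeded into the FLDB of the first MDT by
 * fld_insert_special_entries(): IGIF sequences (compatibility with 1.8
 * inode/generation FIDs), the .lustre special directory, and the root
 * sequence.  All of them are served by MDT0000.
 */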
static const struct lu_seq_range IGIF_FLD_RANGE = {
	.lsr_start = FID_SEQ_IGIF,
	.lsr_end   = FID_SEQ_IGIF_MAX + 1,
	.lsr_index = 0,
	.lsr_flags = LU_SEQ_RANGE_MDT
};

static const struct lu_seq_range DOT_LUSTRE_FLD_RANGE = {
	.lsr_start = FID_SEQ_DOT_LUSTRE,
	.lsr_end   = FID_SEQ_DOT_LUSTRE + 1,
	.lsr_index = 0,
	.lsr_flags = LU_SEQ_RANGE_MDT
};

static const struct lu_seq_range ROOT_FLD_RANGE = {
	.lsr_start = FID_SEQ_ROOT,
	.lsr_end   = FID_SEQ_ROOT + 1,
	.lsr_index = 0,
	.lsr_flags = LU_SEQ_RANGE_MDT
};
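/*
 * On-disk format of the FLD index: the key is the first sequence of the
 * range (a big-endian u64, see range_cpu_to_be()), and the record is the
 * whole struct lu_seq_range.
 */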
static const struct dt_index_features fld_index_features = {
	.dif_flags       = DT_IND_UPDATE,
	.dif_keysize_min = sizeof(u64),
	.dif_keysize_max = sizeof(u64),
	.dif_recsize_min = sizeof(struct lu_seq_range),
	.dif_recsize_max = sizeof(struct lu_seq_range),
};

extern struct lu_context_key fld_thread_key;
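/*
 * fld_thread_key provides per-thread struct fld_thread_info scratch
 * buffers (fti_lrange, fti_irange, fti_rec, see fld_internal.h), which
 * are used below instead of on-stack lu_seq_range copies.
 */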
int fld_declare_index_create(const struct lu_env *env,
			     struct lu_server_fld *fld,
			     const struct lu_seq_range *new_range,
			     struct thandle *th)
{
	struct lu_seq_range *tmp;
	struct lu_seq_range *range;
	struct fld_thread_info *info;
	int rc = 0;

	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	range = &info->fti_lrange;
	tmp = &info->fti_irange;
	memset(range, 0, sizeof(*range));

	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc == 0) {
		/* In case of a duplicate entry, the location must be the same */
		LASSERT((lu_seq_range_compare_loc(new_range, range) == 0));
		GOTO(out, rc = -EEXIST);
	}

	if (rc != -ENOENT) {
		CERROR("%s: lookup range "DRANGE" error: rc = %d\n",
		       fld->lsf_name, PRANGE(range), rc);
		GOTO(out, rc);
	}

	/* Check for the merge case: since FLD entries can only grow
	 * incrementally, it is enough to check whether the new range
	 * can be merged with the range on its left. */
	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    lu_seq_range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_declare_delete(env, fld->lsf_obj,
				       (struct dt_key *)&tmp->lsr_start, th);
		if (rc != 0) {
			CERROR("%s: declare record "DRANGE" failed: rc = %d\n",
			       fld->lsf_name, PRANGE(range), rc);
			GOTO(out, rc);
		}
		*tmp = *new_range;
		tmp->lsr_start = range->lsr_start;
	} else {
		*tmp = *new_range;
	}

	range_cpu_to_be(tmp, tmp);
	rc = dt_declare_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
			       (struct dt_key *)&tmp->lsr_start, th);
out:
	RETURN(rc);
}
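/*
 * Note: fld_index_create() below is expected to apply exactly the updates
 * declared here (the optional delete of the left neighbour plus the
 * insert), since these declarations are what reserve the transaction
 * credits with the OSD.
 */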
/**
 * Insert a range into the fld store.
 *
 * \param range  range to be inserted
 * \param th     transaction handle for this operation; it may be part of
 *               a compound transaction
 *
 * The whole fld index insertion is protected by seq->lss_mutex (see
 * seq_server_alloc_super), i.e. only one thread accesses the FLDB at a
 * time, so the fld file and the cache cannot change between declare and
 * create.  Because FLD entries can only grow incrementally, we only check
 * whether the new range can be merged from the left.
 *
 * Caller must hold fld->lsf_lock.
 */
int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
		     const struct lu_seq_range *new_range, struct thandle *th)
{
	struct lu_seq_range *range;
	struct lu_seq_range *tmp;
	struct fld_thread_info *info;
	struct fld_cache_entry *flde;
	int rc = 0;

	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);

	LASSERT(mutex_is_locked(&fld->lsf_lock));

	range = &info->fti_lrange;
	memset(range, 0, sizeof(*range));
	tmp = &info->fti_irange;
	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc != -ENOENT) {
		rc = rc == 0 ? -EEXIST : rc;
		GOTO(out, rc);
	}

	/* Merge with the left neighbour if the new range extends it and
	 * both point to the same location. */
	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    lu_seq_range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_delete(env, fld->lsf_obj,
			       (struct dt_key *)&tmp->lsr_start, th);
		if (rc != 0)
			GOTO(out, rc);
		*tmp = *new_range;
		tmp->lsr_start = range->lsr_start;
	} else {
		*tmp = *new_range;
	}

	range_cpu_to_be(tmp, tmp);
	rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
		       (struct dt_key *)&tmp->lsr_start, th, 1);
	if (rc != 0) {
		CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
		       fld->lsf_name, PRANGE(new_range), rc);
		GOTO(out, rc);
	}

	flde = fld_cache_entry_create(new_range);
	if (IS_ERR(flde))
		GOTO(out, rc = PTR_ERR(flde));

	write_lock(&fld->lsf_cache->fci_lock);
	/* Replace any stale cached entry with the one just written. */
	fld_cache_delete_nolock(fld->lsf_cache, new_range);
	rc = fld_cache_insert_nolock(fld->lsf_cache, flde);
	write_unlock(&fld->lsf_cache->fci_lock);
out:
	RETURN(rc);
}
/**
 * Look up the range for the sequence passed in.  Only the start/end of
 * the range is resolved here; the caller is responsible for handling the
 * attached location data (flags, index).
 *
 * \param seq    sequence to look up
 * \param range  result of the lookup
 *
 * \retval 0        found, \a range is the matched range
 * \retval -ENOENT  not found, \a range is the left-side range
 * \retval -ve      other error
 */
int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
		     u64 seq, struct lu_seq_range *range)
{
	struct lu_seq_range *fld_rec;
	struct fld_thread_info *info;
	int rc;

	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	fld_rec = &info->fti_rec;

	rc = fld_cache_lookup(fld->lsf_cache, seq, fld_rec);
	if (rc == 0) {
		*range = *fld_rec;
		if (lu_seq_range_within(range, seq))
			rc = 0;
		else
			rc = -ENOENT;
	}

	CDEBUG(D_INFO, "%s: lookup seq = %#llx range : "DRANGE" rc = %d\n",
	       fld->lsf_name, seq, PRANGE(range), rc);
	RETURN(rc);
}
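/*
 * Illustrative caller pattern (a sketch only, not part of this file):
 * resolving which target serves a given sequence would look roughly like
 *
 *	struct lu_seq_range found = { 0 };
 *	int rc = fld_index_lookup(env, fld, seq, &found);
 *
 *	if (rc == 0)
 *		use(found.lsr_index, found.lsr_flags);
 *	else if (rc == -ENOENT)
 *		handle_unknown_sequence(seq);
 *
 * where use() and handle_unknown_sequence() stand in for caller-specific
 * handling.
 */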
/**
 * Insert an entry into the fld store.
 *
 * \param env    relevant lu_env
 * \param fld    fld store
 * \param range  range to be inserted
 *
 * \retval 0    success
 * \retval -ve  error
 *
 * Caller must hold fld->lsf_lock.
 */
int fld_insert_entry(const struct lu_env *env,
		     struct lu_server_fld *fld,
		     const struct lu_seq_range *range)
{
	struct dt_device *dt = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
	struct thandle *th;
	int rc;

	ENTRY;
	LASSERT(mutex_is_locked(&fld->lsf_lock));

	th = dt_trans_create(env, dt);
	if (IS_ERR(th))
		RETURN(PTR_ERR(th));

	rc = fld_declare_index_create(env, fld, range, th);
	if (rc != 0) {
		/* an identical entry already exists */
		if (rc == -EEXIST)
			rc = 0;
		GOTO(out, rc);
	}

	rc = dt_trans_start_local(env, dt, th);
	if (rc != 0)
		GOTO(out, rc);

	rc = fld_index_create(env, fld, range, th);
	if (rc == -EEXIST)
		rc = 0;
out:
	dt_trans_stop(env, dt, th);
	RETURN(rc);
}
EXPORT_SYMBOL(fld_insert_entry);
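/*
 * FLDB updates follow the usual OSD two-phase pattern: declare the
 * changes against the transaction handle, start a local transaction,
 * apply the declared changes, then stop the transaction.  Inserting a
 * range that already exists is not treated as an error, which is what
 * allows fld_insert_special_entries() below to run on every start-up.
 */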
static int fld_insert_special_entries(const struct lu_env *env,
				      struct lu_server_fld *fld)
{
	int rc;

	rc = fld_insert_entry(env, fld, &IGIF_FLD_RANGE);
	if (rc != 0)
		RETURN(rc);
	rc = fld_insert_entry(env, fld, &DOT_LUSTRE_FLD_RANGE);
	if (rc != 0)
		RETURN(rc);
	rc = fld_insert_entry(env, fld, &ROOT_FLD_RANGE);
	RETURN(rc);
}
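/*
 * fld_index_init() locates (or creates) the "fld" index object on the
 * local OSD, loads every on-disk range into the server-side cache and,
 * on MDT0000, seeds the well-known IGIF/.lustre/root entries.
 */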
int fld_index_init(const struct lu_env *env, struct lu_server_fld *fld,
		   struct dt_device *dt, int type)
{
	struct dt_object *dt_obj = NULL;
	struct lu_fid fid;
	struct lu_attr *attr = NULL;
	struct lu_seq_range *range = NULL;
	struct fld_thread_info *info;
	struct dt_object_format dof;
	struct dt_it *it;
	const struct dt_it_ops *iops;
	int range_count = 0;
	u32 index;
	int rc;

	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);

	lu_local_obj_fid(&fid, FLD_INDEX_OID);
	OBD_ALLOC_PTR(attr);
	if (attr == NULL)
		RETURN(-ENOMEM);

	memset(attr, 0, sizeof(*attr));
	attr->la_valid = LA_MODE;
	attr->la_mode = S_IFREG | 0666;
	dof.dof_type = DFT_INDEX;
	dof.u.dof_idx.di_feat = &fld_index_features;

	dt_obj = dt_locate(env, dt, &fid);
	if (IS_ERR(dt_obj)) {
		rc = PTR_ERR(dt_obj);
		dt_obj = NULL;
		GOTO(out, rc);
	}

	LASSERT(dt_obj != NULL);
	if (!dt_object_exists(dt_obj)) {
		dt_object_put(env, dt_obj);
		dt_obj = dt_find_or_create(env, dt, &fid, &dof, attr);
		if (IS_ERR(dt_obj)) {
			rc = PTR_ERR(dt_obj);
			CERROR("%s: Can't find \"%s\" obj %d\n", fld->lsf_name,
			       fld_index_name, rc);
			dt_obj = NULL;
			GOTO(out, rc);
		}
	}

	fld->lsf_obj = dt_obj;
	rc = dt_obj->do_ops->do_index_try(env, dt_obj, &fld_index_features);
	if (rc != 0) {
		CERROR("%s: File \"%s\" is not an index: rc = %d!\n",
		       fld->lsf_name, fld_index_name, rc);
		GOTO(out, rc);
	}

	range = &info->fti_rec;
	/* Load fld entries into the server cache */
	iops = &dt_obj->do_index_ops->dio_it;
	it = iops->init(env, dt_obj, 0);
	if (IS_ERR(it))
		GOTO(out, rc = PTR_ERR(it));

	rc = iops->load(env, it, 0);
	if (rc < 0)
		GOTO(out_it_fini, rc);
	if (rc == 0) {
		/* Not positioned on a record yet: step to the first one. */
		rc = iops->next(env, it);
		if (rc < 0)
			GOTO(out_it_fini, rc);
	}

	while (rc == 0) {
		rc = iops->rec(env, it, (struct dt_rec *)range, 0);
		if (rc != 0)
			GOTO(out_it_put, rc);
		range_be_to_cpu(range, range);

		/* Newly created ldiskfs IAM indexes may include a
		 * zeroed-out key and record.  Ignore it here. */
		if (range->lsr_start < range->lsr_end) {
			rc = fld_cache_insert(fld->lsf_cache, range);
			if (rc != 0)
				GOTO(out_it_put, rc);
			range_count++;
		}

		rc = iops->next(env, it);
		if (rc < 0)
			GOTO(out_it_fini, rc);
	}

	if (range_count == 0)
		fld->lsf_new = 1;

	rc = fld_name_to_index(fld->lsf_name, &index);
	if (rc < 0)
		GOTO(out_it_put, rc);
	rc = 0;

	if (index == 0 && type == LU_SEQ_RANGE_MDT) {
		/* Note: fld_insert_entry will detect whether these
		 * special entries already exist inside FLDB */
		mutex_lock(&fld->lsf_lock);
		rc = fld_insert_special_entries(env, fld);
		mutex_unlock(&fld->lsf_lock);
		if (rc != 0) {
			CERROR("%s: insert special entries failed!: rc = %d\n",
			       fld->lsf_name, rc);
			GOTO(out_it_put, rc);
		}
	}
out_it_put:
	iops->put(env, it);
out_it_fini:
	iops->fini(env, it);
out:
	if (attr != NULL)
		OBD_FREE_PTR(attr);
	if (rc < 0) {
		if (dt_obj != NULL)
			dt_object_put(env, dt_obj);
		fld->lsf_obj = NULL;
	}
	RETURN(rc);
}
void fld_index_fini(const struct lu_env *env, struct lu_server_fld *fld)
{
	ENTRY;
	if (fld->lsf_obj != NULL) {
		if (!IS_ERR(fld->lsf_obj))
			dt_object_put(env, fld->lsf_obj);
		fld->lsf_obj = NULL;
	}
	EXIT;
}
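/*
 * fld_server_read() serves bulk FLDB reads (the FLD_READ request): it
 * iterates the on-disk index starting after range->lsr_end and copies
 * every following range with the same index and flags into the reply
 * buffer, a struct lu_seq_range_array in little-endian wire order.
 */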
int fld_server_read(const struct lu_env *env, struct lu_server_fld *fld,
		    struct lu_seq_range *range, void *data, int data_len)
{
	struct lu_seq_range_array *lsra = data;
	struct fld_thread_info *info;
	struct dt_object *dt_obj = fld->lsf_obj;
	struct lu_seq_range *entry;
	struct dt_it *it;
	const struct dt_it_ops *iops;
	int rc;

	ENTRY;

	lsra->lsra_count = 0;
	iops = &dt_obj->do_index_ops->dio_it;
	it = iops->init(env, dt_obj, 0);
	if (IS_ERR(it))
		RETURN(PTR_ERR(it));

	rc = iops->load(env, it, range->lsr_end);
	if (rc <= 0)
		GOTO(out_it_fini, rc);

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);
	entry = &info->fti_rec;
	do {
		rc = iops->rec(env, it, (struct dt_rec *)entry, 0);
		if (rc != 0)
			GOTO(out_it_put, rc);

		/* Stop if the reply buffer cannot hold one more range. */
		if (offsetof(typeof(*lsra), lsra_lsr[lsra->lsra_count + 1]) >
		    data_len)
			GOTO(out, rc = -EAGAIN);

		range_be_to_cpu(entry, entry);
		if (entry->lsr_index == range->lsr_index &&
		    entry->lsr_flags == range->lsr_flags &&
		    entry->lsr_start > range->lsr_start) {
			lsra->lsra_lsr[lsra->lsra_count] = *entry;
			lsra->lsra_count++;
		}

		rc = iops->next(env, it);
	} while (rc == 0);
	if (rc > 0)
		rc = 0;
out:
	range_array_cpu_to_le(lsra, lsra);
out_it_put:
	iops->put(env, it);
out_it_fini:
	iops->fini(env, it);
	RETURN(rc);
}