/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/fld/fld_index.c
 *
 * Author: WangDi <wangdi@clusterfs.com>
 * Author: Yury Umanets <umka@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_FLD

#include <libcfs/libcfs.h>
#include <linux/module.h>
#include <obd_support.h>
#include <dt_object.h>
#include <lustre_fid.h>
#include <lustre_fld.h>
#include "fld_internal.h"

static const char fld_index_name[] = "fld";
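
/*
 * Reserved sequence ranges that always live on the MDT holding the root
 * (index 0): the IGIF range used for 1.8-style inode/generation FIDs, the
 * .lustre special directory range, and the filesystem root range.
 * fld_index_init() records them in the FLDB on first setup; see
 * fld_insert_special_entries() below.
 */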
static const struct lu_seq_range IGIF_FLD_RANGE = {
	.lsr_start = FID_SEQ_IGIF,
	.lsr_end   = FID_SEQ_IGIF_MAX + 1,
	.lsr_flags = LU_SEQ_RANGE_MDT
};

static const struct lu_seq_range DOT_LUSTRE_FLD_RANGE = {
	.lsr_start = FID_SEQ_DOT_LUSTRE,
	.lsr_end   = FID_SEQ_DOT_LUSTRE + 1,
	.lsr_flags = LU_SEQ_RANGE_MDT
};

static const struct lu_seq_range ROOT_FLD_RANGE = {
	.lsr_start = FID_SEQ_ROOT,
	.lsr_end   = FID_SEQ_ROOT + 1,
	.lsr_flags = LU_SEQ_RANGE_MDT
};
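
/*
 * On-disk layout of the FLD index: each record is keyed by the 64-bit
 * lsr_start of a range, stored big-endian (see range_cpu_to_be() at the
 * insertion sites below), and the record body is the whole struct
 * lu_seq_range, also in big-endian form.
 */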
static const struct dt_index_features fld_index_features = {
	.dif_flags       = DT_IND_UPDATE,
	.dif_keysize_min = sizeof(u64),
	.dif_keysize_max = sizeof(u64),
	.dif_recsize_min = sizeof(struct lu_seq_range),
	.dif_recsize_max = sizeof(struct lu_seq_range),
};

extern struct lu_context_key fld_thread_key;
int fld_declare_index_create(const struct lu_env *env,
			     struct lu_server_fld *fld,
			     const struct lu_seq_range *new_range,
			     struct thandle *th)
{
	struct lu_seq_range *tmp;
	struct lu_seq_range *range;
	struct fld_thread_info *info;
	int rc = 0;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	range = &info->fti_lrange;
	tmp = &info->fti_irange;
	memset(range, 0, sizeof(*range));

	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc == 0) {
		/* In case of duplicate entry, the location must be same */
		LASSERT((lu_seq_range_compare_loc(new_range, range) == 0));
		GOTO(out, rc = -EEXIST);
	}

	if (rc != -ENOENT) {
		CERROR("%s: lookup range "DRANGE" error: rc = %d\n",
		       fld->lsf_name, PRANGE(range), rc);
		GOTO(out, rc);
	}
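
	/*
	 * Illustrative example (values are made up): if the FLDB already
	 * maps [0x400, 0x500) to a given target and the new range is
	 * [0x500, 0x600) on the same target, the two are collapsed into a
	 * single [0x400, 0x600) record below instead of adding a second
	 * entry.
	 */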
	/* Check for the merge case: since fld entries can only grow
	 * incrementally, we only check whether the new range can be merged
	 * from the left. */
	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    lu_seq_range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_declare_delete(env, fld->lsf_obj,
				       (struct dt_key *)&tmp->lsr_start, th);
		if (rc != 0) {
			CERROR("%s: declare record "DRANGE" failed: rc = %d\n",
			       fld->lsf_name, PRANGE(range), rc);
			GOTO(out, rc);
		}
		*tmp = *new_range;
		tmp->lsr_start = range->lsr_start;
	} else {
		*tmp = *new_range;
	}

	range_cpu_to_be(tmp, tmp);
	rc = dt_declare_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
			       (struct dt_key *)&tmp->lsr_start, th);
out:
	RETURN(rc);
}
/**
 * Insert range in fld store.
 *
 * \param range	range to be inserted
 * \param th	transaction handle for this operation; it may be part of a
 *		compound transaction
 *
 * The whole fld index insertion is protected by seq->lss_mutex (see
 * seq_server_alloc_super), i.e. only one thread will access the fldb at a
 * time, so we do not need to worry about the fld file and cache being
 * changed between declare and create.
 * Because fld entries can only grow incrementally, we only check whether
 * the new range can be merged from the left.
 *
 * Caller must hold fld->lsf_lock.
 **/
int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
		     const struct lu_seq_range *new_range, struct thandle *th)
{
	struct lu_seq_range *range;
	struct lu_seq_range *tmp;
	struct fld_thread_info *info;
	struct fld_cache_entry *flde;
	int deleted = 0;
	int rc = 0;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);

	LASSERT(mutex_is_locked(&fld->lsf_lock));

	range = &info->fti_lrange;
	memset(range, 0, sizeof(*range));
	tmp = &info->fti_irange;
	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc != -ENOENT) {
		rc = rc == 0 ? -EEXIST : rc;
		GOTO(out, rc);
	}

	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    lu_seq_range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_delete(env, fld->lsf_obj,
			       (struct dt_key *)&tmp->lsr_start, th);
		if (rc != 0)
			GOTO(out, rc);
		*tmp = *new_range;
		tmp->lsr_start = range->lsr_start;
		deleted = 1;
	} else {
		*tmp = *new_range;
	}

	range_cpu_to_be(tmp, tmp);
	rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
		       (struct dt_key *)&tmp->lsr_start, th, 1);
	if (rc != 0) {
		CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
		       fld->lsf_name, PRANGE(new_range), rc);
		GOTO(out, rc);
	}

	/* Keep the server-side cache in sync with the on-disk index. */
	flde = fld_cache_entry_create(new_range);
	if (IS_ERR(flde))
		GOTO(out, rc = PTR_ERR(flde));

	write_lock(&fld->lsf_cache->fci_lock);
	if (deleted)
		fld_cache_delete_nolock(fld->lsf_cache, new_range);
	rc = fld_cache_insert_nolock(fld->lsf_cache, flde);
	write_unlock(&fld->lsf_cache->fci_lock);
out:
	RETURN(rc);
}
/**
 * Lookup the range for a given seq. Note that here we only care about the
 * start/end of the range; the caller should handle the attached location
 * data (flags, index).
 *
 * \param seq	seq for lookup
 * \param range	result of lookup
 *
 * \retval 0		found, \a range is the matched range
 * \retval -ENOENT	not found, \a range is the left-side range
 * \retval -ve		other error
 */
int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
		     u64 seq, struct lu_seq_range *range)
{
	struct lu_seq_range *fld_rec;
	struct fld_thread_info *info;
	int rc;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	fld_rec = &info->fti_rec;

	rc = fld_cache_lookup(fld->lsf_cache, seq, fld_rec);
	if (rc == 0) {
		*range = *fld_rec;
		if (lu_seq_range_within(range, seq))
			rc = 0;
		else
			rc = -ENOENT;
	}

	CDEBUG(D_INFO, "%s: lookup seq = %#llx range : "DRANGE" rc = %d\n",
	       fld->lsf_name, seq, PRANGE(range), rc);

	RETURN(rc);
}
/**
 * Insert entry in fld store.
 *
 * \param env	relevant lu_env
 * \param fld	fld store
 * \param range	range to be inserted
 *
 * Caller must hold fld->lsf_lock.
 **/
int fld_insert_entry(const struct lu_env *env,
		     struct lu_server_fld *fld,
		     const struct lu_seq_range *range)
{
	struct thandle *th;
	int rc;
	ENTRY;

	LASSERT(mutex_is_locked(&fld->lsf_lock));

	th = dt_trans_create(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev));
	if (IS_ERR(th))
		RETURN(PTR_ERR(th));

	rc = fld_declare_index_create(env, fld, range, th);
	if (rc < 0) {
		if (rc == -EEXIST)
			rc = 0;
		GOTO(out, rc);
	}

	rc = dt_trans_start_local(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev),
				  th);
	if (rc)
		GOTO(out, rc);

	rc = fld_index_create(env, fld, range, th);
	if (rc == -EEXIST)
		rc = 0;
out:
	dt_trans_stop(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev), th);
	RETURN(rc);
}
EXPORT_SYMBOL(fld_insert_entry);
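
/*
 * Usage sketch (illustrative only, not an actual call site): a server-side
 * caller that has just obtained a new sequence range would record it
 * roughly as follows, with "new_range" being a hypothetical, fully
 * filled-in lu_seq_range:
 *
 *	mutex_lock(&fld->lsf_lock);
 *	rc = fld_insert_entry(env, fld, &new_range);
 *	mutex_unlock(&fld->lsf_lock);
 */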
static int fld_insert_special_entries(const struct lu_env *env,
				      struct lu_server_fld *fld)
{
	int rc;

	rc = fld_insert_entry(env, fld, &IGIF_FLD_RANGE);
	if (rc != 0)
		RETURN(rc);

	rc = fld_insert_entry(env, fld, &DOT_LUSTRE_FLD_RANGE);
	if (rc != 0)
		RETURN(rc);

	rc = fld_insert_entry(env, fld, &ROOT_FLD_RANGE);

	RETURN(rc);
}
int fld_index_init(const struct lu_env *env, struct lu_server_fld *fld,
		   struct dt_device *dt, int type)
{
	struct dt_object *dt_obj = NULL;
	struct lu_fid fid;
	struct lu_attr *attr = NULL;
	struct lu_seq_range *range = NULL;
	struct fld_thread_info *info;
	struct dt_object_format dof;
	struct dt_it *it;
	const struct dt_it_ops *iops;
	u32 index;
	int rc;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);

	lu_local_obj_fid(&fid, FLD_INDEX_OID);
	OBD_ALLOC_PTR(attr);
	if (attr == NULL)
		RETURN(-ENOMEM);

	memset(attr, 0, sizeof(*attr));
	attr->la_valid = LA_MODE;
	attr->la_mode = S_IFREG | 0666;
	dof.dof_type = DFT_INDEX;
	dof.u.dof_idx.di_feat = &fld_index_features;

	dt_obj = dt_locate(env, dt, &fid);
	if (IS_ERR(dt_obj)) {
		rc = PTR_ERR(dt_obj);
		dt_obj = NULL;
		GOTO(out, rc);
	}

	LASSERT(dt_obj != NULL);
	if (!dt_object_exists(dt_obj)) {
		lu_object_put(env, &dt_obj->do_lu);
		dt_obj = dt_find_or_create(env, dt, &fid, &dof, attr);
		if (IS_ERR(dt_obj)) {
			rc = PTR_ERR(dt_obj);
			CERROR("%s: Can't find \"%s\" obj %d\n", fld->lsf_name,
			       fld_index_name, rc);
			dt_obj = NULL;
			GOTO(out, rc);
		}
	}

	fld->lsf_obj = dt_obj;
	rc = dt_obj->do_ops->do_index_try(env, dt_obj, &fld_index_features);
	if (rc != 0) {
		CERROR("%s: File \"%s\" is not an index: rc = %d!\n",
		       fld->lsf_name, fld_index_name, rc);
		GOTO(out, rc);
	}

	range = &info->fti_rec;
	/* Load fld entries to cache */
	iops = &dt_obj->do_index_ops->dio_it;
	it = iops->init(env, dt_obj, 0);
	if (IS_ERR(it))
		GOTO(out, rc = PTR_ERR(it));

	rc = iops->load(env, it, 0);
	if (rc < 0)
		GOTO(out_it_fini, rc);

	if (rc > 0) {
		/* Load FLD entry into server cache */
		do {
			rc = iops->rec(env, it, (struct dt_rec *)range, 0);
			if (rc != 0)
				GOTO(out_it_put, rc);
			LASSERT(range != NULL);
			range_be_to_cpu(range, range);
			rc = fld_cache_insert(fld->lsf_cache, range);
			if (rc != 0)
				GOTO(out_it_put, rc);
			rc = iops->next(env, it);
		} while (rc == 0);
	}

	rc = fld_name_to_index(fld->lsf_name, &index);
	if (rc < 0)
		GOTO(out_it_put, rc);
	else
		rc = 0;

	if (index == 0 && type == LU_SEQ_RANGE_MDT) {
		/* Note: fld_insert_entry will detect whether these
		 * special entries already exist inside FLDB */
		mutex_lock(&fld->lsf_lock);
		rc = fld_insert_special_entries(env, fld);
		mutex_unlock(&fld->lsf_lock);
		if (rc != 0) {
			CERROR("%s: insert special entries failed!: rc = %d\n",
			       fld->lsf_name, rc);
			GOTO(out_it_put, rc);
		}
	}
out_it_put:
	iops->put(env, it);
out_it_fini:
	iops->fini(env, it);
out:
	if (attr != NULL)
		OBD_FREE_PTR(attr);

	if (rc < 0 && dt_obj != NULL) {
		lu_object_put(env, &dt_obj->do_lu);
		fld->lsf_obj = NULL;
	}
	RETURN(rc);
}
void fld_index_fini(const struct lu_env *env, struct lu_server_fld *fld)
{
	ENTRY;
	if (fld->lsf_obj != NULL) {
		if (!IS_ERR(fld->lsf_obj))
			lu_object_put(env, &fld->lsf_obj->do_lu);
		fld->lsf_obj = NULL;
	}
	EXIT;
}
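
/*
 * Read FLD entries that match the index and flags of \a range and whose
 * lsr_start is past range->lsr_start into \a data, as a struct
 * lu_seq_range_array converted to little-endian. Iteration starts from
 * range->lsr_end, and -EAGAIN is returned once \a data_len cannot hold
 * one more entry.
 */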
int fld_server_read(const struct lu_env *env, struct lu_server_fld *fld,
		    struct lu_seq_range *range, void *data, int data_len)
{
	struct lu_seq_range_array *lsra = data;
	struct fld_thread_info *info;
	struct dt_object *dt_obj = fld->lsf_obj;
	struct lu_seq_range *entry;
	struct dt_it *it;
	const struct dt_it_ops *iops;
	int rc;
	ENTRY;

	lsra->lsra_count = 0;
	iops = &dt_obj->do_index_ops->dio_it;
	it = iops->init(env, dt_obj, 0);
	if (IS_ERR(it))
		RETURN(PTR_ERR(it));

	rc = iops->load(env, it, range->lsr_end);
	if (rc <= 0)
		GOTO(out_it_fini, rc);

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);
	entry = &info->fti_rec;
	do {
		rc = iops->rec(env, it, (struct dt_rec *)entry, 0);
		if (rc != 0)
			GOTO(out_it_put, rc);

		if (offsetof(typeof(*lsra), lsra_lsr[lsra->lsra_count + 1]) >
		    data_len)
			GOTO(out, rc = -EAGAIN);

		range_be_to_cpu(entry, entry);
		if (entry->lsr_index == range->lsr_index &&
		    entry->lsr_flags == range->lsr_flags &&
		    entry->lsr_start > range->lsr_start) {
			lsra->lsra_lsr[lsra->lsra_count] = *entry;
			lsra->lsra_count++;
		}

		rc = iops->next(env, it);
	} while (rc == 0);
	if (rc > 0)
		rc = 0;
out:
	range_array_cpu_to_le(lsra, lsra);