1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 # define EXPORT_SYMTAB
25 #define DEBUG_SUBSYSTEM S_LMV
27 #include <linux/slab.h>
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/slab.h>
31 #include <linux/pagemap.h>
32 #include <asm/div64.h>
33 #include <linux/seq_file.h>
35 #include <liblustre.h>
38 #include <linux/obd_support.h>
39 #include <linux/lustre_lib.h>
40 #include <linux/lustre_net.h>
41 #include <linux/lustre_idl.h>
42 #include <linux/lustre_dlm.h>
43 #include <linux/lustre_mds.h>
44 #include <linux/obd_class.h>
45 #include <linux/obd_ost.h>
46 #include <linux/lprocfs_status.h>
47 #include <linux/lustre_fsfilt.h>
48 #include <linux/obd_lmv.h>
49 #include "lmv_internal.h"
/* slab cache used for struct lmv_obj allocations, plus a counter of
 * live objects for leak detection; both are defined in another file
 * of this module (NOTE(review): confirm exact definition site). */
52 extern kmem_cache_t *obj_cache;
53 extern atomic_t obj_cache_count;
55 /* object list and its guard. */
56 static LIST_HEAD(obj_list);
57 static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;
59 /* creates new obj on passed @id and @mea. */
/*
 * Allocate and initialize an lmv_obj describing a striped (split)
 * directory from its mea (stripe descriptor).
 *
 * NOTE(review): this listing has gaps in the embedded line numbering,
 * so the signature tail, braces, locals and error branches are not
 * fully visible; comments describe only the code that is shown.
 */
61 lmv_alloc_obj(struct obd_device *obd,
67 unsigned int obj_size;
68 struct lmv_obd *lmv = &obd->u.lmv;
/* only the two known directory hash types are acceptable. */
70 LASSERT(mea->mea_magic == MEA_MAGIC_LAST_CHAR
71 || mea->mea_magic == MEA_MAGIC_ALL_CHARS);
/* object comes from the dedicated slab cache; the global counter
 * tracks live objects for leak checking at module teardown. */
73 OBD_SLAB_ALLOC(obj, obj_cache, GFP_NOFS,
78 atomic_inc(&obj_cache_count);
/* hashtype doubles as the mea magic value for this directory. */
83 obj->hashtype = mea->mea_magic;
/* fresh object: per-object mutex, zero refcount, stripe count
 * taken from the mea. */
85 init_MUTEX(&obj->guard);
86 atomic_set(&obj->count, 0);
87 obj->objcount = mea->mea_count;
/* the objs array is sized by the configured target count
 * (ld_tgt_count), not by mea_count, so every MDT has a slot. */
89 obj_size = sizeof(struct lmv_inode) *
90 lmv->desc.ld_tgt_count;
92 OBD_ALLOC(obj->objs, obj_size);
96 memset(obj->objs, 0, obj_size);
/* copy the per-stripe ids out of the mea; each must carry a valid
 * inode number and fid. */
99 for (i = 0; i < mea->mea_count; i++) {
100 CDEBUG(D_OTHER, "subobj "DLID4"\n",
101 OLID4(&mea->mea_ids[i]));
102 obj->objs[i].id = mea->mea_ids[i];
103 LASSERT(id_ino(&obj->objs[i].id));
104 LASSERT(id_fid(&obj->objs[i].id));
/* error-cleanup path (label not visible in this listing): release obj.
 * NOTE(review): OBD_FREE is used here although obj was obtained via
 * OBD_SLAB_ALLOC; OBD_SLAB_FREE would be expected -- verify against
 * the complete source before changing anything. */
110 OBD_FREE(obj, sizeof(*obj));
114 /* destroy passed @obj. */
/*
 * Release all memory behind @obj. Must only be called once the
 * refcount has dropped to zero (asserted below); callers reach this
 * via the put path. Listing has gaps (return type/braces not shown).
 */
116 lmv_free_obj(struct lmv_obj *obj)
118 unsigned int obj_size;
119 struct lmv_obd *lmv = &obj->obd->u.lmv;
/* freeing a still-referenced object would be use-after-free. */
121 LASSERT(!atomic_read(&obj->count));
/* objs array was sized by ld_tgt_count in lmv_alloc_obj; must free
 * with the same size. */
123 obj_size = sizeof(struct lmv_inode) *
124 lmv->desc.ld_tgt_count;
126 OBD_FREE(obj->objs, obj_size);
/* object itself goes back to the slab cache; keep the leak counter
 * in sync with the increment done at allocation time. */
127 OBD_SLAB_FREE(obj, obj_cache, sizeof(*obj));
128 atomic_dec(&obj_cache_count);
/* insert @obj into the global list, taking the list's reference.
 * Caller must hold obj_list_lock (locked wrapper: lmv_add_obj). */
132 __add_obj(struct lmv_obj *obj)
134 atomic_inc(&obj->count);
135 list_add(&obj->list, &obj_list);
/* locked wrapper around __add_obj().
 * NOTE(review): the call between lock/unlock is missing from this
 * listing (numbering gap 141 -> 143). */
139 lmv_add_obj(struct lmv_obj *obj)
141 spin_lock(&obj_list_lock);
143 spin_unlock(&obj_list_lock);
/* unlink @obj from the global list; caller must hold obj_list_lock.
 * The matching reference drop is not visible in this listing. */
147 __del_obj(struct lmv_obj *obj)
149 list_del(&obj->list);
/* locked wrapper around __del_obj().
 * NOTE(review): the call between lock/unlock is missing from this
 * listing (numbering gap 156 -> 158). */
156 lmv_del_obj(struct lmv_obj *obj)
156 spin_lock(&obj_list_lock);
158 spin_unlock(&obj_list_lock);
/* take a reference on @obj; caller must hold obj_list_lock.
 * Presumably returns @obj (return statement not visible here). */
161 static struct lmv_obj *
162 __get_obj(struct lmv_obj *obj)
164 LASSERT(obj != NULL);
165 atomic_inc(&obj->count);
/* locked wrapper around __get_obj().
 * NOTE(review): the call between lock/unlock is missing from this
 * listing (numbering gap 172 -> 174). */
170 lmv_get_obj(struct lmv_obj *obj)
172 spin_lock(&obj_list_lock);
174 spin_unlock(&obj_list_lock);
/* drop a reference on @obj; on the last put, log and destroy it.
 * Caller must hold obj_list_lock (locked wrapper: lmv_put_obj).
 * The actual free call inside the if-branch is not visible in this
 * listing (numbering gap after line 186). */
179 __put_obj(struct lmv_obj *obj)
183 if (atomic_dec_and_test(&obj->count)) {
184 struct lustre_id *id = &obj->id;
185 CDEBUG(D_OTHER, "last reference to "DLID4" - "
186 "destroying\n", OLID4(id));
/* locked wrapper around __put_obj().
 * NOTE(review): the call between lock/unlock is missing from this
 * listing (numbering gap 194 -> 196). */
192 lmv_put_obj(struct lmv_obj *obj)
194 spin_lock(&obj_list_lock);
196 spin_unlock(&obj_list_lock);
/*
 * Walk the global object list looking for an object whose fid matches
 * @id, skipping objects already marked O_FREEING. Returns a new
 * reference on the match (via __get_obj); the not-found return path is
 * not visible in this listing. Caller must hold obj_list_lock.
 */
199 static struct lmv_obj *
200 __grab_obj(struct obd_device *obd, struct lustre_id *id)
203 struct list_head *cur;
205 list_for_each(cur, &obj_list) {
206 obj = list_entry(cur, struct lmv_obj, list);
208 /* check if object is in progress of destroying. If so - skip
210 if (obj->state & O_FREEING)
/* the obd ownership check described below is not visible in this
 * listing (numbering gap 218 -> 223). */
214 * we should make sure, that we have found object belong to
215 * passed obd. It is possible that, object manager will have two
216 * objects with the same fid belong to different obds, if client
217 * and mds runs on the same host. May be it is good idea to have
218 * objects list assosiated with obd.
223 /* check if this is what we're looking for. */
224 if (id_equal_fid(&obj->id, id))
225 return __get_obj(obj);
/* locked lookup: find and reference the object matching @id.
 * The return statement is not visible in this listing. */
232 lmv_grab_obj(struct obd_device *obd, struct lustre_id *id)
237 spin_lock(&obj_list_lock);
238 obj = __grab_obj(obd, id);
239 spin_unlock(&obj_list_lock);
244 /* looks in objects list for an object that matches passed @id. If it is not
245 * found -- creates it using passed @mea and puts onto list. */
/*
 * Classic alloc-then-recheck pattern: allocate outside the spinlock,
 * then re-search under the lock to resolve the race where another
 * thread inserted the same object meanwhile. Several lines (early
 * return on first lookup hit, the insertion call, the free of the
 * losing allocation) are missing from this listing.
 */
246 static struct lmv_obj *
247 __create_obj(struct obd_device *obd, struct lustre_id *id, struct mea *mea)
249 struct lmv_obj *new, *obj;
/* fast path: object may already be cached. */
252 obj = lmv_grab_obj(obd, id);
256 /* no such object yet, allocate and initialize it. */
257 new = lmv_alloc_obj(obd, id, mea);
261 /* check if someone create it already while we were dealing with
262 * allocating @obj. */
263 spin_lock(&obj_list_lock);
264 obj = __grab_obj(obd, id);
266 /* someone created it already - put @obj and getting out. */
268 spin_unlock(&obj_list_lock);
275 spin_unlock(&obj_list_lock);
277 CDEBUG(D_OTHER, "new obj in lmv cache: "DLID4"\n",
284 /* creates object from passed @id and @mea. If @mea is NULL, it will be
285 * obtained from correct MDT and used for constructing the object. */
/*
 * Exported entry point. When no mea is supplied it fetches the
 * directory EA from the MDT owning @id (md_getattr on the target
 * selected by id_group), unpacks it with mdc_req2lustre_md, then
 * builds the cached object via __create_obj. Error values are
 * returned as ERR_PTR codes. Several lines (declarations of rc/valid/
 * mealen/md/obj, NULL-mea branch structure, final return) are missing
 * from this listing.
 */
287 lmv_create_obj(struct obd_export *exp, struct lustre_id *id, struct mea *mea)
289 struct obd_device *obd = exp->exp_obd;
290 struct lmv_obd *lmv = &obd->u.lmv;
291 struct ptlrpc_request *req = NULL;
297 CDEBUG(D_OTHER, "get mea for "DLID4" and create lmv obj\n",
305 CDEBUG(D_OTHER, "mea isn't passed in, get it now\n");
306 mealen = MEA_SIZE_LMV(lmv);
308 /* time to update mea of parent id */
/* ask only for the directory EA portions of the attributes. */
310 valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA | OBD_MD_MEA;
/* id_group selects which MDT holds @id; use its export. */
312 rc = md_getattr(lmv->tgts[id_group(id)].ltd_exp,
313 id, valid, NULL, NULL, 0, mealen, NULL, &req);
315 CERROR("md_getattr() failed, error %d\n", rc);
316 GOTO(cleanup, obj = ERR_PTR(rc));
/* unpack the reply into md; md.mea carries the stripe descriptor. */
319 rc = mdc_req2lustre_md(exp, req, 0, NULL, &md);
321 CERROR("mdc_req2lustre_md() failed, error %d\n", rc);
322 GOTO(cleanup, obj = ERR_PTR(rc));
/* reply contained no mea: directory is not split. */
326 GOTO(cleanup, obj = ERR_PTR(-ENODATA));
331 /* got mea, now create obj for it. */
332 obj = __create_obj(obd, id, mea);
334 CERROR("Can't create new object "DLID4"\n",
336 GOTO(cleanup, obj = ERR_PTR(-ENOMEM));
/* the unpacked mea was allocated by the unpack helper; release it
 * through obd_free_memmd once the object holds its own copy. */
340 obd_free_memmd(exp, (struct lov_stripe_md **)&md.mea);
/* cleanup label (not visible): always release the getattr request. */
345 ptlrpc_req_finished(req);
350 * looks for object with @id and orders to destroy it. It is possible the object
351 * will not be destroyed right now, because it is still using by someone. In
352 * this case it will be marked as "freeing" and will not be accessible anymore
353 * for subsequent callers of lmv_grab_obj().
/* NOTE(review): lines unlinking the object and dropping the
 * references taken by __grab_obj/the list are missing from this
 * listing (numbering gaps 366 -> 371). */
356 lmv_delete_obj(struct obd_export *exp, struct lustre_id *id)
358 struct obd_device *obd = exp->exp_obd;
363 spin_lock(&obj_list_lock);
364 obj = __grab_obj(obd, id);
/* once O_FREEING is set, __grab_obj skips this object, so no new
 * users can appear; remaining holders finish via the put path. */
366 obj->state |= O_FREEING;
371 spin_unlock(&obj_list_lock);
/* object-manager setup hook for @obd; visible work is only a debug
 * message (remaining body, if any, not shown in this listing). */
377 lmv_setup_mgr(struct obd_device *obd)
380 LASSERT(obd != NULL);
382 CDEBUG(D_INFO, "LMV object manager setup (%s)\n",
/*
 * Teardown: walk the global list under the lock, mark every object
 * O_FREEING, complain about objects that still have extra references
 * (a leak at cleanup time), and release them. The unlink/free calls
 * inside the loop are missing from this listing (numbering gaps).
 * Uses list_for_each_safe because entries are removed while walking.
 */
389 lmv_cleanup_mgr(struct obd_device *obd)
391 struct list_head *cur, *tmp;
395 CDEBUG(D_INFO, "LMV object manager cleanup (%s)\n",
398 spin_lock(&obj_list_lock);
399 list_for_each_safe(cur, tmp, &obj_list) {
400 obj = list_entry(cur, struct lmv_obj, list);
405 obj->state |= O_FREEING;
/* count > 1 means someone besides the list still holds this
 * object at teardown -- report it as a leak. */
406 if (atomic_read(&obj->count) > 1) {
407 CERROR("obj "DLID4" has count > 1 (%d)\n",
408 OLID4(&obj->id), atomic_read(&obj->count));
412 spin_unlock(&obj_list_lock);