1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 # define EXPORT_SYMTAB
25 #define DEBUG_SUBSYSTEM S_LMV
27 #include <linux/slab.h>
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/slab.h>
31 #include <linux/pagemap.h>
32 #include <asm/div64.h>
33 #include <linux/seq_file.h>
35 #include <liblustre.h>
38 #include <linux/obd_support.h>
39 #include <linux/lustre_lib.h>
40 #include <linux/lustre_net.h>
41 #include <linux/lustre_idl.h>
42 #include <linux/lustre_dlm.h>
43 //#include <linux/lustre_mds.h>
44 #include <linux/obd_class.h>
45 //#include <linux/obd_ost.h>
46 #include <linux/lprocfs_status.h>
47 //#include <linux/lustre_fsfilt.h>
48 #include "lmv_internal.h"
/* Slab cache used for struct lmv_obj allocations and a counter of live
 * cached objects; both are defined elsewhere in the LMV module. */
extern kmem_cache_t *obj_cache;
extern atomic_t obj_cache_count;

/* object list and its guard. All cached lmv_obj instances are linked on
 * obj_list; every traversal or modification must hold obj_list_lock. */
static LIST_HEAD(obj_list);
static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;
/* creates new obj on passed @id and @mea.
 *
 * Allocates a struct lmv_obj from obj_cache, initializes its guard mutex,
 * refcount and hash type from @mea, then allocates and fills the per-target
 * sub-object id array (obj->objs).
 *
 * NOTE(review): this chunk is missing lines — the return type, remaining
 * parameters, braces, NULL checks after allocation and the error-path
 * labels are not visible here; comments annotate only what is shown. */
lmv_alloc_obj(struct obd_device *obd,
        unsigned int obj_size;
        struct lmv_obd *lmv = &obd->u.lmv;

        /* only the two supported mea hash magics are acceptable. */
        LASSERT(mea->mea_magic == MEA_MAGIC_LAST_CHAR
                || mea->mea_magic == MEA_MAGIC_ALL_CHARS);

        OBD_SLAB_ALLOC(obj, obj_cache, GFP_NOFS,
        /* account the new object in the global slab-object counter. */
        atomic_inc(&obj_cache_count);
        obj->hashtype = mea->mea_magic;

        init_MUTEX(&obj->guard);
        /* refcount starts at zero; callers take references via __get_obj(). */
        atomic_set(&obj->count, 0);
        obj->objcount = mea->mea_count;

        /* the objs array is sized for all configured targets
         * (ld_tgt_count), not just the mea_count entries filled below. */
        obj_size = sizeof(struct lmv_inode) *
                lmv->desc.ld_tgt_count;

        OBD_ALLOC(obj->objs, obj_size);
        memset(obj->objs, 0, obj_size);

        /* copy the sub-object ids from the mea; each must carry a valid
         * inode number and fid. */
        for (i = 0; i < mea->mea_count; i++) {
                CDEBUG(D_OTHER, "subobj "DLID4"\n",
                       OLID4(&mea->mea_ids[i]));
                obj->objs[i].id = mea->mea_ids[i];
                LASSERT(id_ino(&obj->objs[i].id));
                LASSERT(id_fid(&obj->objs[i].id));

        /* error path: release the partially initialized object.
         * NOTE(review): presumably reached via a goto label not visible in
         * this chunk — confirm against the full source. */
        OBD_FREE(obj, sizeof(*obj));
/* destroy passed @obj.
 *
 * Frees the per-target sub-object array and returns @obj to the slab
 * cache. Must only be called once the last reference is gone.
 *
 * NOTE(review): return type and braces are missing from this chunk. */
lmv_free_obj(struct lmv_obj *obj)
        unsigned int obj_size;
        struct lmv_obd *lmv = &obj->obd->u.lmv;

        /* freeing a still-referenced object would be use-after-free. */
        LASSERT(!atomic_read(&obj->count));

        /* objs was sized for all targets in lmv_alloc_obj(); the free size
         * must match that allocation. */
        obj_size = sizeof(struct lmv_inode) *
                lmv->desc.ld_tgt_count;

        OBD_FREE(obj->objs, obj_size);
        OBD_SLAB_FREE(obj, obj_cache, sizeof(*obj));
        atomic_dec(&obj_cache_count);
/* adds @obj to the global object list, taking a reference on it.
 * Caller must hold obj_list_lock. NOTE(review): signature line(s) above
 * this one are missing from this chunk. */
__add_obj(struct lmv_obj *obj)
        atomic_inc(&obj->count);
        list_add(&obj->list, &obj_list);
/* locked wrapper adding @obj to the global list.
 * NOTE(review): the statement between lock and unlock (presumably
 * __add_obj(obj)) is missing from this chunk — confirm in full source. */
lmv_add_obj(struct lmv_obj *obj)
        spin_lock(&obj_list_lock);
        spin_unlock(&obj_list_lock);
/* unlinks @obj from the global object list.
 * Caller must hold obj_list_lock. NOTE(review): signature line(s) and the
 * matching reference drop are missing from this chunk. */
__del_obj(struct lmv_obj *obj)
        list_del(&obj->list);
/* locked wrapper removing @obj from the global list.
 * NOTE(review): the statement between lock and unlock (presumably
 * __del_obj(obj)) is missing from this chunk — confirm in full source. */
lmv_del_obj(struct lmv_obj *obj)
        spin_lock(&obj_list_lock);
        spin_unlock(&obj_list_lock);
/* takes an extra reference on @obj; caller must hold obj_list_lock.
 * NOTE(review): the trailing return statement is missing from this chunk;
 * presumably returns @obj. */
static struct lmv_obj *
__get_obj(struct lmv_obj *obj)
        LASSERT(obj != NULL);
        atomic_inc(&obj->count);
/* locked wrapper taking a reference on @obj.
 * NOTE(review): the statement between lock and unlock (presumably
 * __get_obj(obj)) is missing from this chunk — confirm in full source. */
lmv_get_obj(struct lmv_obj *obj)
        spin_lock(&obj_list_lock);
        spin_unlock(&obj_list_lock);
/* drops a reference on @obj; when the last reference goes away the object
 * is destroyed. Caller must hold obj_list_lock.
 * NOTE(review): the destruction calls inside the if-branch are missing
 * from this chunk. */
__put_obj(struct lmv_obj *obj)
        if (atomic_dec_and_test(&obj->count)) {
                struct lustre_id *id = &obj->id;
                CDEBUG(D_OTHER, "last reference to "DLID4" - "
                       "destroying\n", OLID4(id));
/* locked wrapper dropping a reference on @obj.
 * NOTE(review): the statement between lock and unlock (presumably
 * __put_obj(obj)) is missing from this chunk — confirm in full source. */
lmv_put_obj(struct lmv_obj *obj)
        spin_lock(&obj_list_lock);
        spin_unlock(&obj_list_lock);
/* searches the global list for an object matching @id (by fid), skipping
 * objects already being destroyed, and returns it with an extra
 * reference. Caller must hold obj_list_lock.
 * NOTE(review): several lines (loop body braces, continue, obd comparison,
 * final return) are missing from this chunk. */
static struct lmv_obj *
__grab_obj(struct obd_device *obd, struct lustre_id *id)
        struct list_head *cur;

        list_for_each(cur, &obj_list) {
                obj = list_entry(cur, struct lmv_obj, list);

                /* check if object is in progress of destroying. If so,
                 * skip it. */
                if (obj->state & O_FREEING)

                /*
                 * We should make sure that the object we found belongs to
                 * the passed obd. It is possible that the object manager
                 * will have two objects with the same fid belonging to
                 * different obds, if a client and an mds run on the same
                 * host. Maybe it would be a good idea to have the object
                 * list associated with the obd.
                 */

                /* check if this is what we're looking for. */
                if (id_equal_fid(&obj->id, id))
                        return __get_obj(obj);
/* locked lookup of an object matching @id on @obd; returns a referenced
 * object or NULL via __grab_obj().
 * NOTE(review): return type and trailing return statement are missing
 * from this chunk. */
lmv_grab_obj(struct obd_device *obd, struct lustre_id *id)
        spin_lock(&obj_list_lock);
        obj = __grab_obj(obd, id);
        spin_unlock(&obj_list_lock);
/* looks in objects list for an object that matches passed @id. If it is not
 * found -- creates it using passed @mea and puts onto list.
 *
 * Allocation happens outside the list lock; after allocating, the list is
 * re-checked under obj_list_lock in case another thread raced us.
 * NOTE(review): several lines (early-return on found object, error
 * handling, the __add_obj() call and final return) are missing from this
 * chunk. */
static struct lmv_obj *
__create_obj(struct obd_device *obd, struct lustre_id *id, struct mea *mea)
        struct lmv_obj *new, *obj;

        /* fast path: object may already be cached. */
        obj = lmv_grab_obj(obd, id);

        /* no such object yet, allocate and initialize it. */
        new = lmv_alloc_obj(obd, id, mea);

        /* check if someone created it already while we were dealing with
         * allocating @obj. */
        spin_lock(&obj_list_lock);
        obj = __grab_obj(obd, id);
        /* someone created it already - put @obj and get out. */
        spin_unlock(&obj_list_lock);
        spin_unlock(&obj_list_lock);

        CDEBUG(D_OTHER, "new obj in lmv cache: "DLID4"\n",
/* creates object from passed @id and @mea. If @mea is NULL, it will be
 * obtained from correct MDT and used for constructing the object.
 *
 * When the mea must be fetched, an md_getattr() RPC is issued to the
 * target MDT holding @id and the reply is unpacked via
 * mdc_req2lustre_md(); errors are propagated as ERR_PTR() values.
 * NOTE(review): many lines (return type, local declarations, if/else
 * structure, labels and final RETURN) are missing from this chunk. */
lmv_create_obj(struct obd_export *exp, struct lustre_id *id, struct mea *mea)
        struct obd_device *obd = exp->exp_obd;
        struct lmv_obd *lmv = &obd->u.lmv;
        struct ptlrpc_request *req = NULL;

        CDEBUG(D_OTHER, "get mea for "DLID4" and create lmv obj\n",

        CDEBUG(D_OTHER, "mea isn't passed in, get it now\n");
        mealen = MEA_SIZE_LMV(lmv);

        /* time to update mea of parent id */

        /* request the striping EA (mea) for the directory. */
        valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA | OBD_MD_MEA;

        rc = md_getattr(lmv->tgts[id_group(id)].ltd_exp,
                        id, valid, NULL, NULL, 0, mealen, NULL, &req);
        CERROR("md_getattr() failed, error %d\n", rc);
        GOTO(cleanup, obj = ERR_PTR(rc));

        /* unpack the reply into lustre_md to get at md.mea. */
        rc = mdc_req2lustre_md(exp, req, 0, NULL, &md);
        CERROR("mdc_req2lustre_md() failed, error %d\n", rc);
        GOTO(cleanup, obj = ERR_PTR(rc));

        /* no mea in the reply: the directory is not striped. */
        GOTO(cleanup, obj = ERR_PTR(-ENODATA));

        /* got mea, now create obj for it. */
        obj = __create_obj(obd, id, mea);
        CERROR("Can't create new object "DLID4"\n",
        GOTO(cleanup, obj = ERR_PTR(-ENOMEM));

        /* release the unpacked md memory obtained above. */
        obd_free_memmd(exp, (struct lov_stripe_md **)&md.mea);

        /* cleanup label: drop the getattr request if one was issued. */
        ptlrpc_req_finished(req);
/*
 * looks for object with @id and orders it destroyed. It is possible the
 * object will not be destroyed right now, because it is still in use by
 * someone. In this case it will be marked as "freeing" and will not be
 * accessible anymore for subsequent callers of lmv_grab_obj().
 *
 * NOTE(review): return type, local declarations and the del/put calls
 * between the O_FREEING marking and the unlock are missing from this
 * chunk.
 */
lmv_delete_obj(struct obd_export *exp, struct lustre_id *id)
        struct obd_device *obd = exp->exp_obd;

        spin_lock(&obj_list_lock);
        obj = __grab_obj(obd, id);
        /* mark it so no new lookup can return this object. */
        obj->state |= O_FREEING;
        spin_unlock(&obj_list_lock);
/* initializes the LMV object manager for @obd.
 * NOTE(review): return type and most of the body are missing from this
 * chunk; only the sanity check and a debug message are visible. */
lmv_setup_mgr(struct obd_device *obd)
        LASSERT(obd != NULL);

        CDEBUG(D_INFO, "LMV object manager setup (%s)\n",
/* tears down the LMV object manager for @obd: walks the global object
 * list, marks each object O_FREEING, warns about leaked references and
 * releases the objects.
 * NOTE(review): return type, the per-object obd filter, the del/put calls
 * and closing braces are missing from this chunk. */
lmv_cleanup_mgr(struct obd_device *obd)
        struct list_head *cur, *tmp;

        CDEBUG(D_INFO, "LMV object manager cleanup (%s)\n",

        spin_lock(&obj_list_lock);
        /* safe iteration: entries are unlinked while walking. */
        list_for_each_safe(cur, tmp, &obj_list) {
                obj = list_entry(cur, struct lmv_obj, list);

                obj->state |= O_FREEING;
                /* a count > 1 here means someone still holds a reference
                 * at shutdown - report the leak. */
                if (atomic_read(&obj->count) > 1) {
                        CERROR("obj "DLID4" has count > 1 (%d)\n",
                               OLID4(&obj->id), atomic_read(&obj->count));

        spin_unlock(&obj_list_lock);