1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 # define EXPORT_SYMTAB
25 #define DEBUG_SUBSYSTEM S_LMV
27 #include <linux/slab.h>
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/slab.h>
31 #include <linux/pagemap.h>
32 #include <asm/div64.h>
33 #include <linux/seq_file.h>
35 #include <liblustre.h>
38 #include <linux/obd_support.h>
39 #include <linux/lustre_lib.h>
40 #include <linux/lustre_net.h>
41 #include <linux/lustre_idl.h>
42 #include <linux/lustre_dlm.h>
43 #include <linux/lustre_mds.h>
44 #include <linux/obd_class.h>
45 #include <linux/obd_ost.h>
46 #include <linux/lprocfs_status.h>
47 #include <linux/lustre_fsfilt.h>
48 #include <linux/obd_lmv.h>
49 #include "lmv_internal.h"
/* Slab cache for struct lmv_obj allocations plus a counter of live
 * objects; both are defined in another compilation unit. */
52 extern kmem_cache_t *obj_cache;
53 extern atomic_t obj_cache_count;
55 /* object list and its guard. */
/* Global list of all cached lmv objects. All traversal and mutation of
 * this list in the functions below happens under obj_list_lock. */
56 static LIST_HEAD(obj_list);
57 static spinlock_t obj_list_lock = SPIN_LOCK_UNLOCKED;
59 /* creates new obj on passed @id and @mea. */
/* NOTE(review): this extract is incomplete -- the embedded original line
 * numbers jump (61 -> 67, 73 -> 77, 103 -> 109, ...), so the return type,
 * remaining parameters, braces, allocation-failure checks and the return
 * statement are not visible here. Comments describe only the visible lines. */
61 lmv_alloc_obj(struct obd_device *obd,
67 unsigned int obj_size;
68 struct lmv_obd *lmv = &obd->u.lmv;
/* only the two known mea hash magics are accepted */
70 LASSERT(mea->mea_magic == MEA_MAGIC_LAST_CHAR
71 || mea->mea_magic == MEA_MAGIC_ALL_CHARS);
/* the object shell comes from the dedicated slab cache; the live-object
 * counter is bumped to match (decremented again in lmv_free_obj()) */
73 OBD_SLAB_ALLOC(obj, obj_cache, GFP_NOFS, sizeof(*obj));
77 atomic_inc(&obj_cache_count);
82 obj->hashtype = mea->mea_magic;
84 init_MUTEX(&obj->guard);
85 atomic_set(&obj->count, 0);
86 obj->objcount = mea->mea_count;
/* the sub-object array is sized by the full target count of the LMV
 * device, not by mea_count -- only the first mea_count slots get filled */
88 obj_size = sizeof(struct lmv_inode) *
89 lmv->desc.ld_tgt_count;
91 OBD_ALLOC(obj->objs, obj_size);
95 memset(obj->objs, 0, obj_size);
/* copy the sub-object ids out of the mea; each must carry a valid
 * inode number and fid */
98 for (i = 0; i < mea->mea_count; i++) {
99 CDEBUG(D_OTHER, "subobj "DLID4"\n",
100 OLID4(&mea->mea_ids[i]));
101 obj->objs[i].id = mea->mea_ids[i];
102 LASSERT(id_ino(&obj->objs[i].id));
103 LASSERT(id_fid(&obj->objs[i].id));
/* NOTE(review): presumably an error path freeing the object shell when
 * allocating obj->objs failed -- surrounding lines are missing, confirm
 * against the full file */
109 OBD_FREE(obj, sizeof(*obj));
113 /* destroy passed @obj. */
/* Frees the sub-object array and returns the object shell to the slab
 * cache. NOTE(review): return type, braces and any trailing lines are not
 * visible in this extract (embedded line numbers jump). */
115 lmv_free_obj(struct lmv_obj *obj)
117 unsigned int obj_size;
118 struct lmv_obj *lmv = &obj->obd->u.lmv;
/* caller must have dropped all references before destroying */
120 LASSERT(!atomic_read(&obj->count));
/* must match the size used in lmv_alloc_obj(): full target count,
 * not obj->objcount */
122 obj_size = sizeof(struct lmv_inode) *
123 lmv->desc.ld_tgt_count;
125 OBD_FREE(obj->objs, obj_size);
126 OBD_SLAB_FREE(obj, obj_cache, sizeof(*obj));
127 atomic_dec(&obj_cache_count);
/* Takes a reference on @obj and links it onto the global obj_list.
 * NOTE(review): presumably must be called with obj_list_lock held (the
 * visible caller lmv_add_obj() takes that lock) -- confirm against the
 * full file; braces are missing from this extract. */
131 __add_obj(struct lmv_obj *obj)
133 atomic_inc(&obj->count);
134 list_add(&obj->list, &obj_list);
/* Locked wrapper: adds @obj to the global list under obj_list_lock.
 * NOTE(review): the line between lock and unlock (presumably the
 * __add_obj(obj) call) is missing from this extract. */
138 lmv_add_obj(struct lmv_obj *obj)
140 spin_lock(&obj_list_lock);
142 spin_unlock(&obj_list_lock);
/* Unlinks @obj from the global obj_list. NOTE(review): presumably called
 * with obj_list_lock held (see lmv_del_obj() below) and presumably also
 * drops a reference on lines not visible in this extract -- confirm. */
146 __del_obj(struct lmv_obj *obj)
148 list_del(&obj->list);
/* Locked wrapper: removes @obj from the global list under obj_list_lock.
 * NOTE(review): the body line between lock and unlock (presumably the
 * __del_obj(obj) call) is missing from this extract. */
153 lmv_del_obj(struct lmv_obj *obj)
155 spin_lock(&obj_list_lock);
157 spin_unlock(&obj_list_lock);
/* Takes one more reference on @obj. NOTE(review): the return statement
 * (presumably "return obj;") and braces are missing from this extract. */
160 static struct lmv_obj *
161 __get_obj(struct lmv_obj *obj)
163 LASSERT(obj != NULL);
164 atomic_inc(&obj->count);
/* Locked wrapper around __get_obj(): takes a reference under
 * obj_list_lock. NOTE(review): the call between lock and unlock is
 * missing from this extract. */
169 lmv_get_obj(struct lmv_obj *obj)
171 spin_lock(&obj_list_lock);
173 spin_unlock(&obj_list_lock);
/* Drops one reference on @obj; on the last reference it logs and
 * (presumably, on lines missing from this extract) destroys the object
 * via lmv_free_obj() -- TODO confirm against the full file. */
178 __put_obj(struct lmv_obj *obj)
182 if (atomic_dec_and_test(&obj->count)) {
183 struct lustre_id *id = &obj->id;
184 CDEBUG(D_OTHER, "last reference to "DLID4" - "
185 "destroying\n", OLID4(id));
/* Locked wrapper around __put_obj(): drops a reference under
 * obj_list_lock. NOTE(review): the call between lock and unlock is
 * missing from this extract. */
191 lmv_put_obj(struct lmv_obj *obj)
193 spin_lock(&obj_list_lock);
195 spin_unlock(&obj_list_lock);
/* Scans the global obj_list for an object whose fid matches @id and
 * returns it with an extra reference (via __get_obj()), skipping objects
 * already marked O_FREEING. All visible callers (lmv_grab_obj(),
 * __create_obj(), lmv_delete_obj()) hold obj_list_lock around this call.
 * NOTE(review): @obd is unused in the visible lines; the not-found return
 * (presumably NULL) is on lines missing from this extract. */
198 static struct lmv_obj *
199 __grab_obj(struct obd_device *obd, struct lustre_id *id)
202 struct list_head *cur;
204 list_for_each(cur, &obj_list) {
205 obj = list_entry(cur, struct lmv_obj, list);
207 /* check if object is in progress of destroying. If so - skip
209 if (obj->state & O_FREEING)
212 /* check if this is what we're looking for. */
213 if (id_equal_fid(&obj->id, id))
214 return __get_obj(obj);
/* Locked lookup: finds a cached object matching @id under obj_list_lock
 * and returns it referenced (or NULL-equivalent from __grab_obj()).
 * NOTE(review): the return statement is on lines missing from this
 * extract. */
221 lmv_grab_obj(struct obd_device *obd, struct lustre_id *id)
226 spin_lock(&obj_list_lock);
227 obj = __grab_obj(obd, id);
228 spin_unlock(&obj_list_lock);
233 /* looks in objects list for an object that matches passed @id. If it is not
234 * found -- creates it using passed @mea and puts onto list. */
/* NOTE(review): allocation-failure handling, the __add_obj() insertion and
 * the return statements are on lines missing from this extract. */
235 static struct lmv_obj *
236 __create_obj(struct obd_device *obd, struct lustre_id *id, struct mea *mea)
238 struct lmv_obj *new, *obj;
/* fast path: object may already be cached */
241 obj = lmv_grab_obj(obd, id);
245 /* no such object yet, allocate and initialize it. */
246 new = lmv_alloc_obj(obd, id, mea);
250 /* check if someone create it already while we were dealing with
251 * allocating @obj. */
/* re-check under the lock to close the race window opened while
 * allocating outside it */
252 spin_lock(&obj_list_lock);
253 obj = __grab_obj(obd, id);
255 /* someone created it already - put @obj and getting out. */
257 spin_unlock(&obj_list_lock);
264 spin_unlock(&obj_list_lock);
266 CDEBUG(D_OTHER, "new obj in lmv cache: "DLID4"\n",
273 /* creates object from passed @id and @mea. If @mea is NULL, it will be
274 * obtained from correct MDT and used for constructing the object. */
/* NOTE(review): several lines are missing from this extract (locals such
 * as mea/md/valid/mealen, NULL checks, the return statement and some
 * braces) -- comments below cover only the visible lines. */
276 lmv_create_obj(struct obd_export *exp, struct lustre_id *id, struct mea *mea)
278 struct obd_device *obd = exp->exp_obd;
279 struct lmv_obd *lmv = &obd->u.lmv;
280 struct ptlrpc_request *req = NULL;
286 CDEBUG(D_OTHER, "get mea for "DLID4" and create lmv obj\n",
292 CDEBUG(D_OTHER, "mea isn't passed in, get it now\n");
293 mealen = MEA_SIZE_LMV(lmv);
295 /* time to update mea of parent id */
/* fetch striping EA from the MDT that owns @id (indexed by id group) */
297 valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
299 rc = md_getattr(lmv->tgts[id_group(id)].ltd_exp,
300 id, valid, mealen, &req);
302 CERROR("md_getattr() failed, error %d\n", rc);
303 GOTO(cleanup, obj = ERR_PTR(rc));
/* unpack the reply into lustre_md to get at the mea */
306 rc = mdc_req2lustre_md(exp, req, 0, NULL, &md);
308 CERROR("mdc_req2lustre_md() failed, error %d\n", rc);
309 GOTO(cleanup, obj = ERR_PTR(rc));
/* no striping EA present on the object */
313 GOTO(cleanup, obj = ERR_PTR(-ENODATA));
318 /* got mea, now create obj for it. */
319 obj = __create_obj(obd, id, mea);
321 CERROR("Can't create new object "DLID4"\n",
323 GOTO(cleanup, obj = ERR_PTR(-ENOMEM));
/* cleanup label: release the getattr request on all paths */
328 ptlrpc_req_finished(req);
332 /* looks for object with @id and orders to destroy it. It is possible the
333 * object will not be destroyed right now, because it is still using by
334 * someone. In this case it will be marked as "freeing" and will not be
335 * accessible anymore for subsequent callers of lmv_grab_obj(). */
/* NOTE(review): the lines that unlink the object and drop the references
 * taken by __grab_obj() are missing from this extract -- confirm against
 * the full file. */
337 lmv_delete_obj(struct obd_export *exp, struct lustre_id *id)
339 struct obd_device *obd = exp->exp_obd;
344 spin_lock(&obj_list_lock);
345 obj = __grab_obj(obd, id);
/* from now on __grab_obj() skips this object */
347 obj->state |= O_FREEING;
353 spin_unlock(&obj_list_lock);
/* Object manager setup hook for @obd. Only the sanity assert and a debug
 * message are visible in this extract; any further initialization is on
 * missing lines. */
358 lmv_setup_mgr(struct obd_device *obd)
360 LASSERT(obd != NULL);
362 CDEBUG(D_INFO, "LMV object manager setup (%s)\n",
/* Object manager teardown for @obd: walks the global obj_list under
 * obj_list_lock, marks every object O_FREEING and complains about objects
 * that are still referenced (count > 1). NOTE(review): the lines that
 * actually unlink/release each object are missing from this extract;
 * list_for_each_safe suggests entries are removed during the walk --
 * confirm against the full file. */
369 lmv_cleanup_mgr(struct obd_device *obd)
372 struct list_head *cur, *tmp;
374 CDEBUG(D_INFO, "LMV object manager cleanup (%s)\n",
377 spin_lock(&obj_list_lock);
378 list_for_each_safe(cur, tmp, &obj_list) {
379 obj = list_entry(cur, struct lmv_obj, list);
384 obj->state |= O_FREEING;
/* a count above the single reference taken by the walk means someone
 * still holds this object at cleanup time */
385 if (atomic_read(&obj->count) > 1) {
386 CERROR("obj "DLID4" has count > 1 (%d)\n",
387 OLID4(&obj->id), atomic_read(&obj->count));
391 spin_unlock(&obj_list_lock);