1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 # define EXPORT_SYMTAB
25 #define DEBUG_SUBSYSTEM S_LMV
27 #include <linux/slab.h>
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/slab.h>
31 #include <linux/pagemap.h>
32 #include <asm/div64.h>
33 #include <linux/seq_file.h>
35 #include <liblustre.h>
38 #include <linux/obd_support.h>
39 #include <linux/lustre_lib.h>
40 #include <linux/lustre_net.h>
41 #include <linux/lustre_idl.h>
42 #include <linux/lustre_dlm.h>
43 #include <linux/lustre_mds.h>
44 #include <linux/obd_class.h>
45 #include <linux/obd_ost.h>
46 #include <linux/lprocfs_status.h>
47 #include <linux/lustre_fsfilt.h>
48 #include <linux/obd_lmv.h>
49 #include "lmv_internal.h"
/* Global cache of all LMV (striped-directory) objects, and the spinlock
 * that protects both the list and each object's O_FREEING state bit.
 * NOTE(review): a single global lock serializes every lookup/insert/remove;
 * acceptable only if the object count stays small — confirm. */
51 static LIST_HEAD(lmv_obj_list);
52 static spinlock_t lmv_obj_list_lock = SPIN_LOCK_UNLOCKED;
54 /* creates new obj on passed @fid and @mea. */
/* Allocates and initializes an lmv_obj: guard mutex, zero refcount, and a
 * sub-object (per-stripe) array whose fids are copied from @mea.
 * NOTE(review): this listing elides lines between the numbered statements —
 * the OBD_ALLOC failure checks and the return paths are presumably on the
 * missing lines; comments below describe only what is visible. */
56 lmv_alloc_obj(struct obd_device *obd, struct ll_fid *fid,
61 unsigned int obj_size;
62 struct lmv_obd *lmv = &obd->u.lmv;
64 OBD_ALLOC(obj, sizeof(*obj));
/* refcount starts at 0; callers take references explicitly via __get_obj() */
72 init_MUTEX(&obj->guard);
73 atomic_set(&obj->count, 0);
74 obj->objcount = mea->mea_count;
/* the array is sized for ALL configured targets (ld_tgt_count), not just the
 * mea_count stripes actually present — lmv_free_obj() must use the same size */
76 obj_size = sizeof(struct lmv_inode) *
77 lmv->desc.ld_tgt_count;
79 OBD_ALLOC(obj->objs, obj_size);
83 memset(obj->objs, 0, obj_size);
/* copy each stripe's fid out of the mea */
86 for (i = 0; i < mea->mea_count; i++) {
87 CDEBUG(D_OTHER, "subobj %lu/%lu/%lu\n",
88 (unsigned long)mea->mea_fids[i].mds,
89 (unsigned long)mea->mea_fids[i].id,
90 (unsigned long)mea->mea_fids[i].generation);
91 obj->objs[i].fid = mea->mea_fids[i];
/* error path (reached from elided failure lines): free the object shell */
97 OBD_FREE(obj, sizeof(*obj));
101 /* destroys passed @obj. */
/* Frees the sub-object array and the object itself.  The array size is
 * recomputed with the same ld_tgt_count formula used in lmv_alloc_obj();
 * the two must stay in sync or OBD_FREE's size accounting breaks. */
103 lmv_free_obj(struct lmv_obj *obj)
105 unsigned int obj_size;
106 struct lmv_obd *lmv = &obj->obd->u.lmv;
108 obj_size = sizeof(struct lmv_inode) *
109 lmv->desc.ld_tgt_count;
111 OBD_FREE(obj->objs, obj_size);
112 OBD_FREE(obj, sizeof(*obj));
/* Link @obj into the global cache, taking the list's reference on it.
 * Caller must hold lmv_obj_list_lock (see lmv_add_obj()). */
116 __add_obj(struct lmv_obj *obj)
118 atomic_inc(&obj->count);
119 list_add(&obj->list, &lmv_obj_list);
/* Locked wrapper: add @obj to the global cache under lmv_obj_list_lock. */
123 lmv_add_obj(struct lmv_obj *obj)
125 spin_lock(&lmv_obj_list_lock);
127 spin_unlock(&lmv_obj_list_lock);
/* Unlink @obj from the global cache.  Caller must hold lmv_obj_list_lock.
 * NOTE(review): the body of the !O_FREEING branch (lines 134-135) is elided
 * in this listing — presumably it asserts or warns before list_del(). */
131 __del_obj(struct lmv_obj *obj)
133 if (!(obj->state & O_FREEING))
136 list_del(&obj->list);
/* Locked wrapper: remove @obj from the global cache under lmv_obj_list_lock. */
141 lmv_del_obj(struct lmv_obj *obj)
143 spin_lock(&lmv_obj_list_lock);
145 spin_unlock(&lmv_obj_list_lock);
/* Take a reference on @obj; returns @obj (return statement is on an elided
 * line).  Caller must hold lmv_obj_list_lock to make the grab atomic with
 * the O_FREEING check in __grab_obj(). */
148 static struct lmv_obj *
149 __get_obj(struct lmv_obj *obj)
152 atomic_inc(&obj->count);
/* Locked wrapper: take a reference on @obj under lmv_obj_list_lock. */
157 lmv_get_obj(struct lmv_obj *obj)
159 spin_lock(&lmv_obj_list_lock);
161 spin_unlock(&lmv_obj_list_lock);
/* Drop a reference on @obj; when the count hits zero, log and destroy it
 * (the lmv_free_obj() call is on an elided line).  Caller must hold
 * lmv_obj_list_lock — see lmv_put_obj(). */
167 __put_obj(struct lmv_obj *obj)
171 if (atomic_dec_and_test(&obj->count)) {
172 struct ll_fid *fid = &obj->fid;
173 CDEBUG(D_OTHER, "last reference to %lu/%lu/%lu - destroying\n",
174 (unsigned long)fid->mds, (unsigned long)fid->id,
175 (unsigned long)fid->generation);
/* Locked wrapper: drop a reference on @obj under lmv_obj_list_lock. */
183 lmv_put_obj(struct lmv_obj *obj)
185 spin_lock(&lmv_obj_list_lock);
187 spin_unlock(&lmv_obj_list_lock);
/* Search the global cache for an object matching @fid; returns it with a
 * reference taken, or NULL (fall-through return elided) if absent or being
 * freed.  Caller must hold lmv_obj_list_lock.  NOTE(review): @obd is not
 * used on any visible line — presumably for a per-device check on the
 * elided lines; confirm. */
188 static struct lmv_obj *
189 __grab_obj(struct obd_device *obd, struct ll_fid *fid)
192 struct list_head *cur;
194 list_for_each(cur, &lmv_obj_list) {
195 obj = list_entry(cur, struct lmv_obj, list);
197 /* check if object is in progress of destroying. If so - skip
199 if (obj->state & O_FREEING)
202 /* check if this is what we're looking for. */
203 if (fid_equal(&obj->fid, fid))
204 return __get_obj(obj);
/* Locked wrapper: look up @fid in the cache under lmv_obj_list_lock and
 * return the referenced object (or NULL). */
211 lmv_grab_obj(struct obd_device *obd, struct ll_fid *fid)
216 spin_lock(&lmv_obj_list_lock);
217 obj = __grab_obj(obd, fid);
218 spin_unlock(&lmv_obj_list_lock);
223 /* looks in objects list for an object that matches passed @fid. If it is not
224 * found -- creates it using passed @mea and puts onto list. */
/* Lookup-or-insert.  Allocation happens outside the lock, so after
 * allocating we re-check the cache under the lock to resolve the race
 * where another thread inserted the same fid concurrently; in that case
 * the freshly allocated @new is discarded (free is on an elided line). */
225 static struct lmv_obj *
226 __create_obj(struct obd_device *obd, struct ll_fid *fid, struct mea *mea)
228 struct lmv_obj *new, *obj;
/* fast path: already cached */
231 obj = lmv_grab_obj(obd, fid);
235 /* no such object yet, allocate and initialize it. */
236 new = lmv_alloc_obj(obd, fid, mea);
240 /* check if someone create it already while we were dealing with
241 * allocating @obj. */
242 spin_lock(&lmv_obj_list_lock);
243 obj = __grab_obj(obd, fid);
245 /* someone created it already - put @obj and getting out. */
247 spin_unlock(&lmv_obj_list_lock);
/* won the race: @new goes onto the list (insert is on an elided line) */
254 spin_unlock(&lmv_obj_list_lock);
256 CDEBUG(D_OTHER, "new obj in lmv cache: %lu/%lu/%lu\n",
257 (unsigned long)fid->mds, (unsigned long)fid->id,
258 (unsigned long)fid->generation);
264 /* creates object from passed @fid and @mea. If @mea is NULL, it will be
265 * obtained from correct MDT and used for constructing the object. */
/* Returns the created object or an ERR_PTR() code.  When @mea is NULL the
 * directory EA is fetched from the MDT that owns @fid via md_getattr() and
 * unpacked with mdc_req2lustre_md(); the request is released on the cleanup
 * path below regardless of outcome. */
267 lmv_create_obj(struct obd_export *exp, struct ll_fid *fid, struct mea *mea)
269 struct obd_device *obd = exp->exp_obd;
270 struct lmv_obd *lmv = &obd->u.lmv;
271 struct ptlrpc_request *req = NULL;
277 CDEBUG(D_OTHER, "get mea for %lu/%lu/%lu and create lmv obj\n",
278 (unsigned long)fid->mds, (unsigned long)fid->id,
279 (unsigned long)fid->generation);
/* no mea supplied by the caller: fetch it from the owning MDT */
284 CDEBUG(D_OTHER, "mea isn't passed in, get it now\n");
285 mealen = MEA_SIZE_LMV(lmv);
287 /* time to update mea of parent fid */
/* ask for the directory EA only; fid->mds selects the target export */
291 valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
292 rc = md_getattr(lmv->tgts[fid->mds].ltd_exp, fid,
293 valid, mealen, &req);
295 CERROR("md_getattr() failed, error %d\n", rc);
296 GOTO(cleanup, obj = ERR_PTR(rc));
/* unpack the reply into md; md.mea presumably becomes @mea on elided lines */
299 rc = mdc_req2lustre_md(exp, req, 0, NULL, &md);
301 CERROR("mdc_req2lustre_md() failed, error %d\n", rc);
302 GOTO(cleanup, obj = ERR_PTR(rc));
/* reply carried no mea (non-striped dir?) — nothing to build */
306 GOTO(cleanup, obj = ERR_PTR(-ENODATA));
311 /* got mea, now create obj for it. */
312 obj = __create_obj(obd, fid, mea);
314 CERROR("Can't create new object %lu/%lu/%lu\n",
315 (unsigned long)fid->mds, (unsigned long)fid->id,
316 (unsigned long)fid->generation);
317 GOTO(cleanup, obj = ERR_PTR(-ENOMEM));
/* cleanup: safe when req is NULL (mea was passed in) */
321 ptlrpc_req_finished(req);
325 /* looks for object with @fid and orders to destroy it. It possible the object
326 * will not be destroyed right now, because it is still using by someone. In
327 * this case it will be marked as "freeing" and will not be accessible anymore
328 * for subsequent callers of lmv_grab_obj(). */
/* Marks the object O_FREEING under the list lock, so no new lookups can
 * find it; actual teardown (del + put of the references, on elided lines)
 * happens once the remaining holders drop their refs. */
330 lmv_delete_obj(struct obd_export *exp, struct ll_fid *fid)
332 struct obd_device *obd = exp->exp_obd;
337 spin_lock(&lmv_obj_list_lock);
/* __grab_obj takes a ref we own while flagging the object */
339 obj = __grab_obj(obd, fid);
341 obj->state |= O_FREEING;
348 spin_unlock(&lmv_obj_list_lock);
/* One-time setup of the LMV object manager for @obd.  Only the banner is
 * visible here; the CWARN's argument list continues on an elided line
 * (presumably obd->obd_name). */
353 lmv_setup_mgr(struct obd_device *obd)
355 CWARN("LMV object manager setup (%s)\n",
/* Teardown of the LMV object manager: walk the global cache under the lock
 * and mark every object O_FREEING (the matching del/put calls are on elided
 * lines).  list_for_each_safe is used because entries are removed while
 * iterating. */
361 lmv_cleanup_mgr(struct obd_device *obd)
364 struct list_head *cur, *tmp;
366 CWARN("LMV object manager cleanup (%s)\n",
369 spin_lock(&lmv_obj_list_lock);
370 list_for_each_safe(cur, tmp, &lmv_obj_list) {
371 obj = list_entry(cur, struct lmv_obj, list);
376 obj->state |= O_FREEING;
379 spin_unlock(&lmv_obj_list_lock);