1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/cmm/cmm_split.c
7 * Copyright (c) 2006 Cluster File Systems, Inc.
8 * Author: Alex thomas <alex@clusterfs.com>
9 * Wang Di <wangdi@clusterfs.com>
11 * This file is part of the Lustre file system, http://www.lustre.org
12 * Lustre is a trademark of Cluster File Systems, Inc.
14 * You may have signed or agreed to another license before downloading
15 * this software. If so, you are bound by the terms and conditions
16 * of that agreement, and the following does not apply to you. See the
17 * LICENSE file included with this distribution for more information.
19 * If you did not agree to a different license, then this copy of Lustre
20 * is open source software; you can redistribute it and/or modify it
21 * under the terms of version 2 of the GNU General Public License as
22 * published by the Free Software Foundation.
24 * In either case, Lustre is distributed in the hope that it will be
25 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
26 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * license text for more details.
31 # define EXPORT_SYMTAB
34 #define DEBUG_SUBSYSTEM S_MDS
36 #include <obd_class.h>
37 #include <lustre_fid.h>
38 #include <lustre_mds.h>
39 #include <lustre_idl.h>
40 #include "cmm_internal.h"
41 #include "mdc_internal.h"
43 #define CMM_NO_SPLIT_EXPECTED 0
44 #define CMM_EXPECT_SPLIT 1
45 #define CMM_NO_SPLITTABLE 2
/* Return the fid of a cmm object, taken straight from the lu_object header
 * embedded in its md/lu object stack. */
51 static inline struct lu_fid* cmm2_fid(struct cmm_object *obj)
53 return &(obj->cmo_obj.mo_lu.lo_header->loh_fid);
/* Decide whether directory @mo should be split across MDS targets.
 *
 * Returns CMM_EXPECT_SPLIT when the directory is a split candidate,
 * CMM_NO_SPLIT_EXPECTED when any of the checks below rules it out.
 * (Some lines of this function are not visible in this chunk; the error
 * path after mdo_root_get and the cleanup label are among them.) */
56 static int cmm_expect_splitting(const struct lu_context *ctx,
57 struct md_object *mo, struct md_attr *ma)
59 struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
60 struct lu_fid *fid = NULL;
61 int rc = CMM_EXPECT_SPLIT;
/* No remote targets configured -- nothing to split onto. */
64 if (cmm->cmm_tgt_count == 0)
65 GOTO(cleanup, rc = CMM_NO_SPLIT_EXPECTED);
/* Directory is still smaller than the split threshold. */
67 if (ma->ma_attr.la_size < SPLIT_SIZE)
68 GOTO(cleanup, rc = CMM_NO_SPLIT_EXPECTED);
/* NOTE(review): the condition guarding this GOTO is on a line not shown
 * here -- presumably "already has an LMV EA", i.e. already split. */
71 GOTO(cleanup, rc = CMM_NO_SPLIT_EXPECTED);
/* Ask the child (MDD) layer for the root fid; assumes the missing lines
 * allocate/point @fid at valid storage before use -- not visible here. */
73 rc = cmm_child_ops(cmm)->mdo_root_get(ctx, cmm->cmm_child,
78 rc = CMM_EXPECT_SPLIT;
/* Never split the filesystem root directory itself. */
80 if (lu_fid_eq(fid, cmm2_fid(md2cmm_obj(mo))))
81 GOTO(cleanup, rc = CMM_NO_SPLIT_EXPECTED);
/* Byte size of an LMV stripe EA holding @stripes fids: fixed header plus
 * one lu_fid per stripe. */
89 #define cmm_md_size(stripes) \
90 (sizeof(struct lmv_stripe_md) + (stripes) * sizeof(struct lu_fid))
/* Allocate @count object fids, one from each remote MDC target, into the
 * caller-supplied @fid array, and register each new sequence with the FLD
 * client so the fids are locatable later.
 *
 * @count must equal cmm->cmm_tgt_count (one fid per remote target).
 * (The loop body's error handling and the fid array indexing between the
 * two calls are on lines not visible in this chunk.) */
92 static int cmm_alloc_fid(const struct lu_context *ctx, struct cmm_device *cmm,
93 struct lu_fid *fid, int count)
95 struct mdc_device *mc, *tmp;
98 LASSERT(count == cmm->cmm_tgt_count);
99 /* FIXME: this spin_lock maybe not proper,
100 * because fid_alloc may need RPC */
101 spin_lock(&cmm->cmm_tgt_guard);
/* Walk every remote target; _safe variant tolerates list changes, though
 * nothing visible here removes entries. */
102 list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
/* The local MDS must never appear in the remote-target list. */
104 LASSERT(cmm->cmm_local_num != mc->mc_num);
106 rc = obd_fid_alloc(mc->mc_desc.cl_exp, &fid[i], NULL);
/* Tell the FLD where the new fid's sequence lives. */
110 ls = cmm->cmm_md_dev.md_lu_dev.ld_site;
111 rc = fld_client_create(ls->ls_client_fld,
116 spin_unlock(&cmm->cmm_tgt_guard);
/* Second unlock is on the error path; the intervening lines (error label,
 * RETURN) are not visible in this chunk. */
121 spin_unlock(&cmm->cmm_tgt_guard);
/* Look up (or instantiate) the cmm object for fid @f on device @d.
 *
 * Finds the compound lu_object by fid on the site, then locates the
 * cmm-layer slice inside it via the device type. Caller must release the
 * result with cmm_object_put(). The IS_ERR handling between the find and
 * locate calls is on lines not visible in this chunk. */
128 struct cmm_object *cmm_object_find(const struct lu_context *ctxt,
129 struct cmm_device *d,
130 const struct lu_fid *f)
133 struct cmm_object *m;
136 o = lu_object_find(ctxt, d->cmm_md_dev.md_lu_dev.ld_site, f);
/* Error cookie is propagated unchanged to the caller via the cast. */
138 m = (struct cmm_object *)o;
/* Extract the cmm slice of the compound object. */
140 m = lu2cmm_obj(lu_object_locate(o->lo_header,
141 d->cmm_md_dev.md_lu_dev.ld_type));
/* Drop the reference taken by cmm_object_find(). */
145 static inline void cmm_object_put(const struct lu_context *ctxt,
146 struct cmm_object *o)
148 lu_object_put(ctxt, &o->cmo_obj.mo_lu);
/* Create one slave directory object with fid @fid on its (remote) MDS,
 * stamping it with the slave LMV EA @lmv of @lmv_size bytes.
 *
 * Finds the cmm object for @fid, fills a creation spec marked
 * MDS_CREATE_SLAVE_OBJ so the target knows this is a split slave, and
 * issues the create through the next layer. The spec allocation and the
 * mo_object_create argument tail are on lines not visible in this chunk. */
151 static int cmm_creat_remote_obj(const struct lu_context *ctx,
152 struct cmm_device *cmm,
153 struct lu_fid *fid, struct md_attr *ma,
154 const struct lmv_stripe_md *lmv,
157 struct cmm_object *obj;
158 struct md_create_spec *spec;
162 obj = cmm_object_find(ctx, cmm, fid);
164 RETURN(PTR_ERR(obj));
/* Pass the slave EA so the remote object is created pre-striped. */
168 spec->u.sp_ea.fid = fid;
169 spec->u.sp_ea.eadata = lmv;
170 spec->u.sp_ea.eadatalen = lmv_size;
171 spec->sp_cr_flags |= MDS_CREATE_SLAVE_OBJ;
172 rc = mo_object_create(ctx, md_object_next(&obj->cmo_obj),
176 cmm_object_put(ctx, obj);
/* Step 2 of directory split: build the master LMV EA for @mo and create
 * one slave object on every remote target.
 *
 * The master LMV holds tgt_count+1 fids: slot 0 is the master directory's
 * own fid, slots 1..tgt_count are freshly allocated remote fids. Each
 * slave is created with an empty (mea_count == 0) slave LMV. On success
 * the LMV is left in ma->ma_lmv for the caller to install as an xattr
 * (see cml_try_to_split); it is freed there after the split finishes. */
180 static int cmm_create_slave_objects(const struct lu_context *ctx,
181 struct md_object *mo, struct md_attr *ma)
183 struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
184 struct lmv_stripe_md *lmv = NULL, *slave_lmv = NULL;
186 struct lu_fid *lf = cmm2_fid(md2cmm_obj(mo));
189 lmv_size = cmm_md_size(cmm->cmm_tgt_count + 1);
191 /* This lmv will be free after finish splitting. */
192 OBD_ALLOC(lmv, lmv_size);
196 lmv->mea_master = cmm->cmm_local_num;
197 lmv->mea_magic = MEA_MAGIC_HASH_SEGMENT;
198 lmv->mea_count = cmm->cmm_tgt_count + 1;
/* Slot 0 is always the master (local) directory itself. */
200 lmv->mea_ids[0] = *lf;
/* Fill slots 1..tgt_count with fids allocated from the remote targets. */
202 rc = cmm_alloc_fid(ctx, cmm, &lmv->mea_ids[1], cmm->cmm_tgt_count);
206 OBD_ALLOC_PTR(slave_lmv);
208 GOTO(cleanup, rc = -ENOMEM);
/* Slaves start with an empty stripe EA; the master holds the layout. */
210 slave_lmv->mea_master = cmm->cmm_local_num;
211 slave_lmv->mea_magic = MEA_MAGIC_HASH_SEGMENT;
212 slave_lmv->mea_count = 0;
213 for (i = 1; i < cmm->cmm_tgt_count + 1; i ++) {
214 rc = cmm_creat_remote_obj(ctx, cmm, &lmv->mea_ids[i], ma,
/* NOTE(review): sizeof(slave_lmv) is the size of the POINTER, not the
 * struct -- this looks like it should be sizeof(*slave_lmv) or
 * cmm_md_size(0); confirm against the remote-create EA contract. */
215 slave_lmv, sizeof(slave_lmv));
220 ma->ma_lmv_size = lmv_size;
224 OBD_FREE_PTR(slave_lmv);
/* Ship the first @len bytes of the readdir page in @rdpg to the slave
 * object identified by @fid on its remote MDS, via the MDC write path. */
228 static int cmm_send_split_pages(const struct lu_context *ctx,
229 struct md_object *mo, struct lu_rdpg *rdpg,
230 struct lu_fid *fid, int len)
232 struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
233 struct cmm_object *obj;
237 obj = cmm_object_find(ctx, cmm, fid);
239 RETURN(PTR_ERR(obj));
/* Only page 0 is ever sent -- the split path works one page at a time
 * (see the rp_npages == 1 assertion in cmm_split_entries). */
241 rc = mdc_send_page(cmm, ctx, md_object_next(&obj->cmo_obj),
242 rdpg->rp_pages[0], len);
243 cmm_object_put(ctx, obj);
/* Remove from the master directory every entry in the readdir page whose
 * hash is below @hash_end (those entries are migrating to a slave), and
 * report through @len how many bytes of the page were consumed.
 *
 * "." and ".." are never removed. The page in rdpg->rp_pages[0] must
 * contain a lu_dirpage produced by mo_readpage. */
247 static int cmm_remove_entries(const struct lu_context *ctx,
248 struct md_object *mo, struct lu_rdpg *rdpg,
249 __u32 hash_end, __u32 *len)
251 struct lu_dirpage *dp;
252 struct lu_dirent *ent;
256 kmap(rdpg->rp_pages[0]);
257 dp = page_address(rdpg->rp_pages[0]);
258 for (ent = lu_dirent_start(dp); ent != NULL;
259 ent = lu_dirent_next(ent)) {
260 if (ent->lde_hash < hash_end) {
/* Skip "." and ".." -- they stay on the master. */
261 if (strncmp(ent->lde_name, ".", ent->lde_namelen) &&
262 strncmp(ent->lde_name, "..", ent->lde_namelen)) {
264 /* FIXME: Here we allocate name for each name,
265 * maybe stupid, but can not find better way.
266 * will find better way */
/* lde_name is not NUL-terminated; copy into a terminated buffer. */
267 OBD_ALLOC(name, ent->lde_namelen + 1);
268 memcpy(name, ent->lde_name, ent->lde_namelen);
269 rc = mdo_name_remove(ctx, md_object_next(mo),
271 OBD_FREE(name, ent->lde_namelen + 1);
274 /* FIXME: Do not know why it return -ENOENT
/* If we stopped inside the page, the consumed length is the byte offset
 * of the first entry NOT below hash_end.
 * NOTE(review): casting pointers through __u32 truncates on 64-bit --
 * the difference should be computed as (char *)ent - (char *)dp. */
281 if (ent != lu_dirent_start(dp))
282 *len = (int)((__u32)ent - (__u32)dp);
/* Whole page consumed. */
288 *len = CFS_PAGE_SIZE;
/* NOTE(review): only rp_pages[0] was kmapped above, but 'i' is used to
 * kunmap here -- looks like it should be rp_pages[0]; confirm whether a
 * loop variable from missing lines makes this correct. */
290 kunmap(rdpg->rp_pages[i]);
/* Migrate all entries of @mo in the hash range [rdpg->rp_hash, @end) to
 * the slave object @lf: repeatedly read one directory page, remove the
 * migrating entries from the master, send the page to the slave, and
 * advance rp_hash until the page's hash_end reaches @end or the readdir
 * reports end-of-directory (-E2BIG / -ERANGE). The surrounding loop
 * construct is on lines not visible in this chunk. */
294 static int cmm_split_entries(const struct lu_context *ctx, struct md_object *mo,
295 struct lu_rdpg *rdpg, struct lu_fid *lf,
301 LASSERTF(rdpg->rp_npages == 1, "Now Only support split 1 page each time"
302 "npages %d \n", rdpg->rp_npages);
303 /* Read splitted page and send them to the slave master */
305 struct lu_dirpage *ldp;
308 /* init page with '0' */
309 memset(kmap(rdpg->rp_pages[0]), 0, CFS_PAGE_SIZE);
310 kunmap(rdpg->rp_pages[0]);
312 rc = mo_readpage(ctx, md_object_next(mo), rdpg);
313 /* -E2BIG means it already reach the end of the dir */
315 if (rc == -E2BIG || rc == -ERANGE)
320 /* Remove the old entries */
321 rc = cmm_remove_entries(ctx, mo, rdpg, end, &len);
325 /* Send page to slave object */
327 rc = cmm_send_split_pages(ctx, mo, rdpg, lf, len);
/* Check how far this page reached; stop once we've covered [.., end). */
332 kmap(rdpg->rp_pages[0]);
333 ldp = page_address(rdpg->rp_pages[0]);
334 if (ldp->ldp_hash_end >= end) {
/* Continue from where this page ended. */
337 rdpg->rp_hash = ldp->ldp_hash_end;
338 kunmap(rdpg->rp_pages[0]);
/* The split path processes one directory page per iteration. */
343 #define SPLIT_PAGE_COUNT 1
/* Step 3 of directory split: divide the directory hash space into
 * tgt_count+1 equal segments and migrate the entries of segments
 * 1..tgt_count to the corresponding slave fids in ma->ma_lmv (segment 0
 * stays on the master). Allocates a one-page lu_rdpg as scratch for the
 * readpage/remove/send loop; the rdpg allocation and the error-path
 * labels are on lines not visible in this chunk. */
344 static int cmm_scan_and_split(const struct lu_context *ctx,
345 struct md_object *mo, struct md_attr *ma)
347 struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
349 struct lu_rdpg *rdpg = NULL;
356 rdpg->rp_npages = SPLIT_PAGE_COUNT;
357 rdpg->rp_count = CFS_PAGE_SIZE * rdpg->rp_npages;
359 OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
360 if (rdpg->rp_pages == NULL)
361 GOTO(free_rdpg, rc = -ENOMEM);
363 for (i = 0; i < rdpg->rp_npages; i++) {
/* Order-0 allocation: exactly one page per slot. */
364 rdpg->rp_pages[i] = alloc_pages(GFP_KERNEL, 0);
365 if (rdpg->rp_pages[i] == NULL)
366 GOTO(cleanup, rc = -ENOMEM);
/* Equal hash segments, one per stripe (master + each target). */
369 hash_segement = MAX_HASH_SIZE / (cmm->cmm_tgt_count + 1);
370 for (i = 1; i < cmm->cmm_tgt_count + 1; i++) {
371 struct lu_fid *lf = &ma->ma_lmv->mea_ids[i];
374 rdpg->rp_hash = i * hash_segement;
375 hash_end = rdpg->rp_hash + hash_segement;
376 rc = cmm_split_entries(ctx, mo, rdpg, lf, hash_end);
/* Cleanup: free any pages that were successfully allocated, then the
 * page-pointer array; the rdpg itself is freed past this chunk. */
381 for (i = 0; i < rdpg->rp_npages; i++)
382 if (rdpg->rp_pages[i] != NULL)
383 __free_pages(rdpg->rp_pages[i], 0);
385 OBD_FREE(rdpg->rp_pages, rdpg->rp_npages *
386 sizeof rdpg->rp_pages[0]);
/* Top-level entry: split directory @mo across MDS targets if it has grown
 * past the threshold.
 *
 * Sequence: fetch attributes; check split is warranted; create slave
 * objects (builds ma->ma_lmv); migrate entries; install the LMV EA on the
 * master. Returns 0 both when no split is needed and on success; the
 * md_attr setup and error paths are on lines not visible in this chunk. */
394 int cml_try_to_split(const struct lu_context *ctx, struct md_object *mo)
/* Only directories can be split. */
400 LASSERT(S_ISDIR(lu_object_attr(&mo->mo_lu)));
406 ma->ma_need = MA_INODE|MA_LMV;
407 rc = mo_attr_get(ctx, mo, ma);
411 /* step1: checking whether the dir need to be splitted */
412 rc = cmm_expect_splitting(ctx, mo, ma);
413 if (rc != CMM_EXPECT_SPLIT)
414 GOTO(cleanup, rc = 0);
416 /* step2: create slave objects */
417 rc = cmm_create_slave_objects(ctx, mo, ma);
421 /* step3: scan and split the object */
422 rc = cmm_scan_and_split(ctx, mo, ma);
426 /* step4: set mea to the master object */
427 rc = mo_xattr_set(ctx, md_object_next(mo), ma->ma_lmv, ma->ma_lmv_size,
431 CWARN("Dir"DFID" has been split \n",
432 PFID(lu_object_fid(&mo->mo_lu)));
/* The LMV allocated in cmm_create_slave_objects is released here, after
 * the split has fully completed (or failed past step 2). */
434 if (ma->ma_lmv_size && ma->ma_lmv)
435 OBD_FREE(ma->ma_lmv, ma->ma_lmv_size);