/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  lustre/cmm/cmm_split.c
 *  Lustre directory splitting
 *
 *  Copyright (c) 2006 Cluster File Systems, Inc.
 *   Author: Alex Thomas <alex@clusterfs.com>
 *           Wang Di     <wangdi@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_MDS

#include <obd_class.h>
#include <lustre_fid.h>
#include <lustre_mds.h>
#include "cmm_internal.h"
#include "mdc_internal.h"

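/* Per-thread context data for CMM; for now it only carries the md_attr
 * used during splitting, retrieved through cmm_ctx_info() below. */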
struct cmm_thread_info {
        struct md_attr   cti_ma;
};

struct lu_context_key cmm_thread_key;
struct cmm_thread_info *cmm_ctx_info(const struct lu_context *ctx)
{
        struct cmm_thread_info *info;

        info = lu_context_key_get(ctx, &cmm_thread_key);
        LASSERT(info != NULL);
        return info;
}

#define CMM_NO_SPLIT_EXPECTED   0
#define CMM_EXPECT_SPLIT        1
#define CMM_NO_SPLITTABLE       2

#define SPLIT_SIZE (64 * 1024)

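/*
 * Decide whether this directory should be split: splitting is only
 * expected when more than one target is available, the directory has
 * grown to at least SPLIT_SIZE, and it carries no LMV EA yet (i.e. it
 * has not already been split).
 */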
static int cmm_expect_splitting(const struct lu_context *ctx,
                                struct md_object *mo, struct md_attr *ma)
{
        struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
        ENTRY;

        if (cmm->cmm_tgt_count == 1)
                RETURN(CMM_NO_SPLIT_EXPECTED);

        if (ma->ma_attr.la_size < SPLIT_SIZE)
                RETURN(CMM_NO_SPLIT_EXPECTED);

        if (ma->ma_lmv_size)
                RETURN(CMM_NO_SPLIT_EXPECTED);

        RETURN(CMM_EXPECT_SPLIT);
}

static inline struct lu_fid* cmm2_fid(struct cmm_object *obj)
{
        return &(obj->cmo_obj.mo_lu.lo_header->loh_fid);
}

#define cmm_md_size(stripes)                            \
        (sizeof(struct lmv_stripe_md) + (stripes) * sizeof(struct lu_fid))

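/*
 * Allocate one new fid from every configured MDC target; @fid must be
 * an array with at least @count slots, one per target.
 */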
static int cmm_alloc_fid(const struct lu_context *ctx, struct cmm_device *cmm,
                         struct lu_fid *fid, int count)
{
        struct mdc_device *mc, *tmp;
        int rc = 0, i = 0;
        ENTRY;

        LASSERT(count == cmm->cmm_tgt_count);

        /* FIXME: holding a spinlock around this loop may not be
         * appropriate, because obd_fid_alloc() may need to send an RPC. */
        spin_lock(&cmm->cmm_tgt_guard);
        list_for_each_entry_safe(mc, tmp, &cmm->cmm_targets,
                                 mc_linkage) {
                rc = obd_fid_alloc(mc->mc_desc.cl_exp, &fid[i++], NULL);
                if (rc) {
                        spin_unlock(&cmm->cmm_tgt_guard);
                        RETURN(rc);
                }
        }
        spin_unlock(&cmm->cmm_tgt_guard);
        RETURN(rc);
}

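/*
 * Look up (or allocate) the cmm_object for fid @f and return it with a
 * reference held; the reference is dropped via cmm_object_put(). On
 * failure the ERR_PTR is propagated through the returned pointer.
 */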
struct cmm_object *cmm_object_find(const struct lu_context *ctxt,
                                   struct cmm_device *d,
                                   const struct lu_fid *f)
{
        struct lu_object *o;
        struct cmm_object *m;
        ENTRY;

        o = lu_object_find(ctxt, d->cmm_md_dev.md_lu_dev.ld_site, f);
        if (IS_ERR(o))
                m = (struct cmm_object *)o;
        else
                m = lu2cmm_obj(lu_object_locate(o->lo_header,
                               d->cmm_md_dev.md_lu_dev.ld_type));
        RETURN(m);
}

static inline void cmm_object_put(const struct lu_context *ctxt,
                                  struct cmm_object *o)
{
        lu_object_put(ctxt, &o->cmo_obj.mo_lu);
}

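/*
 * Create one slave object with fid @fid on a remote MDT, by issuing
 * the create through the layer below (the MDC for that target).
 */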
static int cmm_create_remote_obj(const struct lu_context *ctx,
                                 struct cmm_device *cmm,
                                 struct lu_fid *fid, struct md_attr *ma)
{
        struct cmm_object *obj;
        struct md_create_spec *spec;
        int rc;
        ENTRY;

        obj = cmm_object_find(ctx, cmm, fid);
        if (IS_ERR(obj))
                RETURN(PTR_ERR(obj));

        OBD_ALLOC_PTR(spec);
        if (spec == NULL)
                GOTO(cleanup, rc = -ENOMEM);

        rc = mo_object_create(ctx, md_object_next(&obj->cmo_obj),
                              spec, ma);
        OBD_FREE_PTR(spec);
cleanup:
        cmm_object_put(ctx, obj);
        RETURN(rc);
}

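/*
 * Build the LMV EA for a directory about to be split: slot 0 holds the
 * master fid, slots 1..cmm_tgt_count hold freshly allocated slave fids.
 * The slave objects are created remotely and the EA is stored on the
 * master; on success the EA is handed to the caller in ma->ma_lmv.
 */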
static int cmm_create_slave_objects(const struct lu_context *ctx,
                                    struct md_object *mo, struct md_attr *ma)
{
        struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
        struct lmv_stripe_md *lmv = NULL;
        int lmv_size, i, rc;
        struct lu_fid *lf = cmm2_fid(md2cmm_obj(mo));
        ENTRY;

        lmv_size = cmm_md_size(cmm->cmm_tgt_count + 1);

        /* This lmv is freed by the caller once splitting has finished. */
        OBD_ALLOC(lmv, lmv_size);
        if (!lmv)
                RETURN(-ENOMEM);

        lmv->mea_master = -1;
        lmv->mea_magic = MEA_MAGIC_ALL_CHARS;
        lmv->mea_count = cmm->cmm_tgt_count + 1;

        lmv->mea_ids[0] = *lf;

        rc = cmm_alloc_fid(ctx, cmm, &lmv->mea_ids[1], cmm->cmm_tgt_count);
        if (rc)
                GOTO(cleanup, rc);

        /* Slave fids start at mea_ids[1]; mea_ids[0] is the master. */
        for (i = 0; i < cmm->cmm_tgt_count; i++) {
                rc = cmm_create_remote_obj(ctx, cmm, &lmv->mea_ids[i + 1], ma);
                if (rc)
                        GOTO(cleanup, rc);
        }

        rc = mo_xattr_set(ctx, md_object_next(mo), lmv, lmv_size,
                          MDS_LMV_MD_NAME, 0);
        if (rc)
                GOTO(cleanup, rc);

        ma->ma_lmv_size = lmv_size;
        ma->ma_lmv = lmv;
        RETURN(rc);
cleanup:
        OBD_FREE(lmv, lmv_size);
        RETURN(rc);
}

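/*
 * Ship the pages collected in @rdpg to the slave object identified by
 * @fid, one mdc_send_page() call per page.
 */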
static int cmm_send_split_pages(const struct lu_context *ctx,
                                struct md_object *mo, struct lu_rdpg *rdpg,
                                struct lu_fid *fid)
{
        struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
        struct cmm_object *obj;
        int rc = 0, i;
        ENTRY;

        obj = cmm_object_find(ctx, cmm, fid);
        if (IS_ERR(obj))
                RETURN(PTR_ERR(obj));

        for (i = 0; i < rdpg->rp_npages; i++) {
                rc = mdc_send_page(ctx, md_object_next(&obj->cmo_obj),
                                   rdpg->rp_pages[i]);
                if (rc)
                        GOTO(cleanup, rc);
        }
cleanup:
        cmm_object_put(ctx, obj);
        RETURN(rc);
}

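/*
 * Walk one hash segment [rp_hash, rp_hash_end) of the master directory:
 * read entries page by page and forward each batch to the slave @lf,
 * until the segment is exhausted (an end hash of ~0 marks the last page
 * of the directory).
 */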
static int cmm_split_entries(const struct lu_context *ctx, struct md_object *mo,
                             struct lu_rdpg *rdpg, struct lu_fid *lf)
{
        struct lu_dirpage *dp;
        __u32 hash_end;
        int rc, i;
        ENTRY;

        /* Zero the pages before reading directory entries into them. */
        for (i = 0; i < rdpg->rp_npages; i++) {
                memset(kmap(rdpg->rp_pages[i]), 0, CFS_PAGE_SIZE);
                kunmap(rdpg->rp_pages[i]);
        }

        /* Read the pages in this hash range and send them to the slave
         * object. */
        do {
                rc = mo_readpage(ctx, md_object_next(mo), rdpg);
                if (rc)
                        RETURN(rc);

                rc = cmm_send_split_pages(ctx, mo, rdpg, lf);
                if (rc)
                        RETURN(rc);

                dp = kmap(rdpg->rp_pages[0]);
                hash_end = dp->ldp_hash_end;
                kunmap(rdpg->rp_pages[0]);
                if (hash_end == ~0u)
                        break;
                /* Continue reading from where the last page ended. */
                rdpg->rp_hash = hash_end;
        } while (hash_end < rdpg->rp_hash_end);

        RETURN(rc);
}

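/*
 * Remove from the master directory every entry named in the pages of
 * @rdpg; called after those entries have been copied to a slave.
 */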
static int cmm_remove_entries(const struct lu_context *ctx,
                              struct md_object *mo, struct lu_rdpg *rdpg)
{
        struct lu_dirpage *dp;
        struct lu_dirent  *ent;
        int rc = 0, i;
        ENTRY;

        for (i = 0; i < rdpg->rp_npages; i++) {
                dp = kmap(rdpg->rp_pages[i]);
                for (ent = lu_dirent_start(dp); ent != NULL;
                     ent = lu_dirent_next(ent)) {
                        rc = mdo_name_remove(ctx, md_object_next(mo),
                                             ent->lde_name);
                        if (rc) {
                                kunmap(rdpg->rp_pages[i]);
                                RETURN(rc);
                        }
                }
                kunmap(rdpg->rp_pages[i]);
        }
        RETURN(rc);
}

#define MAX_HASH_SIZE 0x3fffffff
#define SPLIT_PAGE_COUNT 1
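/*
 * Split the whole directory: carve the name hash space into equal
 * segments, then, for each slave, copy over the entries of its segment
 * and delete them from the master. SPLIT_PAGE_COUNT pages are reused as
 * the transfer buffer for every segment.
 */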
static int cmm_scan_and_split(const struct lu_context *ctx,
                              struct md_object *mo, struct md_attr *ma)
{
        struct cmm_device *cmm = cmm_obj2dev(md2cmm_obj(mo));
        __u32 hash_segment;
        struct lu_rdpg *rdpg = NULL;
        int rc = 0, i;
        ENTRY;

        OBD_ALLOC_PTR(rdpg);
        if (!rdpg)
                RETURN(-ENOMEM);

        rdpg->rp_npages = SPLIT_PAGE_COUNT;
        rdpg->rp_count  = CFS_PAGE_SIZE * rdpg->rp_npages;

        OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
        if (rdpg->rp_pages == NULL)
                GOTO(free_rdpg, rc = -ENOMEM);

        for (i = 0; i < rdpg->rp_npages; i++) {
                rdpg->rp_pages[i] = alloc_pages(GFP_KERNEL, 0);
                if (rdpg->rp_pages[i] == NULL)
                        GOTO(cleanup, rc = -ENOMEM);
        }

        /* Divide the hash space into mea_count segments; segment 0 stays
         * on the master, segments 1..cmm_tgt_count move to the slaves. */
        hash_segment = MAX_HASH_SIZE / (cmm->cmm_tgt_count + 1);
        for (i = 1; i < cmm->cmm_tgt_count + 1; i++) {
                struct lu_fid *lf = &ma->ma_lmv->mea_ids[i];

                rdpg->rp_hash = i * hash_segment;
                rdpg->rp_hash_end = rdpg->rp_hash + hash_segment;
                rc = cmm_split_entries(ctx, mo, rdpg, lf);
                if (rc)
                        GOTO(cleanup, rc);
                rc = cmm_remove_entries(ctx, mo, rdpg);
                if (rc)
                        GOTO(cleanup, rc);
        }
cleanup:
        for (i = 0; i < rdpg->rp_npages; i++)
                if (rdpg->rp_pages[i] != NULL)
                        __free_pages(rdpg->rp_pages[i], 0);
        if (rdpg->rp_pages)
                OBD_FREE(rdpg->rp_pages, rdpg->rp_npages *
                                         sizeof rdpg->rp_pages[0]);
free_rdpg:
        OBD_FREE_PTR(rdpg);

        RETURN(rc);
}

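/*
 * Entry point for directory splitting, presumably invoked on a master
 * directory that may have grown too large: check the size, create the
 * slave objects, then migrate the entries.
 */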
int cml_try_to_split(const struct lu_context *ctx, struct md_object *mo)
{
        struct md_attr *ma;
        int rc = 0;
        ENTRY;

        LASSERT(S_ISDIR(lu_object_attr(&mo->mo_lu)));

        OBD_ALLOC_PTR(ma);
        if (ma == NULL)
                RETURN(-ENOMEM);

        ma->ma_need = MA_INODE;
        rc = mo_attr_get(ctx, mo, ma);
        if (rc)
                GOTO(cleanup, rc);

        /* Step 1: check whether the directory needs to be split. */
        rc = cmm_expect_splitting(ctx, mo, ma);
        if (rc != CMM_EXPECT_SPLIT)
                GOTO(cleanup, rc = 0);

        /* Step 2: create the slave objects. */
        rc = cmm_create_slave_objects(ctx, mo, ma);
        if (rc)
                GOTO(cleanup, rc);

        /* Step 3: scan the directory and split its entries. */
        rc = cmm_scan_and_split(ctx, mo, ma);

cleanup:
        if (ma->ma_lmv_size && ma->ma_lmv)
                OBD_FREE(ma->ma_lmv, ma->ma_lmv_size);

        OBD_FREE_PTR(ma);

        RETURN(rc);
}