Whamcloud - gitweb
LU-1842 ldlm: support for sending GL ASTs to multiple locks
[fs/lustre-release.git] / lustre / obdfilter / filter.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/obdfilter/filter.c
37  *
38  * Author: Peter Braam <braam@clusterfs.com>
39  * Author: Andreas Dilger <adilger@clusterfs.com>
40  */
41
42 /*
43  * Invariant: Get O/R i_mutex for lookup, if needed, before any journal ops
44  *            (which need to get journal_lock, may block if journal full).
45  *
46  * Invariant: Call filter_start_transno() before any journal ops to avoid the
47  *            same deadlock problem.  We can (and want) to get rid of the
48  *            transno sem in favour of the dir/inode i_mutex to avoid single
49  *            threaded operation on the OST.
50  */
51
52 #define DEBUG_SUBSYSTEM S_FILTER
53
54 #include <linux/module.h>
55 #include <linux/fs.h>
56 #include <linux/dcache.h>
57 #include <linux/init.h>
58 #include <linux/version.h>
59 #include <linux/sched.h>
60 #include <linux/mount.h>
61 #include <linux/buffer_head.h>
62
63 #include <obd_cksum.h>
64 #include <obd_class.h>
65 #include <obd_lov.h>
66 #include <lustre_dlm.h>
67 #include <lustre_fsfilt.h>
68 #include <lprocfs_status.h>
69 #include <lustre_log.h>
70 #include <libcfs/list.h>
71 #include <lustre_disk.h>
72 #include <lustre_quota.h>
73 #include <linux/slab.h>
74 #include <lustre_param.h>
75 #include <lustre/ll_fiemap.h>
76
77 #include "filter_internal.h"
78
79 static struct lvfs_callback_ops filter_lvfs_ops;
80 cfs_mem_cache_t *ll_fmd_cachep;
81
82 static void filter_commit_cb(struct obd_device *obd, __u64 transno,
83                              void *cb_data, int error)
84 {
85         struct obd_export *exp = cb_data;
86         LASSERT(exp->exp_obd == obd);
87         obd_transno_commit_cb(obd, transno, exp, error);
88         class_export_cb_put(exp);
89 }
90
91 int filter_version_get_check(struct obd_export *exp,
92                              struct obd_trans_info *oti, struct inode *inode)
93 {
94         __u64 curr_version;
95
96         if (inode == NULL || oti == NULL)
97                 RETURN(0);
98
99         curr_version = fsfilt_get_version(exp->exp_obd, inode);
100         if ((__s64)curr_version == -EOPNOTSUPP)
101                 RETURN(0);
102         /* VBR: version is checked always because costs nothing */
103         if (oti->oti_pre_version != 0 &&
104             oti->oti_pre_version != curr_version) {
105                 CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
106                        oti->oti_pre_version, curr_version);
107                 cfs_spin_lock(&exp->exp_lock);
108                 exp->exp_vbr_failed = 1;
109                 cfs_spin_unlock(&exp->exp_lock);
110                 RETURN (-EOVERFLOW);
111         }
112         oti->oti_pre_version = curr_version;
113         RETURN(0);
114 }
115
116 /* Assumes caller has already pushed us into the kernel context. */
117 int filter_finish_transno(struct obd_export *exp, struct inode *inode,
118                           struct obd_trans_info *oti, int rc, int force_sync)
119 {
120         struct obd_device_target *obt = &exp->exp_obd->u.obt;
121         struct tg_export_data *ted = &exp->exp_target_data;
122         struct lr_server_data *lsd = class_server_data(exp->exp_obd);
123         struct lsd_client_data *lcd;
124         __u64 last_rcvd;
125         loff_t off;
126         int err, log_pri = D_RPCTRACE;
127
128         /* Propagate error code. */
129         if (rc)
130                 RETURN(rc);
131
132         if (!exp->exp_obd->obd_replayable || oti == NULL)
133                 RETURN(rc);
134
135         cfs_mutex_lock(&ted->ted_lcd_lock);
136         lcd = ted->ted_lcd;
137         /* if the export has already been disconnected, we have no last_rcvd slot,
138          * update server data with latest transno then */
139         if (lcd == NULL) {
140                 cfs_mutex_unlock(&ted->ted_lcd_lock);
141                 CWARN("commit transaction for disconnected client %s: rc %d\n",
142                       exp->exp_client_uuid.uuid, rc);
143                 err = filter_update_server_data(exp->exp_obd);
144                 RETURN(err);
145         }
146
147         /* we don't allocate new transnos for replayed requests */
148         cfs_spin_lock(&obt->obt_lut->lut_translock);
149         if (oti->oti_transno == 0) {
150                 last_rcvd = le64_to_cpu(lsd->lsd_last_transno) + 1;
151                 lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
152                 LASSERT(last_rcvd >= le64_to_cpu(lcd->lcd_last_transno));
153         } else {
154                 last_rcvd = oti->oti_transno;
155                 if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
156                         lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
157                 if (unlikely(last_rcvd < le64_to_cpu(lcd->lcd_last_transno))) {
158                         CERROR("Trying to overwrite bigger transno, on-disk: "
159                                LPU64", new: "LPU64"\n",
160                                le64_to_cpu(lcd->lcd_last_transno), last_rcvd);
161                         cfs_spin_lock(&exp->exp_lock);
162                         exp->exp_vbr_failed = 1;
163                         cfs_spin_unlock(&exp->exp_lock);
164                         cfs_spin_unlock(&obt->obt_lut->lut_translock);
165                         cfs_mutex_unlock(&ted->ted_lcd_lock);
166                         RETURN(-EOVERFLOW);
167                 }
168         }
169         oti->oti_transno = last_rcvd;
170
171         lcd->lcd_last_transno = cpu_to_le64(last_rcvd);
172         lcd->lcd_pre_versions[0] = cpu_to_le64(oti->oti_pre_version);
173         lcd->lcd_last_xid = cpu_to_le64(oti->oti_xid);
174         cfs_spin_unlock(&obt->obt_lut->lut_translock);
175
176         if (inode)
177                 fsfilt_set_version(exp->exp_obd, inode, last_rcvd);
178
179         off = ted->ted_lr_off;
180         if (off <= 0) {
181                 CERROR("%s: client idx %d is %lld\n", exp->exp_obd->obd_name,
182                        ted->ted_lr_idx, ted->ted_lr_off);
183                 err = -EINVAL;
184         } else {
185                 class_export_cb_get(exp); /* released when the cb is called */
186                 if (!force_sync)
187                         force_sync = fsfilt_add_journal_cb(exp->exp_obd,
188                                                            last_rcvd,
189                                                            oti->oti_handle,
190                                                            filter_commit_cb,
191                                                            exp);
192
193                 err = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
194                                           lcd, sizeof(*lcd), &off,
195                                           force_sync | exp->exp_need_sync);
196                 if (force_sync)
197                         filter_commit_cb(exp->exp_obd, last_rcvd, exp, err);
198         }
199         if (err) {
200                 log_pri = D_ERROR;
201                 if (rc == 0)
202                         rc = err;
203         }
204
205         CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
206                last_rcvd, lcd->lcd_uuid, ted->ted_lr_idx, err);
207         cfs_mutex_unlock(&ted->ted_lcd_lock);
208         RETURN(rc);
209 }
210
211 void f_dput(struct dentry *dentry)
212 {
213         /* Can't go inside filter_ddelete because it can block */
214         CDEBUG(D_INODE, "putting %s: %p, count = %d\n",
215                dentry->d_name.name, dentry, atomic_read(&dentry->d_count) - 1);
216         LASSERT(atomic_read(&dentry->d_count) > 0);
217
218         dput(dentry);
219 }
220
221 static void init_brw_stats(struct brw_stats *brw_stats)
222 {
223         int i;
224         for (i = 0; i < BRW_LAST; i++)
225                 cfs_spin_lock_init(&brw_stats->hist[i].oh_lock);
226 }
227
228 static int lprocfs_init_rw_stats(struct obd_device *obd,
229                                  struct lprocfs_stats **stats)
230 {
231         int num_stats;
232
233         num_stats = (sizeof(*obd->obd_type->typ_dt_ops) / sizeof(void *)) +
234                                                         LPROC_FILTER_LAST - 1;
235         *stats = lprocfs_alloc_stats(num_stats, LPROCFS_STATS_FLAG_NOPERCPU);
236         if (*stats == NULL)
237                 return -ENOMEM;
238
239         lprocfs_init_ops_stats(LPROC_FILTER_LAST, *stats);
240         lprocfs_counter_init(*stats, LPROC_FILTER_READ_BYTES,
241                              LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
242         lprocfs_counter_init(*stats, LPROC_FILTER_WRITE_BYTES,
243                              LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
244
245         return(0);
246 }
247
248 /* brw_stats are 2128, ops are 3916, ldlm are 204, so 6248 bytes per client,
249    plus the procfs overhead :( */
250 static int filter_export_stats_init(struct obd_device *obd,
251                                     struct obd_export *exp,
252                                     void *client_nid)
253 {
254         int rc, newnid = 0;
255         ENTRY;
256
257         if (obd_uuid_equals(&exp->exp_client_uuid, &obd->obd_uuid))
258                 /* Self-export gets no proc entry */
259                 RETURN(0);
260
261         rc = lprocfs_exp_setup(exp, client_nid, &newnid);
262         if (rc) {
263                 /* Mask error for already created
264                  * /proc entries */
265                 if (rc == -EALREADY)
266                         rc = 0;
267                 RETURN(rc);
268         }
269
270         if (newnid) {
271                 struct nid_stat *tmp = exp->exp_nid_stats;
272                 LASSERT(tmp != NULL);
273
274                 OBD_ALLOC(tmp->nid_brw_stats, sizeof(struct brw_stats));
275                 if (tmp->nid_brw_stats == NULL)
276                         GOTO(clean, rc = -ENOMEM);
277
278                 init_brw_stats(tmp->nid_brw_stats);
279                 rc = lprocfs_seq_create(exp->exp_nid_stats->nid_proc, "brw_stats",
280                                         0644, &filter_per_nid_stats_fops,
281                                         exp->exp_nid_stats);
282                 if (rc)
283                         CWARN("Error adding the brw_stats file\n");
284
285                 rc = lprocfs_init_rw_stats(obd, &exp->exp_nid_stats->nid_stats);
286                 if (rc)
287                         GOTO(clean, rc);
288
289                 rc = lprocfs_register_stats(tmp->nid_proc, "stats",
290                                             tmp->nid_stats);
291                 if (rc)
292                         GOTO(clean, rc);
293                 rc = lprocfs_nid_ldlm_stats_init(tmp);
294                 if (rc)
295                         GOTO(clean, rc);
296         }
297
298         RETURN(0);
299  clean:
300         return rc;
301 }
302
303 /* Add client data to the FILTER.  We use a bitmap to locate a free space
304  * in the last_rcvd file if cl_idx is -1 (i.e. a new client).
305  * Otherwise, we have just read the data from the last_rcvd file and
306  * we know its offset. */
307 static int filter_client_add(struct obd_device *obd, struct obd_export *exp,
308                              int cl_idx)
309 {
310         struct obd_device_target *obt = &obd->u.obt;
311         struct tg_export_data *ted = &exp->exp_target_data;
312         struct lr_server_data *lsd = class_server_data(obd);
313         unsigned long *bitmap = obt->obt_lut->lut_client_bitmap;
314         int new_client = (cl_idx == -1);
315
316         ENTRY;
317
318         LASSERT(bitmap != NULL);
319         LASSERTF(cl_idx > -2, "%d\n", cl_idx);
320
321         /* Self-export */
322         if (strcmp(ted->ted_lcd->lcd_uuid, obd->obd_uuid.uuid) == 0)
323                 RETURN(0);
324
325         /* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
326          * there's no need for extra complication here
327          */
328         if (new_client) {
329                 cl_idx = cfs_find_first_zero_bit(bitmap, LR_MAX_CLIENTS);
330         repeat:
331                 if (cl_idx >= LR_MAX_CLIENTS) {
332                         CERROR("no room for %u client - fix LR_MAX_CLIENTS\n",
333                                cl_idx);
334                         RETURN(-EOVERFLOW);
335                 }
336                 if (cfs_test_and_set_bit(cl_idx, bitmap)) {
337                         cl_idx = cfs_find_next_zero_bit(bitmap, LR_MAX_CLIENTS,
338                                                         cl_idx);
339                         goto repeat;
340                 }
341         } else {
342                 if (cfs_test_and_set_bit(cl_idx, bitmap)) {
343                         CERROR("FILTER client %d: bit already set in bitmap!\n",
344                                cl_idx);
345                         LBUG();
346                 }
347         }
348
349         ted->ted_lr_idx = cl_idx;
350         ted->ted_lr_off = le32_to_cpu(lsd->lsd_client_start) +
351                           cl_idx * le16_to_cpu(lsd->lsd_client_size);
352         cfs_mutex_init(&ted->ted_lcd_lock);
353         LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
354
355         CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
356                ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
357
358         if (new_client) {
359                 struct lvfs_run_ctxt saved;
360                 loff_t off = ted->ted_lr_off;
361                 int rc;
362                 void *handle;
363
364                 CDEBUG(D_INFO, "writing client lcd at idx %u (%llu) (len %u)\n",
365                        ted->ted_lr_idx,off,(unsigned int)sizeof(*ted->ted_lcd));
366
367                 if (OBD_FAIL_CHECK(OBD_FAIL_TGT_CLIENT_ADD))
368                         RETURN(-ENOSPC);
369
370                 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
371                 /* Transaction needed to fix bug 1403 */
372                 handle = fsfilt_start(obd,
373                                       obt->obt_rcvd_filp->f_dentry->d_inode,
374                                       FSFILT_OP_SETATTR, NULL);
375                 if (IS_ERR(handle)) {
376                         rc = PTR_ERR(handle);
377                         CERROR("unable to start transaction: rc %d\n", rc);
378                 } else {
379                         ted->ted_lcd->lcd_last_epoch = lsd->lsd_start_epoch;
380                         exp->exp_last_request_time = cfs_time_current_sec();
381                         rc = fsfilt_add_journal_cb(obd, 0, handle,
382                                                    target_client_add_cb,
383                                                    class_export_cb_get(exp));
384                         if (rc == 0) {
385                                 cfs_spin_lock(&exp->exp_lock);
386                                 exp->exp_need_sync = 1;
387                                 cfs_spin_unlock(&exp->exp_lock);
388                         }
389                         rc = fsfilt_write_record(obd, obt->obt_rcvd_filp,
390                                                  ted->ted_lcd,
391                                                  sizeof(*ted->ted_lcd),
392                                                  &off, rc /* sync if no cb */);
393                         fsfilt_commit(obd,
394                                       obt->obt_rcvd_filp->f_dentry->d_inode,
395                                       handle, 0);
396                 }
397                 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
398
399                 if (rc) {
400                         CERROR("error writing %s client idx %u: rc %d\n",
401                                LAST_RCVD, ted->ted_lr_idx, rc);
402                         RETURN(rc);
403                 }
404         }
405         RETURN(0);
406 }
407
408 static int filter_client_del(struct obd_export *exp)
409 {
410         struct tg_export_data *ted = &exp->exp_target_data;
411         struct obd_device_target *obt = &exp->exp_obd->u.obt;
412         struct lvfs_run_ctxt saved;
413         int rc;
414         loff_t off;
415         ENTRY;
416
417         if (ted->ted_lcd == NULL)
418                 RETURN(0);
419
420         /* XXX if lcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
421         if (strcmp(ted->ted_lcd->lcd_uuid, exp->exp_obd->obd_uuid.uuid ) == 0)
422                 GOTO(free, 0);
423
424         LASSERT(obt->obt_lut->lut_client_bitmap != NULL);
425
426         off = ted->ted_lr_off;
427
428         CDEBUG(D_INFO, "freeing client at idx %u, offset %lld with UUID '%s'\n",
429                ted->ted_lr_idx, ted->ted_lr_off, ted->ted_lcd->lcd_uuid);
430
431         /* Don't clear ted_lr_idx here as it is likely also unset.  At worst
432          * we leak a client slot that will be cleaned on the next recovery. */
433         if (off <= 0) {
434                 CERROR("%s: client idx %d has med_off %lld\n",
435                        exp->exp_obd->obd_name, ted->ted_lr_idx, off);
436                 GOTO(free, rc = -EINVAL);
437         }
438
439         /* Clear the bit _after_ zeroing out the client so we don't
440            race with filter_client_add and zero out new clients.*/
441         if (!cfs_test_bit(ted->ted_lr_idx, obt->obt_lut->lut_client_bitmap)) {
442                 CERROR("FILTER client %u: bit already clear in bitmap!!\n",
443                        ted->ted_lr_idx);
444                 LBUG();
445         }
446
447         push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
448         /* Make sure the server's last_transno is up to date.
449          * This should be done before zeroing client slot so last_transno will
450          * be in server data or in client data in case of failure */
451         filter_update_server_data(exp->exp_obd);
452
453         cfs_mutex_lock(&ted->ted_lcd_lock);
454         memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
455         rc = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
456                                  ted->ted_lcd,
457                                  sizeof(*ted->ted_lcd), &off, 0);
458         cfs_mutex_unlock(&ted->ted_lcd_lock);
459         pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
460
461         CDEBUG(rc == 0 ? D_INFO : D_ERROR,
462                "zero out client %s at idx %u/%llu in %s, rc %d\n",
463                ted->ted_lcd->lcd_uuid, ted->ted_lr_idx, ted->ted_lr_off,
464                LAST_RCVD, rc);
465         EXIT;
466 free:
467         return 0;
468 }
469
470 /* drop fmd reference, free it if last ref. must be called with fed_lock held.*/
471 static inline void filter_fmd_put_nolock(struct filter_export_data *fed,
472                                          struct filter_mod_data *fmd)
473 {
474         LASSERT_SPIN_LOCKED(&fed->fed_lock);
475         if (--fmd->fmd_refcount == 0) {
476                 /* XXX when we have persistent reservations and the handle
477                  * is stored herein we need to drop it here. */
478                 fed->fed_mod_count--;
479                 cfs_list_del(&fmd->fmd_list);
480                 OBD_SLAB_FREE(fmd, ll_fmd_cachep, sizeof(*fmd));
481         }
482 }
483
484 /* drop fmd reference, free it if last ref */
485 void filter_fmd_put(struct obd_export *exp, struct filter_mod_data *fmd)
486 {
487         struct filter_export_data *fed;
488
489         if (fmd == NULL)
490                 return;
491
492         fed = &exp->exp_filter_data;
493         cfs_spin_lock(&fed->fed_lock);
494         filter_fmd_put_nolock(fed, fmd); /* caller reference */
495         cfs_spin_unlock(&fed->fed_lock);
496 }
497
498 /* expire entries from the end of the list if there are too many
499  * or they are too old */
500 static void filter_fmd_expire_nolock(struct filter_obd *filter,
501                                      struct filter_export_data *fed,
502                                      struct filter_mod_data *keep)
503 {
504         struct filter_mod_data *fmd, *tmp;
505
506         cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
507                 if (fmd == keep)
508                         break;
509
510                 if (cfs_time_before(jiffies, fmd->fmd_expire) &&
511                     fed->fed_mod_count < filter->fo_fmd_max_num)
512                         break;
513
514                 cfs_list_del_init(&fmd->fmd_list);
515                 filter_fmd_put_nolock(fed, fmd); /* list reference */
516         }
517 }
518
519 void filter_fmd_expire(struct obd_export *exp)
520 {
521         cfs_spin_lock(&exp->exp_filter_data.fed_lock);
522         filter_fmd_expire_nolock(&exp->exp_obd->u.filter,
523                                  &exp->exp_filter_data, NULL);
524         cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
525 }
526
527 /* find specified objid, group in export fmd list.
528  * caller must hold fed_lock and take fmd reference itself */
529 static struct filter_mod_data *filter_fmd_find_nolock(struct filter_obd *filter,
530                                                 struct filter_export_data *fed,
531                                                 obd_id objid, obd_seq group)
532 {
533         struct filter_mod_data *found = NULL, *fmd;
534
535         LASSERT_SPIN_LOCKED(&fed->fed_lock);
536
537         cfs_list_for_each_entry_reverse(fmd, &fed->fed_mod_list, fmd_list) {
538                 if (fmd->fmd_id == objid && fmd->fmd_gr == group) {
539                         found = fmd;
540                         cfs_list_del(&fmd->fmd_list);
541                         cfs_list_add_tail(&fmd->fmd_list, &fed->fed_mod_list);
542                         fmd->fmd_expire = jiffies + filter->fo_fmd_max_age;
543                         break;
544                 }
545         }
546
547         filter_fmd_expire_nolock(filter, fed, found);
548
549         return found;
550 }
551
552 /* Find fmd based on objid and group, or return NULL if not found. */
553 struct filter_mod_data *filter_fmd_find(struct obd_export *exp,
554                                         obd_id objid, obd_seq group)
555 {
556         struct filter_mod_data *fmd;
557
558         cfs_spin_lock(&exp->exp_filter_data.fed_lock);
559         fmd = filter_fmd_find_nolock(&exp->exp_obd->u.filter,
560                                      &exp->exp_filter_data, objid, group);
561         if (fmd)
562                 fmd->fmd_refcount++;    /* caller reference */
563         cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
564
565         return fmd;
566 }
567
568 /* Find fmd based on objid and group, or create a new one if none is found.
569  * It is possible for this function to return NULL under memory pressure,
570  * or if objid = 0 is passed (which will only cause old entries to expire).
571  * Currently this is not fatal because any fmd state is transient and
572  * may also be freed when it gets sufficiently old. */
573 struct filter_mod_data *filter_fmd_get(struct obd_export *exp,
574                                        obd_id objid, obd_seq group)
575 {
576         struct filter_export_data *fed = &exp->exp_filter_data;
577         struct filter_mod_data *found = NULL, *fmd_new = NULL;
578
579         OBD_SLAB_ALLOC_PTR_GFP(fmd_new, ll_fmd_cachep, CFS_ALLOC_IO);
580
581         cfs_spin_lock(&fed->fed_lock);
582         found = filter_fmd_find_nolock(&exp->exp_obd->u.filter,fed,objid,group);
583         if (fmd_new) {
584                 if (found == NULL) {
585                         cfs_list_add_tail(&fmd_new->fmd_list,
586                                           &fed->fed_mod_list);
587                         fmd_new->fmd_id = objid;
588                         fmd_new->fmd_gr = group;
589                         fmd_new->fmd_refcount++;   /* list reference */
590                         found = fmd_new;
591                         fed->fed_mod_count++;
592                 } else {
593                         OBD_SLAB_FREE(fmd_new, ll_fmd_cachep, sizeof(*fmd_new));
594                 }
595         }
596         if (found) {
597                 found->fmd_refcount++;          /* caller reference */
598                 found->fmd_expire = jiffies +
599                         exp->exp_obd->u.filter.fo_fmd_max_age;
600         }
601
602         cfs_spin_unlock(&fed->fed_lock);
603
604         return found;
605 }
606
607 #ifdef DO_FMD_DROP
608 /* drop fmd list reference so it will disappear when last reference is put.
609  * This isn't so critical because it would in fact only affect the one client
610  * that is doing the unlink and at worst we have an stale entry referencing
611  * an object that should never be used again. */
612 static void filter_fmd_drop(struct obd_export *exp, obd_id objid, obd_seq group)
613 {
614         struct filter_mod_data *found = NULL;
615
616         cfs_spin_lock(&exp->exp_filter_data.fed_lock);
617         found = filter_fmd_find_nolock(&exp->exp_filter_data, objid, group);
618         if (found) {
619                 cfs_list_del_init(&found->fmd_list);
620                 filter_fmd_put_nolock(&exp->exp_filter_data, found);
621         }
622         cfs_spin_unlock(&exp->exp_filter_data.fed_lock);
623 }
624 #else
625 #define filter_fmd_drop(exp, objid, group)
626 #endif
627
628 /* remove all entries from fmd list */
629 static void filter_fmd_cleanup(struct obd_export *exp)
630 {
631         struct filter_export_data *fed = &exp->exp_filter_data;
632         struct filter_mod_data *fmd = NULL, *tmp;
633
634         cfs_spin_lock(&fed->fed_lock);
635         cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
636                 cfs_list_del_init(&fmd->fmd_list);
637                 filter_fmd_put_nolock(fed, fmd);
638         }
639         cfs_spin_unlock(&fed->fed_lock);
640 }
641
642 static int filter_init_export(struct obd_export *exp)
643 {
644         int rc;
645         ENTRY;
646
647         cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
648         CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
649
650         cfs_spin_lock(&exp->exp_lock);
651         exp->exp_connecting = 1;
652         cfs_spin_unlock(&exp->exp_lock);
653
654         /* self-export doesn't need client data and ldlm initialization */
655         if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
656                                      &exp->exp_client_uuid)))
657                 RETURN(0);
658
659         rc = lut_client_alloc(exp);
660         if (rc == 0)
661                 rc = ldlm_init_export(exp);
662         if (rc)
663                 CERROR("%s: Can't initialize export: rc %d\n",
664                        exp->exp_obd->obd_name, rc);
665
666         RETURN(rc);
667 }
668
669 static int filter_free_server_data(struct obd_device_target *obt)
670 {
671         lut_fini(NULL, obt->obt_lut);
672         OBD_FREE_PTR(obt->obt_lut);
673         return 0;
674 }
675
676 /* assumes caller is already in kernel ctxt */
677 int filter_update_server_data(struct obd_device *obd)
678 {
679         struct file *filp = obd->u.obt.obt_rcvd_filp;
680         struct lr_server_data *lsd = class_server_data(obd);
681         loff_t off = 0;
682         int rc;
683         ENTRY;
684
685         CDEBUG(D_INODE, "server uuid      : %s\n", lsd->lsd_uuid);
686         CDEBUG(D_INODE, "server last_rcvd : "LPU64"\n",
687                le64_to_cpu(lsd->lsd_last_transno));
688         CDEBUG(D_INODE, "server last_mount: "LPU64"\n",
689                le64_to_cpu(lsd->lsd_mount_count));
690
691         rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off, 0);
692         if (rc)
693                 CERROR("error writing lr_server_data: rc = %d\n", rc);
694
695         RETURN(rc);
696 }
697
698 int filter_update_last_objid(struct obd_device *obd, obd_seq group,
699                              int force_sync)
700 {
701         struct filter_obd *filter = &obd->u.filter;
702         __u64 tmp;
703         loff_t off = 0;
704         int rc;
705         ENTRY;
706
707         if (filter->fo_last_objid_files[group] == NULL) {
708                 CERROR("Object seq "LPU64" not fully setup; not updating "
709                        "last_objid\n", group);
710                 RETURN(-EINVAL);
711         }
712
713         CDEBUG(D_INODE, "%s: server last_objid for "POSTID"\n",
714                obd->obd_name, filter->fo_last_objids[group], group);
715
716         tmp = cpu_to_le64(filter->fo_last_objids[group]);
717         rc = fsfilt_write_record(obd, filter->fo_last_objid_files[group],
718                                  &tmp, sizeof(tmp), &off, force_sync);
719         if (rc)
720                 CERROR("error writing seq "LPU64" last objid: rc = %d\n",
721                        group, rc);
722         RETURN(rc);
723 }
724 extern int ost_handle(struct ptlrpc_request *req);
725 /* assumes caller has already in kernel ctxt */
726 static int filter_init_server_data(struct obd_device *obd, struct file * filp)
727 {
728         struct filter_obd *filter = &obd->u.filter;
729         struct lr_server_data *lsd;
730         struct lsd_client_data *lcd = NULL;
731         struct inode *inode = filp->f_dentry->d_inode;
732         unsigned long last_rcvd_size = i_size_read(inode);
733         struct lu_target *lut;
734         __u64 mount_count;
735         __u32 start_epoch;
736         int cl_idx;
737         loff_t off = 0;
738         int rc;
739
740         /* ensure padding in the struct is the correct size */
741         CLASSERT (offsetof(struct lr_server_data, lsd_padding) +
742                  sizeof(lsd->lsd_padding) == LR_SERVER_SIZE);
743         CLASSERT (offsetof(struct lsd_client_data, lcd_padding) +
744                  sizeof(lcd->lcd_padding) == LR_CLIENT_SIZE);
745
746         /* allocate and initialize lu_target */
747         OBD_ALLOC_PTR(lut);
748         if (lut == NULL)
749                 RETURN(-ENOMEM);
750         rc = lut_init(NULL, lut, obd, NULL);
751         if (rc)
752                 GOTO(err_lut, rc);
753         lsd = class_server_data(obd);
754         if (last_rcvd_size == 0) {
755                 LCONSOLE_WARN("%s: new disk, initializing\n", obd->obd_name);
756
757                 memcpy(lsd->lsd_uuid, obd->obd_uuid.uuid,sizeof(lsd->lsd_uuid));
758                 lsd->lsd_last_transno = 0;
759                 mount_count = lsd->lsd_mount_count = 0;
760                 lsd->lsd_server_size = cpu_to_le32(LR_SERVER_SIZE);
761                 lsd->lsd_client_start = cpu_to_le32(LR_CLIENT_START);
762                 lsd->lsd_client_size = cpu_to_le16(LR_CLIENT_SIZE);
763                 lsd->lsd_subdir_count = cpu_to_le16(FILTER_SUBDIR_COUNT);
764                 filter->fo_subdir_count = FILTER_SUBDIR_COUNT;
765                 /* OBD_COMPAT_OST is set in filter_connect_internal when the
766                  * MDS first connects and assigns the OST index number. */
767                 lsd->lsd_feature_incompat = cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
768                                                         OBD_INCOMPAT_OST);
769         } else {
770                 rc = fsfilt_read_record(obd, filp, lsd, sizeof(*lsd), &off);
771                 if (rc) {
772                         CDEBUG(D_INODE,"OBD filter: error reading %s: rc %d\n",
773                                LAST_RCVD, rc);
774                         GOTO(err_lut, rc);
775                 }
776                 if (strcmp(lsd->lsd_uuid, obd->obd_uuid.uuid) != 0) {
777                         LCONSOLE_ERROR_MSG(0x134, "Trying to start OBD %s "
778                                            "using the wrong disk %s. Were the "
779                                            "/dev/ assignments rearranged?\n",
780                                            obd->obd_uuid.uuid, lsd->lsd_uuid);
781                         GOTO(err_lut, rc = -EINVAL);
782                 }
783                 mount_count = le64_to_cpu(lsd->lsd_mount_count);
784                 filter->fo_subdir_count = le16_to_cpu(lsd->lsd_subdir_count);
785                 /* COMPAT_146 */
786                 /* Assume old last_rcvd format unless I_C_LR is set */
787                 if (!(lsd->lsd_feature_incompat &
788                       cpu_to_le32(OBD_INCOMPAT_COMMON_LR)))
789                         lsd->lsd_last_transno = lsd->lsd_compat14;
790                 /* end COMPAT_146 */
791                 /* OBD_COMPAT_OST is set in filter_connect_internal when the
792                  * MDS first connects and assigns the OST index number. */
793                 lsd->lsd_feature_incompat |= cpu_to_le32(OBD_INCOMPAT_COMMON_LR|
794                                                          OBD_INCOMPAT_OST);
795         }
796
797         if (lsd->lsd_feature_incompat & ~cpu_to_le32(FILTER_INCOMPAT_SUPP)) {
798                 CERROR("%s: unsupported incompat filesystem feature(s) %x\n",
799                        obd->obd_name, le32_to_cpu(lsd->lsd_feature_incompat) &
800                        ~FILTER_INCOMPAT_SUPP);
801                 GOTO(err_lut, rc = -EINVAL);
802         }
803         if (lsd->lsd_feature_rocompat & ~cpu_to_le32(FILTER_ROCOMPAT_SUPP)) {
804                 CERROR("%s: unsupported read-only filesystem feature(s) %x\n",
805                        obd->obd_name, le32_to_cpu(lsd->lsd_feature_rocompat) &
806                        ~FILTER_ROCOMPAT_SUPP);
807                 /* Do something like remount filesystem read-only */
808                 GOTO(err_lut, rc = -EINVAL);
809         }
810
811         start_epoch = le32_to_cpu(lsd->lsd_start_epoch);
812
813         CDEBUG(D_INODE, "%s: server start_epoch : %#x\n",
814                obd->obd_name, start_epoch);
815         CDEBUG(D_INODE, "%s: server last_transno : "LPX64"\n",
816                obd->obd_name, le64_to_cpu(lsd->lsd_last_transno));
817         CDEBUG(D_INODE, "%s: server mount_count: "LPU64"\n",
818                obd->obd_name, mount_count + 1);
819         CDEBUG(D_INODE, "%s: server data size: %u\n",
820                obd->obd_name, le32_to_cpu(lsd->lsd_server_size));
821         CDEBUG(D_INODE, "%s: per-client data start: %u\n",
822                obd->obd_name, le32_to_cpu(lsd->lsd_client_start));
823         CDEBUG(D_INODE, "%s: per-client data size: %u\n",
824                obd->obd_name, le32_to_cpu(lsd->lsd_client_size));
825         CDEBUG(D_INODE, "%s: server subdir_count: %u\n",
826                obd->obd_name, le16_to_cpu(lsd->lsd_subdir_count));
827         CDEBUG(D_INODE, "%s: last_rcvd clients: %lu\n", obd->obd_name,
828                last_rcvd_size <= le32_to_cpu(lsd->lsd_client_start) ? 0 :
829                (last_rcvd_size - le32_to_cpu(lsd->lsd_client_start)) /
830                 le16_to_cpu(lsd->lsd_client_size));
831
832         if (!obd->obd_replayable) {
833                 CWARN("%s: recovery support OFF\n", obd->obd_name);
834                 GOTO(out, rc = 0);
835         }
836
837         OBD_ALLOC_PTR(lcd);
838         if (!lcd)
839                 GOTO(err_client, rc = -ENOMEM);
840
841         for (cl_idx = 0, off = le32_to_cpu(lsd->lsd_client_start);
842              off < last_rcvd_size; cl_idx++) {
843                 __u64 last_rcvd;
844                 struct obd_export *exp;
845                 struct filter_export_data *fed;
846
847                 /* Don't assume off is incremented properly by
848                  * fsfilt_read_record(), in case sizeof(*lcd)
849                  * isn't the same as lsd->lsd_client_size.  */
850                 off = le32_to_cpu(lsd->lsd_client_start) +
851                         cl_idx * le16_to_cpu(lsd->lsd_client_size);
852                 rc = fsfilt_read_record(obd, filp, lcd, sizeof(*lcd), &off);
853                 if (rc) {
854                         CERROR("error reading FILT %s idx %d off %llu: rc %d\n",
855                                LAST_RCVD, cl_idx, off, rc);
856                         break; /* read error shouldn't cause startup to fail */
857                 }
858
859                 if (lcd->lcd_uuid[0] == '\0') {
860                         CDEBUG(D_INFO, "skipping zeroed client at offset %d\n",
861                                cl_idx);
862                         continue;
863                 }
864
865                 check_lcd(obd->obd_name, cl_idx, lcd);
866
867                 last_rcvd = le64_to_cpu(lcd->lcd_last_transno);
868
869                 CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: "LPU64
870                        " srv lr: "LPU64"\n", lcd->lcd_uuid, cl_idx,
871                        last_rcvd, le64_to_cpu(lsd->lsd_last_transno));
872
873                 /* These exports are cleaned up by filter_disconnect(), so they
874                  * need to be set up like real exports as filter_connect() does.
875                  */
876                 exp = class_new_export(obd, (struct obd_uuid *)lcd->lcd_uuid);
877                 if (IS_ERR(exp)) {
878                         if (PTR_ERR(exp) == -EALREADY) {
879                                 /* export already exists, zero out this one */
880                                 CERROR("Duplicate export %s!\n", lcd->lcd_uuid);
881                                 continue;
882                         }
883                         OBD_FREE_PTR(lcd);
884                         GOTO(err_client, rc = PTR_ERR(exp));
885                 }
886
887                 fed = &exp->exp_filter_data;
888                 *fed->fed_ted.ted_lcd = *lcd;
889                 fed->fed_group = 0; /* will be assigned at connect */
890                 filter_export_stats_init(obd, exp, NULL);
891                 rc = filter_client_add(obd, exp, cl_idx);
892                 /* can't fail for existing client */
893                 LASSERTF(rc == 0, "rc = %d\n", rc);
894
895                 /* VBR: set export last committed */
896                 exp->exp_last_committed = last_rcvd;
897                 cfs_spin_lock(&exp->exp_lock);
898                 exp->exp_connecting = 0;
899                 exp->exp_in_recovery = 0;
900                 cfs_spin_unlock(&exp->exp_lock);
901                 obd->obd_max_recoverable_clients++;
902                 class_export_put(exp);
903
904                 if (last_rcvd > le64_to_cpu(lsd->lsd_last_transno))
905                         lsd->lsd_last_transno = cpu_to_le64(last_rcvd);
906         }
907         OBD_FREE_PTR(lcd);
908
909         obd->obd_last_committed = le64_to_cpu(lsd->lsd_last_transno);
910 out:
911         obd->u.obt.obt_mount_count = mount_count + 1;
912         obd->u.obt.obt_instance = (__u32)obd->u.obt.obt_mount_count;
913         lsd->lsd_mount_count = cpu_to_le64(obd->u.obt.obt_mount_count);
914
915         /* save it, so mount count and last_transno is current */
916         rc = filter_update_server_data(obd);
917         if (rc)
918                 GOTO(err_client, rc);
919
920         RETURN(0);
921
922 err_client:
923         class_disconnect_exports(obd);
924 err_lut:
925         filter_free_server_data(&obd->u.obt);
926         RETURN(rc);
927 }
928
929 static int filter_cleanup_groups(struct obd_device *obd)
930 {
931         struct filter_obd *filter = &obd->u.filter;
932         struct file *filp;
933         struct dentry *dentry;
934         int i, j;
935         ENTRY;
936
937         if (filter->fo_dentry_O_groups != NULL) {
938                 for (i = 0; i < filter->fo_group_count; i++) {
939                         dentry = filter->fo_dentry_O_groups[i];
940                         if (dentry != NULL)
941                                 f_dput(dentry);
942                 }
943                 OBD_FREE(filter->fo_dentry_O_groups,
944                          filter->fo_group_count *
945                          sizeof(*filter->fo_dentry_O_groups));
946                 filter->fo_dentry_O_groups = NULL;
947         }
948         if (filter->fo_last_objid_files != NULL) {
949                 for (i = 0; i < filter->fo_group_count; i++) {
950                         filp = filter->fo_last_objid_files[i];
951                         if (filp != NULL)
952                                 filp_close(filp, 0);
953                 }
954                 OBD_FREE(filter->fo_last_objid_files,
955                          filter->fo_group_count *
956                          sizeof(*filter->fo_last_objid_files));
957                 filter->fo_last_objid_files = NULL;
958         }
959         if (filter->fo_dentry_O_sub != NULL) {
960                 for (i = 0; i < filter->fo_group_count; i++) {
961                         for (j = 0; j < filter->fo_subdir_count; j++) {
962                                 dentry = filter->fo_dentry_O_sub[i].dentry[j];
963                                 if (dentry != NULL)
964                                         f_dput(dentry);
965                         }
966                 }
967                 OBD_FREE(filter->fo_dentry_O_sub,
968                          filter->fo_group_count *
969                          sizeof(*filter->fo_dentry_O_sub));
970                 filter->fo_dentry_O_sub = NULL;
971         }
972         if (filter->fo_last_objids != NULL) {
973                 OBD_FREE(filter->fo_last_objids,
974                          filter->fo_group_count *
975                          sizeof(*filter->fo_last_objids));
976                 filter->fo_last_objids = NULL;
977         }
978         if (filter->fo_dentry_O != NULL) {
979                 f_dput(filter->fo_dentry_O);
980                 filter->fo_dentry_O = NULL;
981         }
982         RETURN(0);
983 }
984
985 static int filter_update_last_group(struct obd_device *obd, int group)
986 {
987         struct filter_obd *filter = &obd->u.filter;
988         struct file *filp = NULL;
989         int last_group = 0, rc;
990         loff_t off = 0;
991         ENTRY;
992
993         if (group <= filter->fo_committed_group)
994                 RETURN(0);
995
996         filp = filp_open("LAST_GROUP", O_RDWR, 0700);
997         if (IS_ERR(filp)) {
998                 rc = PTR_ERR(filp);
999                 filp = NULL;
1000                 CERROR("cannot open LAST_GROUP: rc = %d\n", rc);
1001                 GOTO(cleanup, rc);
1002         }
1003
1004         rc = fsfilt_read_record(obd, filp, &last_group, sizeof(__u32), &off);
1005         if (rc) {
1006                 CDEBUG(D_INODE, "error reading LAST_GROUP: rc %d\n",rc);
1007                 GOTO(cleanup, rc);
1008         }
1009
1010         CDEBUG(D_INODE, "%s: previous %d, new %d\n",
1011                obd->obd_name, last_group, group);
1012
1013         off = 0;
1014         last_group = group;
1015         /* must be sync: bXXXX */
1016         rc = fsfilt_write_record(obd, filp, &last_group, sizeof(__u32), &off, 1);
1017         if (rc) {
1018                 CDEBUG(D_INODE, "error updating LAST_GROUP: rc %d\n", rc);
1019                 GOTO(cleanup, rc);
1020         }
1021
1022         filter->fo_committed_group = group;
1023 cleanup:
1024         if (filp)
1025                 filp_close(filp, 0);
1026         RETURN(rc);
1027 }
1028
1029 static int filter_read_group_internal(struct obd_device *obd, int group,
1030                                       int create)
1031 {
1032         struct filter_obd *filter = &obd->u.filter;
1033         __u64 *new_objids = NULL;
1034         struct filter_subdirs *new_subdirs = NULL, *tmp_subdirs = NULL;
1035         struct dentry **new_groups = NULL;
1036         struct file **new_files = NULL;
1037         struct dentry *dentry;
1038         struct file *filp;
1039         int old_count = filter->fo_group_count, rc, stage = 0, i;
1040         char name[25];
1041         __u64 last_objid;
1042         loff_t off = 0;
1043         int len = group + 1;
1044
1045         snprintf(name, 24, "%d", group);
1046         name[24] = '\0';
1047
1048         if (!create) {
1049                 dentry = ll_lookup_one_len(name, filter->fo_dentry_O,
1050                                            strlen(name));
1051                 if (IS_ERR(dentry)) {
1052                         CERROR("Cannot lookup expected object group %d: %ld\n",
1053                                group, PTR_ERR(dentry));
1054                         RETURN(PTR_ERR(dentry));
1055                 }
1056         } else {
1057                 dentry = simple_mkdir(filter->fo_dentry_O,
1058                                       obd->u.obt.obt_vfsmnt, name, 0700, 1);
1059                 if (IS_ERR(dentry)) {
1060                         CERROR("cannot lookup/create O/%s: rc = %ld\n", name,
1061                                PTR_ERR(dentry));
1062                         RETURN(PTR_ERR(dentry));
1063                 }
1064         }
1065         stage = 1;
1066
1067         snprintf(name, 24, "O/%d/LAST_ID", group);
1068         name[24] = '\0';
1069         filp = filp_open(name, O_CREAT | O_RDWR, 0700);
1070         if (IS_ERR(filp)) {
1071                 CERROR("cannot create %s: rc = %ld\n", name, PTR_ERR(filp));
1072                 GOTO(cleanup, rc = PTR_ERR(filp));
1073         }
1074         stage = 2;
1075
1076         rc = fsfilt_read_record(obd, filp, &last_objid, sizeof(__u64), &off);
1077         if (rc) {
1078                 CDEBUG(D_INODE, "error reading %s: rc %d\n", name, rc);
1079                 GOTO(cleanup, rc);
1080         }
1081
1082         if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1083                 OBD_ALLOC(tmp_subdirs, sizeof(*tmp_subdirs));
1084                 if (tmp_subdirs == NULL)
1085                         GOTO(cleanup, rc = -ENOMEM);
1086                 stage = 3;
1087
1088                 for (i = 0; i < filter->fo_subdir_count; i++) {
1089                         char dir[20];
1090                         snprintf(dir, sizeof(dir), "d%u", i);
1091
1092                         tmp_subdirs->dentry[i] = simple_mkdir(dentry,
1093                                                               obd->u.obt.obt_vfsmnt,
1094                                                               dir, 0700, 1);
1095                         if (IS_ERR(tmp_subdirs->dentry[i])) {
1096                                 rc = PTR_ERR(tmp_subdirs->dentry[i]);
1097                                 CERROR("can't lookup/create O/%d/%s: rc = %d\n",
1098                                        group, dir, rc);
1099                                 GOTO(cleanup, rc);
1100                         }
1101
1102                         CDEBUG(D_INODE, "got/created O/%d/%s: %p\n", group, dir,
1103                                tmp_subdirs->dentry[i]);
1104                 }
1105         }
1106
1107         /* 'group' is an index; we need an array of length 'group + 1' */
1108         if (group + 1 > old_count) {
1109                 OBD_ALLOC(new_objids, len * sizeof(*new_objids));
1110                 OBD_ALLOC(new_subdirs, len * sizeof(*new_subdirs));
1111                 OBD_ALLOC(new_groups, len * sizeof(*new_groups));
1112                 OBD_ALLOC(new_files, len * sizeof(*new_files));
1113                 stage = 4;
1114                 if (new_objids == NULL || new_subdirs == NULL ||
1115                     new_groups == NULL || new_files == NULL)
1116                         GOTO(cleanup, rc = -ENOMEM);
1117
1118                 if (old_count) {
1119                         memcpy(new_objids, filter->fo_last_objids,
1120                                old_count * sizeof(*new_objids));
1121                         memcpy(new_subdirs, filter->fo_dentry_O_sub,
1122                                old_count * sizeof(*new_subdirs));
1123                         memcpy(new_groups, filter->fo_dentry_O_groups,
1124                                old_count * sizeof(*new_groups));
1125                         memcpy(new_files, filter->fo_last_objid_files,
1126                                old_count * sizeof(*new_files));
1127
1128                         OBD_FREE(filter->fo_last_objids,
1129                                  old_count * sizeof(*new_objids));
1130                         OBD_FREE(filter->fo_dentry_O_sub,
1131                                  old_count * sizeof(*new_subdirs));
1132                         OBD_FREE(filter->fo_dentry_O_groups,
1133                                  old_count * sizeof(*new_groups));
1134                         OBD_FREE(filter->fo_last_objid_files,
1135                                  old_count * sizeof(*new_files));
1136                 }
1137                 filter->fo_last_objids = new_objids;
1138                 filter->fo_dentry_O_sub = new_subdirs;
1139                 filter->fo_dentry_O_groups = new_groups;
1140                 filter->fo_last_objid_files = new_files;
1141                 filter->fo_group_count = len;
1142         }
1143
1144         filter->fo_dentry_O_groups[group] = dentry;
1145         filter->fo_last_objid_files[group] = filp;
1146         if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1147                 filter->fo_dentry_O_sub[group] = *tmp_subdirs;
1148                 OBD_FREE(tmp_subdirs, sizeof(*tmp_subdirs));
1149         }
1150
1151         filter_update_last_group(obd, group);
1152
1153         if (i_size_read(filp->f_dentry->d_inode) == 0) {
1154                 filter->fo_last_objids[group] = FILTER_INIT_OBJID;
1155                 rc = filter_update_last_objid(obd, group, 1);
1156                 RETURN(rc);
1157         }
1158
1159         filter->fo_last_objids[group] = le64_to_cpu(last_objid);
1160         CDEBUG(D_INODE, "%s: server last_objid group %d: "LPU64"\n",
1161                obd->obd_name, group, last_objid);
1162         RETURN(0);
1163  cleanup:
1164         switch (stage) {
1165         case 4:
1166                 if (new_objids != NULL)
1167                         OBD_FREE(new_objids, len * sizeof(*new_objids));
1168                 if (new_subdirs != NULL)
1169                         OBD_FREE(new_subdirs, len * sizeof(*new_subdirs));
1170                 if (new_groups != NULL)
1171                         OBD_FREE(new_groups, len * sizeof(*new_groups));
1172                 if (new_files != NULL)
1173                         OBD_FREE(new_files, len * sizeof(*new_files));
1174         case 3:
1175                 if (filter->fo_subdir_count && fid_seq_is_mdt(group)) {
1176                         for (i = 0; i < filter->fo_subdir_count; i++) {
1177                                 if (tmp_subdirs->dentry[i] != NULL)
1178                                         dput(tmp_subdirs->dentry[i]);
1179                         }
1180                         OBD_FREE(tmp_subdirs, sizeof(*tmp_subdirs));
1181                 }
1182         case 2:
1183                 filp_close(filp, 0);
1184         case 1:
1185                 dput(dentry);
1186         }
1187         RETURN(rc);
1188 }
1189
1190 static int filter_read_groups(struct obd_device *obd, int last_group,
1191                               int create)
1192 {
1193         struct filter_obd *filter = &obd->u.filter;
1194         int old_count, group, rc = 0;
1195
1196         cfs_mutex_lock(&filter->fo_init_lock);
1197         old_count = filter->fo_group_count;
1198         for (group = old_count; group <= last_group; group++) {
1199                 rc = filter_read_group_internal(obd, group, create);
1200                 if (rc != 0)
1201                         break;
1202         }
1203         cfs_mutex_unlock(&filter->fo_init_lock);
1204         return rc;
1205 }
1206
1207 /* FIXME: object groups */
1208 static int filter_prep_groups(struct obd_device *obd)
1209 {
1210         struct filter_obd *filter = &obd->u.filter;
1211         struct dentry *O_dentry;
1212         struct file *filp;
1213         int    last_group, rc = 0, cleanup_phase = 0;
1214         loff_t off = 0;
1215         ENTRY;
1216
1217         O_dentry = simple_mkdir(cfs_fs_pwd(current->fs), obd->u.obt.obt_vfsmnt,
1218                                 "O", 0700, 1);
1219         CDEBUG(D_INODE, "%s: got/created O: %p\n", obd->obd_name, O_dentry);
1220         if (IS_ERR(O_dentry)) {
1221                 rc = PTR_ERR(O_dentry);
1222                 CERROR("%s: cannot open/create O: rc = %d\n", obd->obd_name,rc);
1223                 GOTO(cleanup, rc);
1224         }
1225         filter->fo_dentry_O = O_dentry;
1226         cleanup_phase = 1; /* O_dentry */
1227
1228         /* we have to initialize all groups before first connections from
1229          * clients because they may send create/destroy for any group -bzzz */
1230         filp = filp_open("LAST_GROUP", O_CREAT | O_RDWR, 0700);
1231         if (IS_ERR(filp)) {
1232                 CERROR("%s: cannot create LAST_GROUP: rc = %ld\n",
1233                        obd->obd_name, PTR_ERR(filp));
1234                 GOTO(cleanup, rc = PTR_ERR(filp));
1235         }
1236         cleanup_phase = 2; /* filp */
1237
1238         rc = fsfilt_read_record(obd, filp, &last_group, sizeof(__u32), &off);
1239         if (rc) {
1240                 CERROR("%s: error reading LAST_GROUP: rc %d\n",
1241                        obd->obd_name, rc);
1242                 GOTO(cleanup, rc);
1243         }
1244
1245         if (off == 0)
1246                 last_group = FID_SEQ_OST_MDT0;
1247
1248         CDEBUG(D_INODE, "%s: initialize group %u (max %u)\n", obd->obd_name,
1249                FID_SEQ_OST_MDT0, last_group);
1250         filter->fo_committed_group = last_group;
1251         rc = filter_read_groups(obd, last_group, 1);
1252         if (rc)
1253                 GOTO(cleanup, rc);
1254
1255         filp_close(filp, 0);
1256         RETURN(0);
1257
1258  cleanup:
1259         switch (cleanup_phase) {
1260         case 2:
1261                 filp_close(filp, 0);
1262         case 1:
1263                 filter_cleanup_groups(obd);
1264                 f_dput(filter->fo_dentry_O);
1265                 filter->fo_dentry_O = NULL;
1266         default:
1267                 break;
1268         }
1269         return rc;
1270
1271 }
1272
1273 /* setup the object store with correct subdirectories */
1274 static int filter_prep(struct obd_device *obd)
1275 {
1276         struct lvfs_run_ctxt saved;
1277         struct filter_obd *filter = &obd->u.filter;
1278         struct file *file;
1279         struct inode *inode;
1280         int rc = 0;
1281         ENTRY;
1282
1283         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1284         file = filp_open(LAST_RCVD, O_RDWR | O_CREAT | O_LARGEFILE, 0700);
1285         if (!file || IS_ERR(file)) {
1286                 rc = PTR_ERR(file);
1287                 CERROR("OBD filter: cannot open/create %s: rc = %d\n",
1288                        LAST_RCVD, rc);
1289                 GOTO(out, rc);
1290         }
1291         obd->u.obt.obt_rcvd_filp = file;
1292         if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1293                 CERROR("%s is not a regular file!: mode = %o\n", LAST_RCVD,
1294                        file->f_dentry->d_inode->i_mode);
1295                 GOTO(err_filp, rc = -ENOENT);
1296         }
1297
1298         inode = file->f_dentry->d_parent->d_inode;
1299         /* We use i_op->unlink directly in filter_vfs_unlink() */
1300         if (!inode->i_op || !inode->i_op->create || !inode->i_op->unlink) {
1301                 CERROR("%s: filesystem does not support create/unlink ops\n",
1302                        obd->obd_name);
1303                 GOTO(err_filp, rc = -EOPNOTSUPP);
1304         }
1305
1306         rc = filter_init_server_data(obd, file);
1307         if (rc) {
1308                 CERROR("cannot read %s: rc = %d\n", LAST_RCVD, rc);
1309                 GOTO(err_filp, rc);
1310         }
1311         LASSERT(obd->u.obt.obt_lut);
1312         target_recovery_init(obd->u.obt.obt_lut, ost_handle);
1313
1314         /* open/create the health check I/O file */
1315         file = filp_open(HEALTH_CHECK, O_RDWR | O_CREAT, 0644);
1316         if (IS_ERR(file)) {
1317                 rc = PTR_ERR(file);
1318                 CERROR("OBD filter: cannot open/create %s rc = %d\n",
1319                        HEALTH_CHECK, rc);
1320                 GOTO(err_server_data, rc);
1321         }
1322         filter->fo_obt.obt_health_check_filp = file;
1323         if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
1324                 CERROR("%s is not a regular file!: mode = %o\n", HEALTH_CHECK,
1325                        file->f_dentry->d_inode->i_mode);
1326                 GOTO(err_health_check, rc = -ENOENT);
1327         }
1328         rc = lvfs_check_io_health(obd, file);
1329         if (rc)
1330                 GOTO(err_health_check, rc);
1331
1332         rc = filter_prep_groups(obd);
1333         if (rc)
1334                 GOTO(err_health_check, rc);
1335 out:
1336         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1337
1338         return(rc);
1339
1340 err_health_check:
1341         if (filp_close(filter->fo_obt.obt_health_check_filp, 0))
1342                 CERROR("can't close %s after error\n", HEALTH_CHECK);
1343         filter->fo_obt.obt_health_check_filp = NULL;
1344 err_server_data:
1345         target_recovery_fini(obd);
1346         filter_free_server_data(&obd->u.obt);
1347 err_filp:
1348         if (filp_close(obd->u.obt.obt_rcvd_filp, 0))
1349                 CERROR("can't close %s after error\n", LAST_RCVD);
1350         obd->u.obt.obt_rcvd_filp = NULL;
1351         goto out;
1352 }
1353
1354 /* cleanup the filter: write last used object id to status file */
1355 static void filter_post(struct obd_device *obd)
1356 {
1357         struct lvfs_run_ctxt saved;
1358         struct filter_obd *filter = &obd->u.filter;
1359         int rc, i;
1360
1361         /* XXX: filter_update_lastobjid used to call fsync_dev.  It might be
1362          * best to start a transaction with h_sync, because we removed this
1363          * from lastobjid */
1364
1365         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1366         rc = filter_update_server_data(obd);
1367         if (rc)
1368                 CERROR("error writing server data: rc = %d\n", rc);
1369
1370         for (i = 0; i < filter->fo_group_count; i++) {
1371                 rc = filter_update_last_objid(obd, i,
1372                                 (i == filter->fo_group_count - 1));
1373                 if (rc)
1374                         CERROR("error writing group %d lastobjid: rc = %d\n",
1375                                i, rc);
1376         }
1377
1378         rc = filp_close(obd->u.obt.obt_rcvd_filp, 0);
1379         obd->u.obt.obt_rcvd_filp = NULL;
1380         if (rc)
1381                 CERROR("error closing %s: rc = %d\n", LAST_RCVD, rc);
1382
1383         rc = filp_close(filter->fo_obt.obt_health_check_filp, 0);
1384         filter->fo_obt.obt_health_check_filp = NULL;
1385         if (rc)
1386                 CERROR("error closing %s: rc = %d\n", HEALTH_CHECK, rc);
1387
1388         filter_cleanup_groups(obd);
1389         filter_free_server_data(&obd->u.obt);
1390         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1391
1392         filter_free_capa_keys(filter);
1393         cleanup_capa_hash(filter->fo_capa_hash);
1394 }
1395
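/* Set the last allocated object id for @group; fo_objidlock keeps the
 * update atomic with respect to readers in filter_last_id(). */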
1396 static void filter_set_last_id(struct filter_obd *filter,
1397                                obd_id id, obd_seq group)
1398 {
1399         LASSERT(group <= filter->fo_group_count);
1400
1401         cfs_spin_lock(&filter->fo_objidlock);
1402         filter->fo_last_objids[group] = id;
1403         cfs_spin_unlock(&filter->fo_objidlock);
1404 }
1405
1406 obd_id filter_last_id(struct filter_obd *filter, obd_seq group)
1407 {
1408         obd_id id;
1409         LASSERT(group <= filter->fo_group_count);
1410         LASSERT(filter->fo_last_objids != NULL);
1411
1412         /* FIXME: object groups */
1413         cfs_spin_lock(&filter->fo_objidlock);
1414         id = filter->fo_last_objids[group];
1415         cfs_spin_unlock(&filter->fo_objidlock);
1416         return id;
1417 }
1418
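/* Lock the parent directory inode with the I_MUTEX_PARENT lockdep class
 * before operating on a child object. */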
1419 static int filter_lock_dentry(struct obd_device *obd, struct dentry *dparent)
1420 {
1421         mutex_lock_nested(&dparent->d_inode->i_mutex, I_MUTEX_PARENT);
1422         return 0;
1423 }
1424
1425 /* We never dget the object parent, so DON'T dput it either */
1426 struct dentry *filter_parent(struct obd_device *obd, obd_seq group, obd_id objid)
1427 {
1428         struct filter_obd *filter = &obd->u.filter;
1429         struct filter_subdirs *subdirs;
1430
1431         if (group >= filter->fo_group_count) /* FIXME: object groups */
1432                 return ERR_PTR(-EBADF);
1433
1434         if (!fid_seq_is_mdt(group) || filter->fo_subdir_count == 0)
1435                 return filter->fo_dentry_O_groups[group];
1436
1437         subdirs = &filter->fo_dentry_O_sub[group];
1438         return subdirs->dentry[objid & (filter->fo_subdir_count - 1)];
1439 }
1440
1441 /* We never dget the object parent, so DON'T dput it either */
1442 struct dentry *filter_parent_lock(struct obd_device *obd, obd_seq group,
1443                                   obd_id objid)
1444 {
1445         unsigned long now = jiffies;
1446         struct dentry *dparent = filter_parent(obd, group, objid);
1447         int rc;
1448
1449         if (IS_ERR(dparent))
1450                 return dparent;
1451         if (dparent == NULL)
1452                 return ERR_PTR(-ENOENT);
1453
1454         rc = filter_lock_dentry(obd, dparent);
1455         fsfilt_check_slow(obd, now, "parent lock");
1456         return rc ? ERR_PTR(rc) : dparent;
1457 }
1458
1459 /* We never dget the object parent, so DON'T dput it either */
1460 static void filter_parent_unlock(struct dentry *dparent)
1461 {
1462         mutex_unlock(&dparent->d_inode->i_mutex);
1463 }
1464
1465 /* How to get files, dentries, inodes from object id's.
1466  *
1467  * If dir_dentry is passed, the caller has already locked the parent
1468  * appropriately for this operation (normally a write lock).  If
1469  * dir_dentry is NULL, we do a read lock while we do the lookup to
1470  * avoid races with create/destroy and such changing the directory
1471  * internal to the filesystem code. */
1472 struct dentry *filter_fid2dentry(struct obd_device *obd,
1473                                  struct dentry *dir_dentry,
1474                                  obd_seq group, obd_id id)
1475 {
1476         struct dentry *dparent = dir_dentry;
1477         struct dentry *dchild;
1478         char name[32];
1479         int len;
1480         ENTRY;
1481
1482         if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT) &&
1483             obd->u.filter.fo_destroys_in_progress == 0) {
1484                 /* don't fail lookups for orphan recovery, it causes
1485                  * later LBUGs when objects still exist during precreate */
1486                 CDEBUG(D_INFO, "*** cfs_fail_loc=%x ***\n",OBD_FAIL_OST_ENOENT);
1487                 RETURN(ERR_PTR(-ENOENT));
1488         }
1489         if (id == 0) {
1490                 CERROR("fatal: invalid object id 0\n");
1491                 RETURN(ERR_PTR(-ESTALE));
1492         }
1493
1494         len = sprintf(name, LPU64, id);
1495         if (dir_dentry == NULL) {
1496                 dparent = filter_parent_lock(obd, group, id);
1497                 if (IS_ERR(dparent)) {
1498                         CERROR("%s: error getting object "POSTID
1499                                " parent: rc %ld\n", obd->obd_name,
1500                                id, group, PTR_ERR(dparent));
1501                         RETURN(dparent);
1502                 }
1503         }
1504         CDEBUG(D_INODE, "looking up object O/%.*s/%s\n",
1505                dparent->d_name.len, dparent->d_name.name, name);
1506         /* dparent is already locked here, so we cannot use ll_lookup_one_len() */
1507         dchild = lookup_one_len(name, dparent, len);
1508         if (dir_dentry == NULL)
1509                 filter_parent_unlock(dparent);
1510         if (IS_ERR(dchild)) {
1511                 CERROR("%s: object "LPU64":"LPU64" lookup error: rc %ld\n",
1512                        obd->obd_name, id, group, PTR_ERR(dchild));
1513                 RETURN(dchild);
1514         }
1515
1516         if (dchild->d_inode != NULL && is_bad_inode(dchild->d_inode)) {
1517                 CERROR("%s: got bad object "LPU64" inode %lu\n",
1518                        obd->obd_name, id, dchild->d_inode->i_ino);
1519                 f_dput(dchild);
1520                 RETURN(ERR_PTR(-ENOENT));
1521         }
1522
1523         CDEBUG(D_INODE, "got child objid %s: %p, count = %d\n",
1524                name, dchild, atomic_read(&dchild->d_count));
1525
1526         LASSERT(atomic_read(&dchild->d_count) > 0);
1527
1528         RETURN(dchild);
1529 }
1530
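/* Take a server-side PW extent lock over the whole object so clients drop
 * any cached pages (LDLM_AST_DISCARD_DATA) before the object is unlinked;
 * the lock is released again in filter_fini_destroy(). */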
1531 static int filter_prepare_destroy(struct obd_device *obd, obd_id objid,
1532                                   obd_id group, struct lustre_handle *lockh)
1533 {
1534         int flags = LDLM_AST_DISCARD_DATA, rc;
1535         struct ldlm_res_id res_id;
1536         ldlm_policy_data_t policy = { .l_extent = { 0, OBD_OBJECT_EOF } };
1537         ENTRY;
1538
1539         osc_build_res_name(objid, group, &res_id);
1540         /* Tell the clients that the object is gone now and that they should
1541          * throw away any cached pages. */
1542         rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_EXTENT,
1543                                     &policy, LCK_PW, &flags, ldlm_blocking_ast,
1544                                     ldlm_completion_ast, NULL, NULL, 0, NULL,
1545                                     lockh);
1546         if (rc != ELDLM_OK)
1547                 lockh->cookie = 0;
1548         RETURN(rc);
1549 }
1550
1551 static void filter_fini_destroy(struct obd_device *obd,
1552                                 struct lustre_handle *lockh)
1553 {
1554         if (lustre_handle_is_used(lockh))
1555                 ldlm_lock_decref(lockh, LCK_PW);
1556 }
1557
1558 /* This is vfs_unlink() without down(i_sem).  If we call regular vfs_unlink()
1559  * we have 2.6 lock ordering issues with filter_commitrw_write() as it takes
1560  * i_sem before starting a handle, while filter_destroy() + vfs_unlink do the
1561  * reverse.  Caller must take i_sem before starting the transaction and we
1562  * drop it here before the inode is removed from the dentry.  bug 4180/6984 */
1563 int filter_vfs_unlink(struct inode *dir, struct dentry *dentry,
1564                       struct vfsmount *mnt)
1565 {
1566         int rc;
1567         ENTRY;
1568
1569         /* don't need dir->i_zombie for 2.4, it is for rename/unlink of the dir
1570          * itself; we already hold dir->i_mutex for child create/unlink ops */
1571         LASSERT(dentry->d_inode != NULL);
1572         LASSERT(mutex_trylock(&dir->i_mutex) == 0);
1573         LASSERT(mutex_trylock(&dentry->d_inode->i_mutex) == 0);
1574
1575
1576         /* may_delete() */
1577         if (/*!dentry->d_inode ||*/dentry->d_parent->d_inode != dir)
1578                 GOTO(out, rc = -ENOENT);
1579
1580         rc = ll_permission(dir, MAY_WRITE | MAY_EXEC, NULL);
1581         if (rc)
1582                 GOTO(out, rc);
1583
1584         if (IS_APPEND(dir))
1585                 GOTO(out, rc = -EPERM);
1586
1587         /* check_sticky() */
1588         if ((dentry->d_inode->i_uid != cfs_curproc_fsuid() &&
1589              !cfs_capable(CFS_CAP_FOWNER)) || IS_APPEND(dentry->d_inode) ||
1590             IS_IMMUTABLE(dentry->d_inode))
1591                 GOTO(out, rc = -EPERM);
1592
1593         /* Locking order: i_mutex -> journal_lock -> dqptr_sem. LU-952 */
1594         ll_vfs_dq_init(dir);
1595
1596         rc = ll_security_inode_unlink(dir, dentry, mnt);
1597         if (rc)
1598                 GOTO(out, rc);
1599
1600         rc = dir->i_op->unlink(dir, dentry);
1601 out:
1602         /* need to drop i_mutex before we lose inode reference */
1603         mutex_unlock(&dentry->d_inode->i_mutex);
1604         if (rc == 0)
1605                 d_delete(dentry);
1606
1607         RETURN(rc);
1608 }
1609
1610 /* Caller must hold LCK_PW on parent and push us into kernel context.
1611  * Caller must hold child i_mutex, we drop it always.
1612  * Caller is also required to ensure that dchild->d_inode exists. */
1613 static int filter_destroy_internal(struct obd_device *obd, obd_id objid,
1614                                    obd_seq group, struct dentry *dparent,
1615                                    struct dentry *dchild)
1616 {
1617         struct inode *inode = dchild->d_inode;
1618         int rc;
1619
1620         /* There should be 2 references to the inode:
1621          *  1) taken by filter_prepare_destroy
1622          *  2) taken by filter_destroy */
1623         if (inode->i_nlink != 1 || atomic_read(&inode->i_count) != 2) {
1624                 CERROR("destroying objid %.*s ino %lu nlink %lu count %d\n",
1625                        dchild->d_name.len, dchild->d_name.name, inode->i_ino,
1626                        (unsigned long)inode->i_nlink,
1627                        atomic_read(&inode->i_count));
1628         }
1629
1630         rc = filter_vfs_unlink(dparent->d_inode, dchild, obd->u.obt.obt_vfsmnt);
1631         if (rc)
1632                 CERROR("error unlinking objid %.*s: rc %d\n",
1633                        dchild->d_name.len, dchild->d_name.name, rc);
1634         return(rc);
1635 }
1636
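/* State carried through the interval-tree walk in filter_intent_cb():
 * the known LVB size, the glimpse victim chosen so far, and a flag that
 * stays set while only liblustre locks have been seen. */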
1637 struct filter_intent_args {
1638         struct ldlm_lock **victim;
1639         __u64 size;
1640         int *liblustre;
1641 };
1642
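/* interval_iterate_reverse() callback: among granted PW locks whose extent
 * reaches beyond the known file size, remember the one with the highest
 * extent start as the glimpse victim, skipping liblustre clients. */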
1643 static enum interval_iter filter_intent_cb(struct interval_node *n,
1644                                            void *args)
1645 {
1646         struct ldlm_interval *node = (struct ldlm_interval *)n;
1647         struct filter_intent_args *arg = (struct filter_intent_args*)args;
1648         __u64 size = arg->size;
1649         struct ldlm_lock **v = arg->victim;
1650         struct ldlm_lock *lck;
1651
1652         /* If the interval is lower than the current file size,
1653          * just break. */
1654         if (interval_high(n) <= size)
1655                 return INTERVAL_ITER_STOP;
1656
1657         cfs_list_for_each_entry(lck, &node->li_group, l_sl_policy) {
1658                 /* Don't send glimpse ASTs to liblustre clients.
1659                  * They aren't listening for them, and they do
1660                  * entirely synchronous I/O anyway. */
1661                 if (lck->l_export == NULL ||
1662                     lck->l_export->exp_libclient == 1)
1663                         continue;
1664
1665                 if (*arg->liblustre)
1666                         *arg->liblustre = 0;
1667
1668                 if (*v == NULL) {
1669                         *v = LDLM_LOCK_GET(lck);
1670                 } else if ((*v)->l_policy_data.l_extent.start <
1671                            lck->l_policy_data.l_extent.start) {
1672                         LDLM_LOCK_RELEASE(*v);
1673                         *v = LDLM_LOCK_GET(lck);
1674                 }
1675
1676                 /* the same policy group - every lock has the
1677                  * same extent, so we needn't check any further */
1678                 break;
1679         }
1680
1681         return INTERVAL_ITER_CONT;
1682 }
1683
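/* Intent policy for OST extent locks: if the extent policy finds no
 * conflict the lock is granted (except to liblustre clients); otherwise
 * the enqueue is aborted and a glimpse AST is sent to the top PW lock so
 * that the size returned in the reply LVB is up to date. */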
1684 static int filter_intent_policy(struct ldlm_namespace *ns,
1685                                 struct ldlm_lock **lockp, void *req_cookie,
1686                                 ldlm_mode_t mode, int flags, void *data)
1687 {
1688         struct ptlrpc_request *req = req_cookie;
1689         struct ldlm_lock *lock = *lockp, *l = NULL;
1690         struct ldlm_resource *res = lock->l_resource;
1691         ldlm_processing_policy policy;
1692         struct ost_lvb *res_lvb, *reply_lvb;
1693         struct ldlm_reply *rep;
1694         ldlm_error_t err;
1695         int idx, rc, tmpflags = 0, only_liblustre = 1;
1696         struct ldlm_interval_tree *tree;
1697         struct filter_intent_args arg;
1698         __u32 repsize[3] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
1699                            [DLM_LOCKREPLY_OFF]   = sizeof(*rep),
1700                            [DLM_REPLY_REC_OFF]   = sizeof(*reply_lvb) };
1701         struct ldlm_glimpse_work        gl_work;
1702         CFS_LIST_HEAD(gl_list);
1703         ENTRY;
1704
1705         policy = ldlm_get_processing_policy(res);
1706         LASSERT(policy != NULL);
1707         LASSERT(req != NULL);
1708
1709         rc = lustre_pack_reply(req, 3, repsize, NULL);
1710         if (rc)
1711                 RETURN(req->rq_status = rc);
1712
1713         rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
1714         LASSERT(rep != NULL);
1715
1716         reply_lvb = lustre_msg_buf(req->rq_repmsg, DLM_REPLY_REC_OFF,
1717                                    sizeof(*reply_lvb));
1718         LASSERT(reply_lvb != NULL);
1719
1720         //fixup_handle_for_resent_req(req, lock, &lockh);
1721
1722         /* Call the extent policy function to see if our request can be
1723          * granted, or is blocked.
1724          * If the OST lock has LDLM_FL_HAS_INTENT set, it means a glimpse
1725          * lock, and should not be granted if the lock will be blocked.
1726          */
1727
1728         if (flags & LDLM_FL_BLOCK_NOWAIT) {
1729                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_AGL_DELAY, 5);
1730
1731                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_AGL_NOLOCK))
1732                         RETURN(ELDLM_LOCK_ABORTED);
1733         }
1734
1735         LASSERT(ns == ldlm_res_to_ns(res));
1736         lock_res(res);
1737         rc = policy(lock, &tmpflags, 0, &err, NULL);
1738         check_res_locked(res);
1739
1740         /* The lock met with no resistance; we're finished. */
1741         if (rc == LDLM_ITER_CONTINUE) {
1742                 /* do not grant locks to the liblustre clients: they cannot
1743                  * handle ASTs robustly.  We need to do this while still
1744                  * holding lr_lock to avoid the lock remaining on the res_link
1745                  * list (and potentially being added to l_pending_list by an
1746                  * AST) when we are going to drop this lock ASAP. */
1747                 if (lock->l_export->exp_libclient ||
1748                     OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_GLIMPSE, 2)) {
1749                         ldlm_resource_unlink_lock(lock);
1750                         err = ELDLM_LOCK_ABORTED;
1751                 } else {
1752                         err = ELDLM_LOCK_REPLACED;
1753                 }
1754                 unlock_res(res);
1755                 RETURN(err);
1756         } else if (flags & LDLM_FL_BLOCK_NOWAIT) {
1757                 /* LDLM_FL_BLOCK_NOWAIT means it is for AGL. Do not send glimpse
1758                  * callback for glimpse size. The real size user will trigger
1759                  * the glimpse callback when necessary. */
1760                 unlock_res(res);
1761                 RETURN(ELDLM_LOCK_ABORTED);
1762         }
1763
1764         /* Do not grant any lock, but instead send GL callbacks.  The extent
1765          * policy nicely created a list of all PW locks for us.  We will choose
1766          * the highest of those which are larger than the size in the LVB, if
1767          * any, and perform a glimpse callback. */
1768         res_lvb = res->lr_lvb_data;
1769         LASSERT(res_lvb != NULL);
1770         *reply_lvb = *res_lvb;
1771
1772         /*
1773          * lr_lock guarantees that no new locks are granted, and,
1774          * therefore, that res->lr_lvb_data cannot increase beyond the
1775          * end of already granted lock. As a result, it is safe to
1776          * check against "stale" reply_lvb->lvb_size value without
1777          * res->lr_lvb_mutex.
1778          */
1779         arg.size = reply_lvb->lvb_size;
1780         arg.victim = &l;
1781         arg.liblustre = &only_liblustre;
1782         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1783                 tree = &res->lr_itree[idx];
1784                 if (tree->lit_mode == LCK_PR)
1785                         continue;
1786
1787                 interval_iterate_reverse(tree->lit_root,
1788                                          filter_intent_cb, &arg);
1789         }
1790         unlock_res(res);
1791
1792         /* There were no PW locks beyond the size in the LVB; finished. */
1793         if (l == NULL) {
1794                 if (only_liblustre) {
1795                         /* If we discovered a liblustre client with a PW lock,
1796                          * however, the LVB may be out of date!  The LVB is
1797                          * updated only on glimpse (which we don't do for
1798                          * liblustre clients) and cancel (which the client
1799                          * obviously has not yet done).  So if it has written
1800                          * data but kept the lock, the LVB is stale and needs
1801                          * to be updated from disk.
1802                          *
1803                          * Of course, this will all disappear when we switch to
1804                          * taking liblustre locks on the OST. */
1805                         ldlm_res_lvbo_update(res, NULL, 1);
1806                 }
1807                 RETURN(ELDLM_LOCK_ABORTED);
1808         }
1809
1810         /*
1811          * This check is for lock taken in filter_prepare_destroy() that does
1812          * not have l_glimpse_ast set. So the logic is: if there is a lock
1813          * with no l_glimpse_ast set, this object is being destroyed already.
1814          *
1815          * Hence, if you are grabbing DLM locks on the server, always set
1816          * non-NULL glimpse_ast (e.g., ldlm_request.c:ldlm_glimpse_ast()).
1817          */
1818         if (l->l_glimpse_ast == NULL) {
1819                 /* We are racing with unlink(); just return -ENOENT */
1820                 rep->lock_policy_res1 = -ENOENT;
1821                 goto out;
1822         }
1823
1824         LASSERTF(l->l_glimpse_ast != NULL, "l == %p", l);
1825
1826         /* Populate the gl_work structure.
1827          * Grab additional reference on the lock which will be released in
1828          * ldlm_work_gl_ast_lock() */
1829         gl_work.gl_lock = LDLM_LOCK_GET(l);
1830         /* The glimpse callback is sent to one single extent lock. As a result,
1831          * the gl_work list is just composed of one element */
1832         cfs_list_add_tail(&gl_work.gl_list, &gl_list);
1833         /* the ldlm_glimpse_work structure is allocated on the stack */
1834         gl_work.gl_flags = LDLM_GL_WORK_NOFREE;
1835
1836         rc = ldlm_glimpse_locks(res, &gl_list); /* this will update the LVB */
1837
1838         if (!cfs_list_empty(&gl_list))
1839                 LDLM_LOCK_RELEASE(l);
1840
1841         lock_res(res);
1842         *reply_lvb = *res_lvb;
1843         unlock_res(res);
1844
1845  out:
1846         LDLM_LOCK_RELEASE(l);
1847
1848         RETURN(ELDLM_LOCK_ABORTED);
1849 }
1850
1851 /*
1852  * per-obd_device iobuf pool.
1853  *
1854  * To avoid memory deadlocks in low-memory setups, the amount of dynamic
1855  * allocation in the write path has to be minimized (see bug 5137).
1856  *
1857  * Pages, niobuf_local's and niobuf_remote's are pre-allocated and attached to
1858  * OST threads (see ost_thread_{init,done}()).
1859  *
1860  * "iobuf's" used by the filter cannot be attached to an OST thread, however,
1861  * because at the OST layer there are (potentially) multiple obd_devices, of a
1862  * type unknown at the time of OST thread creation.
1863  *
1864  * We create a cfs_hash for struct filter_obd (->fo_iobuf_hash field) at
1865  * initialization; each OST thread creates its own iobuf on first access and
1866  * inserts it into ->fo_iobuf_hash with its thread ID as key, so the iobuf can
1867  * be found again by thread ID.
1868  *
1869  * Functions below
1870  *
1871  *     filter_iobuf_pool_init()
1872  *
1873  *     filter_iobuf_pool_done()
1874  *
1875  *     filter_iobuf_get()
1876  *
1877  * operate on this hash. They are "generic" in the sense that they do not
1878  * depend on the actual type of iobuf's (which depends on the kernel version).
1879  */
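
/*
 * Illustrative sketch only (not part of this file): a write-path caller
 * would typically obtain its per-thread iobuf roughly as follows, reusing
 * the same buffer for later bulk writes handled by that thread:
 *
 *	struct filter_iobuf *iobuf = filter_iobuf_get(&obd->u.filter, oti);
 *	if (iobuf == NULL)
 *		return -ENOMEM;
 */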
1880
1881 /*
1882  * destroy pool created by filter_iobuf_pool_init
1883  */
1884 static void filter_iobuf_pool_done(struct filter_obd *filter)
1885 {
1886         ENTRY;
1887
1888         if (filter->fo_iobuf_hash != NULL) {
1889                 cfs_hash_putref(filter->fo_iobuf_hash);
1890                 filter->fo_iobuf_hash = NULL;
1891         }
1892         EXIT;
1893 }
1894
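/* Reload the sptlrpc rules configured for this target, push the resulting
 * flavors to the existing exports, and swap the new rule set in under
 * fo_sptlrpc_lock. */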
1895 static int filter_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
1896 {
1897         struct filter_obd       *filter = &obd->u.filter;
1898         struct sptlrpc_rule_set  tmp_rset;
1899         int                      rc;
1900
1901         sptlrpc_rule_set_init(&tmp_rset);
1902         rc = sptlrpc_conf_target_get_rules(obd, &tmp_rset, initial);
1903         if (rc) {
1904                 CERROR("obd %s: failed get sptlrpc rules: %d\n",
1905                        obd->obd_name, rc);
1906                 return rc;
1907         }
1908
1909         sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
1910
1911         cfs_write_lock(&filter->fo_sptlrpc_lock);
1912         sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
1913         filter->fo_sptlrpc_rset = tmp_rset;
1914         cfs_write_unlock(&filter->fo_sptlrpc_lock);
1915
1916         return 0;
1917 }
1918
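/* cfs_hash callbacks for fo_iobuf_hash; entries are keyed by the 64-bit
 * value (CPT << 32 | thread id) built in filter_iobuf_get(). */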
1919 static unsigned
1920 filter_iobuf_hop_hash(cfs_hash_t *hs, const void *key, unsigned mask)
1921 {
1922         __u64   val = *((__u64 *)key);
1923
1924         return cfs_hash_long(val, hs->hs_cur_bits);
1925 }
1926
1927 static void *
1928 filter_iobuf_hop_key(cfs_hlist_node_t *hnode)
1929 {
1930         struct filter_iobuf     *pool;
1931
1932         pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
1933         return &pool->dr_hkey;
1934 }
1935
1936 static int
1937 filter_iobuf_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
1938 {
1939         struct filter_iobuf     *pool;
1940
1941         pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
1942         return pool->dr_hkey == *((__u64 *)key);
1943 }
1944
1945 static void *
1946 filter_iobuf_hop_object(cfs_hlist_node_t *hnode)
1947 {
1948         return cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
1949 }
1950
1951 static void
1952 filter_iobuf_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1953 {
1954         /* dummy, required by cfs_hash */
1955 }
1956
1957 static void
1958 filter_iobuf_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1959 {
1960         /* dummy, required by cfs_hash */
1961 }
1962
1963 static void
1964 filter_iobuf_hop_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1965 {
1966         struct filter_iobuf     *pool;
1967
1968         pool = cfs_hlist_entry(hnode, struct filter_iobuf, dr_hlist);
1969         filter_free_iobuf(pool);
1970 }
1971
1972 static struct cfs_hash_ops filter_iobuf_hops = {
1973         .hs_hash        = filter_iobuf_hop_hash,
1974         .hs_key         = filter_iobuf_hop_key,
1975         .hs_keycmp      = filter_iobuf_hop_keycmp,
1976         .hs_object      = filter_iobuf_hop_object,
1977         .hs_get         = filter_iobuf_hop_get,
1978         .hs_put_locked  = filter_iobuf_hop_put_locked,
1979         .hs_exit        = filter_iobuf_hop_exit
1980 };
1981
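/* Sizing of fo_iobuf_hash: current/maximum hash bits and bucket-lock bits
 * passed to cfs_hash_create() below. */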
1982 #define FILTER_IOBUF_HASH_BITS  9
1983 #define FILTER_IOBUF_HBKT_BITS  4
1984
1985 /*
1986  * pre-allocate pool of iobuf's to be used by filter_{prep,commit}rw_write().
1987  */
1988 static int filter_iobuf_pool_init(struct filter_obd *filter)
1989 {
1990         filter->fo_iobuf_hash = cfs_hash_create("filter_iobuf",
1991                                                 FILTER_IOBUF_HASH_BITS,
1992                                                 FILTER_IOBUF_HASH_BITS,
1993                                                 FILTER_IOBUF_HBKT_BITS, 0,
1994                                                 CFS_HASH_MIN_THETA,
1995                                                 CFS_HASH_MAX_THETA,
1996                                                 &filter_iobuf_hops,
1997                                                 CFS_HASH_RW_BKTLOCK |
1998                                                 CFS_HASH_NO_ITEMREF);
1999
2000         return filter->fo_iobuf_hash != NULL ? 0 : -ENOMEM;
2001 }
2002
2003 /* Return the iobuf allocated for @thread_id.
2004  * If we haven't allocated a pool entry for this thread before, do so now and
2005  * insert it into fo_iobuf_hash; otherwise look it up in fo_iobuf_hash. */
2006 void *filter_iobuf_get(struct filter_obd *filter, struct obd_trans_info *oti)
2007 {
2008         struct filter_iobuf     *pool = NULL;
2009         __u64                   key = 0;
2010         int                     thread_id;
2011         int                     rc;
2012
2013         thread_id = (oti && oti->oti_thread) ? oti->oti_thread->t_id : -1;
2014         if (thread_id >= 0) {
2015                 struct ptlrpc_service_part *svcpt;
2016
2017                 svcpt = oti->oti_thread->t_svcpt;
2018                 LASSERT(svcpt != NULL);
2019
2020                 key = (__u64)(svcpt->scp_cpt) << 32 | thread_id;
2021                 pool = cfs_hash_lookup(filter->fo_iobuf_hash, &key);
2022                 if (pool != NULL)
2023                         return pool;
2024         }
2025
2026         pool = filter_alloc_iobuf(filter, OBD_BRW_WRITE, PTLRPC_MAX_BRW_PAGES);
2027         if (pool == NULL)
2028                 return NULL;
2029
2030         if (thread_id >= 0) {
2031                 pool->dr_hkey = key;
2032                 rc = cfs_hash_add_unique(filter->fo_iobuf_hash,
2033                                          &key, &pool->dr_hlist);
2034                 /* the ptlrpc service should guarantee the thread ID is unique */
2035                 LASSERT(rc != -EALREADY);
2036         }
2037
2038         return pool;
2039 }
2040
2041 /* mount the file system (secretly).  lustre_cfg parameters are:
2042  * 1 = device
2043  * 2 = fstype
2044  * 3 = flags: failover=f, failout=n
2045  * 4 = mount options
2046  */
2047 int filter_common_setup(struct obd_device *obd, struct lustre_cfg* lcfg,
2048                         void *option)
2049 {
2050         struct filter_obd *filter = &obd->u.filter;
2051         struct vfsmount *mnt;
2052         struct file_system_type *type;
2053         struct lustre_mount_info *lmi;
2054         struct obd_uuid uuid;
2055         __u8 *uuid_ptr;
2056         char *str, *label;
2057         char ns_name[48];
2058         struct request_queue *q;
2059         int rc, i;
2060         ENTRY;
2061
2062         if (lcfg->lcfg_bufcount < 3 ||
2063             LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
2064             LUSTRE_CFG_BUFLEN(lcfg, 2) < 1)
2065                 RETURN(-EINVAL);
2066
2067         lmi = server_get_mount(obd->obd_name);
2068         if (lmi) {
2069                 /* We already mounted in lustre_fill_super.
2070                  * lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored. */
2071                 struct lustre_sb_info *lsi = s2lsi(lmi->lmi_sb);
2072                 mnt = lmi->lmi_mnt;
2073                 obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
2074         } else {
2075                 /* old path - used by lctl */
2076                 CERROR("Using old MDS mount method\n");
2077                 type = get_fs_type(lustre_cfg_string(lcfg, 2));
2078                 if (!type) {
2079                         CERROR("get_fs_type failed\n");
2080                         RETURN(-ENODEV);
2081                 }
2082                 mnt = vfs_kern_mount(type, MS_NOATIME|MS_NODIRATIME,
2083                                      lustre_cfg_string(lcfg, 1), option);
2084                 cfs_module_put(type->owner);
2085                 if (IS_ERR(mnt)) {
2086                         rc = PTR_ERR(mnt);
2087                         LCONSOLE_ERROR_MSG(0x135, "Can't mount disk %s (%d)\n",
2088                                            lustre_cfg_string(lcfg, 1), rc);
2089                         RETURN(rc);
2090                 }
2091
2092                 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
2093         }
2094         if (IS_ERR(obd->obd_fsops))
2095                 GOTO(err_mntput, rc = PTR_ERR(obd->obd_fsops));
2096
2097         rc = filter_iobuf_pool_init(filter);
2098         if (rc != 0)
2099                 GOTO(err_ops, rc);
2100
2101         if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
2102                 CERROR("%s: Underlying device is marked as read-only. "
2103                        "Setup failed\n", obd->obd_name);
2104                 GOTO(err_ops, rc = -EROFS);
2105         }
2106
2107         /* failover is the default */
2108         obd->obd_replayable = 1;
2109
2110         /* disable connection until configuration finishes */
2111         obd->obd_no_conn = 1;
2112
2113         if (lcfg->lcfg_bufcount > 3 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
2114                 str = lustre_cfg_string(lcfg, 3);
2115                 if (strchr(str, 'n')) {
2116                         CWARN("%s: recovery disabled\n", obd->obd_name);
2117                         obd->obd_replayable = 0;
2118                 }
2119         }
2120
2121         obd->u.obt.obt_magic = OBT_MAGIC;
2122         obd->u.obt.obt_vfsmnt = mnt;
2123         obd->u.obt.obt_sb = mnt->mnt_sb;
2124         filter->fo_fstype = mnt->mnt_sb->s_type->name;
2125         CDEBUG(D_SUPER, "%s: mnt = %p\n", filter->fo_fstype, mnt);
2126
2127         rc = fsfilt_setup(obd, obd->u.obt.obt_sb);
2128         if (rc)
2129                 GOTO(err_ops, rc);
2130
2131         OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
2132         obd->obd_lvfs_ctxt.pwdmnt = mnt;
2133         obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
2134         obd->obd_lvfs_ctxt.fs = get_ds();
2135         obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
2136
2137         cfs_mutex_init(&filter->fo_init_lock);
2138         filter->fo_committed_group = 0;
2139         filter->fo_destroys_in_progress = 0;
2140         for (i = 0; i < 32; i++)
2141                 cfs_mutex_init(&filter->fo_create_locks[i]);
2142
2143         cfs_spin_lock_init(&filter->fo_objidlock);
2144         CFS_INIT_LIST_HEAD(&filter->fo_export_list);
2145         cfs_mutex_init(&filter->fo_alloc_lock);
2146         init_brw_stats(&filter->fo_filter_stats);
2147         cfs_spin_lock_init(&filter->fo_flags_lock);
2148         filter->fo_read_cache = 1; /* enable read-only cache by default */
2149         filter->fo_writethrough_cache = 1; /* enable writethrough cache */
2150         filter->fo_readcache_max_filesize = FILTER_MAX_CACHE_SIZE;
2151         filter->fo_fmd_max_num = FILTER_FMD_MAX_NUM_DEFAULT;
2152         filter->fo_fmd_max_age = FILTER_FMD_MAX_AGE_DEFAULT;
2153         filter->fo_syncjournal = 0; /* Don't sync journals on i/o by default */
2154         filter_slc_set(filter); /* initialize sync on lock cancel */
2155
2156         rc = filter_prep(obd);
2157         if (rc)
2158                 GOTO(err_ops, rc);
2159
2160         CFS_INIT_LIST_HEAD(&filter->fo_llog_list);
2161         cfs_spin_lock_init(&filter->fo_llog_list_lock);
2162
2163         filter->fo_fl_oss_capa = 1;
2164
2165         CFS_INIT_LIST_HEAD(&filter->fo_capa_keys);
2166         filter->fo_capa_hash = init_capa_hash();
2167         if (filter->fo_capa_hash == NULL)
2168                 GOTO(err_post, rc = -ENOMEM);
2169
2170         sprintf(ns_name, "filter-%s", obd->obd_uuid.uuid);
2171         obd->obd_namespace = ldlm_namespace_new(obd, ns_name,
2172                                                 LDLM_NAMESPACE_SERVER,
2173                                                 LDLM_NAMESPACE_GREEDY,
2174                                                 LDLM_NS_TYPE_OST);
2175         if (obd->obd_namespace == NULL)
2176                 GOTO(err_post, rc = -ENOMEM);
2177         obd->obd_namespace->ns_lvbp = obd;
2178         obd->obd_namespace->ns_lvbo = &filter_lvbo;
2179         ldlm_register_intent(obd->obd_namespace, filter_intent_policy);
2180
2181         ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2182                            "filter_ldlm_cb_client", &obd->obd_ldlm_client);
2183
2184         rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
2185         if (rc) {
2186                 CERROR("failed to setup llogging subsystems\n");
2187                 GOTO(err_post, rc);
2188         }
2189
2190         cfs_rwlock_init(&filter->fo_sptlrpc_lock);
2191         sptlrpc_rule_set_init(&filter->fo_sptlrpc_rset);
2192         /* do this after the llog has been initialized */
2193         filter_adapt_sptlrpc_conf(obd, 1);
2194
2195         rc = lquota_setup(filter_quota_interface_ref, obd);
2196         if (rc)
2197                 GOTO(err_post, rc);
2198
2199         q = bdev_get_queue(mnt->mnt_sb->s_bdev);
2200         if (queue_max_sectors(q) < queue_max_hw_sectors(q) &&
2201             queue_max_sectors(q) < PTLRPC_MAX_BRW_SIZE >> 9)
2202                 LCONSOLE_INFO("%s: underlying device %s should be tuned "
2203                               "for larger I/O requests: max_sectors = %u "
2204                               "could be up to max_hw_sectors=%u\n",
2205                               obd->obd_name, mnt->mnt_sb->s_id,
2206                               queue_max_sectors(q), queue_max_hw_sectors(q));
2207
2208         uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2209         if (uuid_ptr != NULL) {
2210                 class_uuid_unparse(uuid_ptr, &uuid);
2211                 str = uuid.uuid;
2212         } else {
2213                 str = "no UUID";
2214         }
2215
2216         label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2217         LCONSOLE_INFO("%s: Now serving %s %s%s with recovery %s\n",
2218                       obd->obd_name, label ?: str, lmi ? "on " : "",
2219                       lmi ? s2lsi(lmi->lmi_sb)->lsi_lmd->lmd_dev : "",
2220                       obd->obd_replayable ? "enabled" : "disabled");
2221
2222         RETURN(0);
2223
2224 err_post:
2225         filter_post(obd);
2226 err_ops:
2227         fsfilt_put_ops(obd->obd_fsops);
2228         filter_iobuf_pool_done(filter);
2229 err_mntput:
2230         server_put_mount(obd->obd_name, mnt);
2231         obd->u.obt.obt_sb = 0;
2232         return rc;
2233 }
2234
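/* obd setup method for the obdfilter: register lprocfs entries and stats
 * first, then copy the mount options into a full page (see the SELinux
 * note below) and hand off to filter_common_setup(). */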
2235 static int filter_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
2236 {
2237         struct lprocfs_static_vars lvars;
2238         cfs_proc_dir_entry_t *entry;
2239         unsigned long addr;
2240         struct page *page;
2241         int rc;
2242         ENTRY;
2243
2244         CLASSERT(offsetof(struct obd_device, u.obt) ==
2245                  offsetof(struct obd_device, u.filter.fo_obt));
2246
2247         if (!LUSTRE_CFG_BUFLEN(lcfg, 1) || !LUSTRE_CFG_BUFLEN(lcfg, 2))
2248                 RETURN(-EINVAL);
2249
2250         /* lprocfs must be set up before the filter so state can be safely added
2251          * to /proc incrementally as the filter is set up */
2252         lprocfs_filter_init_vars(&lvars);
2253         rc = lprocfs_obd_setup(obd, lvars.obd_vars);
2254         if (rc) {
2255                 CERROR("%s: lprocfs_obd_setup failed: %d.\n",
2256                        obd->obd_name, rc);
2257                 RETURN(rc);
2258         }
2259
2260         rc = lprocfs_alloc_obd_stats(obd, LPROC_FILTER_LAST);
2261         if (rc) {
2262                 CERROR("%s: lprocfs_alloc_obd_stats failed: %d.\n",
2263                        obd->obd_name, rc);
2264                 GOTO(obd_cleanup, rc);
2265         }
2266
2267         /* Init obdfilter private stats here */
2268         lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_READ_BYTES,
2269                              LPROCFS_CNTR_AVGMINMAX, "read_bytes", "bytes");
2270         lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
2271                              LPROCFS_CNTR_AVGMINMAX, "write_bytes", "bytes");
2272         lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_GET_PAGE,
2273                              LPROCFS_CNTR_AVGMINMAX|LPROCFS_CNTR_STDDEV,
2274                              "get_page", "usec");
2275         lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_NO_PAGE,
2276                              LPROCFS_CNTR_AVGMINMAX, "get_page_failures", "num");
2277         lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS,
2278                              LPROCFS_CNTR_AVGMINMAX, "cache_access", "pages");
2279         lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_HIT,
2280                              LPROCFS_CNTR_AVGMINMAX, "cache_hit", "pages");
2281         lprocfs_counter_init(obd->obd_stats, LPROC_FILTER_CACHE_MISS,
2282                              LPROCFS_CNTR_AVGMINMAX, "cache_miss", "pages");
2283
2284         rc = lproc_filter_attach_seqstat(obd);
2285         if (rc) {
2286                 CERROR("%s: create seqstat failed: %d.\n", obd->obd_name, rc);
2287                 GOTO(free_obd_stats, rc);
2288         }
2289
2290         entry = lprocfs_register("exports", obd->obd_proc_entry, NULL, NULL);
2291         if (IS_ERR(entry)) {
2292                 rc = PTR_ERR(entry);
2293                 CERROR("%s: error %d setting up lprocfs for %s\n",
2294                        obd->obd_name, rc, "exports");
2295                 GOTO(free_obd_stats, rc);
2296         }
2297         obd->obd_proc_exports_entry = entry;
2298
2299         entry = lprocfs_add_simple(obd->obd_proc_exports_entry, "clear",
2300                                    lprocfs_nid_stats_clear_read,
2301                                    lprocfs_nid_stats_clear_write, obd, NULL);
2302         if (IS_ERR(entry)) {
2303                 rc = PTR_ERR(entry);
2304                 CERROR("%s: add proc entry 'clear' failed: %d.\n",
2305                        obd->obd_name, rc);
2306                 GOTO(free_obd_stats, rc);
2307         }
2308
2309         rc = lprocfs_job_stats_init(obd, LPROC_FILTER_STATS_LAST,
2310                                     filter_stats_counter_init);
2311         if (rc)
2312                 GOTO(remove_entry_clear, rc);
2313
2314         /* 2.6.9 selinux wants a full option page for do_kern_mount (bug6471) */
2315         OBD_PAGE_ALLOC(page, CFS_ALLOC_STD);
2316         if (!page)
2317                 GOTO(job_stats_fini, rc = -ENOMEM);
2318         addr = (unsigned long)cfs_page_address(page);
2319         clear_page((void *)addr);
2320         memcpy((void *)addr, lustre_cfg_buf(lcfg, 4),
2321                LUSTRE_CFG_BUFLEN(lcfg, 4));
2322         rc = filter_common_setup(obd, lcfg, (void *)addr);
2323         OBD_PAGE_FREE(page);
2324         if (rc) {
2325                 CERROR("%s: filter_common_setup failed: %d.\n",
2326                        obd->obd_name, rc);
2327                 GOTO(job_stats_fini, rc);
2328         }
2329
2330         RETURN(0);
2331
2332 job_stats_fini:
2333         lprocfs_job_stats_fini(obd);
2334 remove_entry_clear:
2335         lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
2336 free_obd_stats:
2337         lprocfs_free_obd_stats(obd);
2338 obd_cleanup:
2339         lprocfs_obd_cleanup(obd);
2340         return rc;
2341 }
2342
2343 static struct llog_operations filter_mds_ost_repl_logops;
2344
2345 static struct llog_operations filter_size_orig_logops = {
2346         .lop_setup   = llog_obd_origin_setup,
2347         .lop_cleanup = llog_obd_origin_cleanup,
2348         .lop_add     = llog_obd_origin_add
2349 };
2350
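/* Clean up the MDS_OST_REPL, SIZE_ORIG and CONFIG_ORIG llog contexts of an
 * obd_llog_group, returning the first error encountered. */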
2351 static int filter_olg_fini(struct obd_llog_group *olg)
2352 {
2353         struct llog_ctxt *ctxt;
2354         int rc = 0, rc2 = 0;
2355         ENTRY;
2356
2357         ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2358         if (ctxt)
2359                 rc = llog_cleanup(ctxt);
2360
2361         ctxt = llog_group_get_ctxt(olg, LLOG_SIZE_ORIG_CTXT);
2362         if (ctxt) {
2363                 rc2 = llog_cleanup(ctxt);
2364                 if (!rc)
2365                         rc = rc2;
2366         }
2367
2368         ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_ORIG_CTXT);
2369         if (ctxt) {
2370                 rc2 = llog_cleanup(ctxt);
2371                 if (!rc)
2372                         rc = rc2;
2373         }
2374
2375         RETURN(rc);
2376 }
2377
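/* Set up the replication and size-origin llog contexts for @olg; on
 * failure whatever was set up is undone via filter_olg_fini(). */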
2378 static int
2379 filter_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
2380                 struct obd_device *tgt)
2381 {
2382         int rc;
2383         ENTRY;
2384
2385         rc = llog_setup(obd, olg, LLOG_MDS_OST_REPL_CTXT, tgt, 0, NULL,
2386                         &filter_mds_ost_repl_logops);
2387         if (rc)
2388                 GOTO(cleanup, rc);
2389
2390         rc = llog_setup(obd, olg, LLOG_SIZE_ORIG_CTXT, tgt, 0, NULL,
2391                         &filter_size_orig_logops);
2392         if (rc)
2393                 GOTO(cleanup, rc);
2394         EXIT;
2395 cleanup:
2396         if (rc)
2397                 filter_olg_fini(olg);
2398         return rc;
2399 }
2400
2401 /**
2402  * Init the default olg, which is embedded in the obd_device, for the filter.
2403  */
2404 static int
2405 filter_default_olg_init(struct obd_device *obd, struct obd_llog_group *olg,
2406                         struct obd_device *tgt)
2407 {
2408         struct filter_obd *filter = &obd->u.filter;
2409         struct llog_ctxt *ctxt;
2410         int rc;
2411         ENTRY;
2412
2413         filter->fo_lcm = llog_recov_thread_init(obd->obd_name);
2414         if (!filter->fo_lcm)
2415                 RETURN(-ENOMEM);
2416
2417         filter_mds_ost_repl_logops = llog_client_ops;
2418         filter_mds_ost_repl_logops.lop_cancel = llog_obd_repl_cancel;
2419         filter_mds_ost_repl_logops.lop_connect = llog_obd_repl_connect;
2420         filter_mds_ost_repl_logops.lop_sync = llog_obd_repl_sync;
2421
2422         rc = filter_olg_init(obd, olg, tgt);
2423         if (rc)
2424                 GOTO(cleanup_lcm, rc);
2425
2426         rc = llog_setup(obd, olg, LLOG_CONFIG_ORIG_CTXT, tgt, 0, NULL,
2427                         &llog_lvfs_ops);
2428         if (rc)
2429                 GOTO(cleanup_olg, rc);
2430
2431         ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2432         if (!ctxt) {
2433                 CERROR("Can't get ctxt for %p:%x\n", olg,
2434                        LLOG_MDS_OST_REPL_CTXT);
2435                 GOTO(cleanup_olg, rc = -ENODEV);
2436         }
2437         ctxt->loc_lcm = lcm_get(filter->fo_lcm);
2438         ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
2439         llog_ctxt_put(ctxt);
2440
2441         RETURN(0);
2442 cleanup_olg:
2443         filter_olg_fini(olg);
2444 cleanup_lcm:
2445         llog_recov_thread_fini(filter->fo_lcm, 1);
2446         filter->fo_lcm = NULL;
2447         return rc;
2448 }
2449
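/* obd_llog_init() method: the default olg embedded in the obd_device gets
 * the full setup (including the llog recovery thread), while per-group
 * olgs reuse the already running fo_lcm. */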
2450 static int
2451 filter_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
2452                  struct obd_device *tgt, int *index)
2453 {
2454         struct filter_obd *filter = &obd->u.filter;
2455         struct llog_ctxt *ctxt;
2456         int rc;
2457         ENTRY;
2458
2459         LASSERT(olg != NULL);
2460         if (olg == &obd->obd_olg)
2461                 return filter_default_olg_init(obd, olg, tgt);
2462
2463         LASSERT(filter->fo_lcm != NULL);
2464         rc = filter_olg_init(obd, olg, tgt);
2465         if (rc)
2466                 RETURN(rc);
2467         ctxt = llog_group_get_ctxt(olg, LLOG_MDS_OST_REPL_CTXT);
2468         if (!ctxt) {
2469                 CERROR("Can't get ctxt for %p:%x\n", olg,
2470                        LLOG_MDS_OST_REPL_CTXT);
2471                 filter_olg_fini(olg);
2472                 RETURN(-ENODEV);
2473         }
2474         ctxt->llog_proc_cb = filter_recov_log_mds_ost_cb;
2475         ctxt->loc_lcm = lcm_get(filter->fo_lcm);
2476         llog_ctxt_put(ctxt);
2477         RETURN(rc);
2478 }
2479
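/* obd_llog_finish() method: flush llcds cached by the recovery thread,
 * drop the replication context's import reference, stop fo_lcm and then
 * clean up the default llog group. */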
2480 static int filter_llog_finish(struct obd_device *obd, int count)
2481 {
2482         struct filter_obd *filter = &obd->u.filter;
2483         struct llog_ctxt *ctxt;
2484         ENTRY;
2485
2486         ctxt = llog_group_get_ctxt(&obd->obd_olg, LLOG_MDS_OST_REPL_CTXT);
2487         if (ctxt) {
2488                 /*
2489                  * Make sure that no cached llcds are left in recov_thread.
2490                  * We actually do the sync at disconnect time, but the
2491                  * disconnect may never arrive, being marked rq_no_resend = 1.
2492                  */
2493                 llog_sync(ctxt, NULL, OBD_LLOG_FL_EXIT);
2494
2495                 /*
2496                  * Balance class_import_get() in llog_receptor_accept().
2497                  * This is safe to do, as the llog is already synchronized
2498                  * and its import may go away.
2499                  */
2500                 cfs_mutex_lock(&ctxt->loc_mutex);
2501                 if (ctxt->loc_imp) {
2502                         class_import_put(ctxt->loc_imp);
2503                         ctxt->loc_imp = NULL;
2504                 }
2505
2506                 if (filter->fo_lcm) {
2507                         llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
2508                         filter->fo_lcm = NULL;
2509                 }
2510
2511                 cfs_mutex_unlock(&ctxt->loc_mutex);
2512                 llog_ctxt_put(ctxt);
2513         }
2514
2515         RETURN(filter_olg_fini(&obd->obd_olg));
2516 }
2517
2518 /**
2519  * Find the group llog according to group index in the llog group list.
2520  */
2521 static struct obd_llog_group *
2522 filter_find_olg_internal(struct filter_obd *filter, int group)
2523 {
2524         struct obd_llog_group *olg;
2525
2526         LASSERT_SPIN_LOCKED(&filter->fo_llog_list_lock);
2527         cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
2528                 if (olg->olg_seq == group)
2529                         RETURN(olg);
2530         }
2531         RETURN(NULL);
2532 }
2533
2534 /**
2535  * Find the group llog according to the group index on the filter.
2536  */
2537 struct obd_llog_group *filter_find_olg(struct obd_device *obd, int group)
2538 {
2539         struct obd_llog_group *olg = NULL;
2540         struct filter_obd *filter;
2541
2542         filter = &obd->u.filter;
2543
2544         if (group == FID_SEQ_LLOG)
2545                 RETURN(&obd->obd_olg);
2546
2547         cfs_spin_lock(&filter->fo_llog_list_lock);
2548         olg = filter_find_olg_internal(filter, group);
2549         cfs_spin_unlock(&filter->fo_llog_list_lock);
2550
2551         RETURN(olg);
2552 }
2553 /**
2554  * Find the llog_group of the filter according to the group. If it cannot be
2555  * found, create the llog_group; this only happens when the MDS is being
2556  * synced with the OST.
2557  */
2558 struct obd_llog_group *filter_find_create_olg(struct obd_device *obd, int group)
2559 {
2560         struct obd_llog_group *olg = NULL, *olg_new = NULL;
2561         struct filter_obd *filter;
2562         int rc;
2563
2564         filter = &obd->u.filter;
2565
2566         if (group == FID_SEQ_LLOG)
2567                 RETURN(&obd->obd_olg);
2568
2569         OBD_ALLOC_PTR(olg_new);
2570         if (olg_new == NULL)
2571                RETURN(ERR_PTR(-ENOMEM));
2572
2573         cfs_spin_lock(&filter->fo_llog_list_lock);
2574         olg = filter_find_olg_internal(filter, group);
2575         if (olg) {
2576                 if (olg->olg_initializing) {
2577                         GOTO(out_unlock, olg = ERR_PTR(-EBUSY));
2578                 } else {
2579                         GOTO(out_unlock, olg);
2580                 }
2581         } else {
2582                 /* set as the newly allocated one */
2583                 olg = olg_new;
2584                 olg_new = NULL;
2585         }
2586
2587         llog_group_init(olg, group);
2588         cfs_list_add(&olg->olg_list, &filter->fo_llog_list);
2589         olg->olg_initializing = 1;
2590         cfs_spin_unlock(&filter->fo_llog_list_lock);
2591
2592         rc = obd_llog_init(obd, olg, obd, NULL);
2593         if (rc) {
2594                cfs_spin_lock(&filter->fo_llog_list_lock);
2595                cfs_list_del(&olg->olg_list);
2596                cfs_spin_unlock(&filter->fo_llog_list_lock);
2597                OBD_FREE_PTR(olg);
2598                GOTO(out, olg = ERR_PTR(-ENOMEM));
2599         }
2600         cfs_spin_lock(&filter->fo_llog_list_lock);
2601         olg->olg_initializing = 0;
2602         cfs_spin_unlock(&filter->fo_llog_list_lock);
2603         CDEBUG(D_OTHER, "%s: new llog group %u (0x%p)\n",
2604               obd->obd_name, group, olg);
2605 out:
2606         RETURN(olg);
2607
2608 out_unlock:
2609         cfs_spin_unlock(&filter->fo_llog_list_lock);
2610         if (olg_new)
2611                OBD_FREE_PTR(olg_new);
2612         goto out;
2613 }
2614
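/* Handle an llog connect from the MDS: find the llog group for the
 * requested sequence, flag that an MDS-OST sync is in progress and connect
 * the context to the given log id. */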
2615 static int filter_llog_connect(struct obd_export *exp,
2616                                struct llogd_conn_body *body)
2617 {
2618         struct obd_device *obd = exp->exp_obd;
2619         struct llog_ctxt *ctxt;
2620         struct obd_llog_group *olg;
2621         int rc;
2622         ENTRY;
2623
2624         CDEBUG(D_OTHER, "%s: LLog connect for: "LPX64"/"LPX64":%x\n",
2625                obd->obd_name, body->lgdc_logid.lgl_oid,
2626                body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
2627
2628         olg = filter_find_olg(obd, body->lgdc_logid.lgl_oseq);
2629         if (!olg) {
2630                 CERROR(" %s: can not find olg of group %d\n",
2631                        obd->obd_name, (int)body->lgdc_logid.lgl_oseq);
2632                 RETURN(-ENOENT);
2633         }
2634         llog_group_set_export(olg, exp);
2635
2636         ctxt = llog_group_get_ctxt(olg, body->lgdc_ctxt_idx);
2637         LASSERTF(ctxt != NULL, "ctxt is not null, ctxt idx %d \n",
2638                  body->lgdc_ctxt_idx);
2639
2640         CDEBUG(D_HA, "%s: Recovery from log "LPX64"/"LPX64":%x\n",
2641                obd->obd_name, body->lgdc_logid.lgl_oid,
2642                body->lgdc_logid.lgl_oseq, body->lgdc_logid.lgl_ogen);
2643
2644         cfs_spin_lock(&obd->u.filter.fo_flags_lock);
2645         obd->u.filter.fo_mds_ost_sync = 1;
2646         cfs_spin_unlock(&obd->u.filter.fo_flags_lock);
2647         rc = llog_connect(ctxt, &body->lgdc_logid,
2648                           &body->lgdc_gen, NULL);
2649         llog_ctxt_put(ctxt);
2650         if (rc != 0)
2651                 CERROR("failed to connect rc %d idx %d\n", rc,
2652                                 body->lgdc_ctxt_idx);
2653
2654         RETURN(rc);
2655 }
2656
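/* Finish the default llog setup and tear down every per-group llog group
 * created by filter_find_create_olg(). */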
2657 static int filter_llog_preclean(struct obd_device *obd)
2658 {
2659         struct obd_llog_group *olg, *tmp;
2660         struct filter_obd *filter;
2661         cfs_list_t  remove_list;
2662         int rc = 0;
2663         ENTRY;
2664
2665         rc = obd_llog_finish(obd, 0);
2666         if (rc)
2667                 CERROR("failed to cleanup llogging subsystem\n");
2668
2669         filter = &obd->u.filter;
2670         CFS_INIT_LIST_HEAD(&remove_list);
2671
2672         cfs_spin_lock(&filter->fo_llog_list_lock);
2673         while (!cfs_list_empty(&filter->fo_llog_list)) {
2674                 olg = cfs_list_entry(filter->fo_llog_list.next,
2675                                      struct obd_llog_group, olg_list);
2676                 cfs_list_del(&olg->olg_list);
2677                 cfs_list_add(&olg->olg_list, &remove_list);
2678         }
2679         cfs_spin_unlock(&filter->fo_llog_list_lock);
2680
2681         cfs_list_for_each_entry_safe(olg, tmp, &remove_list, olg_list) {
2682                 cfs_list_del_init(&olg->olg_list);
2683                 rc = filter_olg_fini(olg);
2684                 if (rc)
2685                         CERROR("failed to cleanup llogging subsystem for %u\n",
2686                                olg->olg_seq);
2687                 OBD_FREE_PTR(olg);
2688         }
2689
2690         RETURN(rc);
2691 }
2692
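/* Pre-cleanup: at the OBD_CLEANUP_EXPORTS stage stop recovery, wait for
 * exports and zombies to drain, then tear down llogs, /proc entries,
 * stats and quota. */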
2693 static int filter_precleanup(struct obd_device *obd,
2694                              enum obd_cleanup_stage stage)
2695 {
2696         int rc = 0;
2697         ENTRY;
2698
2699         switch(stage) {
2700         case OBD_CLEANUP_EARLY:
2701                 break;
2702         case OBD_CLEANUP_EXPORTS:
2703                 /* Stop recovery before namespace cleanup. */
2704                 target_recovery_fini(obd);
2705
2706                 obd_exports_barrier(obd);
2707                 obd_zombie_barrier();
2708
2709                 rc = filter_llog_preclean(obd);
2710                 lprocfs_job_stats_fini(obd);
2711                 lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
2712                 lprocfs_free_per_client_stats(obd);
2713                 lprocfs_obd_cleanup(obd);
2714                 lprocfs_free_obd_stats(obd);
2715                 lquota_cleanup(filter_quota_interface_ref, obd);
2716                 break;
2717         }
2718         RETURN(rc);
2719 }
2720
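/*
 * Final device teardown: free the LDLM namespace and sptlrpc rules,
 * flush filter state via filter_post(), turn quotas off, shrink the
 * dcache, release the mount, and drop the fsfilt ops and iobuf pool.
 */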
2721 static int filter_cleanup(struct obd_device *obd)
2722 {
2723         struct filter_obd *filter = &obd->u.filter;
2724         ENTRY;
2725
2726         if (obd->obd_fail)
2727                 LCONSOLE_WARN("%s: shutting down for failover; client state "
2728                               "will be preserved.\n", obd->obd_name);
2729
2730         ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2731         obd->obd_namespace = NULL;
2732
2733         sptlrpc_rule_set_free(&filter->fo_sptlrpc_rset);
2734
2735         if (obd->u.obt.obt_sb == NULL)
2736                 RETURN(0);
2737
2738         filter_post(obd);
2739
2740         ll_vfs_dq_off(obd->u.obt.obt_sb, 0);
2741         shrink_dcache_sb(obd->u.obt.obt_sb);
2742
2743         server_put_mount(obd->obd_name, obd->u.obt.obt_vfsmnt);
2744         obd->u.obt.obt_sb = NULL;
2745
2746         fsfilt_put_ops(obd->obd_fsops);
2747
2748         filter_iobuf_pool_done(filter);
2749
2750         LCONSOLE_INFO("OST %s has stopped.\n", obd->obd_name);
2751
2752         RETURN(0);
2753 }
2754
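/*
 * Connect-data negotiation shared by filter_connect() and
 * filter_reconnect(): record the client's object group, mask the
 * requested flags down to OST_CONNECT_SUPPORTED, hand out an initial
 * grant if OBD_CONNECT_GRANT is set, validate the OST index against
 * last_rcvd, clamp the bulk I/O size, and agree on checksum types.
 */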
2755 static int filter_connect_internal(struct obd_export *exp,
2756                                    struct obd_connect_data *data,
2757                                    int reconnect)
2758 {
2759         struct filter_export_data *fed = &exp->exp_filter_data;
2760
2761         if (!data)
2762                 RETURN(0);
2763
2764         CDEBUG(D_RPCTRACE, "%s: cli %s/%p ocd_connect_flags: "LPX64
2765                " ocd_version: %x ocd_grant: %d ocd_index: %u\n",
2766                exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
2767                data->ocd_connect_flags, data->ocd_version,
2768                data->ocd_grant, data->ocd_index);
2769
2770         if (fed->fed_group != 0 && fed->fed_group != data->ocd_group) {
2771                 CWARN("!!! This export (nid %s) used object group %d "
2772                        "earlier; now it's trying to use group %d!  This could "
2773                        "be a bug in the MDS. Please report to "
2774                        "http://bugs.whamcloud.com/\n",
2775                        obd_export_nid2str(exp), fed->fed_group,data->ocd_group);
2776                 RETURN(-EPROTO);
2777         }
2778         fed->fed_group = data->ocd_group;
2779
2780         data->ocd_connect_flags &= OST_CONNECT_SUPPORTED;
2781         exp->exp_connect_flags = data->ocd_connect_flags;
2782         data->ocd_version = LUSTRE_VERSION_CODE;
2783
2784         /* The SKIP_ORPHAN flag is only valid from an MDS connection. */
2785         if (data->ocd_connect_flags & OBD_CONNECT_MDS)
2786                 CDEBUG(D_HA, "%s: Received MDS connection for group %u\n",
2787                        exp->exp_obd->obd_name, data->ocd_group);
2788         else if (data->ocd_connect_flags & OBD_CONNECT_SKIP_ORPHAN)
2789                 RETURN(-EPROTO);
2790
2791         if (exp->exp_connect_flags & OBD_CONNECT_GRANT) {
2792                 struct filter_obd *filter = &exp->exp_obd->u.filter;
2793                 obd_size left, want;
2794
2795                 cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
2796                 left = filter_grant_space_left(exp);
2797                 want = data->ocd_grant;
2798                 filter_grant(exp, fed->fed_grant, want, left, (reconnect == 0));
2799                 data->ocd_grant = fed->fed_grant;
2800                 cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
2801
2802                 CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %d want: "
2803                        LPU64" left: "LPU64"\n", exp->exp_obd->obd_name,
2804                        exp->exp_client_uuid.uuid, exp,
2805                        data->ocd_grant, want, left);
2806
2807                 filter->fo_tot_granted_clients++;
2808         }
2809
2810         if (data->ocd_connect_flags & OBD_CONNECT_INDEX) {
2811                 struct lr_server_data *lsd = class_server_data(exp->exp_obd);
2812                 int index = le32_to_cpu(lsd->lsd_ost_index);
2813
2814                 if (!(lsd->lsd_feature_compat &
2815                       cpu_to_le32(OBD_COMPAT_OST))) {
2816                         /* this will only happen on the first connect */
2817                         lsd->lsd_ost_index = cpu_to_le32(data->ocd_index);
2818                         lsd->lsd_feature_compat |= cpu_to_le32(OBD_COMPAT_OST);
2819                         /* sync is not needed here as filter_client_add will
2820                          * set exp_need_sync flag */
2821                         filter_update_server_data(exp->exp_obd);
2822                 } else if (index != data->ocd_index) {
2823                         LCONSOLE_ERROR_MSG(0x136, "Connection from %s to index"
2824                                            " %u doesn't match actual OST index"
2825                                            " %u in last_rcvd file, bad "
2826                                            "configuration?\n",
2827                                            obd_export_nid2str(exp), index,
2828                                            data->ocd_index);
2829                         RETURN(-EBADF);
2830                 }
2831                 /* FIXME: Do the same with the MDS UUID and lsd_peeruuid.
2832                  * FIXME: We don't strictly need the COMPAT flag for that,
2833                  * FIXME: as lsd_peeruuid[0] will tell us if that is set.
2834                  * FIXME: We needed it for the index, as index 0 is valid. */
2835         }
2836
2837         if (OBD_FAIL_CHECK(OBD_FAIL_OST_BRW_SIZE)) {
2838                 data->ocd_brw_size = 65536;
2839         } else if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) {
2840                 data->ocd_brw_size = min(data->ocd_brw_size,
2841                                (__u32)(PTLRPC_MAX_BRW_PAGES << CFS_PAGE_SHIFT));
2842                 if (data->ocd_brw_size == 0) {
2843                         CERROR("%s: cli %s/%p ocd_connect_flags: "LPX64
2844                                " ocd_version: %x ocd_grant: %d ocd_index: %u "
2845                                "ocd_brw_size is unexpectedly zero, "
2846                                "network data corruption? "
2847                                "Refusing connection of this client\n",
2848                                 exp->exp_obd->obd_name,
2849                                 exp->exp_client_uuid.uuid,
2850                                 exp, data->ocd_connect_flags, data->ocd_version,
2851                                 data->ocd_grant, data->ocd_index);
2852                         RETURN(-EPROTO);
2853                 }
2854         }
2855
2856         if (data->ocd_connect_flags & OBD_CONNECT_CKSUM) {
2857                 __u32 cksum_types = data->ocd_cksum_types;
2858
2859                 /* The client sets in ocd_cksum_types the checksum types it
2860                  * supports.  Mask off the algorithms that the server does
2861                  * not support. */
2862                 data->ocd_cksum_types &= cksum_types_supported_server();
2863
2864                 /* 1.6.4 clients are not supported any more */
2865
2866                 CDEBUG(D_RPCTRACE, "%s: cli %s supports cksum type %x, return "
2867                                    "%x\n", exp->exp_obd->obd_name,
2868                                    obd_export_nid2str(exp), cksum_types,
2869                                    data->ocd_cksum_types);
2870         } else {
2871                 /* This client does not support OBD_CONNECT_CKSUM;
2872                  * fall back to CRC32. */
2873                 CDEBUG(D_RPCTRACE, "%s: cli %s does not support "
2874                                    "OBD_CONNECT_CKSUM, CRC32 will be used\n",
2875                                    exp->exp_obd->obd_name,
2876                                    obd_export_nid2str(exp));
2877         }
2878
2879         if (data->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
2880                 data->ocd_maxbytes = exp->exp_obd->u.obt.obt_sb->s_maxbytes;
2881
2882         RETURN(0);
2883 }
2884
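/*
 * Reconnect of an existing export: re-run the shared connect-data
 * negotiation with reconnect == 1 and refresh the per-export stats;
 * unlike a first connect, no new last_rcvd client slot is added here.
 */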
2885 static int filter_reconnect(const struct lu_env *env,
2886                             struct obd_export *exp, struct obd_device *obd,
2887                             struct obd_uuid *cluuid,
2888                             struct obd_connect_data *data,
2889                             void *localdata)
2890 {
2891         int rc;
2892         ENTRY;
2893
2894         if (exp == NULL || obd == NULL || cluuid == NULL)
2895                 RETURN(-EINVAL);
2896
2897         rc = filter_connect_internal(exp, data, 1);
2898         if (rc == 0)
2899                 filter_export_stats_init(obd, exp, localdata);
2900
2901         RETURN(rc);
2902 }
2903
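/*
 * First connect from a client: create the export, negotiate the
 * connect data, set up per-export stats, add the client to last_rcvd
 * when the device is replayable, and read in the object group state
 * for the client's group via filter_read_groups().
 */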
2904 static int filter_connect(const struct lu_env *env,
2905                           struct obd_export **exp, struct obd_device *obd,
2906                           struct obd_uuid *cluuid,
2907                           struct obd_connect_data *data, void *localdata)
2908 {
2909         struct lvfs_run_ctxt saved;
2910         struct lustre_handle conn = { 0 };
2911         struct obd_export *lexp;
2912         int rc;
2913         ENTRY;
2914
2915         if (exp == NULL || obd == NULL || cluuid == NULL)
2916                 RETURN(-EINVAL);
2917
2918         rc = class_connect(&conn, obd, cluuid);
2919         if (rc)
2920                 RETURN(rc);
2921         lexp = class_conn2export(&conn);
2922         LASSERT(lexp != NULL);
2923
2924         rc = filter_connect_internal(lexp, data, 0);
2925         if (rc)
2926                 GOTO(cleanup, rc);
2927
2928         filter_export_stats_init(obd, lexp, localdata);
2929         if (obd->obd_replayable) {
2930                 struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd;
2931                 LASSERT(lcd);
2932                 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
2933                 rc = filter_client_add(obd, lexp, -1);
2934                 if (rc)
2935                         GOTO(cleanup, rc);
2936         }
2937
2938         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2939         rc = filter_read_groups(obd, data->ocd_group, 1);
2940         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2941         if (rc != 0) {
2942                 CERROR("can't read group %u\n", data->ocd_group);
2943                 GOTO(cleanup, rc);
2944         }
2945
2946         GOTO(cleanup, rc);
2947
2948 cleanup:
2949         if (rc) {
2950                 class_disconnect(lexp);
2951                 *exp = NULL;
2952         } else {
2953                 *exp = lexp;
2954         }
2955
2956         RETURN(rc);
2957 }
2958
2959 /* Do extra sanity checks for grant accounting.  We do this at connect,
2960  * disconnect, and statfs RPC time, so it shouldn't be too bad.  We can
2961  * always get rid of it or turn it off when we know accounting is good. */
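/*
 * In other words, modulo races with in-flight requests, the per-export
 * counters must sum to the filter-wide totals:
 *
 *   sum(fed_grant + fed_pending) == fo_tot_granted
 *   sum(fed_pending)             == fo_tot_pending
 *   sum(fed_dirty)               == fo_tot_dirty
 *
 * and none of the totals may exceed the device size.
 */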
2962 static void filter_grant_sanity_check(struct obd_device *obd, const char *func)
2963 {
2964         struct filter_export_data *fed;
2965         struct obd_export *exp;
2966         obd_size maxsize = obd->obd_osfs.os_blocks * obd->obd_osfs.os_bsize;
2967         obd_size tot_dirty = 0, tot_pending = 0, tot_granted = 0;
2968         obd_size fo_tot_dirty, fo_tot_pending, fo_tot_granted;
2969
2970         if (cfs_list_empty(&obd->obd_exports))
2971                 return;
2972
2973         /* Skip this on large machines that do lots of mounts or unmounts;
2974          * walking every export under the spinlocks is too expensive. */
2975         if (obd->obd_num_exports > 100)
2976                 return;
2977
2978         cfs_spin_lock(&obd->obd_osfs_lock);
2979         cfs_spin_lock(&obd->obd_dev_lock);
2980         cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2981                 int error = 0;
2982                 fed = &exp->exp_filter_data;
2983                 if (fed->fed_grant < 0 || fed->fed_pending < 0 ||
2984                     fed->fed_dirty < 0)
2985                         error = 1;
2986                 if (maxsize > 0) { /* we may not have done a statfs yet */
2987                         LASSERTF(fed->fed_grant + fed->fed_pending <= maxsize,
2988                                  "%s: cli %s/%p %ld+%ld > "LPU64"\n", func,
2989                                  exp->exp_client_uuid.uuid, exp,
2990                                  fed->fed_grant, fed->fed_pending, maxsize);
2991                         LASSERTF(fed->fed_dirty <= maxsize,
2992                                  "%s: cli %s/%p %ld > "LPU64"\n", func,
2993                                  exp->exp_client_uuid.uuid, exp,
2994                                  fed->fed_dirty, maxsize);
2995                 }
2996                 if (error)
2997                         CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
2998                                obd->obd_name, exp->exp_client_uuid.uuid, exp,
2999                                fed->fed_dirty, fed->fed_pending,fed->fed_grant);
3000                 else
3001                         CDEBUG(D_CACHE, "%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
3002                                obd->obd_name, exp->exp_client_uuid.uuid, exp,
3003                                fed->fed_dirty, fed->fed_pending,fed->fed_grant);
3004                 tot_granted += fed->fed_grant + fed->fed_pending;
3005                 tot_pending += fed->fed_pending;
3006                 tot_dirty += fed->fed_dirty;
3007         }
3008         fo_tot_granted = obd->u.filter.fo_tot_granted;
3009         fo_tot_pending = obd->u.filter.fo_tot_pending;
3010         fo_tot_dirty = obd->u.filter.fo_tot_dirty;
3011         cfs_spin_unlock(&obd->obd_dev_lock);
3012         cfs_spin_unlock(&obd->obd_osfs_lock);
3013
3014         /* Do these checks outside the spinlocks so we don't kill the system */
3015         if (tot_granted != fo_tot_granted)
3016                 CERROR("%s: tot_granted "LPU64" != fo_tot_granted "LPU64"\n",
3017                        func, tot_granted, fo_tot_granted);
3018         if (tot_pending != fo_tot_pending)
3019                 CERROR("%s: tot_pending "LPU64" != fo_tot_pending "LPU64"\n",
3020                        func, tot_pending, fo_tot_pending);
3021         if (tot_dirty != fo_tot_dirty)
3022                 CERROR("%s: tot_dirty "LPU64" != fo_tot_dirty "LPU64"\n",
3023                        func, tot_dirty, fo_tot_dirty);
3024         if (tot_pending > tot_granted)
3025                 CERROR("%s: tot_pending "LPU64" > tot_granted "LPU64"\n",
3026                        func, tot_pending, tot_granted);
3027         if (tot_granted > maxsize)
3028                 CERROR("%s: tot_granted "LPU64" > maxsize "LPU64"\n",
3029                        func, tot_granted, maxsize);
3030         if (tot_dirty > maxsize)
3031                 CERROR("%s: tot_dirty "LPU64" > maxsize "LPU64"\n",
3032                        func, tot_dirty, maxsize);
3033 }
3034
3035 /* Remove this client from the grant accounting totals.  We also remove
3036  * the export from the obd device under the osfs and dev locks to ensure
3037  * that the filter_grant_sanity_check() calculations are always valid.
3038  * The client should do something similar when it invalidates its import. */
3039 static void filter_grant_discard(struct obd_export *exp)
3040 {
3041         struct obd_device *obd = exp->exp_obd;
3042         struct filter_obd *filter = &obd->u.filter;
3043         struct filter_export_data *fed = &exp->exp_filter_data;
3044
3045         cfs_spin_lock(&obd->obd_osfs_lock);
3046         LASSERTF(filter->fo_tot_granted >= fed->fed_grant,
3047                  "%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
3048                  obd->obd_name, filter->fo_tot_granted,
3049                  exp->exp_client_uuid.uuid, exp, fed->fed_grant);
3050         filter->fo_tot_granted -= fed->fed_grant;
3051         LASSERTF(filter->fo_tot_pending >= fed->fed_pending,
3052                  "%s: tot_pending "LPU64" cli %s/%p fed_pending %ld\n",
3053                  obd->obd_name, filter->fo_tot_pending,
3054                  exp->exp_client_uuid.uuid, exp, fed->fed_pending);
3055         /* fo_tot_pending is handled in filter_grant_commit as bulk finishes */
3056         LASSERTF(filter->fo_tot_dirty >= fed->fed_dirty,
3057                  "%s: tot_dirty "LPU64" cli %s/%p fed_dirty %ld\n",
3058                  obd->obd_name, filter->fo_tot_dirty,
3059                  exp->exp_client_uuid.uuid, exp, fed->fed_dirty);
3060         filter->fo_tot_dirty -= fed->fed_dirty;
3061         fed->fed_dirty = 0;
3062         fed->fed_grant = 0;
3063
3064         cfs_spin_unlock(&obd->obd_osfs_lock);
3065 }
3066
3067 static int filter_destroy_export(struct obd_export *exp)
3068 {
3069         struct filter_export_data *fed = &exp->exp_filter_data;
3070         ENTRY;
3071
3072         if (fed->fed_pending)
3073                 CERROR("%s: cli %s/%p has %ld pending on destroyed export\n",
3074                        exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
3075                        exp, fed->fed_pending);
3076
3077         lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
3078
3079         target_destroy_export(exp);
3080
3081         if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
3082                                      &exp->exp_client_uuid)))
3083                RETURN(0);
3084
3085         ldlm_destroy_export(exp);
3086         lut_client_free(exp);
3087
3088         if (!exp->exp_obd->obd_replayable)
3089                 fsfilt_sync(exp->exp_obd, exp->exp_obd->u.obt.obt_sb);
3090
3091         filter_grant_discard(exp);
3092         filter_fmd_cleanup(exp);
3093
3094         if (exp->exp_connect_flags & OBD_CONNECT_GRANT_SHRINK) {
3095                 struct filter_obd *filter = &exp->exp_obd->u.filter;
3096                 if (filter->fo_tot_granted_clients > 0)
3097                         filter->fo_tot_granted_clients--;
3098         }
3099
3100         if (!(exp->exp_flags & OBD_OPT_FORCE))
3101                 filter_grant_sanity_check(exp->exp_obd, __func__);
3102
3103         RETURN(0);
3104 }
3105
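/*
 * Flush pending llog cancels to the MDS, either for a single export
 * (dexp != NULL) or for every llog group on the device (dexp == NULL).
 */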
3106 static void filter_sync_llogs(struct obd_device *obd, struct obd_export *dexp)
3107 {
3108         struct obd_llog_group *olg_min, *olg;
3109         struct filter_obd *filter;
3110         int worked = -1, group;
3111         struct llog_ctxt *ctxt;
3112         ENTRY;
3113
3114         filter = &obd->u.filter;
3115
3116         /* We can't sync a log while holding the spinlock, and we do not
3117          * want to livelock, so do the following: loop over the MDS exports
3118          * in group order and skip the llogs that are already synced. -bzzz */
3119         do {
3120                 /* look for group with min. number, but > worked */
3121                 olg_min = NULL;
3122                 group = 1 << 30;
3123                 cfs_spin_lock(&filter->fo_llog_list_lock);
3124                 cfs_list_for_each_entry(olg, &filter->fo_llog_list, olg_list) {
3125                         if (olg->olg_seq <= worked) {
3126                                 /* this group is already synced */
3127                                 continue;
3128                         }
3129                         if (group < olg->olg_seq) {
3130                                 /* already found a group with a smaller number */
3131                                 continue;
3132                         }
3133                         /* store current minimal group */
3134                         olg_min = olg;
3135                         group = olg->olg_seq;
3136                 }
3137                 cfs_spin_unlock(&filter->fo_llog_list_lock);
3138
3139                 if (olg_min == NULL)
3140                         break;
3141
3142                 worked = olg_min->olg_seq;
3143                 if (olg_min->olg_exp &&
3144                     (dexp == olg_min->olg_exp || dexp == NULL)) {
3145                         int err;
3146                         ctxt = llog_group_get_ctxt(olg_min,
3147                                                    LLOG_MDS_OST_REPL_CTXT);
3148                         if (ctxt) {
3149                                 err = llog_sync(ctxt, olg_min->olg_exp, 0);
3150                                 llog_ctxt_put(ctxt);
3151                                 if (err) {
3152                                         CERROR("error flushing logs to MDS: "
3153                                                "rc %d\n", err);
3154                                 }
3155                         }
3156                 }
3157         } while (olg_min != NULL);
3158 }
3159
3160 /* Also incredibly similar to mds_disconnect */
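/* Sanity-check and discard the export's grant, flush its pending llog
 * cancels, disconnect it at the target level, and drop its last_rcvd
 * record unless it is expected back for recovery. */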
3161 static int filter_disconnect(struct obd_export *exp)
3162 {
3163         struct obd_device *obd = exp->exp_obd;
3164         int rc;
3165         ENTRY;
3166
3167         LASSERT(exp);
3168         class_export_get(exp);
3169
3170         if (!(exp->exp_flags & OBD_OPT_FORCE))
3171                 filter_grant_sanity_check(obd, __func__);
3172         filter_grant_discard(exp);
3173
3174         /* Flush any remaining cancel messages out to the target */
3175         filter_sync_llogs(obd, exp);
3176
3177         lquota_clearinfo(filter_quota_interface_ref, exp, exp->exp_obd);
3178
3179         rc = server_disconnect_export(exp);
3180
3181         /* Do not erase record for recoverable client. */
3182         if (obd->obd_replayable && (!obd->obd_fail || exp->exp_failed))
3183                 filter_client_del(exp);
3184         else
3185                 fsfilt_sync(obd, obd->u.obt.obt_sb);
3186
3187         class_export_put(exp);
3188         RETURN(rc);
3189 }
3190
3191 /* reverse import is changed, sync all cancels */
3192 static void filter_revimp_update(struct obd_export *exp)
3193 {
3194         ENTRY;
3195
3196         LASSERT(exp);
3197         class_export_get(exp);
3198
3199         /* flush any remaining cancel messages out to the target */
3200         filter_sync_llogs(exp->exp_obd, exp);
3201         class_export_put(exp);
3202         EXIT;
3203 }
3204
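/* Ping handler: use the opportunity to expire stale per-export file
 * metadata (fmd) entries. */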
3205 static int filter_ping(const struct lu_env *env, struct obd_export *exp)
3206 {
3207         filter_fmd_expire(exp);
3208         return 0;