Whamcloud - gitweb
2622a3503b245e1d8bc9ebbe140beb7ed44dcbee
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_handler.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osd/osd_handler.c
37  *
38  * Top-level entry points into osd module
39  *
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
42  */
43
44 #define DEBUG_SUBSYSTEM S_MDS
45
46 #include <linux/module.h>
47
48 /* LUSTRE_VERSION_CODE */
49 #include <lustre_ver.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/types.h>
52 /* prerequisite for linux/xattr.h */
53 #include <linux/fs.h>
54 /* XATTR_{REPLACE,CREATE} */
55 #include <linux/xattr.h>
56 /* simple_mkdir() */
57 #include <lvfs.h>
58
59 /*
60  * struct OBD_{ALLOC,FREE}*()
61  * OBD_FAIL_CHECK
62  */
63 #include <obd_support.h>
64 /* struct ptlrpc_thread */
65 #include <lustre_net.h>
66
67 /* fid_is_local() */
68 #include <lustre_fid.h>
69
70 #include "osd_internal.h"
71 #include "osd_igif.h"
72
73 /* llo_* api support */
74 #include <md_object.h>
75 #include <lustre_quota.h>
76
77 int ldiskfs_pdo = 1;
78 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
79                 "ldiskfs with parallel directory operations");
80
81 static const char dot[] = ".";
82 static const char dotdot[] = "..";
83 static const char remote_obj_dir[] = "REM_OBJ_DIR";
84
85 static const struct lu_object_operations      osd_lu_obj_ops;
86 static const struct dt_object_operations      osd_obj_ops;
87 static const struct dt_object_operations      osd_obj_ea_ops;
88 static const struct dt_object_operations      osd_obj_otable_it_ops;
89 static const struct dt_index_operations       osd_index_iam_ops;
90 static const struct dt_index_operations       osd_index_ea_ops;
91
92 static int osd_has_index(const struct osd_object *obj)
93 {
94         return obj->oo_dt.do_index_ops != NULL;
95 }
96
97 static int osd_object_invariant(const struct lu_object *l)
98 {
99         return osd_invariant(osd_obj(l));
100 }
101
102 /*
103  * Concurrency: doesn't matter
104  */
105 static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
106 {
107         return osd_oti_get(env)->oti_r_locks > 0;
108 }
109
110 /*
111  * Concurrency: doesn't matter
112  */
113 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
114 {
115         struct osd_thread_info *oti = osd_oti_get(env);
116         return oti->oti_w_locks > 0 && o->oo_owner == env;
117 }
118
119 /*
120  * Concurrency: doesn't access mutable data
121  */
122 static int osd_root_get(const struct lu_env *env,
123                         struct dt_device *dev, struct lu_fid *f)
124 {
125         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
126         return 0;
127 }
128
129 /*
130  * OSD object methods.
131  */
132
133 /*
134  * Concurrency: no concurrent access is possible that early in object
135  * life-cycle.
136  */
137 static struct lu_object *osd_object_alloc(const struct lu_env *env,
138                                           const struct lu_object_header *hdr,
139                                           struct lu_device *d)
140 {
141         struct osd_object *mo;
142
143         OBD_ALLOC_PTR(mo);
144         if (mo != NULL) {
145                 struct lu_object *l;
146
147                 l = &mo->oo_dt.do_lu;
148                 dt_object_init(&mo->oo_dt, NULL, d);
149                 mo->oo_dt.do_ops = &osd_obj_ea_ops;
150                 l->lo_ops = &osd_lu_obj_ops;
151                 init_rwsem(&mo->oo_sem);
152                 init_rwsem(&mo->oo_ext_idx_sem);
153                 spin_lock_init(&mo->oo_guard);
154                 return l;
155         } else {
156                 return NULL;
157         }
158 }
159
160 static int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
161                        struct dentry *dentry, struct lustre_mdt_attrs *lma)
162 {
163         int rc;
164
165         dentry->d_inode = inode;
166         rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA, (void *)lma,
167                                    sizeof(*lma));
168         if (rc == -ERANGE) {
169                 /* try with old lma size */
170                 rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA,
171                                            info->oti_mdt_attrs_old,
172                                            LMA_OLD_SIZE);
173                 if (rc > 0)
174                         memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
175         }
176         if (rc > 0) {
177                 /* Check LMA compatibility */
178                 if (lma->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP)) {
179                         CWARN("%.16s: unsupported incompat LMA feature(s) "
180                               "%lx/%#x\n",
181                               LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
182                               inode->i_ino, le32_to_cpu(lma->lma_incompat) &
183                                                         ~LMA_INCOMPAT_SUPP);
184                         rc = -ENOSYS;
185                 } else {
186                         lustre_lma_swab(lma);
187                         rc = 0;
188                 }
189         } else if (rc == 0) {
190                 rc = -ENODATA;
191         }
192
193         return rc;
194 }
195
196 /*
197  * retrieve object from backend ext fs.
198  **/
199 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
200                        struct osd_inode_id *id)
201 {
202         struct inode *inode = NULL;
203
204         inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
205         if (IS_ERR(inode)) {
206                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
207                        id->oii_ino, PTR_ERR(inode));
208         } else if (id->oii_gen != OSD_OII_NOGEN &&
209                    inode->i_generation != id->oii_gen) {
210                 CDEBUG(D_INODE, "unmatched inode: ino = %u, gen0 = %u, "
211                        "gen1 = %u\n",
212                        id->oii_ino, id->oii_gen, inode->i_generation);
213                 iput(inode);
214                 inode = ERR_PTR(-ESTALE);
215         } else if (inode->i_nlink == 0) {
216                 /* due to parallel readdir and unlink,
217                 * we can have dead inode here. */
218                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
219                 make_bad_inode(inode);
220                 iput(inode);
221                 inode = ERR_PTR(-ESTALE);
222         } else if (is_bad_inode(inode)) {
223                 CWARN("%.16s: bad inode: ino = %u\n",
224                 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
225                 iput(inode);
226                 inode = ERR_PTR(-ENOENT);
227         } else {
228                 if (id->oii_gen == OSD_OII_NOGEN)
229                         osd_id_gen(id, inode->i_ino, inode->i_generation);
230
231                 /* Do not update file c/mtime in ldiskfs.
232                  * NB: we don't have any lock to protect this because we don't
233                  * have reference on osd_object now, but contention with
234                  * another lookup + attr_set can't happen in the tiny window
235                  * between if (...) and set S_NOCMTIME. */
236                 if (!(inode->i_flags & S_NOCMTIME))
237                         inode->i_flags |= S_NOCMTIME;
238         }
239         return inode;
240 }
241
242 struct inode *osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
243                            struct osd_inode_id *id, struct lu_fid *fid)
244 {
245         struct lustre_mdt_attrs *lma   = &info->oti_mdt_attrs;
246         struct inode            *inode;
247         int                      rc;
248
249         inode = osd_iget(info, dev, id);
250         if (IS_ERR(inode))
251                 return inode;
252
253         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
254         if (rc == 0) {
255                 *fid = lma->lma_self_fid;
256         } else if (rc == -ENODATA) {
257                 LU_IGIF_BUILD(fid, inode->i_ino, inode->i_generation);
258         } else {
259                 iput(inode);
260                 inode = ERR_PTR(rc);
261         }
262         return inode;
263 }
264
265 static struct inode *
266 osd_iget_verify(struct osd_thread_info *info, struct osd_device *dev,
267                 struct osd_inode_id *id, const struct lu_fid *fid)
268 {
269         struct lustre_mdt_attrs *lma   = &info->oti_mdt_attrs;
270         struct inode            *inode;
271         int                      rc;
272
273         inode = osd_iget(info, dev, id);
274         if (IS_ERR(inode))
275                 return inode;
276
277         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
278         if (rc == -ENODATA)
279                 return inode;
280
281         if (rc != 0) {
282                 iput(inode);
283                 return ERR_PTR(rc);
284         }
285
286         if (!lu_fid_eq(fid, &lma->lma_self_fid)) {
287                 CDEBUG(D_LFSCK, "inconsistent obj: "DFID", %lu, "DFID"\n",
288                        PFID(&lma->lma_self_fid), inode->i_ino, PFID(fid));
289                 iput(inode);
290                 return ERR_PTR(-EREMCHG);
291         }
292
293         return inode;
294 }
295
296 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
297                           const struct lu_fid *fid,
298                           const struct lu_object_conf *conf)
299 {
300         struct osd_thread_info *info;
301         struct lu_device       *ldev   = obj->oo_dt.do_lu.lo_dev;
302         struct osd_device      *dev;
303         struct osd_idmap_cache *oic;
304         struct osd_inode_id    *id;
305         struct inode           *inode;
306         struct osd_scrub       *scrub;
307         struct scrub_file      *sf;
308         int                     result;
309         int                     verify = 0;
310         ENTRY;
311
312         LINVRNT(osd_invariant(obj));
313         LASSERT(obj->oo_inode == NULL);
314         LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID, PFID(fid));
315
316         dev = osd_dev(ldev);
317         scrub = &dev->od_scrub;
318         sf = &scrub->os_file;
319         info = osd_oti_get(env);
320         LASSERT(info);
321         oic = &info->oti_cache;
322
323         if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
324                 RETURN(-ENOENT);
325
326         /* Search order: 1. per-thread cache. */
327         if (lu_fid_eq(fid, &oic->oic_fid)) {
328                 id = &oic->oic_lid;
329                 goto iget;
330         }
331
332         id = &info->oti_id;
333         if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
334                 /* Search order: 2. OI scrub pending list. */
335                 result = osd_oii_lookup(dev, fid, id);
336                 if (result == 0)
337                         goto iget;
338         }
339
340         if (sf->sf_flags & SF_INCONSISTENT)
341                 verify = 1;
342
343         /*
344          * Objects are created as locking anchors or place holders for objects
345          * yet to be created. No need to osd_oi_lookup() at here because FID
346          * shouldn't never be re-used, if it's really a duplicate FID from
347          * unexpected reason, we should be able to detect it later by calling
348          * do_create->osd_oi_insert()
349          */
350         if (conf != NULL && (conf->loc_flags & LOC_F_NEW) != 0)
351                 GOTO(out, result = 0);
352
353         /* Search order: 3. OI files. */
354         result = osd_oi_lookup(info, dev, fid, id);
355         if (result == -ENOENT) {
356                 if (!fid_is_norm(fid) ||
357                     !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
358                                       sf->sf_oi_bitmap))
359                         GOTO(out, result = 0);
360
361                 goto trigger;
362         }
363
364         if (result != 0)
365                 GOTO(out, result);
366
367 iget:
368         if (verify == 0)
369                 inode = osd_iget(info, dev, id);
370         else
371                 inode = osd_iget_verify(info, dev, id, fid);
372         if (IS_ERR(inode)) {
373                 result = PTR_ERR(inode);
374                 if (result == -ENOENT || result == -ESTALE) {
375                         fid_zero(&oic->oic_fid);
376                         result = 0;
377                 } else if (result == -EREMCHG) {
378
379 trigger:
380                         if (thread_is_running(&scrub->os_thread)) {
381                                 result = -EINPROGRESS;
382                         } else if (!dev->od_noscrub) {
383                                 result = osd_scrub_start(dev);
384                                 LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
385                                                "for "DFID", rc = %d [1]\n",
386                                                LDISKFS_SB(osd_sb(dev))->s_es->\
387                                                s_volume_name,PFID(fid), result);
388                                 if (result == 0 || result == -EALREADY)
389                                         result = -EINPROGRESS;
390                                 else
391                                         result = -EREMCHG;
392                         }
393                 }
394
395                 GOTO(out, result);
396         }
397
398         obj->oo_inode = inode;
399         LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
400
401         obj->oo_compat_dot_created = 1;
402         obj->oo_compat_dotdot_created = 1;
403
404         if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
405                 GOTO(out, result = 0);
406
407         LASSERT(obj->oo_hl_head == NULL);
408         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
409         if (obj->oo_hl_head == NULL) {
410                 obj->oo_inode = NULL;
411                 iput(inode);
412                 GOTO(out, result = -ENOMEM);
413         }
414         GOTO(out, result = 0);
415
416 out:
417         LINVRNT(osd_invariant(obj));
418         return result;
419 }
420
421 /*
422  * Concurrency: shouldn't matter.
423  */
424 static void osd_object_init0(struct osd_object *obj)
425 {
426         LASSERT(obj->oo_inode != NULL);
427         obj->oo_dt.do_body_ops = &osd_body_ops;
428         obj->oo_dt.do_lu.lo_header->loh_attr |=
429                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
430 }
431
432 /*
433  * Concurrency: no concurrent access is possible that early in object
434  * life-cycle.
435  */
436 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
437                            const struct lu_object_conf *conf)
438 {
439         struct osd_object *obj = osd_obj(l);
440         int result;
441
442         LINVRNT(osd_invariant(obj));
443
444         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
445         obj->oo_dt.do_body_ops = &osd_body_ops_new;
446         if (result == 0) {
447                 if (obj->oo_inode != NULL) {
448                         osd_object_init0(obj);
449                 } else if (fid_is_otable_it(&l->lo_header->loh_fid)) {
450                         obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
451                         /* LFSCK iterator object is special without inode */
452                         l->lo_header->loh_attr |= LOHA_EXISTS;
453                 }
454         }
455         LINVRNT(osd_invariant(obj));
456         return result;
457 }
458
459 /*
460  * Concurrency: no concurrent access is possible that late in object
461  * life-cycle.
462  */
463 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
464 {
465         struct osd_object *obj = osd_obj(l);
466
467         LINVRNT(osd_invariant(obj));
468
469         dt_object_fini(&obj->oo_dt);
470         if (obj->oo_hl_head != NULL)
471                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
472         OBD_FREE_PTR(obj);
473 }
474
475 /*
476  * Concurrency: no concurrent access is possible that late in object
477  * life-cycle.
478  */
479 static void osd_index_fini(struct osd_object *o)
480 {
481         struct iam_container *bag;
482
483         if (o->oo_dir != NULL) {
484                 bag = &o->oo_dir->od_container;
485                 if (o->oo_inode != NULL) {
486                         if (bag->ic_object == o->oo_inode)
487                                 iam_container_fini(bag);
488                 }
489                 OBD_FREE_PTR(o->oo_dir);
490                 o->oo_dir = NULL;
491         }
492 }
493
494 /*
495  * Concurrency: no concurrent access is possible that late in object
496  * life-cycle (for all existing callers, that is. New callers have to provide
497  * their own locking.)
498  */
499 static int osd_inode_unlinked(const struct inode *inode)
500 {
501         return inode->i_nlink == 0;
502 }
503
504 enum {
505         OSD_TXN_OI_DELETE_CREDITS    = 20,
506         OSD_TXN_INODE_DELETE_CREDITS = 20
507 };
508
509 /*
510  * Journal
511  */
512
513 #if OSD_THANDLE_STATS
514 /**
515  * Set time when the handle is allocated
516  */
517 static void osd_th_alloced(struct osd_thandle *oth)
518 {
519         oth->oth_alloced = cfs_time_current();
520 }
521
522 /**
523  * Set time when the handle started
524  */
525 static void osd_th_started(struct osd_thandle *oth)
526 {
527         oth->oth_started = cfs_time_current();
528 }
529
530 /**
531  * Helper function to convert time interval to microseconds packed in
532  * long int.
533  */
534 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
535 {
536         struct timeval val;
537
538         cfs_duration_usec(cfs_time_sub(end, start), &val);
539         return val.tv_sec * 1000000 + val.tv_usec;
540 }
541
542 /**
543  * Check whether the we deal with this handle for too long.
544  */
545 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
546                                 cfs_time_t alloced, cfs_time_t started,
547                                 cfs_time_t closed)
548 {
549         cfs_time_t now = cfs_time_current();
550
551         LASSERT(dev != NULL);
552
553         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
554                             interval_to_usec(alloced, started));
555         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
556                             interval_to_usec(started, closed));
557         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
558                             interval_to_usec(closed, now));
559
560         if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
561                 CWARN("transaction handle %p was open for too long: "
562                       "now "CFS_TIME_T" ,"
563                       "alloced "CFS_TIME_T" ,"
564                       "started "CFS_TIME_T" ,"
565                       "closed "CFS_TIME_T"\n",
566                       oth, now, alloced, started, closed);
567                 libcfs_debug_dumpstack(NULL);
568         }
569 }
570
571 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
572 {                                                                       \
573         cfs_time_t __closed = cfs_time_current();                       \
574         cfs_time_t __alloced = oth->oth_alloced;                        \
575         cfs_time_t __started = oth->oth_started;                        \
576                                                                         \
577         expr;                                                           \
578         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
579 }
580
581 #else /* OSD_THANDLE_STATS */
582
583 #define osd_th_alloced(h)                  do {} while(0)
584 #define osd_th_started(h)                  do {} while(0)
585 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
586
587 #endif /* OSD_THANDLE_STATS */
588
589 /*
590  * Concurrency: doesn't access mutable data.
591  */
592 static int osd_param_is_not_sane(const struct osd_device *dev,
593                                  const struct thandle *th)
594 {
595         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
596
597         return oh->ot_credits > osd_journal(dev)->j_max_transaction_buffers;
598 }
599
600 /*
601  * Concurrency: shouldn't matter.
602  */
603 #ifdef HAVE_LDISKFS_JOURNAL_CALLBACK_ADD
604 static void osd_trans_commit_cb(struct super_block *sb,
605                                 struct journal_callback *jcb, int error)
606 #else
607 static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
608 #endif
609 {
610         struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
611         struct thandle     *th  = &oh->ot_super;
612         struct lu_device   *lud = &th->th_dev->dd_lu_dev;
613         struct dt_txn_commit_cb *dcb, *tmp;
614
615         LASSERT(oh->ot_handle == NULL);
616
617         if (error)
618                 CERROR("transaction @0x%p commit error: %d\n", th, error);
619
620         dt_txn_hook_commit(th);
621
622         /* call per-transaction callbacks if any */
623         cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
624                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
625                          "commit callback entry: magic=%x name='%s'\n",
626                          dcb->dcb_magic, dcb->dcb_name);
627                 cfs_list_del_init(&dcb->dcb_linkage);
628                 dcb->dcb_func(NULL, th, dcb, error);
629         }
630
631         lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
632         lu_device_put(lud);
633         th->th_dev = NULL;
634
635         lu_context_exit(&th->th_ctx);
636         lu_context_fini(&th->th_ctx);
637         OBD_FREE_PTR(oh);
638 }
639
640 static struct thandle *osd_trans_create(const struct lu_env *env,
641                                         struct dt_device *d)
642 {
643         struct osd_thread_info *oti = osd_oti_get(env);
644         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
645         struct osd_thandle     *oh;
646         struct thandle         *th;
647         ENTRY;
648
649         /* on pending IO in this thread should left from prev. request */
650         LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
651
652         th = ERR_PTR(-ENOMEM);
653         OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
654         if (oh != NULL) {
655                 oh->ot_quota_trans = &oti->oti_quota_trans;
656                 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
657                 th = &oh->ot_super;
658                 th->th_dev = d;
659                 th->th_result = 0;
660                 th->th_tags = LCT_TX_HANDLE;
661                 oh->ot_credits = 0;
662                 oti->oti_dev = osd_dt_dev(d);
663                 CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
664                 osd_th_alloced(oh);
665         }
666         RETURN(th);
667 }
668
669 /*
670  * Concurrency: shouldn't matter.
671  */
672 int osd_trans_start(const struct lu_env *env, struct dt_device *d,
673                     struct thandle *th)
674 {
675         struct osd_thread_info *oti = osd_oti_get(env);
676         struct osd_device  *dev = osd_dt_dev(d);
677         handle_t           *jh;
678         struct osd_thandle *oh;
679         int rc;
680
681         ENTRY;
682
683         LASSERT(current->journal_info == NULL);
684
685         oh = container_of0(th, struct osd_thandle, ot_super);
686         LASSERT(oh != NULL);
687         LASSERT(oh->ot_handle == NULL);
688
689         rc = dt_txn_hook_start(env, d, th);
690         if (rc != 0)
691                 GOTO(out, rc);
692
693         if (unlikely(osd_param_is_not_sane(dev, th))) {
694                 static unsigned long last_printed;
695                 static int last_credits;
696
697                 CWARN("%.16s: too many transaction credits (%d > %d)\n",
698                       LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
699                       oh->ot_credits,
700                       osd_journal(dev)->j_max_transaction_buffers);
701 #ifdef OSD_TRACK_DECLARES
702                 CWARN("  create: %u/%u, delete: %u/%u, destroy: %u/%u\n",
703                       oh->ot_declare_create, oh->ot_declare_create_cred,
704                       oh->ot_declare_delete, oh->ot_declare_delete_cred,
705                       oh->ot_declare_destroy, oh->ot_declare_destroy_cred);
706                 CWARN("  attr_set: %u/%u, xattr_set: %u/%u\n",
707                       oh->ot_declare_attr_set, oh->ot_declare_attr_set_cred,
708                       oh->ot_declare_xattr_set, oh->ot_declare_xattr_set_cred);
709                 CWARN("  write: %u/%u, punch: %u/%u, quota %u/%u\n",
710                       oh->ot_declare_write, oh->ot_declare_write_cred,
711                       oh->ot_declare_punch, oh->ot_declare_punch_cred,
712                       oh->ot_declare_quota, oh->ot_declare_quota_cred);
713                 CWARN("  insert: %u/%u, delete: %u/%u\n",
714                       oh->ot_declare_insert, oh->ot_declare_insert_cred,
715                       oh->ot_declare_delete, oh->ot_declare_destroy_cred);
716                 CWARN("  ref_add: %u/%u, ref_del: %u/%u\n",
717                       oh->ot_declare_ref_add, oh->ot_declare_ref_add_cred,
718                       oh->ot_declare_ref_del, oh->ot_declare_ref_del_cred);
719
720                 if (last_credits != oh->ot_credits &&
721                     time_after(jiffies, last_printed + 60 * HZ)) {
722                         libcfs_debug_dumpstack(NULL);
723                         last_credits = oh->ot_credits;
724                         last_printed = jiffies;
725                 }
726 #endif
727                 /* XXX Limit the credits to 'max_transaction_buffers', and
728                  *     let the underlying filesystem to catch the error if
729                  *     we really need so many credits.
730                  *
731                  *     This should be removed when we can calculate the
732                  *     credits precisely. */
733                 oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
734         }
735
736         /*
737          * XXX temporary stuff. Some abstraction layer should
738          * be used.
739          */
740         jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
741         osd_th_started(oh);
742         if (!IS_ERR(jh)) {
743                 oh->ot_handle = jh;
744                 LASSERT(oti->oti_txns == 0);
745                 lu_context_init(&th->th_ctx, th->th_tags);
746                 lu_context_enter(&th->th_ctx);
747
748                 lu_device_get(&d->dd_lu_dev);
749                 oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
750                                              "osd-tx", th);
751                 oti->oti_txns++;
752                 rc = 0;
753         } else {
754                 rc = PTR_ERR(jh);
755         }
756 out:
757         RETURN(rc);
758 }
759
760 /*
761  * Concurrency: shouldn't matter.
762  */
763 static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
764 {
765         int                     rc = 0;
766         struct osd_thandle     *oh;
767         struct osd_thread_info *oti = osd_oti_get(env);
768         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
769         struct qsd_instance    *qsd = oti->oti_dev->od_quota_slave;
770         ENTRY;
771
772         oh = container_of0(th, struct osd_thandle, ot_super);
773
774         if (qsd != NULL)
775                 /* inform the quota slave device that the transaction is
776                  * stopping */
777                 qsd_op_end(env, qsd, oh->ot_quota_trans);
778         oh->ot_quota_trans = NULL;
779
780         if (oh->ot_handle != NULL) {
781                 handle_t *hdl = oh->ot_handle;
782
783                 /*
784                  * add commit callback
785                  * notice we don't do this in osd_trans_start()
786                  * as underlying transaction can change during truncate
787                  */
788                 osd_journal_callback_set(hdl, osd_trans_commit_cb,
789                                          &oh->ot_jcb);
790
791                 LASSERT(oti->oti_txns == 1);
792                 oti->oti_txns--;
793                 rc = dt_txn_hook_stop(env, th);
794                 if (rc != 0)
795                         CERROR("Failure in transaction hook: %d\n", rc);
796
797                 /* hook functions might modify th_sync */
798                 hdl->h_sync = th->th_sync;
799
800                 oh->ot_handle = NULL;
801                 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
802                                   rc = ldiskfs_journal_stop(hdl));
803                 if (rc != 0)
804                         CERROR("Failure to stop transaction: %d\n", rc);
805         } else {
806                 OBD_FREE_PTR(oh);
807         }
808
809         /* as we want IO to journal and data IO be concurrent, we don't block
810          * awaiting data IO completion in osd_do_bio(), instead we wait here
811          * once transaction is submitted to the journal. all reqular requests
812          * don't do direct IO (except read/write), thus this wait_event becomes
813          * no-op for them.
814          *
815          * IMPORTANT: we have to wait till any IO submited by the thread is
816          * completed otherwise iobuf may be corrupted by different request
817          */
818         cfs_wait_event(iobuf->dr_wait,
819                        cfs_atomic_read(&iobuf->dr_numreqs) == 0);
820         if (!rc)
821                 rc = iobuf->dr_error;
822
823         RETURN(rc);
824 }
825
826 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
827 {
828         struct osd_thandle *oh = container_of0(th, struct osd_thandle,
829                                                ot_super);
830
831         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
832         LASSERT(&dcb->dcb_func != NULL);
833         cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
834
835         return 0;
836 }
837
838 /*
839  * Called just before object is freed. Releases all resources except for
840  * object itself (that is released by osd_object_free()).
841  *
842  * Concurrency: no concurrent access is possible that late in object
843  * life-cycle.
844  */
845 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
846 {
847         struct osd_object *obj   = osd_obj(l);
848         struct inode      *inode = obj->oo_inode;
849
850         LINVRNT(osd_invariant(obj));
851
852         /*
853          * If object is unlinked remove fid->ino mapping from object index.
854          */
855
856         osd_index_fini(obj);
857         if (inode != NULL) {
858                 struct qsd_instance     *qsd = osd_obj2dev(obj)->od_quota_slave;
859                 qid_t                    uid = inode->i_uid;
860                 qid_t                    gid = inode->i_gid;
861
862                 iput(inode);
863                 obj->oo_inode = NULL;
864
865                 if (qsd != NULL) {
866                         struct osd_thread_info  *info = osd_oti_get(env);
867                         struct lquota_id_info   *qi = &info->oti_qi;
868
869                         /* Release granted quota to master if necessary */
870                         qi->lqi_id.qid_uid = uid;
871                         qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
872
873                         qi->lqi_id.qid_uid = gid;
874                         qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
875                 }
876         }
877 }
878
879 /*
880  * Concurrency: ->loo_object_release() is called under site spin-lock.
881  */
882 static void osd_object_release(const struct lu_env *env,
883                                struct lu_object *l)
884 {
885 }
886
887 /*
888  * Concurrency: shouldn't matter.
889  */
890 static int osd_object_print(const struct lu_env *env, void *cookie,
891                             lu_printer_t p, const struct lu_object *l)
892 {
893         struct osd_object *o = osd_obj(l);
894         struct iam_descr  *d;
895
896         if (o->oo_dir != NULL)
897                 d = o->oo_dir->od_container.ic_descr;
898         else
899                 d = NULL;
900         return (*p)(env, cookie,
901                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
902                     o, o->oo_inode,
903                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
904                     o->oo_inode ? o->oo_inode->i_generation : 0,
905                     d ? d->id_ops->id_name : "plain");
906 }
907
908 /*
909  * Concurrency: shouldn't matter.
910  */
911 int osd_statfs(const struct lu_env *env, struct dt_device *d,
912                struct obd_statfs *sfs)
913 {
914         struct osd_device  *osd = osd_dt_dev(d);
915         struct super_block *sb = osd_sb(osd);
916         struct kstatfs     *ksfs;
917         int result = 0;
918
919         if (unlikely(osd->od_mnt == NULL))
920                 return -EINPROGRESS;
921
922         /* osd_lproc.c call this without env, allocate ksfs for that case */
923         if (unlikely(env == NULL)) {
924                 OBD_ALLOC_PTR(ksfs);
925                 if (ksfs == NULL)
926                         return -ENOMEM;
927         } else {
928                 ksfs = &osd_oti_get(env)->oti_ksfs;
929         }
930
931         spin_lock(&osd->od_osfs_lock);
932         /* cache 1 second */
933         if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
934                 result = sb->s_op->statfs(sb->s_root, ksfs);
935                 if (likely(result == 0)) { /* N.B. statfs can't really fail */
936                         osd->od_osfs_age = cfs_time_current_64();
937                         statfs_pack(&osd->od_statfs, ksfs);
938                         if (sb->s_flags & MS_RDONLY)
939                                 sfs->os_state = OS_STATE_READONLY;
940                 }
941         }
942
943         if (likely(result == 0))
944                 *sfs = osd->od_statfs;
945         spin_unlock(&osd->od_osfs_lock);
946
947         if (unlikely(env == NULL))
948                 OBD_FREE_PTR(ksfs);
949
950         return result;
951 }
952
953 /**
954  * Estimate space needed for file creations. We assume the largest filename
955  * which is 2^64 - 1, hence a filename of 20 chars.
956  * This is 28 bytes per object which is 28MB for 1M objects ... no so bad.
957  */
958 #ifdef __LDISKFS_DIR_REC_LEN
959 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
960 #else
961 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
962 #endif
963
964 /*
965  * Concurrency: doesn't access mutable data.
966  */
967 static void osd_conf_get(const struct lu_env *env,
968                          const struct dt_device *dev,
969                          struct dt_device_param *param)
970 {
971         struct super_block *sb = osd_sb(osd_dt_dev(dev));
972
973         /*
974          * XXX should be taken from not-yet-existing fs abstraction layer.
975          */
976         param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
977         param->ddp_max_name_len = LDISKFS_NAME_LEN;
978         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
979         param->ddp_block_shift  = sb->s_blocksize_bits;
980         param->ddp_mount_type     = LDD_MT_LDISKFS;
981         param->ddp_maxbytes       = sb->s_maxbytes;
982         /* Overhead estimate should be fairly accurate, so we really take a tiny
983          * error margin which also avoids fragmenting the filesystem too much */
984         param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
985         /* inode are statically allocated, so per-inode space consumption
986          * is the space consumed by the directory entry */
987         param->ddp_inodespace     = PER_OBJ_USAGE;
988         /* per-fragment overhead to be used by the client code */
989         param->ddp_grant_frag     = 6 * LDISKFS_BLOCK_SIZE(sb);
990         param->ddp_mntopts      = 0;
991         if (test_opt(sb, XATTR_USER))
992                 param->ddp_mntopts |= MNTOPT_USERXATTR;
993         if (test_opt(sb, POSIX_ACL))
994                 param->ddp_mntopts |= MNTOPT_ACL;
995
996 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
997         if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
998                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
999         else
1000 #endif
1001                 param->ddp_max_ea_size = sb->s_blocksize;
1002
1003 }
1004
1005 /**
1006  * Helper function to get and fill the buffer with input values.
1007  */
1008 static struct lu_buf *osd_buf_get(const struct lu_env *env, void *area, ssize_t len)
1009 {
1010         struct lu_buf *buf;
1011
1012         buf = &osd_oti_get(env)->oti_buf;
1013         buf->lb_buf = area;
1014         buf->lb_len = len;
1015         return buf;
1016 }
1017
1018 /*
1019  * Concurrency: shouldn't matter.
1020  */
1021 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1022 {
1023         CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1024         return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1025 }
1026
1027 /**
1028  * Start commit for OSD device.
1029  *
1030  * An implementation of dt_commit_async method for OSD device.
1031  * Asychronously starts underlayng fs sync and thereby a transaction
1032  * commit.
1033  *
1034  * \param env environment
1035  * \param d dt device
1036  *
1037  * \see dt_device_operations
1038  */
1039 static int osd_commit_async(const struct lu_env *env,
1040                             struct dt_device *d)
1041 {
1042         struct super_block *s = osd_sb(osd_dt_dev(d));
1043         ENTRY;
1044
1045         CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1046         RETURN(s->s_op->sync_fs(s, 0));
1047 }
1048
1049 /*
1050  * Concurrency: shouldn't matter.
1051  */
1052
1053 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1054 {
1055         struct super_block *sb = osd_sb(osd_dt_dev(d));
1056         int rc;
1057         ENTRY;
1058
1059         CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1060
1061         rc = __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
1062         RETURN(rc);
1063 }
1064
1065 /*
1066  * Concurrency: serialization provided by callers.
1067  */
1068 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
1069                               int mode, unsigned long timeout, __u32 alg,
1070                               struct lustre_capa_key *keys)
1071 {
1072         struct osd_device *dev = osd_dt_dev(d);
1073         ENTRY;
1074
1075         dev->od_fl_capa = mode;
1076         dev->od_capa_timeout = timeout;
1077         dev->od_capa_alg = alg;
1078         dev->od_capa_keys = keys;
1079         RETURN(0);
1080 }
1081
1082 /**
1083  * Note: we do not count into QUOTA here.
1084  * If we mount with --data_journal we may need more.
1085  */
1086 const int osd_dto_credits_noquota[DTO_NR] = {
1087         /**
1088          * Insert/Delete.
1089          * INDEX_EXTRA_TRANS_BLOCKS(8) +
1090          * SINGLEDATA_TRANS_BLOCKS(8)
1091          * XXX Note: maybe iam need more, since iam have more level than
1092          *           EXT3 htree.
1093          */
1094         [DTO_INDEX_INSERT]  = 16,
1095         [DTO_INDEX_DELETE]  = 16,
1096         /**
1097          * Used for OI scrub
1098          */
1099         [DTO_INDEX_UPDATE]  = 16,
1100         /**
1101          * Create a object. The same as create object in EXT3.
1102          * DATA_TRANS_BLOCKS(14) +
1103          * INDEX_EXTRA_BLOCKS(8) +
1104          * 3(inode bits, groups, GDT)
1105          */
1106         [DTO_OBJECT_CREATE] = 25,
1107         /**
1108          * XXX: real credits to be fixed
1109          */
1110         [DTO_OBJECT_DELETE] = 25,
1111         /**
1112          * Attr set credits (inode)
1113          */
1114         [DTO_ATTR_SET_BASE] = 1,
1115         /**
1116          * Xattr set. The same as xattr of EXT3.
1117          * DATA_TRANS_BLOCKS(14)
1118          * XXX Note: in original MDS implmentation INDEX_EXTRA_TRANS_BLOCKS
1119          * are also counted in. Do not know why?
1120          */
1121         [DTO_XATTR_SET]     = 14,
1122         [DTO_LOG_REC]       = 14,
1123         /**
1124          * credits for inode change during write.
1125          */
1126         [DTO_WRITE_BASE]    = 3,
1127         /**
1128          * credits for single block write.
1129          */
1130         [DTO_WRITE_BLOCK]   = 14,
1131         /**
1132          * Attr set credits for chown.
1133          * This is extra credits for setattr, and it is null without quota
1134          */
1135         [DTO_ATTR_SET_CHOWN]= 0
1136 };
1137
1138 static const struct dt_device_operations osd_dt_ops = {
1139         .dt_root_get       = osd_root_get,
1140         .dt_statfs         = osd_statfs,
1141         .dt_trans_create   = osd_trans_create,
1142         .dt_trans_start    = osd_trans_start,
1143         .dt_trans_stop     = osd_trans_stop,
1144         .dt_trans_cb_add   = osd_trans_cb_add,
1145         .dt_conf_get       = osd_conf_get,
1146         .dt_sync           = osd_sync,
1147         .dt_ro             = osd_ro,
1148         .dt_commit_async   = osd_commit_async,
1149         .dt_init_capa_ctxt = osd_init_capa_ctxt,
1150 };
1151
1152 static void osd_object_read_lock(const struct lu_env *env,
1153                                  struct dt_object *dt, unsigned role)
1154 {
1155         struct osd_object *obj = osd_dt_obj(dt);
1156         struct osd_thread_info *oti = osd_oti_get(env);
1157
1158         LINVRNT(osd_invariant(obj));
1159
1160         LASSERT(obj->oo_owner != env);
1161         down_read_nested(&obj->oo_sem, role);
1162
1163         LASSERT(obj->oo_owner == NULL);
1164         oti->oti_r_locks++;
1165 }
1166
1167 static void osd_object_write_lock(const struct lu_env *env,
1168                                   struct dt_object *dt, unsigned role)
1169 {
1170         struct osd_object *obj = osd_dt_obj(dt);
1171         struct osd_thread_info *oti = osd_oti_get(env);
1172
1173         LINVRNT(osd_invariant(obj));
1174
1175         LASSERT(obj->oo_owner != env);
1176         down_write_nested(&obj->oo_sem, role);
1177
1178         LASSERT(obj->oo_owner == NULL);
1179         obj->oo_owner = env;
1180         oti->oti_w_locks++;
1181 }
1182
1183 static void osd_object_read_unlock(const struct lu_env *env,
1184                                    struct dt_object *dt)
1185 {
1186         struct osd_object *obj = osd_dt_obj(dt);
1187         struct osd_thread_info *oti = osd_oti_get(env);
1188
1189         LINVRNT(osd_invariant(obj));
1190
1191         LASSERT(oti->oti_r_locks > 0);
1192         oti->oti_r_locks--;
1193         up_read(&obj->oo_sem);
1194 }
1195
1196 static void osd_object_write_unlock(const struct lu_env *env,
1197                                     struct dt_object *dt)
1198 {
1199         struct osd_object *obj = osd_dt_obj(dt);
1200         struct osd_thread_info *oti = osd_oti_get(env);
1201
1202         LINVRNT(osd_invariant(obj));
1203
1204         LASSERT(obj->oo_owner == env);
1205         LASSERT(oti->oti_w_locks > 0);
1206         oti->oti_w_locks--;
1207         obj->oo_owner = NULL;
1208         up_write(&obj->oo_sem);
1209 }
1210
1211 static int osd_object_write_locked(const struct lu_env *env,
1212                                    struct dt_object *dt)
1213 {
1214         struct osd_object *obj = osd_dt_obj(dt);
1215
1216         LINVRNT(osd_invariant(obj));
1217
1218         return obj->oo_owner == env;
1219 }
1220
1221 static int capa_is_sane(const struct lu_env *env,
1222                         struct osd_device *dev,
1223                         struct lustre_capa *capa,
1224                         struct lustre_capa_key *keys)
1225 {
1226         struct osd_thread_info *oti = osd_oti_get(env);
1227         struct lustre_capa *tcapa = &oti->oti_capa;
1228         struct obd_capa *oc;
1229         int i, rc = 0;
1230         ENTRY;
1231
1232         oc = capa_lookup(dev->od_capa_hash, capa, 0);
1233         if (oc) {
1234                 if (capa_is_expired(oc)) {
1235                         DEBUG_CAPA(D_ERROR, capa, "expired");
1236                         rc = -ESTALE;
1237                 }
1238                 capa_put(oc);
1239                 RETURN(rc);
1240         }
1241
1242         if (capa_is_expired_sec(capa)) {
1243                 DEBUG_CAPA(D_ERROR, capa, "expired");
1244                 RETURN(-ESTALE);
1245         }
1246
1247         spin_lock(&capa_lock);
1248         for (i = 0; i < 2; i++) {
1249                 if (keys[i].lk_keyid == capa->lc_keyid) {
1250                         oti->oti_capa_key = keys[i];
1251                         break;
1252                 }
1253         }
1254         spin_unlock(&capa_lock);
1255
1256         if (i == 2) {
1257                 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1258                 RETURN(-ESTALE);
1259         }
1260
1261         rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1262         if (rc)
1263                 RETURN(rc);
1264
1265         if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1266                 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1267                 RETURN(-EACCES);
1268         }
1269
1270         oc = capa_add(dev->od_capa_hash, capa);
1271         capa_put(oc);
1272
1273         RETURN(0);
1274 }
1275
1276 int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1277                     struct lustre_capa *capa, __u64 opc)
1278 {
1279         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1280         struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1281         struct md_capainfo *ci;
1282         int rc;
1283
1284         if (!dev->od_fl_capa)
1285                 return 0;
1286
1287         if (capa == BYPASS_CAPA)
1288                 return 0;
1289
1290         ci = md_capainfo(env);
1291         if (unlikely(!ci))
1292                 return 0;
1293
1294         if (ci->mc_auth == LC_ID_NONE)
1295                 return 0;
1296
1297         if (!capa) {
1298                 CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
1299                 return -EACCES;
1300         }
1301
1302         if (!lu_fid_eq(fid, &capa->lc_fid)) {
1303                 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1304                            PFID(fid));
1305                 return -EACCES;
1306         }
1307
1308         if (!capa_opc_supported(capa, opc)) {
1309                 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1310                 return -EACCES;
1311         }
1312
1313         if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
1314                 DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
1315                 return -EACCES;
1316         }
1317
1318         return 0;
1319 }
1320
1321 static struct timespec *osd_inode_time(const struct lu_env *env,
1322                                        struct inode *inode, __u64 seconds)
1323 {
1324         struct osd_thread_info  *oti = osd_oti_get(env);
1325         struct timespec         *t   = &oti->oti_time;
1326
1327         t->tv_sec = seconds;
1328         t->tv_nsec = 0;
1329         *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
1330         return t;
1331 }
1332
1333
1334 static void osd_inode_getattr(const struct lu_env *env,
1335                               struct inode *inode, struct lu_attr *attr)
1336 {
1337         attr->la_valid      |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1338                                LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1339                                LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
1340
1341         attr->la_atime      = LTIME_S(inode->i_atime);
1342         attr->la_mtime      = LTIME_S(inode->i_mtime);
1343         attr->la_ctime      = LTIME_S(inode->i_ctime);
1344         attr->la_mode       = inode->i_mode;
1345         attr->la_size       = i_size_read(inode);
1346         attr->la_blocks     = inode->i_blocks;
1347         attr->la_uid        = inode->i_uid;
1348         attr->la_gid        = inode->i_gid;
1349         attr->la_flags      = LDISKFS_I(inode)->i_flags;
1350         attr->la_nlink      = inode->i_nlink;
1351         attr->la_rdev       = inode->i_rdev;
1352         attr->la_blksize    = 1 << inode->i_blkbits;
1353         attr->la_blkbits    = inode->i_blkbits;
1354 }
1355
1356 static int osd_attr_get(const struct lu_env *env,
1357                         struct dt_object *dt,
1358                         struct lu_attr *attr,
1359                         struct lustre_capa *capa)
1360 {
1361         struct osd_object *obj = osd_dt_obj(dt);
1362
1363         LASSERT(dt_object_exists(dt));
1364         LINVRNT(osd_invariant(obj));
1365
1366         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1367                 return -EACCES;
1368
1369         spin_lock(&obj->oo_guard);
1370         osd_inode_getattr(env, obj->oo_inode, attr);
1371         spin_unlock(&obj->oo_guard);
1372         return 0;
1373 }
1374
1375 static int osd_declare_attr_set(const struct lu_env *env,
1376                                 struct dt_object *dt,
1377                                 const struct lu_attr *attr,
1378                                 struct thandle *handle)
1379 {
1380         struct osd_thandle     *oh;
1381         struct osd_object      *obj;
1382         struct osd_thread_info *info = osd_oti_get(env);
1383         struct lquota_id_info  *qi = &info->oti_qi;
1384         long long               bspace;
1385         int                     rc = 0;
1386         bool                    allocated;
1387         ENTRY;
1388
1389         LASSERT(dt != NULL);
1390         LASSERT(handle != NULL);
1391
1392         obj = osd_dt_obj(dt);
1393         LASSERT(osd_invariant(obj));
1394
1395         oh = container_of0(handle, struct osd_thandle, ot_super);
1396         LASSERT(oh->ot_handle == NULL);
1397
1398         OSD_DECLARE_OP(oh, attr_set,
1399                        osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1400
1401         if (attr == NULL || obj->oo_inode == NULL)
1402                 RETURN(rc);
1403
1404         bspace   = obj->oo_inode->i_blocks;
1405         bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1406         bspace   = toqb(bspace);
1407
1408         /* Changing ownership is always preformed by super user, it should not
1409          * fail with EDQUOT.
1410          *
1411          * We still need to call the osd_declare_qid() to calculate the journal
1412          * credits for updating quota accounting files and to trigger quota
1413          * space adjustment once the operation is completed.*/
1414         if ((attr->la_valid & LA_UID) != 0 &&
1415              attr->la_uid != obj->oo_inode->i_uid) {
1416                 qi->lqi_type = USRQUOTA;
1417
1418                 /* inode accounting */
1419                 qi->lqi_is_blk = false;
1420
1421                 /* one more inode for the new owner ... */
1422                 qi->lqi_id.qid_uid = attr->la_uid;
1423                 qi->lqi_space      = 1;
1424                 allocated = (attr->la_uid == 0) ? true : false;
1425                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1426                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1427                         rc = 0;
1428                 if (rc)
1429                         RETURN(rc);
1430
1431                 /* and one less inode for the current uid */
1432                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1433                 qi->lqi_space      = -1;
1434                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1435                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1436                         rc = 0;
1437                 if (rc)
1438                         RETURN(rc);
1439
1440                 /* block accounting */
1441                 qi->lqi_is_blk = true;
1442
1443                 /* more blocks for the new owner ... */
1444                 qi->lqi_id.qid_uid = attr->la_uid;
1445                 qi->lqi_space      = bspace;
1446                 allocated = (attr->la_uid == 0) ? true : false;
1447                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1448                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1449                         rc = 0;
1450                 if (rc)
1451                         RETURN(rc);
1452
1453                 /* and finally less blocks for the current owner */
1454                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1455                 qi->lqi_space      = -bspace;
1456                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1457                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1458                         rc = 0;
1459                 if (rc)
1460                         RETURN(rc);
1461         }
1462
1463         if (attr->la_valid & LA_GID &&
1464             attr->la_gid != obj->oo_inode->i_gid) {
1465                 qi->lqi_type = GRPQUOTA;
1466
1467                 /* inode accounting */
1468                 qi->lqi_is_blk = false;
1469
1470                 /* one more inode for the new group owner ... */
1471                 qi->lqi_id.qid_gid = attr->la_gid;
1472                 qi->lqi_space      = 1;
1473                 allocated = (attr->la_gid == 0) ? true : false;
1474                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1475                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1476                         rc = 0;
1477                 if (rc)
1478                         RETURN(rc);
1479
1480                 /* and one less inode for the current gid */
1481                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1482                 qi->lqi_space      = -1;
1483                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1484                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1485                         rc = 0;
1486                 if (rc)
1487                         RETURN(rc);
1488
1489                 /* block accounting */
1490                 qi->lqi_is_blk = true;
1491
1492                 /* more blocks for the new owner ... */
1493                 qi->lqi_id.qid_gid = attr->la_gid;
1494                 qi->lqi_space      = bspace;
1495                 allocated = (attr->la_gid == 0) ? true : false;
1496                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1497                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1498                         rc = 0;
1499                 if (rc)
1500                         RETURN(rc);
1501
1502                 /* and finally less blocks for the current owner */
1503                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1504                 qi->lqi_space      = -bspace;
1505                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1506                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1507                         rc = 0;
1508                 if (rc)
1509                         RETURN(rc);
1510         }
1511
1512         RETURN(rc);
1513 }
1514
1515 static int osd_inode_setattr(const struct lu_env *env,
1516                              struct inode *inode, const struct lu_attr *attr)
1517 {
1518         __u64 bits;
1519
1520         bits = attr->la_valid;
1521
1522         LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
1523
1524         if (bits & LA_ATIME)
1525                 inode->i_atime  = *osd_inode_time(env, inode, attr->la_atime);
1526         if (bits & LA_CTIME)
1527                 inode->i_ctime  = *osd_inode_time(env, inode, attr->la_ctime);
1528         if (bits & LA_MTIME)
1529                 inode->i_mtime  = *osd_inode_time(env, inode, attr->la_mtime);
1530         if (bits & LA_SIZE) {
1531                 LDISKFS_I(inode)->i_disksize = attr->la_size;
1532                 i_size_write(inode, attr->la_size);
1533         }
1534
1535 #if 0
1536         /* OSD should not change "i_blocks" which is used by quota.
1537          * "i_blocks" should be changed by ldiskfs only. */
1538         if (bits & LA_BLOCKS)
1539                 inode->i_blocks = attr->la_blocks;
1540 #endif
1541         if (bits & LA_MODE)
1542                 inode->i_mode   = (inode->i_mode & S_IFMT) |
1543                         (attr->la_mode & ~S_IFMT);
1544         if (bits & LA_UID)
1545                 inode->i_uid    = attr->la_uid;
1546         if (bits & LA_GID)
1547                 inode->i_gid    = attr->la_gid;
1548         if (bits & LA_NLINK)
1549                 set_nlink(inode, attr->la_nlink);
1550         if (bits & LA_RDEV)
1551                 inode->i_rdev   = attr->la_rdev;
1552
1553         if (bits & LA_FLAGS) {
1554                 /* always keep S_NOCMTIME */
1555                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1556                                  S_NOCMTIME;
1557         }
1558         return 0;
1559 }
1560
1561 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1562 {
1563         if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
1564             (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
1565                 struct iattr    iattr;
1566                 int             rc;
1567
1568                 iattr.ia_valid = 0;
1569                 if (attr->la_valid & LA_UID)
1570                         iattr.ia_valid |= ATTR_UID;
1571                 if (attr->la_valid & LA_GID)
1572                         iattr.ia_valid |= ATTR_GID;
1573                 iattr.ia_uid = attr->la_uid;
1574                 iattr.ia_gid = attr->la_gid;
1575
1576                 rc = ll_vfs_dq_transfer(inode, &iattr);
1577                 if (rc) {
1578                         CERROR("%s: quota transfer failed: rc = %d. Is quota "
1579                                "enforcement enabled on the ldiskfs filesystem?",
1580                                inode->i_sb->s_id, rc);
1581                         return rc;
1582                 }
1583         }
1584         return 0;
1585 }
1586
1587 static int osd_attr_set(const struct lu_env *env,
1588                         struct dt_object *dt,
1589                         const struct lu_attr *attr,
1590                         struct thandle *handle,
1591                         struct lustre_capa *capa)
1592 {
1593         struct osd_object *obj = osd_dt_obj(dt);
1594         struct inode      *inode;
1595         int rc;
1596
1597         LASSERT(handle != NULL);
1598         LASSERT(dt_object_exists(dt));
1599         LASSERT(osd_invariant(obj));
1600
1601         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1602                 return -EACCES;
1603
1604         OSD_EXEC_OP(handle, attr_set);
1605
1606         inode = obj->oo_inode;
1607         ll_vfs_dq_init(inode);
1608
1609         rc = osd_quota_transfer(inode, attr);
1610         if (rc)
1611                 return rc;
1612
1613         spin_lock(&obj->oo_guard);
1614         rc = osd_inode_setattr(env, inode, attr);
1615         spin_unlock(&obj->oo_guard);
1616
1617         if (!rc)
1618                 inode->i_sb->s_op->dirty_inode(inode);
1619         return rc;
1620 }
1621
1622 struct dentry *osd_child_dentry_get(const struct lu_env *env,
1623                                     struct osd_object *obj,
1624                                     const char *name, const int namelen)
1625 {
1626         return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
1627 }
1628
1629 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1630                       cfs_umode_t mode,
1631                       struct dt_allocation_hint *hint,
1632                       struct thandle *th)
1633 {
1634         int result;
1635         struct osd_device  *osd = osd_obj2dev(obj);
1636         struct osd_thandle *oth;
1637         struct dt_object   *parent = NULL;
1638         struct inode       *inode;
1639
1640         LINVRNT(osd_invariant(obj));
1641         LASSERT(obj->oo_inode == NULL);
1642         LASSERT(obj->oo_hl_head == NULL);
1643
1644         if (S_ISDIR(mode) && ldiskfs_pdo) {
1645                 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1646                 if (obj->oo_hl_head == NULL)
1647                         return -ENOMEM;
1648         }
1649
1650         oth = container_of(th, struct osd_thandle, ot_super);
1651         LASSERT(oth->ot_handle->h_transaction != NULL);
1652
1653         if (hint && hint->dah_parent)
1654                 parent = hint->dah_parent;
1655
1656         inode = ldiskfs_create_inode(oth->ot_handle,
1657                                      parent ? osd_dt_obj(parent)->oo_inode :
1658                                               osd_sb(osd)->s_root->d_inode,
1659                                      mode);
1660         if (!IS_ERR(inode)) {
1661                 /* Do not update file c/mtime in ldiskfs.
1662                  * NB: don't need any lock because no contention at this
1663                  * early stage */
1664                 inode->i_flags |= S_NOCMTIME;
1665                 inode->i_state |= I_LUSTRE_NOSCRUB;
1666                 obj->oo_inode = inode;
1667                 result = 0;
1668         } else {
1669                 if (obj->oo_hl_head != NULL) {
1670                         ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1671                         obj->oo_hl_head = NULL;
1672                 }
1673                 result = PTR_ERR(inode);
1674         }
1675         LINVRNT(osd_invariant(obj));
1676         return result;
1677 }
1678
1679 enum {
1680         OSD_NAME_LEN = 255
1681 };
1682
1683 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1684                      struct lu_attr *attr,
1685                      struct dt_allocation_hint *hint,
1686                      struct dt_object_format *dof,
1687                      struct thandle *th)
1688 {
1689         int result;
1690         struct osd_thandle *oth;
1691         __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1692
1693         LASSERT(S_ISDIR(attr->la_mode));
1694
1695         oth = container_of(th, struct osd_thandle, ot_super);
1696         LASSERT(oth->ot_handle->h_transaction != NULL);
1697         result = osd_mkfile(info, obj, mode, hint, th);
1698
1699         return result;
1700 }
1701
1702 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
1703                         struct lu_attr *attr,
1704                         struct dt_allocation_hint *hint,
1705                         struct dt_object_format *dof,
1706                         struct thandle *th)
1707 {
1708         int result;
1709         struct osd_thandle *oth;
1710         const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
1711
1712         __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
1713
1714         LASSERT(S_ISREG(attr->la_mode));
1715
1716         oth = container_of(th, struct osd_thandle, ot_super);
1717         LASSERT(oth->ot_handle->h_transaction != NULL);
1718
1719         result = osd_mkfile(info, obj, mode, hint, th);
1720         if (result == 0) {
1721                 LASSERT(obj->oo_inode != NULL);
1722                 if (feat->dif_flags & DT_IND_VARKEY)
1723                         result = iam_lvar_create(obj->oo_inode,
1724                                                  feat->dif_keysize_max,
1725                                                  feat->dif_ptrsize,
1726                                                  feat->dif_recsize_max,
1727                                                  oth->ot_handle);
1728                 else
1729                         result = iam_lfix_create(obj->oo_inode,
1730                                                  feat->dif_keysize_max,
1731                                                  feat->dif_ptrsize,
1732                                                  feat->dif_recsize_max,
1733                                                  oth->ot_handle);
1734
1735         }
1736         return result;
1737 }
1738
1739 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1740                      struct lu_attr *attr,
1741                      struct dt_allocation_hint *hint,
1742                      struct dt_object_format *dof,
1743                      struct thandle *th)
1744 {
1745         LASSERT(S_ISREG(attr->la_mode));
1746         return osd_mkfile(info, obj, (attr->la_mode &
1747                                (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1748 }
1749
1750 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1751                      struct lu_attr *attr,
1752                      struct dt_allocation_hint *hint,
1753                      struct dt_object_format *dof,
1754                      struct thandle *th)
1755 {
1756         LASSERT(S_ISLNK(attr->la_mode));
1757         return osd_mkfile(info, obj, (attr->la_mode &
1758                               (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1759 }
1760
1761 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1762                      struct lu_attr *attr,
1763                      struct dt_allocation_hint *hint,
1764                      struct dt_object_format *dof,
1765                      struct thandle *th)
1766 {
1767         cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
1768         int result;
1769
1770         LINVRNT(osd_invariant(obj));
1771         LASSERT(obj->oo_inode == NULL);
1772         LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1773                 S_ISFIFO(mode) || S_ISSOCK(mode));
1774
1775         result = osd_mkfile(info, obj, mode, hint, th);
1776         if (result == 0) {
1777                 LASSERT(obj->oo_inode != NULL);
1778                 /*
1779                  * This inode should be marked dirty for i_rdev.  Currently
1780                  * that is done in the osd_attr_init().
1781                  */
1782                 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
1783                                    attr->la_rdev);
1784         }
1785         LINVRNT(osd_invariant(obj));
1786         return result;
1787 }
1788
1789 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1790                               struct lu_attr *,
1791                               struct dt_allocation_hint *hint,
1792                               struct dt_object_format *dof,
1793                               struct thandle *);
1794
1795 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1796 {
1797         osd_obj_type_f result;
1798
1799         switch (type) {
1800         case DFT_DIR:
1801                 result = osd_mkdir;
1802                 break;
1803         case DFT_REGULAR:
1804                 result = osd_mkreg;
1805                 break;
1806         case DFT_SYM:
1807                 result = osd_mksym;
1808                 break;
1809         case DFT_NODE:
1810                 result = osd_mknod;
1811                 break;
1812         case DFT_INDEX:
1813                 result = osd_mk_index;
1814                 break;
1815
1816         default:
1817                 LBUG();
1818                 break;
1819         }
1820         return result;
1821 }
1822
1823
1824 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1825                         struct dt_object *parent, struct dt_object *child,
1826                         cfs_umode_t child_mode)
1827 {
1828         LASSERT(ah);
1829
1830         memset(ah, 0, sizeof(*ah));
1831         ah->dah_parent = parent;
1832         ah->dah_mode = child_mode;
1833 }
1834
1835 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
1836                           struct lu_attr *attr, struct dt_object_format *dof)
1837 {
1838         struct inode   *inode = obj->oo_inode;
1839         __u64           valid = attr->la_valid;
1840         int             result;
1841
1842         attr->la_valid &= ~(LA_TYPE | LA_MODE);
1843
1844         if (dof->dof_type != DFT_NODE)
1845                 attr->la_valid &= ~LA_RDEV;
1846         if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
1847                 attr->la_valid &= ~LA_ATIME;
1848         if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
1849                 attr->la_valid &= ~LA_CTIME;
1850         if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
1851                 attr->la_valid &= ~LA_MTIME;
1852
1853         result = osd_quota_transfer(inode, attr);
1854         if (result)
1855                 return;
1856
1857         if (attr->la_valid != 0) {
1858                 result = osd_inode_setattr(info->oti_env, inode, attr);
1859                 /*
1860                  * The osd_inode_setattr() should always succeed here.  The
1861                  * only error that could be returned is EDQUOT when we are
1862                  * trying to change the UID or GID of the inode. However, this
1863                  * should not happen since quota enforcement is no longer
1864                  * enabled on ldiskfs (lquota takes care of it).
1865                  */
1866                 LASSERTF(result == 0, "%d", result);
1867                 inode->i_sb->s_op->dirty_inode(inode);
1868         }
1869
1870         attr->la_valid = valid;
1871 }
1872
1873 /**
1874  * Helper function for osd_object_create()
1875  *
1876  * \retval 0, on success
1877  */
1878 static int __osd_object_create(struct osd_thread_info *info,
1879                                struct osd_object *obj, struct lu_attr *attr,
1880                                struct dt_allocation_hint *hint,
1881                                struct dt_object_format *dof,
1882                                struct thandle *th)
1883 {
1884         int     result;
1885
1886         result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
1887                                                   th);
1888         if (result == 0) {
1889                 osd_attr_init(info, obj, attr, dof);
1890                 osd_object_init0(obj);
1891                 /* bz 24037 */
1892                 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
1893                         unlock_new_inode(obj->oo_inode);
1894         }
1895
1896         return result;
1897 }
1898
1899 /**
1900  * Helper function for osd_object_create()
1901  *
1902  * \retval 0, on success
1903  */
1904 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
1905                            const struct lu_fid *fid, struct thandle *th)
1906 {
1907         struct osd_thread_info *info = osd_oti_get(env);
1908         struct osd_inode_id    *id   = &info->oti_id;
1909         struct osd_device      *osd  = osd_obj2dev(obj);
1910
1911         LASSERT(obj->oo_inode != NULL);
1912
1913         osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
1914         return osd_oi_insert(info, osd, fid, id, th);
1915 }
1916
1917 static int osd_declare_object_create(const struct lu_env *env,
1918                                      struct dt_object *dt,
1919                                      struct lu_attr *attr,
1920                                      struct dt_allocation_hint *hint,
1921                                      struct dt_object_format *dof,
1922                                      struct thandle *handle)
1923 {
1924         struct osd_thandle      *oh;
1925         int                      rc;
1926         ENTRY;
1927
1928         LASSERT(handle != NULL);
1929
1930         oh = container_of0(handle, struct osd_thandle, ot_super);
1931         LASSERT(oh->ot_handle == NULL);
1932
1933         OSD_DECLARE_OP(oh, create, osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
1934         /* XXX: So far, only normal fid needs be inserted into the oi,
1935          *      things could be changed later. Revise following code then. */
1936         if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
1937                 /* Reuse idle OI block may cause additional one OI block
1938                  * to be changed. */
1939                 OSD_DECLARE_OP(oh, insert,
1940                                osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
1941         }
1942         /* If this is directory, then we expect . and .. to be inserted as
1943          * well. The one directory block always needs to be created for the
1944          * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
1945          * block), there is no danger of needing a tree for the first block.
1946          */
1947         if (attr && S_ISDIR(attr->la_mode)) {
1948                 OSD_DECLARE_OP(oh, insert,
1949                                osd_dto_credits_noquota[DTO_WRITE_BASE]);
1950                 OSD_DECLARE_OP(oh, insert, 0);
1951         }
1952
1953         if (!attr)
1954                 RETURN(0);
1955
1956         rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
1957                                    false, false, NULL, false);
1958         RETURN(rc);
1959 }
1960
1961 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1962                              struct lu_attr *attr,
1963                              struct dt_allocation_hint *hint,
1964                              struct dt_object_format *dof,
1965                              struct thandle *th)
1966 {
1967         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
1968         struct osd_object      *obj    = osd_dt_obj(dt);
1969         struct osd_thread_info *info   = osd_oti_get(env);
1970         int result;
1971
1972         ENTRY;
1973
1974         LINVRNT(osd_invariant(obj));
1975         LASSERT(!dt_object_exists(dt));
1976         LASSERT(osd_write_locked(env, obj));
1977         LASSERT(th != NULL);
1978
1979         if (unlikely(fid_is_acct(fid)))
1980                 /* Quota files can't be created from the kernel any more,
1981                  * 'tune2fs -O quota' will take care of creating them */
1982                 RETURN(-EPERM);
1983
1984         OSD_EXEC_OP(th, create);
1985
1986         result = __osd_object_create(info, obj, attr, hint, dof, th);
1987         if (result == 0)
1988                 result = __osd_oi_insert(env, obj, fid, th);
1989
1990         LASSERT(ergo(result == 0, dt_object_exists(dt)));
1991         LASSERT(osd_invariant(obj));
1992         RETURN(result);
1993 }
1994
1995 /**
1996  * Called to destroy on-disk representation of the object
1997  *
1998  * Concurrency: must be locked
1999  */
2000 static int osd_declare_object_destroy(const struct lu_env *env,
2001                                       struct dt_object *dt,
2002                                       struct thandle *th)
2003 {
2004         struct osd_object  *obj = osd_dt_obj(dt);
2005         struct inode       *inode = obj->oo_inode;
2006         struct osd_thandle *oh;
2007         int                 rc;
2008         ENTRY;
2009
2010         oh = container_of0(th, struct osd_thandle, ot_super);
2011         LASSERT(oh->ot_handle == NULL);
2012         LASSERT(inode);
2013
2014         OSD_DECLARE_OP(oh, delete, osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
2015         /* XXX: So far, only normal fid needs to be inserted into the OI,
2016          *      so only normal fid needs to be removed from the OI also.
2017          * Recycle idle OI leaf may cause additional three OI blocks
2018          * to be changed. */
2019         OSD_DECLARE_OP(oh, destroy, fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2020                         osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3 : 0);
2021
2022         /* one less inode */
2023         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
2024                                    false, true, NULL, false);
2025         if (rc)
2026                 RETURN(rc);
2027         /* data to be truncated */
2028         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
2029                                    true, true, NULL, false);
2030         RETURN(rc);
2031 }
2032
2033 static int osd_object_destroy(const struct lu_env *env,
2034                               struct dt_object *dt,
2035                               struct thandle *th)
2036 {
2037         const struct lu_fid    *fid = lu_object_fid(&dt->do_lu);
2038         struct osd_object      *obj = osd_dt_obj(dt);
2039         struct inode           *inode = obj->oo_inode;
2040         struct osd_device      *osd = osd_obj2dev(obj);
2041         struct osd_thandle     *oh;
2042         int                     result;
2043         ENTRY;
2044
2045         oh = container_of0(th, struct osd_thandle, ot_super);
2046         LASSERT(oh->ot_handle);
2047         LASSERT(inode);
2048         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2049
2050         if (unlikely(fid_is_acct(fid)))
2051                 RETURN(-EPERM);
2052
2053         /* Parallel control for OI scrub. For most of cases, there is no
2054          * lock contention. So it will not affect unlink performance. */
2055         mutex_lock(&inode->i_mutex);
2056         if (S_ISDIR(inode->i_mode)) {
2057                 LASSERT(osd_inode_unlinked(inode) ||
2058                         inode->i_nlink == 1);
2059                 spin_lock(&obj->oo_guard);
2060                 clear_nlink(inode);
2061                 spin_unlock(&obj->oo_guard);
2062                 inode->i_sb->s_op->dirty_inode(inode);
2063         } else {
2064                 LASSERT(osd_inode_unlinked(inode));
2065         }
2066
2067         OSD_EXEC_OP(th, destroy);
2068
2069         result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
2070         mutex_unlock(&inode->i_mutex);
2071
2072         /* XXX: add to ext3 orphan list */
2073         /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2074
2075         /* not needed in the cache anymore */
2076         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2077
2078         RETURN(0);
2079 }
2080
2081 /**
2082  * Helper function for osd_xattr_set()
2083  */
2084 static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2085                            const struct lu_buf *buf, const char *name, int fl)
2086 {
2087         struct osd_object      *obj      = osd_dt_obj(dt);
2088         struct inode           *inode    = obj->oo_inode;
2089         struct osd_thread_info *info     = osd_oti_get(env);
2090         struct dentry          *dentry   = &info->oti_child_dentry;
2091         int                     fs_flags = 0;
2092         int                     rc;
2093
2094         LASSERT(dt_object_exists(dt));
2095         LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
2096
2097         if (fl & LU_XATTR_REPLACE)
2098                 fs_flags |= XATTR_REPLACE;
2099
2100         if (fl & LU_XATTR_CREATE)
2101                 fs_flags |= XATTR_CREATE;
2102
2103         ll_vfs_dq_init(inode);
2104         dentry->d_inode = inode;
2105         rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
2106                                    buf->lb_len, fs_flags);
2107         return rc;
2108 }
2109
2110 /**
2111  * Put the fid into lustre_mdt_attrs, and then place the structure
2112  * inode's ea. This fid should not be altered during the life time
2113  * of the inode.
2114  *
2115  * \retval +ve, on success
2116  * \retval -ve, on error
2117  *
2118  * FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
2119  */
2120 static int osd_ea_fid_set(const struct lu_env *env, struct dt_object *dt,
2121                           const struct lu_fid *fid)
2122 {
2123         struct osd_thread_info  *info      = osd_oti_get(env);
2124         struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
2125
2126         lustre_lma_init(mdt_attrs, fid);
2127         lustre_lma_swab(mdt_attrs);
2128         return __osd_xattr_set(env, dt,
2129                                osd_buf_get(env, mdt_attrs, sizeof *mdt_attrs),
2130                                XATTR_NAME_LMA, LU_XATTR_CREATE);
2131
2132 }
2133
2134 /**
2135  * ldiskfs supports fid in dirent, it is passed in dentry->d_fsdata.
2136  * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2137  * To have compatilibility with 1.8 ldiskfs driver we need to have
2138  * magic number at start of fid data.
2139  * \ldiskfs_dentry_param is used only to pass fid from osd to ldiskfs.
2140  * its inmemory API.
2141  */
2142 void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2143                                   const struct dt_rec *fid)
2144 {
2145         param->edp_magic = LDISKFS_LUFID_MAGIC;
2146         param->edp_len =  sizeof(struct lu_fid) + 1;
2147
2148         fid_cpu_to_be((struct lu_fid *)param->edp_data,
2149                       (struct lu_fid *)fid);
2150 }
2151
2152 /**
2153  * Try to read the fid from inode ea into dt_rec, if return value
2154  * i.e. rc is +ve, then we got fid, otherwise we will have to form igif
2155  *
2156  * \param fid object fid.
2157  *
2158  * \retval 0 on success
2159  */
2160 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2161                           __u32 ino, struct lu_fid *fid,
2162                           struct osd_inode_id *id)
2163 {
2164         struct osd_thread_info *info  = osd_oti_get(env);
2165         struct inode           *inode;
2166         ENTRY;
2167
2168         osd_id_gen(id, ino, OSD_OII_NOGEN);
2169         inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2170         if (IS_ERR(inode))
2171                 RETURN(PTR_ERR(inode));
2172
2173         iput(inode);
2174         RETURN(0);
2175 }
2176
2177 /**
2178  * OSD layer object create function for interoperability mode (b11826).
2179  * This is mostly similar to osd_object_create(). Only difference being, fid is
2180  * inserted into inode ea here.
2181  *
2182  * \retval   0, on success
2183  * \retval -ve, on error
2184  */
2185 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2186                                 struct lu_attr *attr,
2187                                 struct dt_allocation_hint *hint,
2188                                 struct dt_object_format *dof,
2189                                 struct thandle *th)
2190 {
2191         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
2192         struct osd_object      *obj    = osd_dt_obj(dt);
2193         struct osd_thread_info *info   = osd_oti_get(env);
2194         int                     result;
2195
2196         ENTRY;
2197
2198         LASSERT(osd_invariant(obj));
2199         LASSERT(!dt_object_exists(dt));
2200         LASSERT(osd_write_locked(env, obj));
2201         LASSERT(th != NULL);
2202
2203         if (unlikely(fid_is_acct(fid)))
2204                 /* Quota files can't be created from the kernel any more,
2205                  * 'tune2fs -O quota' will take care of creating them */
2206                 RETURN(-EPERM);
2207
2208         OSD_EXEC_OP(th, create);
2209
2210         result = __osd_object_create(info, obj, attr, hint, dof, th);
2211         /* objects under osd root shld have igif fid, so dont add fid EA */
2212         if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL)
2213                 result = osd_ea_fid_set(env, dt, fid);
2214
2215         if (result == 0)
2216                 result = __osd_oi_insert(env, obj, fid, th);
2217
2218         LASSERT(ergo(result == 0, dt_object_exists(dt)));
2219         LINVRNT(osd_invariant(obj));
2220         RETURN(result);
2221 }
2222
2223 static int osd_declare_object_ref_add(const struct lu_env *env,
2224                                       struct dt_object *dt,
2225                                       struct thandle *handle)
2226 {
2227         struct osd_thandle *oh;
2228
2229         /* it's possible that object doesn't exist yet */
2230         LASSERT(handle != NULL);
2231
2232         oh = container_of0(handle, struct osd_thandle, ot_super);
2233         LASSERT(oh->ot_handle == NULL);
2234
2235         OSD_DECLARE_OP(oh, ref_add, osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2236
2237         return 0;
2238 }
2239
2240 /*
2241  * Concurrency: @dt is write locked.
2242  */
2243 static int osd_object_ref_add(const struct lu_env *env,
2244                               struct dt_object *dt, struct thandle *th)
2245 {
2246         struct osd_object *obj = osd_dt_obj(dt);
2247         struct inode      *inode = obj->oo_inode;
2248
2249         LINVRNT(osd_invariant(obj));
2250         LASSERT(dt_object_exists(dt));
2251         LASSERT(osd_write_locked(env, obj));
2252         LASSERT(th != NULL);
2253
2254         OSD_EXEC_OP(th, ref_add);
2255
2256         /*
2257          * DIR_NLINK feature is set for compatibility reasons if:
2258          * 1) nlinks > LDISKFS_LINK_MAX, or
2259          * 2) nlinks == 2, since this indicates i_nlink was previously 1.
2260          *
2261          * It is easier to always set this flag (rather than check and set),
2262          * since it has less overhead, and the superblock will be dirtied
2263          * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
2264          * do not actually care whether this flag is set or not.
2265          */
2266         spin_lock(&obj->oo_guard);
2267         /* inc_nlink from 0 may cause WARN_ON */
2268         if(inode->i_nlink == 0)
2269                 set_nlink(inode, 1);
2270         else
2271                 inc_nlink(inode);
2272         if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
2273                 if (inode->i_nlink >= LDISKFS_LINK_MAX ||
2274                     inode->i_nlink == 2)
2275                         set_nlink(inode, 1);
2276         }
2277         LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2278         spin_unlock(&obj->oo_guard);
2279         inode->i_sb->s_op->dirty_inode(inode);
2280         LINVRNT(osd_invariant(obj));
2281
2282         return 0;
2283 }
2284
2285 static int osd_declare_object_ref_del(const struct lu_env *env,
2286                                       struct dt_object *dt,
2287                                       struct thandle *handle)
2288 {
2289         struct osd_thandle *oh;
2290
2291         LASSERT(dt_object_exists(dt));
2292         LASSERT(handle != NULL);
2293
2294         oh = container_of0(handle, struct osd_thandle, ot_super);
2295         LASSERT(oh->ot_handle == NULL);
2296
2297         OSD_DECLARE_OP(oh, ref_del, osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2298
2299         return 0;
2300 }
2301
2302 /*
2303  * Concurrency: @dt is write locked.
2304  */
2305 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2306                               struct thandle *th)
2307 {
2308         struct osd_object *obj = osd_dt_obj(dt);
2309         struct inode      *inode = obj->oo_inode;
2310
2311         LINVRNT(osd_invariant(obj));
2312         LASSERT(dt_object_exists(dt));
2313         LASSERT(osd_write_locked(env, obj));
2314         LASSERT(th != NULL);
2315
2316         OSD_EXEC_OP(th, ref_del);
2317
2318         spin_lock(&obj->oo_guard);
2319         LASSERT(inode->i_nlink > 0);
2320         drop_nlink(inode);
2321         /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
2322          * then the nlink count is 1. Don't let it be set to 0 or the directory
2323          * inode will be deleted incorrectly. */
2324         if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
2325                 set_nlink(inode, 1);
2326         spin_unlock(&obj->oo_guard);
2327         inode->i_sb->s_op->dirty_inode(inode);
2328         LINVRNT(osd_invariant(obj));
2329
2330         return 0;
2331 }
2332
2333 /*
2334  * Get the 64-bit version for an inode.
2335  */
2336 static int osd_object_version_get(const struct lu_env *env,
2337                                   struct dt_object *dt, dt_obj_version_t *ver)
2338 {
2339         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2340
2341         CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2342                LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2343         *ver = LDISKFS_I(inode)->i_fs_version;
2344         return 0;
2345 }
2346
2347 /*
2348  * Concurrency: @dt is read locked.
2349  */
2350 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2351                          struct lu_buf *buf, const char *name,
2352                          struct lustre_capa *capa)
2353 {
2354         struct osd_object      *obj    = osd_dt_obj(dt);
2355         struct inode           *inode  = obj->oo_inode;
2356         struct osd_thread_info *info   = osd_oti_get(env);
2357         struct dentry          *dentry = &info->oti_obj_dentry;
2358
2359         /* version get is not real XATTR but uses xattr API */
2360         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2361                 /* for version we are just using xattr API but change inode
2362                  * field instead */
2363                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2364                 osd_object_version_get(env, dt, buf->lb_buf);
2365                 return sizeof(dt_obj_version_t);
2366         }
2367
2368         LASSERT(dt_object_exists(dt));
2369         LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
2370
2371         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2372                 return -EACCES;
2373
2374         dentry->d_inode = inode;
2375         return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
2376 }
2377
2378
2379 static int osd_declare_xattr_set(const struct lu_env *env,
2380                                  struct dt_object *dt,
2381                                  const struct lu_buf *buf, const char *name,
2382                                  int fl, struct thandle *handle)
2383 {
2384         struct osd_thandle *oh;
2385
2386         LASSERT(handle != NULL);
2387
2388         oh = container_of0(handle, struct osd_thandle, ot_super);
2389         LASSERT(oh->ot_handle == NULL);
2390
2391         OSD_DECLARE_OP(oh, xattr_set, strcmp(name, XATTR_NAME_VERSION) == 0 ?
2392                        osd_dto_credits_noquota[DTO_ATTR_SET_BASE] :
2393                        osd_dto_credits_noquota[DTO_XATTR_SET]);
2394
2395         return 0;
2396 }
2397
2398 /*
2399  * Set the 64-bit version for object
2400  */
2401 static void osd_object_version_set(const struct lu_env *env,
2402                                    struct dt_object *dt,
2403                                    dt_obj_version_t *new_version)
2404 {
2405         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2406
2407         CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
2408                *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2409
2410         LDISKFS_I(inode)->i_fs_version = *new_version;
2411         /** Version is set after all inode operations are finished,
2412          *  so we should mark it dirty here */
2413         inode->i_sb->s_op->dirty_inode(inode);
2414 }
2415
2416 /*
2417  * Concurrency: @dt is write locked.
2418  */
2419 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2420                          const struct lu_buf *buf, const char *name, int fl,
2421                          struct thandle *handle, struct lustre_capa *capa)
2422 {
2423         LASSERT(handle != NULL);
2424
2425         /* version set is not real XATTR */
2426         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2427                 /* for version we are just using xattr API but change inode
2428                  * field instead */
2429                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2430                 osd_object_version_set(env, dt, buf->lb_buf);
2431                 return sizeof(dt_obj_version_t);
2432         }
2433
2434         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2435                 return -EACCES;
2436
2437         OSD_EXEC_OP(handle, xattr_set);
2438         return __osd_xattr_set(env, dt, buf, name, fl);
2439 }
2440
2441 /*
2442  * Concurrency: @dt is read locked.
2443  */
2444 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
2445                           struct lu_buf *buf, struct lustre_capa *capa)
2446 {
2447         struct osd_object      *obj    = osd_dt_obj(dt);
2448         struct inode           *inode  = obj->oo_inode;
2449         struct osd_thread_info *info   = osd_oti_get(env);
2450         struct dentry          *dentry = &info->oti_obj_dentry;
2451
2452         LASSERT(dt_object_exists(dt));
2453         LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
2454         LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2455
2456         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2457                 return -EACCES;
2458
2459         dentry->d_inode = inode;
2460         return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
2461 }
2462
2463 static int osd_declare_xattr_del(const struct lu_env *env,
2464                                  struct dt_object *dt, const char *name,
2465                                  struct thandle *handle)
2466 {
2467         struct osd_thandle *oh;
2468
2469         LASSERT(dt_object_exists(dt));
2470         LASSERT(handle != NULL);
2471
2472         oh = container_of0(handle, struct osd_thandle, ot_super);
2473         LASSERT(oh->ot_handle == NULL);
2474
2475         OSD_DECLARE_OP(oh, xattr_set, osd_dto_credits_noquota[DTO_XATTR_SET]);
2476
2477         return 0;
2478 }
2479
2480 /*
2481  * Concurrency: @dt is write locked.
2482  */
2483 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
2484                          const char *name, struct thandle *handle,
2485                          struct lustre_capa *capa)
2486 {
2487         struct osd_object      *obj    = osd_dt_obj(dt);
2488         struct inode           *inode  = obj->oo_inode;
2489         struct osd_thread_info *info   = osd_oti_get(env);
2490         struct dentry          *dentry = &info->oti_obj_dentry;
2491         int                     rc;
2492
2493         LASSERT(dt_object_exists(dt));
2494         LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
2495         LASSERT(osd_write_locked(env, obj));
2496         LASSERT(handle != NULL);
2497
2498         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2499                 return -EACCES;
2500
2501         OSD_EXEC_OP(handle, xattr_set);
2502
2503         ll_vfs_dq_init(inode);
2504         dentry->d_inode = inode;
2505         rc = inode->i_op->removexattr(dentry, name);
2506         return rc;
2507 }
2508
2509 static struct obd_capa *osd_capa_get(const struct lu_env *env,
2510                                      struct dt_object *dt,
2511                                      struct lustre_capa *old,
2512                                      __u64 opc)
2513 {
2514         struct osd_thread_info *info = osd_oti_get(env);
2515         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2516         struct osd_object *obj = osd_dt_obj(dt);
2517         struct osd_device *dev = osd_obj2dev(obj);
2518         struct lustre_capa_key *key = &info->oti_capa_key;
2519         struct lustre_capa *capa = &info->oti_capa;
2520         struct obd_capa *oc;
2521         struct md_capainfo *ci;
2522         int rc;
2523         ENTRY;
2524
2525         if (!dev->od_fl_capa)
2526                 RETURN(ERR_PTR(-ENOENT));
2527
2528         LASSERT(dt_object_exists(dt));
2529         LINVRNT(osd_invariant(obj));
2530
2531         /* renewal sanity check */
2532         if (old && osd_object_auth(env, dt, old, opc))
2533                 RETURN(ERR_PTR(-EACCES));
2534
2535         ci = md_capainfo(env);
2536         if (unlikely(!ci))
2537                 RETURN(ERR_PTR(-ENOENT));
2538
2539         switch (ci->mc_auth) {
2540         case LC_ID_NONE:
2541                 RETURN(NULL);
2542         case LC_ID_PLAIN:
2543                 capa->lc_uid = obj->oo_inode->i_uid;
2544                 capa->lc_gid = obj->oo_inode->i_gid;
2545                 capa->lc_flags = LC_ID_PLAIN;
2546                 break;
2547         case LC_ID_CONVERT: {
2548                 __u32 d[4], s[4];
2549
2550                 s[0] = obj->oo_inode->i_uid;
2551                 cfs_get_random_bytes(&(s[1]), sizeof(__u32));
2552                 s[2] = obj->oo_inode->i_gid;
2553                 cfs_get_random_bytes(&(s[3]), sizeof(__u32));
2554                 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
2555                 if (unlikely(rc))
2556                         RETURN(ERR_PTR(rc));
2557
2558                 capa->lc_uid   = ((__u64)d[1] << 32) | d[0];
2559                 capa->lc_gid   = ((__u64)d[3] << 32) | d[2];
2560                 capa->lc_flags = LC_ID_CONVERT;
2561                 break;
2562         }
2563         default:
2564                 RETURN(ERR_PTR(-EINVAL));
2565         }
2566
2567         capa->lc_fid = *fid;
2568         capa->lc_opc = opc;
2569         capa->lc_flags |= dev->od_capa_alg << 24;
2570         capa->lc_timeout = dev->od_capa_timeout;
2571         capa->lc_expiry = 0;
2572
2573         oc = capa_lookup(dev->od_capa_hash, capa, 1);
2574         if (oc) {
2575                 LASSERT(!capa_is_expired(oc));
2576                 RETURN(oc);
2577         }
2578
2579         spin_lock(&capa_lock);
2580         *key = dev->od_capa_keys[1];
2581         spin_unlock(&capa_lock);
2582
2583         capa->lc_keyid = key->lk_keyid;
2584         capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
2585
2586         rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
2587         if (rc) {
2588                 DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
2589                 RETURN(ERR_PTR(rc));
2590         }
2591
2592         oc = capa_add(dev->od_capa_hash, capa);
2593         RETURN(oc);
2594 }
2595
2596 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
2597 {
2598         struct osd_object       *obj    = osd_dt_obj(dt);
2599         struct inode            *inode  = obj->oo_inode;
2600         struct osd_thread_info  *info   = osd_oti_get(env);
2601         struct dentry           *dentry = &info->oti_obj_dentry;
2602         struct file             *file   = &info->oti_file;
2603         int                     rc;
2604
2605         ENTRY;
2606
2607         dentry->d_inode = inode;
2608         file->f_dentry = dentry;
2609         file->f_mapping = inode->i_mapping;
2610         file->f_op = inode->i_fop;
2611         mutex_lock(&inode->i_mutex);
2612         rc = file->f_op->fsync(file, dentry, 0);
2613         mutex_unlock(&inode->i_mutex);
2614         RETURN(rc);
2615 }
2616
2617 static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
2618                         void **data)
2619 {
2620         struct osd_object *obj = osd_dt_obj(dt);
2621         ENTRY;
2622
2623         *data = (void *)obj->oo_inode;
2624         RETURN(0);
2625 }
2626
2627 /*
2628  * Index operations.
2629  */
2630
2631 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
2632                            const struct dt_index_features *feat)
2633 {
2634         struct iam_descr *descr;
2635
2636         if (osd_object_is_root(o))
2637                 return feat == &dt_directory_features;
2638
2639         LASSERT(o->oo_dir != NULL);
2640
2641         descr = o->oo_dir->od_container.ic_descr;
2642         if (feat == &dt_directory_features) {
2643                 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
2644                         return 1;
2645                 else
2646                         return 0;
2647         } else {
2648                 return
2649                         feat->dif_keysize_min <= descr->id_key_size &&
2650                         descr->id_key_size <= feat->dif_keysize_max &&
2651                         feat->dif_recsize_min <= descr->id_rec_size &&
2652                         descr->id_rec_size <= feat->dif_recsize_max &&
2653                         !(feat->dif_flags & (DT_IND_VARKEY |
2654                                              DT_IND_VARREC | DT_IND_NONUNQ)) &&
2655                         ergo(feat->dif_flags & DT_IND_UPDATE,
2656                              1 /* XXX check that object (and file system) is
2657                                 * writable */);
2658         }
2659 }
2660
2661 static int osd_iam_container_init(const struct lu_env *env,
2662                                   struct osd_object *obj,
2663                                   struct osd_directory *dir)
2664 {
2665         struct iam_container *bag = &dir->od_container;
2666         int result;
2667
2668         result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
2669         if (result != 0)
2670                 return result;
2671
2672         result = iam_container_setup(bag);
2673         if (result == 0)
2674                 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
2675         else
2676                 iam_container_fini(bag);
2677
2678         return result;
2679 }
2680
2681
2682 /*
2683  * Concurrency: no external locking is necessary.
2684  */
2685 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
2686                          const struct dt_index_features *feat)
2687 {
2688         int                      result;
2689         int                      skip_iam = 0;
2690         struct osd_object       *obj = osd_dt_obj(dt);
2691
2692         LINVRNT(osd_invariant(obj));
2693         LASSERT(dt_object_exists(dt));
2694
2695         if (osd_object_is_root(obj)) {
2696                 dt->do_index_ops = &osd_index_ea_ops;
2697                 result = 0;
2698         } else if (feat == &dt_directory_features) {
2699                 dt->do_index_ops = &osd_index_ea_ops;
2700                 if (S_ISDIR(obj->oo_inode->i_mode))
2701                         result = 0;
2702                 else
2703                         result = -ENOTDIR;
2704                 skip_iam = 1;
2705         } else if (unlikely(feat == &dt_otable_features)) {
2706                 dt->do_index_ops = &osd_otable_ops;
2707                 return 0;
2708         } else if (feat == &dt_acct_features) {
2709                 dt->do_index_ops = &osd_acct_index_ops;
2710                 result = 0;
2711                 skip_iam = 1;
2712         } else if (!osd_has_index(obj)) {
2713                 struct osd_directory *dir;
2714
2715                 OBD_ALLOC_PTR(dir);
2716                 if (dir != NULL) {
2717
2718                         spin_lock(&obj->oo_guard);
2719                         if (obj->oo_dir == NULL)
2720                                 obj->oo_dir = dir;
2721                         else
2722                                 /*
2723                                  * Concurrent thread allocated container data.
2724                                  */
2725                                 OBD_FREE_PTR(dir);
2726                         spin_unlock(&obj->oo_guard);
2727                         /*
2728                          * Now, that we have container data, serialize its
2729                          * initialization.
2730                          */
2731                         down_write(&obj->oo_ext_idx_sem);
2732                         /*
2733                          * recheck under lock.
2734                          */
2735                         if (!osd_has_index(obj))
2736                                 result = osd_iam_container_init(env, obj, dir);
2737                         else
2738                                 result = 0;
2739                         up_write(&obj->oo_ext_idx_sem);
2740                 } else {
2741                         result = -ENOMEM;
2742                 }
2743         } else {
2744                 result = 0;
2745         }
2746
2747         if (result == 0 && skip_iam == 0) {
2748                 if (!osd_iam_index_probe(env, obj, feat))
2749                         result = -ENOTDIR;
2750         }
2751         LINVRNT(osd_invariant(obj));
2752
2753         if (is_quota_glb_feat(feat))
2754                 result = osd_quota_migration(env, dt, feat);
2755
2756         return result;
2757 }
2758
2759 static int osd_otable_it_attr_get(const struct lu_env *env,
2760                                  struct dt_object *dt,
2761                                  struct lu_attr *attr,
2762                                  struct lustre_capa *capa)
2763 {
2764         attr->la_valid = 0;
2765         return 0;
2766 }
2767
2768 static const struct dt_object_operations osd_obj_ops = {
2769         .do_read_lock         = osd_object_read_lock,
2770         .do_write_lock        = osd_object_write_lock,
2771         .do_read_unlock       = osd_object_read_unlock,
2772         .do_write_unlock      = osd_object_write_unlock,
2773         .do_write_locked      = osd_object_write_locked,
2774         .do_attr_get          = osd_attr_get,
2775         .do_declare_attr_set  = osd_declare_attr_set,
2776         .do_attr_set          = osd_attr_set,
2777         .do_ah_init           = osd_ah_init,
2778         .do_declare_create    = osd_declare_object_create,
2779         .do_create            = osd_object_create,
2780         .do_declare_destroy   = osd_declare_object_destroy,
2781         .do_destroy           = osd_object_destroy,
2782         .do_index_try         = osd_index_try,
2783         .do_declare_ref_add   = osd_declare_object_ref_add,
2784         .do_ref_add           = osd_object_ref_add,
2785         .do_declare_ref_del   = osd_declare_object_ref_del,
2786         .do_ref_del           = osd_object_ref_del,
2787         .do_xattr_get         = osd_xattr_get,
2788         .do_declare_xattr_set = osd_declare_xattr_set,
2789         .do_xattr_set         = osd_xattr_set,
2790         .do_declare_xattr_del = osd_declare_xattr_del,
2791         .do_xattr_del         = osd_xattr_del,
2792         .do_xattr_list        = osd_xattr_list,
2793         .do_capa_get          = osd_capa_get,
2794         .do_object_sync       = osd_object_sync,
2795         .do_data_get          = osd_data_get,
2796 };
2797
2798 /**
2799  * dt_object_operations for interoperability mode
2800  * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
2801  */
2802 static const struct dt_object_operations osd_obj_ea_ops = {
2803         .do_read_lock         = osd_object_read_lock,
2804         .do_write_lock        = osd_object_write_lock,
2805         .do_read_unlock       = osd_object_read_unlock,
2806         .do_write_unlock      = osd_object_write_unlock,
2807         .do_write_locked      = osd_object_write_locked,
2808         .do_attr_get          = osd_attr_get,
2809         .do_declare_attr_set  = osd_declare_attr_set,
2810         .do_attr_set          = osd_attr_set,
2811         .do_ah_init           = osd_ah_init,
2812         .do_declare_create    = osd_declare_object_create,
2813         .do_create            = osd_object_ea_create,
2814         .do_declare_destroy   = osd_declare_object_destroy,
2815         .do_destroy           = osd_object_destroy,
2816         .do_index_try         = osd_index_try,
2817         .do_declare_ref_add   = osd_declare_object_ref_add,
2818         .do_ref_add           = osd_object_ref_add,
2819         .do_declare_ref_del   = osd_declare_object_ref_del,
2820         .do_ref_del           = osd_object_ref_del,
2821         .do_xattr_get         = osd_xattr_get,
2822         .do_declare_xattr_set = osd_declare_xattr_set,
2823         .do_xattr_set         = osd_xattr_set,
2824         .do_declare_xattr_del = osd_declare_xattr_del,
2825         .do_xattr_del         = osd_xattr_del,
2826         .do_xattr_list        = osd_xattr_list,
2827         .do_capa_get          = osd_capa_get,
2828         .do_object_sync       = osd_object_sync,
2829         .do_data_get          = osd_data_get,
2830 };
2831
2832 static const struct dt_object_operations osd_obj_otable_it_ops = {
2833         .do_attr_get    = osd_otable_it_attr_get,
2834         .do_index_try   = osd_index_try,
2835 };
2836
2837 static int osd_index_declare_iam_delete(const struct lu_env *env,
2838                                         struct dt_object *dt,
2839                                         const struct dt_key *key,
2840                                         struct thandle *handle)
2841 {
2842         struct osd_thandle    *oh;
2843
2844         oh = container_of0(handle, struct osd_thandle, ot_super);
2845         LASSERT(oh->ot_handle == NULL);
2846
2847         OSD_DECLARE_OP(oh, delete, osd_dto_credits_noquota[DTO_INDEX_DELETE]);
2848
2849         return 0;
2850 }
2851
2852 /**
2853  *      delete a (key, value) pair from index \a dt specified by \a key
2854  *
2855  *      \param  dt      osd index object
2856  *      \param  key     key for index
2857  *      \param  rec     record reference
2858  *      \param  handle  transaction handler
2859  *
2860  *      \retval  0  success
2861  *      \retval -ve   failure
2862  */
2863
2864 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
2865                                 const struct dt_key *key,
2866                                 struct thandle *handle,
2867                                 struct lustre_capa *capa)
2868 {
2869         struct osd_thread_info *oti = osd_oti_get(env);
2870         struct osd_object      *obj = osd_dt_obj(dt);
2871         struct osd_thandle     *oh;
2872         struct iam_path_descr  *ipd;
2873         struct iam_container   *bag = &obj->oo_dir->od_container;
2874         int                     rc;
2875
2876         ENTRY;
2877
2878         LINVRNT(osd_invariant(obj));
2879         LASSERT(dt_object_exists(dt));
2880         LASSERT(bag->ic_object == obj->oo_inode);
2881         LASSERT(handle != NULL);
2882
2883         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2884                 RETURN(-EACCES);
2885
2886         OSD_EXEC_OP(handle, delete);
2887
2888         ipd = osd_idx_ipd_get(env, bag);
2889         if (unlikely(ipd == NULL))
2890                 RETURN(-ENOMEM);
2891
2892         oh = container_of0(handle, struct osd_thandle, ot_super);
2893         LASSERT(oh->ot_handle != NULL);
2894         LASSERT(oh->ot_handle->h_transaction != NULL);
2895
2896         if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
2897                 /* swab quota uid/gid provided by caller */
2898                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
2899                 key = (const struct dt_key *)&oti->oti_quota_id;
2900         }
2901
2902         rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
2903         osd_ipd_put(env, bag, ipd);
2904         LINVRNT(osd_invariant(obj));
2905         RETURN(rc);
2906 }
2907
2908 static int osd_index_declare_ea_delete(const struct lu_env *env,
2909                                        struct dt_object *dt,
2910                                        const struct dt_key *key,
2911                                        struct thandle *handle)
2912 {
2913         struct osd_thandle *oh;
2914         struct inode       *inode;
2915         int                 rc;
2916         ENTRY;
2917
2918         LASSERT(dt_object_exists(dt));
2919         LASSERT(handle != NULL);
2920
2921         oh = container_of0(handle, struct osd_thandle, ot_super);
2922         LASSERT(oh->ot_handle == NULL);
2923
2924         OSD_DECLARE_OP(oh, delete, osd_dto_credits_noquota[DTO_INDEX_DELETE]);
2925
2926         inode = osd_dt_obj(dt)->oo_inode;
2927         LASSERT(inode);
2928
2929         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
2930                                    true, true, NULL, false);
2931         RETURN(rc);
2932 }
2933
2934 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
2935                                           struct dt_rec *fid)
2936 {
2937         struct osd_fid_pack *rec;
2938         int                  rc = -ENODATA;
2939
2940         if (de->file_type & LDISKFS_DIRENT_LUFID) {
2941                 rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
2942                 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
2943         }
2944         RETURN(rc);
2945 }
2946
2947 /**
2948  * Index delete function for interoperability mode (b11826).
2949  * It will remove the directory entry added by osd_index_ea_insert().
2950  * This entry is needed to maintain name->fid mapping.
2951  *
2952  * \param key,  key i.e. file entry to be deleted
2953  *
2954  * \retval   0, on success
2955  * \retval -ve, on error
2956  */
2957 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
2958                                const struct dt_key *key,
2959                                struct thandle *handle,
2960                                struct lustre_capa *capa)
2961 {
2962         struct osd_object          *obj    = osd_dt_obj(dt);
2963         struct inode               *dir    = obj->oo_inode;
2964         struct dentry              *dentry;
2965         struct osd_thandle         *oh;
2966         struct ldiskfs_dir_entry_2 *de;
2967         struct buffer_head         *bh;
2968         struct htree_lock          *hlock = NULL;
2969         int                         rc;
2970
2971         ENTRY;
2972
2973         LINVRNT(osd_invariant(obj));
2974         LASSERT(dt_object_exists(dt));
2975         LASSERT(handle != NULL);
2976
2977         OSD_EXEC_OP(handle, delete);
2978
2979         oh = container_of(handle, struct osd_thandle, ot_super);
2980         LASSERT(oh->ot_handle != NULL);
2981         LASSERT(oh->ot_handle->h_transaction != NULL);
2982
2983         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2984                 RETURN(-EACCES);
2985
2986         ll_vfs_dq_init(dir);
2987         dentry = osd_child_dentry_get(env, obj,
2988                                       (char *)key, strlen((char *)key));
2989
2990         if (obj->oo_hl_head != NULL) {
2991                 hlock = osd_oti_get(env)->oti_hlock;
2992                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
2993                                    dir, LDISKFS_HLOCK_DEL);
2994         } else {
2995                 down_write(&obj->oo_ext_idx_sem);
2996         }
2997
2998         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
2999         if (bh) {
3000                 rc = ldiskfs_delete_entry(oh->ot_handle,
3001                                           dir, de, bh);
3002                 brelse(bh);
3003         } else {
3004                 rc = -ENOENT;
3005         }
3006         if (hlock != NULL)
3007                 ldiskfs_htree_unlock(hlock);
3008         else
3009                 up_write(&obj->oo_ext_idx_sem);
3010
3011         LASSERT(osd_invariant(obj));
3012         RETURN(rc);
3013 }
3014
3015 /**
3016  *      Lookup index for \a key and copy record to \a rec.
3017  *
3018  *      \param  dt      osd index object
3019  *      \param  key     key for index
3020  *      \param  rec     record reference
3021  *
3022  *      \retval  +ve  success : exact mach
3023  *      \retval  0    return record with key not greater than \a key
3024  *      \retval -ve   failure
3025  */
3026 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
3027                                 struct dt_rec *rec, const struct dt_key *key,
3028                                 struct lustre_capa *capa)
3029 {
3030         struct osd_object      *obj = osd_dt_obj(dt);
3031         struct iam_path_descr  *ipd;
3032         struct iam_container   *bag = &obj->oo_dir->od_container;
3033         struct osd_thread_info *oti = osd_oti_get(env);
3034         struct iam_iterator    *it = &oti->oti_idx_it;
3035         struct iam_rec         *iam_rec;
3036         int                     rc;
3037
3038         ENTRY;
3039
3040         LASSERT(osd_invariant(obj));
3041         LASSERT(dt_object_exists(dt));
3042         LASSERT(bag->ic_object == obj->oo_inode);
3043
3044         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
3045                 RETURN(-EACCES);
3046
3047         ipd = osd_idx_ipd_get(env, bag);
3048         if (IS_ERR(ipd))
3049                 RETURN(-ENOMEM);
3050
3051         /* got ipd now we can start iterator. */
3052         iam_it_init(it, bag, 0, ipd);
3053
3054         if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3055                 /* swab quota uid/gid provided by caller */
3056                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3057                 key = (const struct dt_key *)&oti->oti_quota_id;
3058         }
3059
3060         rc = iam_it_get(it, (struct iam_key *)key);
3061         if (rc >= 0) {
3062                 if (S_ISDIR(obj->oo_inode->i_mode))
3063                         iam_rec = (struct iam_rec *)oti->oti_ldp;
3064                 else
3065                         iam_rec = (struct iam_rec *) rec;
3066
3067                 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
3068
3069                 if (S_ISDIR(obj->oo_inode->i_mode))
3070                         osd_fid_unpack((struct lu_fid *) rec,
3071                                        (struct osd_fid_pack *)iam_rec);
3072                 else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
3073                         osd_quota_unpack(obj, rec);
3074         }
3075
3076         iam_it_put(it);
3077         iam_it_fini(it);
3078         osd_ipd_put(env, bag, ipd);
3079
3080         LINVRNT(osd_invariant(obj));
3081
3082         RETURN(rc);
3083 }
3084
3085 static int osd_index_declare_iam_insert(const struct lu_env *env,
3086                                         struct dt_object *dt,
3087                                         const struct dt_rec *rec,
3088                                         const struct dt_key *key,
3089                                         struct thandle *handle)
3090 {
3091         struct osd_thandle *oh;
3092
3093         LASSERT(dt_object_exists(dt));
3094         LASSERT(handle != NULL);
3095
3096         oh = container_of0(handle, struct osd_thandle, ot_super);
3097         LASSERT(oh->ot_handle == NULL);
3098
3099         OSD_DECLARE_OP(oh, insert, osd_dto_credits_noquota[DTO_INDEX_INSERT]);
3100
3101         return 0;
3102 }
3103
3104 /**
3105  *      Inserts (key, value) pair in \a dt index object.
3106  *
3107  *      \param  dt      osd index object
3108  *      \param  key     key for index
3109  *      \param  rec     record reference
3110  *      \param  th      transaction handler
3111  *
3112  *      \retval  0  success
3113  *      \retval -ve failure
3114  */
3115 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
3116                                 const struct dt_rec *rec,
3117                                 const struct dt_key *key, struct thandle *th,
3118                                 struct lustre_capa *capa, int ignore_quota)
3119 {
3120         struct osd_object     *obj = osd_dt_obj(dt);
3121         struct iam_path_descr *ipd;
3122         struct osd_thandle    *oh;
3123         struct iam_container  *bag = &obj->oo_dir->od_container;
3124         struct osd_thread_info *oti = osd_oti_get(env);
3125         struct iam_rec         *iam_rec;
3126         int                     rc;
3127
3128         ENTRY;
3129
3130         LINVRNT(osd_invariant(obj));
3131         LASSERT(dt_object_exists(dt));
3132         LASSERT(bag->ic_object == obj->oo_inode);
3133         LASSERT(th != NULL);
3134
3135         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3136                 RETURN(-EACCES);
3137
3138         OSD_EXEC_OP(th, insert);
3139
3140         ipd = osd_idx_ipd_get(env, bag);
3141         if (unlikely(ipd == NULL))
3142                 RETURN(-ENOMEM);
3143
3144         oh = container_of0(th, struct osd_thandle, ot_super);
3145         LASSERT(oh->ot_handle != NULL);
3146         LASSERT(oh->ot_handle->h_transaction != NULL);
3147         if (S_ISDIR(obj->oo_inode->i_mode)) {
3148                 iam_rec = (struct iam_rec *)oti->oti_ldp;
3149                 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
3150         } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3151                 /* pack quota uid/gid */
3152                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3153                 key = (const struct dt_key *)&oti->oti_quota_id;
3154                 /* pack quota record */
3155                 rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
3156                 iam_rec = (struct iam_rec *)rec;
3157         } else {
3158                 iam_rec = (struct iam_rec *)rec;
3159         }
3160
3161         rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
3162                         iam_rec, ipd);
3163         osd_ipd_put(env, bag, ipd);
3164         LINVRNT(osd_invariant(obj));
3165         RETURN(rc);
3166 }
3167
3168 /**
3169  * Calls ldiskfs_add_entry() to add directory entry
3170  * into the directory. This is required for
3171  * interoperability mode (b11826)
3172  *
3173  * \retval   0, on success
3174  * \retval -ve, on error
3175  */
3176 static int __osd_ea_add_rec(struct osd_thread_info *info,
3177                             struct osd_object *pobj, struct inode  *cinode,
3178                             const char *name, const struct dt_rec *fid,
3179                             struct htree_lock *hlock, struct thandle *th)
3180 {
3181         struct ldiskfs_dentry_param *ldp;
3182         struct dentry               *child;
3183         struct osd_thandle          *oth;
3184         int                          rc;
3185
3186         oth = container_of(th, struct osd_thandle, ot_super);
3187         LASSERT(oth->ot_handle != NULL);
3188         LASSERT(oth->ot_handle->h_transaction != NULL);
3189
3190         child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
3191
3192         /* XXX: remove fid_is_igif() check here.
3193          * IGIF check is just to handle insertion of .. when it is 'ROOT',
3194          * it is IGIF now but needs FID in dir entry as well for readdir
3195          * to work.
3196          * LU-838 should fix that and remove fid_is_igif() check */
3197         if (fid_is_igif((struct lu_fid *)fid) ||
3198             fid_is_norm((struct lu_fid *)fid)) {
3199                 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3200                 osd_get_ldiskfs_dirent_param(ldp, fid);
3201                 child->d_fsdata = (void *)ldp;
3202         } else {
3203                 child->d_fsdata = NULL;
3204         }
3205         LASSERT(pobj->oo_inode);
3206         ll_vfs_dq_init(pobj->oo_inode);
3207         rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
3208
3209         RETURN(rc);
3210 }
3211
3212 /**
3213  * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
3214  * into the directory.Also sets flags into osd object to
3215  * indicate dot and dotdot are created. This is required for
3216  * interoperability mode (b11826)
3217  *
3218  * \param dir   directory for dot and dotdot fixup.
3219  * \param obj   child object for linking
3220  *
3221  * \retval   0, on success
3222  * \retval -ve, on error
3223  */
3224 static int osd_add_dot_dotdot(struct osd_thread_info *info,
3225                               struct osd_object *dir,
3226                               struct inode  *parent_dir, const char *name,
3227                               const struct dt_rec *dot_fid,
3228                               const struct dt_rec *dot_dot_fid,
3229                               struct thandle *th)
3230 {
3231         struct inode                *inode = dir->oo_inode;
3232         struct ldiskfs_dentry_param *dot_ldp;
3233         struct ldiskfs_dentry_param *dot_dot_ldp;
3234         struct osd_thandle          *oth;
3235         int result = 0;
3236
3237         oth = container_of(th, struct osd_thandle, ot_super);
3238         LASSERT(oth->ot_handle->h_transaction != NULL);
3239         LASSERT(S_ISDIR(dir->oo_inode->i_mode));
3240
3241         if (strcmp(name, dot) == 0) {
3242                 if (dir->oo_compat_dot_created) {
3243                         result = -EEXIST;
3244                 } else {
3245                         LASSERT(inode == parent_dir);
3246                         dir->oo_compat_dot_created = 1;
3247                         result = 0;
3248                 }
3249         } else if(strcmp(name, dotdot) == 0) {
3250                 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3251                 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
3252
3253                 if (!dir->oo_compat_dot_created)
3254                         return -EINVAL;
3255                 if (!fid_is_igif((struct lu_fid *)dot_fid)) {
3256                         osd_get_ldiskfs_dirent_param(dot_ldp, dot_fid);
3257                         osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3258                 } else {
3259                         dot_ldp = NULL;
3260                         dot_dot_ldp = NULL;
3261                 }
3262                 /* in case of rename, dotdot is already created */
3263                 if (dir->oo_compat_dotdot_created) {
3264                         return __osd_ea_add_rec(info, dir, parent_dir, name,
3265                                                 dot_dot_fid, NULL, th);
3266                 }
3267
3268                 result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3269                                                 inode, dot_ldp, dot_dot_ldp);
3270                 if (result == 0)
3271                        dir->oo_compat_dotdot_created = 1;
3272         }
3273
3274         return result;
3275 }
3276
3277
3278 /**
3279  * It will call the appropriate osd_add* function and return the
3280  * value, return by respective functions.
3281  */
3282 static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
3283                           struct inode *cinode, const char *name,
3284                           const struct dt_rec *fid, struct thandle *th)
3285 {
3286         struct osd_thread_info *info   = osd_oti_get(env);
3287         struct htree_lock      *hlock;
3288         int                     rc;
3289
3290         hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
3291
3292         if (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' &&
3293                                                    name[2] =='\0'))) {
3294                 if (hlock != NULL) {
3295                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3296                                            pobj->oo_inode, 0);
3297                 } else {
3298                         down_write(&pobj->oo_ext_idx_sem);
3299                 }
3300                 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
3301                      (struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
3302                                         fid, th);
3303         } else {
3304                 if (hlock != NULL) {
3305                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3306                                            pobj->oo_inode, LDISKFS_HLOCK_ADD);
3307                 } else {
3308                         down_write(&pobj->oo_ext_idx_sem);
3309                 }
3310
3311                 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
3312                                       hlock, th);
3313         }
3314         if (hlock != NULL)
3315                 ldiskfs_htree_unlock(hlock);
3316         else
3317                 up_write(&pobj->oo_ext_idx_sem);
3318
3319         return rc;
3320 }
3321
3322 static void
3323 osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
3324                       struct osd_idmap_cache *oic)
3325 {
3326         struct osd_scrub    *scrub = &dev->od_scrub;
3327         struct lu_fid       *fid   = &oic->oic_fid;
3328         struct osd_inode_id *id    = &oti->oti_id;
3329         int                  once  = 0;
3330         int                  rc;
3331         ENTRY;
3332
3333         if (!fid_is_norm(fid) && !fid_is_igif(fid))
3334                 RETURN_EXIT;
3335
3336 again:
3337         rc = osd_oi_lookup(oti, dev, fid, id);
3338         if (rc != 0 && rc != -ENOENT)
3339                 RETURN_EXIT;
3340
3341         if (rc == 0 && osd_id_eq(id, &oic->oic_lid))
3342                 RETURN_EXIT;
3343
3344         if (thread_is_running(&scrub->os_thread)) {
3345                 rc = osd_oii_insert(dev, oic, rc == -ENOENT);
3346                 /* There is race condition between osd_oi_lookup and OI scrub.
3347                  * The OI scrub finished just after osd_oi_lookup() failure.
3348                  * Under such case, it is unnecessary to trigger OI scrub again,
3349                  * but try to call osd_oi_lookup() again. */
3350                 if (unlikely(rc == -EAGAIN))
3351                         goto again;
3352
3353                 RETURN_EXIT;
3354         }
3355
3356         if (!dev->od_noscrub && ++once == 1) {
3357                 CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
3358                        PFID(fid));
3359                 rc = osd_scrub_start(dev);
3360                 LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC for "DFID
3361                                ", rc = %d [2]\n",
3362                                LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
3363                                PFID(fid), rc);
3364                 if (rc == 0)
3365                         goto again;
3366         }
3367
3368         EXIT;
3369 }
3370
3371 /**
3372  * Calls ->lookup() to find dentry. From dentry get inode and
3373  * read inode's ea to get fid. This is required for  interoperability
3374  * mode (b11826)
3375  *
3376  * \retval   0, on success
3377  * \retval -ve, on error
3378  */
3379 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
3380                              struct dt_rec *rec, const struct dt_key *key)
3381 {
3382         struct inode               *dir    = obj->oo_inode;
3383         struct dentry              *dentry;
3384         struct ldiskfs_dir_entry_2 *de;
3385         struct buffer_head         *bh;
3386         struct lu_fid              *fid = (struct lu_fid *) rec;
3387         struct htree_lock          *hlock = NULL;
3388         int                         ino;
3389         int                         rc;
3390
3391         LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
3392
3393         dentry = osd_child_dentry_get(env, obj,
3394                                       (char *)key, strlen((char *)key));
3395
3396         if (obj->oo_hl_head != NULL) {
3397                 hlock = osd_oti_get(env)->oti_hlock;
3398                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3399                                    dir, LDISKFS_HLOCK_LOOKUP);
3400         } else {
3401                 down_read(&obj->oo_ext_idx_sem);
3402         }
3403
3404         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
3405         if (bh) {
3406                 struct osd_thread_info *oti = osd_oti_get(env);
3407                 struct osd_idmap_cache *oic = &oti->oti_cache;
3408                 struct osd_device *dev = osd_obj2dev(obj);
3409                 struct osd_scrub *scrub = &dev->od_scrub;
3410                 struct scrub_file *sf = &scrub->os_file;
3411
3412                 ino = le32_to_cpu(de->inode);
3413                 rc = osd_get_fid_from_dentry(de, rec);
3414
3415                 /* done with de, release bh */
3416                 brelse(bh);
3417                 if (rc != 0)
3418                         rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
3419                 else
3420                         osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
3421                 if (rc != 0) {
3422                         fid_zero(&oic->oic_fid);
3423                         GOTO(out, rc);
3424                 }
3425
3426                 oic->oic_fid = *fid;
3427                 if ((scrub->os_pos_current <= ino) &&
3428                     (sf->sf_flags & SF_INCONSISTENT ||
3429                      ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
3430                                       sf->sf_oi_bitmap)))
3431                         osd_consistency_check(oti, dev, oic);
3432         } else {
3433                 rc = -ENOENT;
3434         }
3435
3436         GOTO(out, rc);
3437
3438 out:
3439         if (hlock != NULL)
3440                 ldiskfs_htree_unlock(hlock);
3441         else
3442                 up_read(&obj->oo_ext_idx_sem);
3443         return rc;
3444 }
3445
3446 /**
3447  * Find the osd object for given fid.
3448  *
3449  * \param fid need to find the osd object having this fid
3450  *
3451  * \retval osd_object on success
3452  * \retval        -ve on error
3453  */
3454 struct osd_object *osd_object_find(const struct lu_env *env,
3455                                    struct dt_object *dt,
3456                                    const struct lu_fid *fid)
3457 {
3458         struct lu_device  *ludev = dt->do_lu.lo_dev;
3459         struct osd_object *child = NULL;
3460         struct lu_object  *luch;
3461         struct lu_object  *lo;
3462
3463         /*
3464          * at this point topdev might not exist yet
3465          * (i.e. MGS is preparing profiles). so we can
3466          * not rely on topdev and instead lookup with
3467          * our device passed as topdev. this can't work
3468          * if the object isn't cached yet (as osd doesn't
3469          * allocate lu_header). IOW, the object must be
3470          * in the cache, otherwise lu_object_alloc() crashes
3471          * -bzzz
3472          */
3473         luch = lu_object_find_at(env, ludev, fid, NULL);
3474         if (!IS_ERR(luch)) {
3475                 if (lu_object_exists(luch)) {
3476                         lo = lu_object_locate(luch->lo_header, ludev->ld_type);
3477                         if (lo != NULL)
3478                                 child = osd_obj(lo);
3479                         else
3480                                 LU_OBJECT_DEBUG(D_ERROR, env, luch,
3481                                                 "lu_object can't be located"
3482                                                 DFID"\n", PFID(fid));
3483
3484                         if (child == NULL) {
3485                                 lu_object_put(env, luch);
3486                                 CERROR("Unable to get osd_object\n");
3487                                 child = ERR_PTR(-ENOENT);
3488                         }
3489                 } else {
3490                         LU_OBJECT_DEBUG(D_ERROR, env, luch,
3491                                         "lu_object does not exists "DFID"\n",
3492                                         PFID(fid));
3493                         lu_object_put(env, luch);
3494                         child = ERR_PTR(-ENOENT);
3495                 }
3496         } else
3497                 child = (void *)luch;
3498
3499         return child;
3500 }
3501
3502 /**
3503  * Put the osd object once done with it.
3504  *
3505  * \param obj osd object that needs to be put
3506  */
3507 static inline void osd_object_put(const struct lu_env *env,
3508                                   struct osd_object *obj)
3509 {
3510         lu_object_put(env, &obj->oo_dt.do_lu);
3511 }
3512
3513 static int osd_index_declare_ea_insert(const struct lu_env *env,
3514                                        struct dt_object *dt,
3515                                        const struct dt_rec *rec,
3516                                        const struct dt_key *key,
3517                                        struct thandle *handle)
3518 {
3519         struct osd_thandle *oh;
3520         struct inode       *inode;
3521         int                 rc;
3522         ENTRY;
3523
3524         LASSERT(dt_object_exists(dt));
3525         LASSERT(handle != NULL);
3526
3527         oh = container_of0(handle, struct osd_thandle, ot_super);
3528         LASSERT(oh->ot_handle == NULL);
3529
3530         OSD_DECLARE_OP(oh, insert, osd_dto_credits_noquota[DTO_INDEX_INSERT]);
3531
3532         inode = osd_dt_obj(dt)->oo_inode;
3533         LASSERT(inode);
3534
3535         /* We ignore block quota on meta pool (MDTs), so needn't
3536          * calculate how many blocks will be consumed by this index
3537          * insert */
3538         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
3539                                    true, true, NULL, false);
3540         RETURN(rc);
3541 }
3542
3543 /**
3544  * Index add function for interoperability mode (b11826).
3545  * It will add the directory entry.This entry is needed to
3546  * maintain name->fid mapping.
3547  *
3548  * \param key it is key i.e. file entry to be inserted
3549  * \param rec it is value of given key i.e. fid
3550  *
3551  * \retval   0, on success
3552  * \retval -ve, on error
3553  */
3554 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
3555                                const struct dt_rec *rec,
3556                                const struct dt_key *key, struct thandle *th,
3557                                struct lustre_capa *capa, int ignore_quota)
3558 {
3559         struct osd_object *obj   = osd_dt_obj(dt);
3560         struct lu_fid     *fid   = (struct lu_fid *) rec;
3561         const char        *name  = (const char *)key;
3562         struct osd_object *child;
3563         int                rc;
3564
3565         ENTRY;
3566
3567         LASSERT(osd_invariant(obj));
3568         LASSERT(dt_object_exists(dt));
3569         LASSERT(th != NULL);
3570
3571         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3572                 RETURN(-EACCES);
3573
3574         child = osd_object_find(env, dt, fid);
3575         if (!IS_ERR(child)) {
3576                 rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
3577                 osd_object_put(env, child);
3578         } else {
3579                 rc = PTR_ERR(child);
3580         }
3581
3582         LASSERT(osd_invariant(obj));
3583         RETURN(rc);
3584 }
3585
3586 /**
3587  *  Initialize osd Iterator for given osd index object.
3588  *
3589  *  \param  dt      osd index object
3590  */
3591
3592 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
3593                                      struct dt_object *dt,
3594                                      __u32 unused,
3595                                      struct lustre_capa *capa)
3596 {
3597         struct osd_it_iam      *it;
3598         struct osd_thread_info *oti = osd_oti_get(env);
3599         struct osd_object      *obj = osd_dt_obj(dt);
3600         struct lu_object       *lo  = &dt->do_lu;
3601         struct iam_path_descr  *ipd;
3602         struct iam_container   *bag = &obj->oo_dir->od_container;
3603
3604         LASSERT(lu_object_exists(lo));
3605
3606         if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
3607                 return ERR_PTR(-EACCES);
3608
3609         it = &oti->oti_it;
3610         ipd = osd_it_ipd_get(env, bag);
3611         if (likely(ipd != NULL)) {
3612                 it->oi_obj = obj;
3613                 it->oi_ipd = ipd;
3614                 lu_object_get(lo);
3615                 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
3616                 return (struct dt_it *)it;
3617         }
3618         return ERR_PTR(-ENOMEM);
3619 }
3620
3621 /**
3622  * free given Iterator.
3623  */
3624
3625 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
3626 {
3627         struct osd_it_iam *it = (struct osd_it_iam *)di;
3628         struct osd_object *obj = it->oi_obj;
3629
3630         iam_it_fini(&it->oi_it);
3631         osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
3632         lu_object_put(env, &obj->oo_dt.do_lu);
3633 }
3634
3635 /**
3636  *  Move Iterator to record specified by \a key
3637  *
3638  *  \param  di      osd iterator
3639  *  \param  key     key for index
3640  *
3641  *  \retval +ve  di points to record with least key not larger than key
3642  *  \retval  0   di points to exact matched key
3643  *  \retval -ve  failure
3644  */
3645
3646 static int osd_it_iam_get(const struct lu_env *env,
3647                           struct dt_it *di, const struct dt_key *key)
3648 {
3649         struct osd_thread_info  *oti = osd_oti_get(env);
3650         struct osd_it_iam       *it = (struct osd_it_iam *)di;
3651
3652         if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
3653                 /* swab quota uid/gid */
3654                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3655                 key = (struct dt_key *)&oti->oti_quota_id;
3656         }
3657
3658         return iam_it_get(&it->oi_it, (const struct iam_key *)key);
3659 }
3660
3661 /**
3662  *  Release Iterator
3663  *
3664  *  \param  di      osd iterator
3665  */
3666
3667 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
3668 {
3669         struct osd_it_iam *it = (struct osd_it_iam *)di;
3670
3671         iam_it_put(&it->oi_it);
3672 }
3673
3674 /**
3675  *  Move iterator by one record
3676  *
3677  *  \param  di      osd iterator
3678  *
3679  *  \retval +1   end of container reached
3680  *  \retval  0   success
3681  *  \retval -ve  failure
3682  */
3683
3684 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
3685 {
3686         struct osd_it_iam *it = (struct osd_it_iam *)di;
3687
3688         return iam_it_next(&it->oi_it);
3689 }
3690
3691 /**
3692  * Return pointer to the key under iterator.
3693  */
3694
3695 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
3696                                  const struct dt_it *di)
3697 {
3698         struct osd_thread_info *oti = osd_oti_get(env);
3699         struct osd_it_iam      *it = (struct osd_it_iam *)di;
3700         struct osd_object      *obj = it->oi_obj;
3701         struct dt_key          *key;
3702
3703         key = (struct dt_key *)iam_it_key_get(&it->oi_it);
3704
3705         if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
3706                 /* swab quota uid/gid */
3707                 oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
3708                 key = (struct dt_key *)&oti->oti_quota_id;
3709         }
3710
3711         return key;
3712 }
3713
3714 /**
3715  * Return size of key under iterator (in bytes)
3716  */
3717
3718 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
3719 {
3720         struct osd_it_iam *it = (struct osd_it_iam *)di;
3721
3722         return iam_it_key_size(&it->oi_it);
3723 }
3724
3725 static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
3726                                        int len, __u16 type)
3727 {
3728         struct luda_type *lt;
3729         const unsigned    align = sizeof(struct luda_type) - 1;
3730
3731         /* check if file type is required */
3732         if (attr & LUDA_TYPE) {
3733                         len = (len + align) & ~align;
3734
3735                         lt = (void *) ent->lde_name + len;
3736                         lt->lt_type = cpu_to_le16(CFS_DTTOIF(type));
3737                         ent->lde_attrs |= LUDA_TYPE;
3738         }
3739
3740         ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
3741 }
3742
3743 /**
3744  * build lu direct from backend fs dirent.
3745  */
3746
3747 static inline void osd_it_pack_dirent(struct lu_dirent *ent,
3748                                       struct lu_fid *fid, __u64 offset,
3749                                       char *name, __u16 namelen,
3750                                       __u16 type, __u32 attr)
3751 {
3752         fid_cpu_to_le(&ent->lde_fid, fid);
3753         ent->lde_attrs = LUDA_FID;
3754
3755         ent->lde_hash = cpu_to_le64(offset);
3756         ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
3757
3758         strncpy(ent->lde_name, name, namelen);
3759         ent->lde_namelen = cpu_to_le16(namelen);
3760
3761         /* append lustre attributes */
3762         osd_it_append_attrs(ent, attr, namelen, type);
3763 }
3764
3765 /**
3766  * Return pointer to the record under iterator.
3767  */
3768 static int osd_it_iam_rec(const struct lu_env *env,
3769                           const struct dt_it *di,
3770                           struct dt_rec *dtrec, __u32 attr)
3771 {
3772         struct osd_it_iam      *it   = (struct osd_it_iam *)di;
3773         struct osd_thread_info *info = osd_oti_get(env);
3774         ENTRY;
3775
3776         if (S_ISDIR(it->oi_obj->oo_inode->i_mode)) {
3777                 const struct osd_fid_pack *rec;
3778                 struct lu_fid             *fid = &info->oti_fid;
3779                 struct lu_dirent          *lde = (struct lu_dirent *)dtrec;
3780                 char                      *name;
3781                 int                        namelen;
3782                 __u64                      hash;
3783                 int                        rc;
3784
3785                 name = (char *)iam_it_key_get(&it->oi_it);
3786                 if (IS_ERR(name))
3787                         RETURN(PTR_ERR(name));
3788
3789                 namelen = iam_it_key_size(&it->oi_it);
3790
3791                 rec = (const struct osd_fid_pack *)iam_it_rec_get(&it->oi_it);
3792                 if (IS_ERR(rec))
3793                         RETURN(PTR_ERR(rec));
3794
3795                 rc = osd_fid_unpack(fid, rec);
3796                 if (rc)
3797                         RETURN(rc);
3798
3799                 hash = iam_it_store(&it->oi_it);
3800
3801                 /* IAM does not store object type in IAM index (dir) */
3802                 osd_it_pack_dirent(lde, fid, hash, name, namelen,
3803                                    0, LUDA_FID);
3804         } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
3805                 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
3806                            (struct iam_rec *)dtrec);
3807                 osd_quota_unpack(it->oi_obj, dtrec);
3808         } else {
3809                 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
3810                            (struct iam_rec *)dtrec);
3811         }
3812
3813         RETURN(0);
3814 }
3815
3816 /**
3817  * Returns cookie for current Iterator position.
3818  */
3819 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
3820 {
3821         struct osd_it_iam *it = (struct osd_it_iam *)di;
3822
3823         return iam_it_store(&it->oi_it);
3824 }
3825
3826 /**
3827  * Restore iterator from cookie.
3828  *
3829  * \param  di      osd iterator
3830  * \param  hash    Iterator location cookie
3831  *
3832  * \retval +ve  di points to record with least key not larger than key.
3833  * \retval  0   di points to exact matched key
3834  * \retval -ve  failure
3835  */
3836
3837 static int osd_it_iam_load(const struct lu_env *env,
3838                            const struct dt_it *di, __u64 hash)
3839 {
3840         struct osd_it_iam *it = (struct osd_it_iam *)di;
3841
3842         return iam_it_load(&it->oi_it, hash);
3843 }
3844
3845 static const struct dt_index_operations osd_index_iam_ops = {
3846         .dio_lookup         = osd_index_iam_lookup,
3847         .dio_declare_insert = osd_index_declare_iam_insert,
3848         .dio_insert         = osd_index_iam_insert,
3849         .dio_declare_delete = osd_index_declare_iam_delete,
3850         .dio_delete         = osd_index_iam_delete,
3851         .dio_it     = {
3852                 .init     = osd_it_iam_init,
3853                 .fini     = osd_it_iam_fini,
3854                 .get      = osd_it_iam_get,
3855                 .put      = osd_it_iam_put,
3856                 .next     = osd_it_iam_next,
3857                 .key      = osd_it_iam_key,
3858                 .key_size = osd_it_iam_key_size,
3859                 .rec      = osd_it_iam_rec,
3860                 .store    = osd_it_iam_store,
3861                 .load     = osd_it_iam_load
3862         }
3863 };
3864
3865
3866 /**
3867  * Creates or initializes iterator context.
3868  *
3869  * \retval struct osd_it_ea, iterator structure on success
3870  *
3871  */
3872 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
3873                                     struct dt_object *dt,
3874                                     __u32 attr,
3875                                     struct lustre_capa *capa)
3876 {
3877         struct osd_object       *obj  = osd_dt_obj(dt);
3878         struct osd_thread_info  *info = osd_oti_get(env);
3879         struct osd_it_ea        *it   = &info->oti_it_ea;
3880         struct lu_object        *lo   = &dt->do_lu;
3881         struct dentry           *obj_dentry = &info->oti_it_dentry;
3882         ENTRY;
3883         LASSERT(lu_object_exists(lo));
3884
3885         obj_dentry->d_inode = obj->oo_inode;
3886         obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
3887         obj_dentry->d_name.hash = 0;
3888
3889         it->oie_rd_dirent       = 0;
3890         it->oie_it_dirent       = 0;
3891         it->oie_dirent          = NULL;
3892         it->oie_buf             = info->oti_it_ea_buf;
3893         it->oie_obj             = obj;
3894         it->oie_file.f_pos      = 0;
3895         it->oie_file.f_dentry   = obj_dentry;
3896         if (attr & LUDA_64BITHASH)
3897                 it->oie_file.f_mode |= FMODE_64BITHASH;
3898         else
3899                 it->oie_file.f_mode |= FMODE_32BITHASH;
3900         it->oie_file.f_mapping    = obj->oo_inode->i_mapping;
3901         it->oie_file.f_op         = obj->oo_inode->i_fop;
3902         it->oie_file.private_data = NULL;
3903         lu_object_get(lo);
3904         RETURN((struct dt_it *) it);
3905 }
3906
3907 /**
3908  * Destroy or finishes iterator context.
3909  *
3910  * \param di iterator structure to be destroyed
3911  */
3912 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
3913 {
3914         struct osd_it_ea     *it   = (struct osd_it_ea *)di;
3915         struct osd_object    *obj  = it->oie_obj;
3916         struct inode       *inode  = obj->oo_inode;
3917
3918         ENTRY;
3919         it->oie_file.f_op->release(inode, &it->oie_file);
3920         lu_object_put(env, &obj->oo_dt.do_lu);
3921         EXIT;
3922 }
3923
3924 /**
3925  * It position the iterator at given key, so that next lookup continues from
3926  * that key Or it is similar to dio_it->load() but based on a key,
3927  * rather than file position.
3928  *
3929  * As a special convention, osd_it_ea_get(env, di, "") has to rewind iterator
3930  * to the beginning.
3931  *
3932  * TODO: Presently return +1 considering it is only used by mdd_dir_is_empty().
3933  */
3934 static int osd_it_ea_get(const struct lu_env *env,
3935                          struct dt_it *di, const struct dt_key *key)
3936 {
3937         struct osd_it_ea     *it   = (struct osd_it_ea *)di;
3938
3939         ENTRY;
3940         LASSERT(((const char *)key)[0] == '\0');
3941         it->oie_file.f_pos      = 0;
3942         it->oie_rd_dirent       = 0;
3943         it->oie_it_dirent       = 0;
3944         it->oie_dirent          = NULL;
3945
3946         RETURN(+1);
3947 }
3948
3949 /**
3950  * Does nothing
3951  */
3952 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
3953 {
3954 }
3955
3956 /**
3957  * It is called internally by ->readdir(). It fills the
3958  * iterator's in-memory data structure with required
3959  * information i.e. name, namelen, rec_size etc.
3960  *
3961  * \param buf in which information to be filled in.
3962  * \param name name of the file in given dir
3963  *
3964  * \retval 0 on success
3965  * \retval 1 on buffer full
3966  */
3967 static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
3968                                loff_t offset, __u64 ino,
3969                                unsigned d_type)
3970 {
3971         struct osd_it_ea        *it   = (struct osd_it_ea *)buf;
3972         struct osd_it_ea_dirent *ent  = it->oie_dirent;
3973         struct lu_fid           *fid  = &ent->oied_fid;
3974         struct osd_fid_pack     *rec;
3975         ENTRY;
3976
3977         /* this should never happen */
3978         if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
3979                 CERROR("ldiskfs return invalid namelen %d\n", namelen);
3980                 RETURN(-EIO);
3981         }
3982
3983         if ((void *) ent - it->oie_buf + sizeof(*ent) + namelen >
3984             OSD_IT_EA_BUFSIZE)
3985                 RETURN(1);
3986
3987         if (d_type & LDISKFS_DIRENT_LUFID) {
3988                 rec = (struct osd_fid_pack*) (name + namelen + 1);
3989
3990                 if (osd_fid_unpack(fid, rec) != 0)
3991                         fid_zero(fid);
3992
3993                 d_type &= ~LDISKFS_DIRENT_LUFID;
3994         } else {
3995                 fid_zero(fid);
3996         }
3997
3998         ent->oied_ino     = ino;
3999         ent->oied_off     = offset;
4000         ent->oied_namelen = namelen;
4001         ent->oied_type    = d_type;
4002
4003         memcpy(ent->oied_name, name, namelen);
4004
4005         it->oie_rd_dirent++;
4006         it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
4007         RETURN(0);
4008 }
4009
4010 /**
4011  * Calls ->readdir() to load a directory entry at a time
4012  * and stored it in iterator's in-memory data structure.
4013  *
4014  * \param di iterator's in memory structure
4015  *
4016  * \retval   0 on success
4017  * \retval -ve on error
4018  */
4019 static int osd_ldiskfs_it_fill(const struct lu_env *env,
4020                                const struct dt_it *di)
4021 {
4022         struct osd_it_ea   *it    = (struct osd_it_ea *)di;
4023         struct osd_object  *obj   = it->oie_obj;
4024         struct inode       *inode = obj->oo_inode;
4025         struct htree_lock  *hlock = NULL;
4026         int                 result = 0;
4027
4028         ENTRY;
4029         it->oie_dirent = it->oie_buf;
4030         it->oie_rd_dirent = 0;
4031
4032         if (obj->oo_hl_head != NULL) {
4033                 hlock = osd_oti_get(env)->oti_hlock;
4034                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
4035                                    inode, LDISKFS_HLOCK_READDIR);
4036         } else {
4037                 down_read(&obj->oo_ext_idx_sem);
4038         }
4039
4040         result = inode->i_fop->readdir(&it->oie_file, it,
4041                                        (filldir_t) osd_ldiskfs_filldir);
4042
4043         if (hlock != NULL)
4044                 ldiskfs_htree_unlock(hlock);
4045         else
4046                 up_read(&obj->oo_ext_idx_sem);
4047
4048         if (it->oie_rd_dirent == 0) {
4049                 result = -EIO;
4050         } else {
4051                 it->oie_dirent = it->oie_buf;
4052                 it->oie_it_dirent = 1;
4053         }
4054
4055         RETURN(result);
4056 }
4057
4058 /**
4059  * It calls osd_ldiskfs_it_fill() which will use ->readdir()
4060  * to load a directory entry at a time and stored it in
4061  * iterator's in-memory data structure.
4062  *
4063  * \param di iterator's in memory structure
4064  *
4065  * \retval +ve iterator reached to end
4066  * \retval   0 iterator not reached to end
4067  * \retval -ve on error
4068  */
4069 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
4070 {
4071         struct osd_it_ea *it = (struct osd_it_ea *)di;
4072         int rc;
4073
4074         ENTRY;
4075
4076         if (it->oie_it_dirent < it->oie_rd_dirent) {
4077                 it->oie_dirent =
4078                         (void *) it->oie_dirent +
4079                         cfs_size_round(sizeof(struct osd_it_ea_dirent) +
4080                                        it->oie_dirent->oied_namelen);
4081                 it->oie_it_dirent++;
4082                 RETURN(0);
4083         } else {
4084                 if (it->oie_file.f_pos == ldiskfs_get_htree_eof(&it->oie_file))
4085                         rc = +1;
4086                 else
4087                         rc = osd_ldiskfs_it_fill(env, di);
4088         }
4089
4090         RETURN(rc);
4091 }
4092
4093 /**
4094  * Returns the key at current position from iterator's in memory structure.
4095  *
4096  * \param di iterator's in memory structure
4097  *
4098  * \retval key i.e. struct dt_key on success
4099  */
4100 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
4101                                     const struct dt_it *di)
4102 {
4103         struct osd_it_ea *it = (struct osd_it_ea *)di;
4104
4105         return (struct dt_key *)it->oie_dirent->oied_name;
4106 }
4107
4108 /**
4109  * Returns the key's size at current position from iterator's in memory structure.
4110  *
4111  * \param di iterator's in memory structure
4112  *
4113  * \retval key_size i.e. struct dt_key on success
4114  */
4115 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
4116 {
4117         struct osd_it_ea *it = (struct osd_it_ea *)di;
4118
4119         return it->oie_dirent->oied_namelen;
4120 }
4121
4122
4123 /**
4124  * Returns the value (i.e. fid/igif) at current position from iterator's
4125  * in memory structure.
4126  *
4127  * \param di struct osd_it_ea, iterator's in memory structure
4128  * \param attr attr requested for dirent.
4129  * \param lde lustre dirent
4130  *
4131  * \retval   0 no error and \param lde has correct lustre dirent.
4132  * \retval -ve on error
4133  */
4134 static inline int osd_it_ea_rec(const struct lu_env *env,
4135                                 const struct dt_it *di,
4136                                 struct dt_rec *dtrec, __u32 attr)
4137 {
4138         struct osd_it_ea       *it    = (struct osd_it_ea *)di;
4139         struct osd_object      *obj   = it->oie_obj;
4140         struct osd_device      *dev   = osd_obj2dev(obj);
4141         struct osd_scrub       *scrub = &dev->od_scrub;
4142         struct scrub_file      *sf    = &scrub->os_file;
4143         struct osd_thread_info *oti   = osd_oti_get(env);
4144         struct osd_idmap_cache *oic   = &oti->oti_cache;
4145         struct lu_fid          *fid   = &it->oie_dirent->oied_fid;
4146         struct lu_dirent       *lde   = (struct lu_dirent *)dtrec;
4147         __u32                   ino   = it->oie_dirent->oied_ino;
4148         int                     rc    = 0;
4149         ENTRY;
4150
4151         if (!fid_is_sane(fid)) {
4152                 rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
4153                 if (rc != 0) {
4154                         fid_zero(&oic->oic_fid);
4155                         RETURN(rc);
4156                 }
4157         } else {
4158                 osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
4159         }
4160
4161         osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
4162                            it->oie_dirent->oied_name,
4163                            it->oie_dirent->oied_namelen,
4164                            it->oie_dirent->oied_type, attr);
4165         oic->oic_fid = *fid;
4166         if ((scrub->os_pos_current <= ino) &&
4167             (sf->sf_flags & SF_INCONSISTENT ||
4168              ldiskfs_test_bit(osd_oi_fid2idx(dev, fid), sf->sf_oi_bitmap)))
4169                 osd_consistency_check(oti, dev, oic);
4170
4171         RETURN(rc);
4172 }
4173
4174 /**
4175  * Returns a cookie for current position of the iterator head, so that
4176  * user can use this cookie to load/start the iterator next time.
4177  *
4178  * \param di iterator's in memory structure
4179  *
4180  * \retval cookie for current position, on success
4181  */
4182 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
4183 {
4184         struct osd_it_ea *it = (struct osd_it_ea *)di;
4185
4186         return it->oie_dirent->oied_off;
4187 }
4188
4189 /**
4190  * It calls osd_ldiskfs_it_fill() which will use ->readdir()
4191  * to load a directory entry at a time and stored it i inn,
4192  * in iterator's in-memory data structure.
4193  *
4194  * \param di struct osd_it_ea, iterator's in memory structure
4195  *
4196  * \retval +ve on success
4197  * \retval -ve on error
4198  */
4199 static int osd_it_ea_load(const struct lu_env *env,
4200                           const struct dt_it *di, __u64 hash)
4201 {
4202         struct osd_it_ea *it = (struct osd_it_ea *)di;
4203         int rc;
4204
4205         ENTRY;
4206         it->oie_file.f_pos = hash;
4207
4208         rc =  osd_ldiskfs_it_fill(env, di);
4209         if (rc == 0)
4210                 rc = +1;
4211
4212         RETURN(rc);
4213 }
4214
4215 /**
4216  * Index lookup function for interoperability mode (b11826).
4217  *
4218  * \param key,  key i.e. file name to be searched
4219  *
4220  * \retval +ve, on success
4221  * \retval -ve, on error
4222  */
4223 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
4224                                struct dt_rec *rec, const struct dt_key *key,
4225                                struct lustre_capa *capa)
4226 {
4227         struct osd_object *obj = osd_dt_obj(dt);
4228         int rc = 0;
4229
4230         ENTRY;
4231
4232         LASSERT(S_ISDIR(obj->oo_inode->i_mode));
4233         LINVRNT(osd_invariant(obj));
4234
4235         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
4236                 return -EACCES;
4237
4238         rc = osd_ea_lookup_rec(env, obj, rec, key);
4239         if (rc == 0)
4240                 rc = +1;
4241         RETURN(rc);
4242 }
4243
4244 /**
4245  * Index and Iterator operations for interoperability
4246  * mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
4247  */
4248 static const struct dt_index_operations osd_index_ea_ops = {
4249         .dio_lookup         = osd_index_ea_lookup,
4250         .dio_declare_insert = osd_index_declare_ea_insert,
4251         .dio_insert         = osd_index_ea_insert,
4252         .dio_declare_delete = osd_index_declare_ea_delete,
4253         .dio_delete         = osd_index_ea_delete,
4254         .dio_it     = {
4255                 .init     = osd_it_ea_init,
4256                 .fini     = osd_it_ea_fini,
4257                 .get      = osd_it_ea_get,
4258                 .put      = osd_it_ea_put,
4259                 .next     = osd_it_ea_next,
4260                 .key      = osd_it_ea_key,
4261                 .key_size = osd_it_ea_key_size,
4262                 .rec      = osd_it_ea_rec,
4263                 .store    = osd_it_ea_store,
4264                 .load     = osd_it_ea_load
4265         }
4266 };
4267
4268 static void *osd_key_init(const struct lu_context *ctx,
4269                           struct lu_context_key *key)
4270 {
4271         struct osd_thread_info *info;
4272
4273         OBD_ALLOC_PTR(info);
4274         if (info == NULL)
4275                 return ERR_PTR(-ENOMEM);
4276
4277         OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4278         if (info->oti_it_ea_buf == NULL)
4279                 goto out_free_info;
4280
4281         info->oti_env = container_of(ctx, struct lu_env, le_ctx);
4282
4283         info->oti_hlock = ldiskfs_htree_lock_alloc();
4284         if (info->oti_hlock == NULL)
4285                 goto out_free_ea;
4286
4287         return info;
4288
4289  out_free_ea:
4290         OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4291  out_free_info:
4292         OBD_FREE_PTR(info);
4293         return ERR_PTR(-ENOMEM);
4294 }
4295
4296 static void osd_key_fini(const struct lu_context *ctx,
4297                          struct lu_context_key *key, void* data)
4298 {
4299         struct osd_thread_info *info = data;
4300
4301         if (info->oti_hlock != NULL)
4302                 ldiskfs_htree_lock_free(info->oti_hlock);
4303         OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4304         OBD_FREE_PTR(info);
4305 }
4306
4307 static void osd_key_exit(const struct lu_context *ctx,
4308                          struct lu_context_key *key, void *data)
4309 {
4310         struct osd_thread_info *info = data;
4311
4312         LASSERT(info->oti_r_locks == 0);
4313         LASSERT(info->oti_w_locks == 0);
4314         LASSERT(info->oti_txns    == 0);
4315 }
4316
4317 /* type constructor/destructor: osd_type_init, osd_type_fini */
4318 LU_TYPE_INIT_FINI(osd, &osd_key);
4319
4320 struct lu_context_key osd_key = {
4321         .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
4322         .lct_init = osd_key_init,
4323         .lct_fini = osd_key_fini,
4324         .lct_exit = osd_key_exit
4325 };
4326
4327
4328 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
4329                            const char *name, struct lu_device *next)
4330 {
4331         struct osd_device *osd = osd_dev(d);
4332
4333         strncpy(osd->od_svname, name, MAX_OBD_NAME);
4334         return osd_procfs_init(osd, name);
4335 }
4336
4337 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
4338 {
4339         ENTRY;
4340
4341         osd_scrub_cleanup(env, o);
4342
4343         if (o->od_fsops) {
4344                 fsfilt_put_ops(o->od_fsops);
4345                 o->od_fsops = NULL;
4346         }
4347
4348         /* shutdown quota slave instance associated with the device */
4349         if (o->od_quota_slave != NULL) {
4350                 qsd_fini(env, o->od_quota_slave);
4351                 o->od_quota_slave = NULL;
4352         }
4353
4354         RETURN(0);
4355 }
4356
4357 static int osd_mount(const struct lu_env *env,
4358                      struct osd_device *o, struct lustre_cfg *cfg)
4359 {
4360         const char              *name  = lustre_cfg_string(cfg, 0);
4361         const char              *dev  = lustre_cfg_string(cfg, 1);
4362         const char              *opts;
4363         unsigned long            page, s_flags, lmd_flags = 0;
4364         struct page             *__page;
4365         struct file_system_type *type;
4366         char                    *options = NULL;
4367         char                    *str;
4368         int                       rc = 0;
4369         ENTRY;
4370
4371         if (o->od_mnt != NULL)
4372                 RETURN(0);
4373
4374         if (strlen(dev) >= sizeof(o->od_mntdev))
4375                 RETURN(-E2BIG);
4376         strcpy(o->od_mntdev, dev);
4377
4378         o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
4379         if (o->od_fsops == NULL) {
4380                 CERROR("Can't find fsfilt_ldiskfs\n");
4381                 RETURN(-ENOTSUPP);
4382         }
4383
4384         OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
4385         if (__page == NULL)
4386                 GOTO(out, rc = -ENOMEM);
4387
4388         str = lustre_cfg_string(cfg, 2);
4389         s_flags = simple_strtoul(str, NULL, 0);
4390         str = strstr(str, ":");
4391         if (str)
4392                 lmd_flags = simple_strtoul(str + 1, NULL, 0);
4393         opts = lustre_cfg_string(cfg, 3);
4394         page = (unsigned long)cfs_page_address(__page);
4395         options = (char *)page;
4396         *options = '\0';
4397         if (opts == NULL)
4398                 strcat(options, "user_xattr,acl");
4399         else
4400                 strcat(options, opts);
4401
4402         /* Glom up mount options */
4403         if (*options != '\0')
4404                 strcat(options, ",");
4405         strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
4406
4407         type = get_fs_type("ldiskfs");
4408         if (!type) {
4409                 CERROR("%s: cannot find ldiskfs module\n", name);
4410                 GOTO(out, rc = -ENODEV);
4411         }
4412
4413         o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
4414         cfs_module_put(type->owner);
4415
4416         if (IS_ERR(o->od_mnt)) {
4417                 rc = PTR_ERR(o->od_mnt);
4418                 CERROR("%s: can't mount %s: %d\n", name, dev, rc);
4419                 o->od_mnt = NULL;
4420                 GOTO(out, rc);
4421         }
4422
4423         if (lvfs_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
4424                 CERROR("%s: underlying device %s is marked as read-only. "
4425                        "Setup failed\n", name, dev);
4426                 mntput(o->od_mnt);
4427                 o->od_mnt = NULL;
4428                 GOTO(out, rc = -EROFS);
4429         }
4430
4431         if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
4432             LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
4433                 CERROR("%s: device %s is mounted w/o journal\n", name, dev);
4434                 mntput(o->od_mnt);
4435                 o->od_mnt = NULL;
4436                 GOTO(out, rc = -EINVAL);
4437         }
4438
4439         if (lmd_flags & LMD_FLG_NOSCRUB)
4440                 o->od_noscrub = 1;
4441
4442 out:
4443         if (__page)
4444                 OBD_PAGE_FREE(__page);
4445         if (rc)
4446                 fsfilt_put_ops(o->od_fsops);
4447
4448         RETURN(rc);
4449 }
4450
4451 static struct lu_device *osd_device_fini(const struct lu_env *env,
4452                                          struct lu_device *d)
4453 {
4454         int rc;
4455         ENTRY;
4456
4457         rc = osd_shutdown(env, osd_dev(d));
4458
4459         osd_obj_map_fini(osd_dev(d));
4460
4461         shrink_dcache_sb(osd_sb(osd_dev(d)));
4462         osd_sync(env, lu2dt_dev(d));
4463
4464         rc = osd_procfs_fini(osd_dev(d));
4465         if (rc) {
4466                 CERROR("proc fini error %d \n", rc);
4467                 RETURN (ERR_PTR(rc));
4468         }
4469
4470         if (osd_dev(d)->od_mnt) {
4471                 mntput(osd_dev(d)->od_mnt);
4472                 osd_dev(d)->od_mnt = NULL;
4473         }
4474
4475         RETURN(NULL);
4476 }
4477
4478 static int osd_device_init0(const struct lu_env *env,
4479                             struct osd_device *o,
4480                             struct lustre_cfg *cfg)
4481 {
4482         struct lu_device        *l = osd2lu_dev(o);
4483         struct osd_thread_info *info;
4484         int                     rc;
4485
4486         /* if the module was re-loaded, env can loose its keys */
4487         rc = lu_env_refill((struct lu_env *) env);
4488         if (rc)
4489                 GOTO(out, rc);
4490         info = osd_oti_get(env);
4491         LASSERT(info);
4492
4493         l->ld_ops = &osd_lu_ops;
4494         o->od_dt_dev.dd_ops = &osd_dt_ops;
4495
4496         spin_lock_init(&o->od_osfs_lock);
4497         mutex_init(&o->od_otable_mutex);
4498         o->od_osfs_age = cfs_time_shift_64(-1000);
4499
4500         o->od_capa_hash = init_capa_hash();
4501         if (o->od_capa_hash == NULL)
4502                 GOTO(out, rc = -ENOMEM);
4503
4504         o->od_read_cache = 1;
4505         o->od_writethrough_cache = 1;
4506         o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
4507
4508         rc = osd_mount(env, o, cfg);
4509         if (rc)
4510                 GOTO(out_capa, rc);
4511
4512         /* setup scrub, including OI files initialization */
4513         rc = osd_scrub_setup(env, o);
4514         if (rc < 0)
4515                 GOTO(out_mnt, rc);
4516
4517         strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
4518                         sizeof(o->od_svname) - 1);
4519
4520         rc = osd_obj_map_init(o);
4521         if (rc != 0)
4522                 GOTO(out_scrub, rc);
4523
4524         rc = lu_site_init(&o->od_site, l);
4525         if (rc)
4526                 GOTO(out_compat, rc);
4527         o->od_site.ls_bottom_dev = l;
4528
4529         rc = lu_site_init_finish(&o->od_site);
4530         if (rc)
4531                 GOTO(out_site, rc);
4532
4533         rc = osd_procfs_init(o, o->od_svname);
4534         if (rc != 0) {
4535                 CERROR("%s: can't initialize procfs: rc = %d\n",
4536                        o->od_svname, rc);
4537                 GOTO(out_site, rc);
4538         }
4539
4540         LASSERT(l->ld_site->ls_linkage.next && l->ld_site->ls_linkage.prev);
4541
4542         /* initialize quota slave instance */
4543         o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
4544                                      o->od_proc_entry);
4545         if (IS_ERR(o->od_quota_slave)) {
4546                 rc = PTR_ERR(o->od_quota_slave);
4547                 o->od_quota_slave = NULL;
4548                 GOTO(out_procfs, rc);
4549         }
4550
4551         RETURN(0);
4552 out_procfs:
4553         osd_procfs_fini(o);
4554 out_site:
4555         lu_site_fini(&o->od_site);
4556 out_compat:
4557         osd_obj_map_fini(o);
4558 out_scrub:
4559         osd_scrub_cleanup(env, o);
4560 out_mnt:
4561         osd_oi_fini(info, o);
4562         osd_shutdown(env, o);
4563         mntput(o->od_mnt);
4564         o->od_mnt = NULL;
4565 out_capa:
4566         cleanup_capa_hash(o->od_capa_hash);
4567 out:
4568         RETURN(rc);
4569 }
4570
4571 static struct lu_device *osd_device_alloc(const struct lu_env *env,
4572                                           struct lu_device_type *t,
4573                                           struct lustre_cfg *cfg)
4574 {
4575         struct osd_device *o;
4576         int                rc;
4577
4578         OBD_ALLOC_PTR(o);
4579         if (o == NULL)
4580                 return ERR_PTR(-ENOMEM);
4581
4582         rc = dt_device_init(&o->od_dt_dev, t);
4583         if (rc == 0) {
4584                 /* Because the ctx might be revived in dt_device_init,
4585                  * refill the env here */
4586                 lu_env_refill((struct lu_env *)env);
4587                 rc = osd_device_init0(env, o, cfg);
4588                 if (rc)
4589                         dt_device_fini(&o->od_dt_dev);
4590         }
4591
4592         if (unlikely(rc != 0))
4593                 OBD_FREE_PTR(o);
4594
4595         return rc == 0 ? osd2lu_dev(o) : ERR_PTR(rc);
4596 }
4597
4598 static struct lu_device *osd_device_free(const struct lu_env *env,
4599                                          struct lu_device *d)
4600 {
4601         struct osd_device *o = osd_dev(d);
4602         ENTRY;
4603
4604         cleanup_capa_hash(o->od_capa_hash);
4605         /* XXX: make osd top device in order to release reference */
4606         d->ld_site->ls_top_dev = d;
4607         lu_site_purge(env, d->ld_site, -1);
4608         if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
4609                 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
4610                 lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
4611         }
4612         lu_site_fini(&o->od_site);
4613         dt_device_fini(&o->od_dt_dev);
4614         OBD_FREE_PTR(o);
4615         RETURN(NULL);
4616 }
4617
4618 static int osd_process_config(const struct lu_env *env,
4619                               struct lu_device *d, struct lustre_cfg *cfg)
4620 {
4621         struct osd_device *o = osd_dev(d);
4622         int err;
4623         ENTRY;
4624
4625         switch(cfg->lcfg_command) {
4626         case LCFG_SETUP:
4627                 err = osd_mount(env, o, cfg);
4628                 break;
4629         case LCFG_CLEANUP:
4630                 lu_dev_del_linkage(d->ld_site, d);
4631                 err = osd_shutdown(env, o);
4632                 break;
4633         default:
4634                 err = -ENOSYS;
4635         }
4636
4637         RETURN(err);
4638 }
4639
4640 static int osd_recovery_complete(const struct lu_env *env,
4641                                  struct lu_device *d)
4642 {
4643         struct osd_device       *osd = osd_dev(d);
4644         int                      rc = 0;
4645         ENTRY;
4646
4647         if (osd->od_quota_slave == NULL)
4648                 RETURN(0);
4649
4650         /* start qsd instance on recovery completion, this notifies the quota
4651          * slave code that we are about to process new requests now */
4652         rc = qsd_start(env, osd->od_quota_slave);
4653         RETURN(rc);
4654 }
4655
4656 /*
4657  * we use exports to track all osd users
4658  */
4659 static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
4660                            struct obd_device *obd, struct obd_uuid *cluuid,
4661                            struct obd_connect_data *data, void *localdata)
4662 {
4663         struct osd_device    *osd = osd_dev(obd->obd_lu_dev);
4664         struct lustre_handle  conn;
4665         int                   rc;
4666         ENTRY;
4667
4668         CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);
4669
4670         rc = class_connect(&conn, obd, cluuid);
4671         if (rc)
4672                 RETURN(rc);
4673
4674         *exp = class_conn2export(&conn);
4675
4676         spin_lock(&osd->od_osfs_lock);
4677         osd->od_connects++;
4678         spin_unlock(&osd->od_osfs_lock);
4679
4680         RETURN(0);
4681 }
4682
4683 /*
4684  * once last export (we don't count self-export) disappeared
4685  * osd can be released
4686  */
4687 static int osd_obd_disconnect(struct obd_export *exp)
4688 {
4689         struct obd_device *obd = exp->exp_obd;
4690         struct osd_device *osd = osd_dev(obd->obd_lu_dev);
4691         int                rc, release = 0;
4692         ENTRY;
4693
4694         /* Only disconnect the underlying layers on the final disconnect. */
4695         spin_lock(&osd->od_osfs_lock);
4696         osd->od_connects--;
4697         if (osd->od_connects == 0)
4698                 release = 1;
4699         spin_unlock(&osd->od_osfs_lock);
4700
4701         rc = class_disconnect(exp); /* bz 9811 */
4702
4703         if (rc == 0 && release)
4704                 class_manual_cleanup(obd);
4705         RETURN(rc);
4706 }
4707
4708 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
4709                        struct lu_device *dev)
4710 {
4711         struct osd_device *osd = osd_dev(dev);
4712         int                result = 0;
4713         ENTRY;
4714
4715         if (dev->ld_site && lu_device_is_md(dev->ld_site->ls_top_dev)) {
4716                 /* MDT/MDD still use old infrastructure to create
4717                  * special files */
4718                 result = llo_local_objects_setup(env, lu2md_dev(pdev),
4719                                                  lu2dt_dev(dev));
4720                 if (result)
4721                         RETURN(result);
4722         }
4723
4724         if (osd->od_quota_slave != NULL)
4725                 /* set up quota slave objects */
4726                 result = qsd_prepare(env, osd->od_quota_slave);
4727
4728         RETURN(result);
4729 }
4730
4731 static const struct lu_object_operations osd_lu_obj_ops = {
4732         .loo_object_init      = osd_object_init,
4733         .loo_object_delete    = osd_object_delete,
4734         .loo_object_release   = osd_object_release,
4735         .loo_object_free      = osd_object_free,
4736         .loo_object_print     = osd_object_print,
4737         .loo_object_invariant = osd_object_invariant
4738 };
4739
4740 const struct lu_device_operations osd_lu_ops = {
4741         .ldo_object_alloc      = osd_object_alloc,
4742         .ldo_process_config    = osd_process_config,
4743         .ldo_recovery_complete = osd_recovery_complete,
4744         .ldo_prepare           = osd_prepare,
4745 };
4746
4747 static const struct lu_device_type_operations osd_device_type_ops = {
4748         .ldto_init = osd_type_init,
4749         .ldto_fini = osd_type_fini,
4750
4751         .ldto_start = osd_type_start,
4752         .ldto_stop  = osd_type_stop,
4753
4754         .ldto_device_alloc = osd_device_alloc,
4755         .ldto_device_free  = osd_device_free,
4756
4757         .ldto_device_init    = osd_device_init,
4758         .ldto_device_fini    = osd_device_fini
4759 };
4760
4761 struct lu_device_type osd_device_type = {
4762         .ldt_tags     = LU_DEVICE_DT,
4763         .ldt_name     = LUSTRE_OSD_LDISKFS_NAME,
4764         .ldt_ops      = &osd_device_type_ops,
4765         .ldt_ctx_tags = LCT_LOCAL,
4766 };
4767
4768 /*
4769  * lprocfs legacy support.
4770  */
4771 static struct obd_ops osd_obd_device_ops = {
4772         .o_owner = THIS_MODULE,
4773         .o_connect      = osd_obd_connect,
4774         .o_disconnect   = osd_obd_disconnect
4775 };
4776
4777 static int __init osd_mod_init(void)
4778 {
4779         struct lprocfs_static_vars lvars;
4780
4781         osd_oi_mod_init();
4782         lprocfs_osd_init_vars(&lvars);
4783         return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
4784                                    LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
4785 }
4786
4787 static void __exit osd_mod_exit(void)
4788 {
4789         class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
4790 }
4791
4792 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4793 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
4794 MODULE_LICENSE("GPL");
4795
4796 cfs_module(osd, "0.1.0", osd_mod_init, osd_mod_exit);