1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Whamcloud, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osd-ldiskfs/osd_handler.c
37  *
38  * Top-level entry points into osd module
39  *
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
42  */
43
44 #define DEBUG_SUBSYSTEM S_MDS
45
46 #include <linux/module.h>
47
48 /* LUSTRE_VERSION_CODE */
49 #include <lustre_ver.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/types.h>
52 /* prerequisite for linux/xattr.h */
53 #include <linux/fs.h>
54 /* XATTR_{REPLACE,CREATE} */
55 #include <linux/xattr.h>
56 /* simple_mkdir() */
57 #include <lvfs.h>
58
59 /*
60  * struct OBD_{ALLOC,FREE}*()
61  * OBD_FAIL_CHECK
62  */
63 #include <obd_support.h>
64 /* struct ptlrpc_thread */
65 #include <lustre_net.h>
66
67 /* fid_is_local() */
68 #include <lustre_fid.h>
69
70 #include "osd_internal.h"
71 #include "osd_igif.h"
72
73 /* llo_* api support */
74 #include <md_object.h>
75 /* dt_acct_features */
76 #include <lquota.h>
77
78 #ifdef HAVE_LDISKFS_PDO
79 int ldiskfs_pdo = 1;
80 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
81                 "ldiskfs with parallel directory operations");
82 #else
83 int ldiskfs_pdo = 0;
84 #endif
85
86 static const char dot[] = ".";
87 static const char dotdot[] = "..";
88 static const char remote_obj_dir[] = "REM_OBJ_DIR";
89
90 static const struct lu_object_operations      osd_lu_obj_ops;
91 static const struct dt_object_operations      osd_obj_ops;
92 static const struct dt_object_operations      osd_obj_ea_ops;
93 static const struct dt_object_operations      osd_obj_otable_it_ops;
94 static const struct dt_index_operations       osd_index_iam_ops;
95 static const struct dt_index_operations       osd_index_ea_ops;
96
97 static int osd_has_index(const struct osd_object *obj)
98 {
99         return obj->oo_dt.do_index_ops != NULL;
100 }
101
102 static int osd_object_invariant(const struct lu_object *l)
103 {
104         return osd_invariant(osd_obj(l));
105 }
106
107 /*
108  * Concurrency: doesn't matter
109  */
110 static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
111 {
112         return osd_oti_get(env)->oti_r_locks > 0;
113 }
114
115 /*
116  * Concurrency: doesn't matter
117  */
118 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
119 {
120         struct osd_thread_info *oti = osd_oti_get(env);
121         return oti->oti_w_locks > 0 && o->oo_owner == env;
122 }
123
124 /*
125  * Concurrency: doesn't access mutable data
126  */
127 static int osd_root_get(const struct lu_env *env,
128                         struct dt_device *dev, struct lu_fid *f)
129 {
130         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
131         return 0;
132 }
133
134 /*
135  * OSD object methods.
136  */
137
138 /*
139  * Concurrency: no concurrent access is possible that early in object
140  * life-cycle.
141  */
142 static struct lu_object *osd_object_alloc(const struct lu_env *env,
143                                           const struct lu_object_header *hdr,
144                                           struct lu_device *d)
145 {
146         struct osd_object *mo;
147
148         OBD_ALLOC_PTR(mo);
149         if (mo != NULL) {
150                 struct lu_object *l;
151
152                 l = &mo->oo_dt.do_lu;
153                 dt_object_init(&mo->oo_dt, NULL, d);
154                 if (osd_dev(d)->od_iop_mode)
155                         mo->oo_dt.do_ops = &osd_obj_ea_ops;
156                 else
157                         mo->oo_dt.do_ops = &osd_obj_ops;
158
159                 l->lo_ops = &osd_lu_obj_ops;
160                 cfs_init_rwsem(&mo->oo_sem);
161                 cfs_init_rwsem(&mo->oo_ext_idx_sem);
162                 cfs_spin_lock_init(&mo->oo_guard);
163                 return l;
164         } else {
165                 return NULL;
166         }
167 }
168
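/*
 * Read the LMA (Lustre Metadata Attributes) xattr of @inode into @lma.
 * Returns 0 and byte-swaps the LMA on success, -ENODATA if the inode has no
 * LMA xattr, -ENOSYS if the LMA carries unsupported incompatible feature
 * flags, or a negative errno from ->getxattr().
 */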
169 static int osd_get_lma(struct inode *inode, struct dentry *dentry,
170                        struct lustre_mdt_attrs *lma)
171 {
172         int rc;
173
174         dentry->d_inode = inode;
175         rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA, (void *)lma,
176                                    sizeof(*lma));
177         if (rc > 0) {
178                 /* Check LMA compatibility */
179                 if (lma->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP)) {
180                         CWARN("%.16s: unsupported incompat LMA feature(s) "
181                               "%lx/%#x\n",
182                               LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
183                               inode->i_ino, le32_to_cpu(lma->lma_incompat) &
184                                                         ~LMA_INCOMPAT_SUPP);
185                         rc = -ENOSYS;
186                 } else {
187                         lustre_lma_swab(lma);
188                         rc = 0;
189                 }
190         } else if (rc == 0) {
191                 rc = -ENODATA;
192         }
193
194         return rc;
195 }
196
197 /*
198  * Retrieve an object's inode from the backing ldiskfs filesystem.
199  */
200 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
201                        struct osd_inode_id *id)
202 {
203         struct inode *inode = NULL;
204
205         inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
206         if (IS_ERR(inode)) {
207                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
208                        id->oii_ino, PTR_ERR(inode));
209         } else if (id->oii_gen != OSD_OII_NOGEN &&
210                    inode->i_generation != id->oii_gen) {
211                 CDEBUG(D_INODE, "unmatched inode: ino = %u, gen0 = %u, "
212                        "gen1 = %u\n",
213                        id->oii_ino, id->oii_gen, inode->i_generation);
214                 iput(inode);
215                 inode = ERR_PTR(-ESTALE);
216         } else if (inode->i_nlink == 0) {
217                 /* due to parallel readdir and unlink,
218                  * we can have a dead inode here. */
219                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
220                 make_bad_inode(inode);
221                 iput(inode);
222                 inode = ERR_PTR(-ESTALE);
223         } else if (is_bad_inode(inode)) {
224                 CWARN("%.16s: bad inode: ino = %u\n",
225                 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
226                 iput(inode);
227                 inode = ERR_PTR(-ENOENT);
228         } else {
229                 if (id->oii_gen == OSD_OII_NOGEN)
230                         osd_id_gen(id, inode->i_ino, inode->i_generation);
231
232                 /* Do not update file c/mtime in ldiskfs.
233                  * NB: we don't have any lock to protect this because we don't
234                  * have a reference on the osd_object yet, but contention with
235                  * another lookup + attr_set can't happen in the tiny window
236                  * between the if (...) check and setting S_NOCMTIME. */
237                 if (!(inode->i_flags & S_NOCMTIME))
238                         inode->i_flags |= S_NOCMTIME;
239         }
240         return inode;
241 }
242
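/*
 * As osd_iget(), but additionally return the object's FID: taken from the
 * LMA xattr when present, otherwise built as an IGIF from ino/generation.
 */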
243 struct inode *osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
244                            struct osd_inode_id *id, struct lu_fid *fid)
245 {
246         struct lustre_mdt_attrs *lma   = &info->oti_mdt_attrs;
247         struct inode            *inode;
248         int                      rc;
249
250         inode = osd_iget(info, dev, id);
251         if (IS_ERR(inode))
252                 return inode;
253
254         rc = osd_get_lma(inode, &info->oti_obj_dentry, lma);
255         if (rc == 0) {
256                 *fid = lma->lma_self_fid;
257         } else if (rc == -ENODATA) {
258                 LU_IGIF_BUILD(fid, inode->i_ino, inode->i_generation);
259         } else {
260                 iput(inode);
261                 inode = ERR_PTR(rc);
262         }
263         return inode;
264 }
265
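/*
 * As osd_iget(), but verify that the FID stored in the inode's LMA matches
 * the expected @fid; a mismatch is reported as -EREMCHG so that the caller
 * can trigger OI scrub. Inodes without an LMA are accepted as-is.
 */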
266 static struct inode *
267 osd_iget_verify(struct osd_thread_info *info, struct osd_device *dev,
268                 struct osd_inode_id *id, const struct lu_fid *fid)
269 {
270         struct lustre_mdt_attrs *lma   = &info->oti_mdt_attrs;
271         struct inode            *inode;
272         int                      rc;
273
274         inode = osd_iget(info, dev, id);
275         if (IS_ERR(inode))
276                 return inode;
277
278         rc = osd_get_lma(inode, &info->oti_obj_dentry, lma);
279         if (rc == -ENODATA)
280                 return inode;
281
282         if (rc != 0) {
283                 iput(inode);
284                 return ERR_PTR(rc);
285         }
286
287         if (!lu_fid_eq(fid, &lma->lma_self_fid)) {
288                 CDEBUG(D_LFSCK, "inconsistent obj: "DFID", %lu, "DFID"\n",
289                        PFID(&lma->lma_self_fid), inode->i_ino, PFID(fid));
290                 iput(inode);
291                 return ERR_PTR(-EREMCHG);
292         }
293
294         return inode;
295 }
296
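/*
 * Map @fid to an inode and attach it to @obj. The lookup order is:
 * 1. the per-thread FID->ino cache,
 * 2. the OI scrub pending-item list (if the scrubber has work queued),
 * 3. the OI files themselves.
 * A missing or inconsistent mapping may trigger an OI scrub, in which case
 * -EINPROGRESS is returned.
 */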
297 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
298                           const struct lu_fid *fid,
299                           const struct lu_object_conf *conf)
300 {
301         struct osd_thread_info *info;
302         struct lu_device       *ldev   = obj->oo_dt.do_lu.lo_dev;
303         struct osd_device      *dev;
304         struct osd_idmap_cache *oic;
305         struct osd_inode_id    *id;
306         struct inode           *inode;
307         struct osd_scrub       *scrub;
308         struct scrub_file      *sf;
309         int                     result;
310         int                     verify = 0;
311         ENTRY;
312
313         LINVRNT(osd_invariant(obj));
314         LASSERT(obj->oo_inode == NULL);
315         LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID, PFID(fid));
316
317         dev = osd_dev(ldev);
318         scrub = &dev->od_scrub;
319         sf = &scrub->os_file;
320         info = osd_oti_get(env);
321         LASSERT(info);
322         oic = &info->oti_cache;
323         id  = &oic->oic_lid;
324
325         if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
326                 RETURN(-ENOENT);
327
328         /* Search order: 1. per-thread cache. */
329         if (lu_fid_eq(fid, &oic->oic_fid)) {
330                 goto iget;
331         } else if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
332                 /* Search order: 2. OI scrub pending list. */
333                 result = osd_oii_lookup(dev, fid, id);
334                 if (result == 0)
335                         goto iget;
336         }
337
338         if (sf->sf_flags & SF_INCONSISTENT)
339                 verify = 1;
340
341         /*
342          * Objects are created as locking anchors or placeholders for objects
343          * yet to be created. No need to call osd_oi_lookup() here because a
344          * FID should never be re-used; if it really is a duplicate FID for
345          * some unexpected reason, we should be able to detect it later by
346          * calling do_create->osd_oi_insert().
347          */
348         if (conf != NULL && (conf->loc_flags & LOC_F_NEW) != 0)
349                 GOTO(out, result = 0);
350
351         /* Search order: 3. OI files. */
352         result = osd_oi_lookup(info, dev, fid, id);
353         if (result == -ENOENT) {
354                 if (!fid_is_norm(fid) ||
355                     !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
356                                       sf->sf_oi_bitmap))
357                         GOTO(out, result = 0);
358
359                 goto trigger;
360         }
361
362         if (result != 0)
363                 GOTO(out, result);
364
365 iget:
366         if (verify == 0)
367                 inode = osd_iget(info, dev, id);
368         else
369                 inode = osd_iget_verify(info, dev, id, fid);
370         if (IS_ERR(inode)) {
371                 result = PTR_ERR(inode);
372                 if (result == -ENOENT || result == -ESTALE) {
373                         fid_zero(&oic->oic_fid);
374                         result = 0;
375                 } else if (result == -EREMCHG) {
376
377 trigger:
378                         if (thread_is_running(&scrub->os_thread)) {
379                                 result = -EINPROGRESS;
380                         } else if (!dev->od_noscrub) {
381                                 result = osd_scrub_start(dev);
382                                 LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
383                                                "for "DFID", rc = %d [1]\n",
384                                                LDISKFS_SB(osd_sb(dev))->s_es->\
385                                                s_volume_name, PFID(fid), result);
386                                 if (result == 0 || result == -EALREADY)
387                                         result = -EINPROGRESS;
388                                 else
389                                         result = -EREMCHG;
390                         }
391                 }
392
393                 GOTO(out, result);
394         }
395
396         obj->oo_inode = inode;
397         LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
398         if (dev->od_iop_mode) {
399                 obj->oo_compat_dot_created = 1;
400                 obj->oo_compat_dotdot_created = 1;
401         }
402
403         if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
404                 GOTO(out, result = 0);
405
406         LASSERT(obj->oo_hl_head == NULL);
407         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
408         if (obj->oo_hl_head == NULL) {
409                 obj->oo_inode = NULL;
410                 iput(inode);
411                 GOTO(out, result = -ENOMEM);
412         }
413         GOTO(out, result = 0);
414
415 out:
416         LINVRNT(osd_invariant(obj));
417         return result;
418 }
419
420 /*
421  * Concurrency: shouldn't matter.
422  */
423 static void osd_object_init0(struct osd_object *obj)
424 {
425         LASSERT(obj->oo_inode != NULL);
426         obj->oo_dt.do_body_ops = &osd_body_ops;
427         obj->oo_dt.do_lu.lo_header->loh_attr |=
428                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
429 }
430
431 /*
432  * Concurrency: no concurrent access is possible that early in object
433  * life-cycle.
434  */
435 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
436                            const struct lu_object_conf *conf)
437 {
438         struct osd_object *obj = osd_obj(l);
439         int result;
440
441         LINVRNT(osd_invariant(obj));
442
443         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
444         obj->oo_dt.do_body_ops = &osd_body_ops_new;
445         if (result == 0) {
446                 if (obj->oo_inode != NULL) {
447                         osd_object_init0(obj);
448                 } else if (fid_is_otable_it(&l->lo_header->loh_fid)) {
449                         obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
450                         /* the LFSCK iterator object is special: no inode */
451                         l->lo_header->loh_attr |= LOHA_EXISTS;
452                 }
453         }
454         LINVRNT(osd_invariant(obj));
455         return result;
456 }
457
458 /*
459  * Concurrency: no concurrent access is possible that late in object
460  * life-cycle.
461  */
462 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
463 {
464         struct osd_object *obj = osd_obj(l);
465
466         LINVRNT(osd_invariant(obj));
467
468         dt_object_fini(&obj->oo_dt);
469         if (obj->oo_hl_head != NULL)
470                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
471         OBD_FREE_PTR(obj);
472 }
473
474 /*
475  * Concurrency: no concurrent access is possible that late in object
476  * life-cycle.
477  */
478 static void osd_index_fini(struct osd_object *o)
479 {
480         struct iam_container *bag;
481
482         if (o->oo_dir != NULL) {
483                 bag = &o->oo_dir->od_container;
484                 if (o->oo_inode != NULL) {
485                         if (bag->ic_object == o->oo_inode)
486                                 iam_container_fini(bag);
487                 }
488                 OBD_FREE_PTR(o->oo_dir);
489                 o->oo_dir = NULL;
490         }
491 }
492
493 /*
494  * Concurrency: no concurrent access is possible that late in object
495  * life-cycle (for all existing callers, that is. New callers have to provide
496  * their own locking.)
497  */
498 static int osd_inode_unlinked(const struct inode *inode)
499 {
500         return inode->i_nlink == 0;
501 }
502
503 enum {
504         OSD_TXN_OI_DELETE_CREDITS    = 20,
505         OSD_TXN_INODE_DELETE_CREDITS = 20
506 };
507
508 /*
509  * Journal
510  */
511
512 #if OSD_THANDLE_STATS
513 /**
514  * Set time when the handle is allocated
515  */
516 static void osd_th_alloced(struct osd_thandle *oth)
517 {
518         oth->oth_alloced = cfs_time_current();
519 }
520
521 /**
522  * Set time when the handle started
523  */
524 static void osd_th_started(struct osd_thandle *oth)
525 {
526         oth->oth_started = cfs_time_current();
527 }
528
529 /**
530  * Helper function to convert a time interval to microseconds packed in a
531  * long int (the default time unit for the counters in "stats" initialized
532  * by lu_time_init())
533  */
534 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
535 {
536         struct timeval val;
537
538         cfs_duration_usec(cfs_time_sub(end, start), &val);
539         return val.tv_sec * 1000000 + val.tv_usec;
540 }
541
542 /**
543  * Check whether we have been dealing with this handle for too long.
544  */
545 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
546                                 cfs_time_t alloced, cfs_time_t started,
547                                 cfs_time_t closed)
548 {
549         cfs_time_t now = cfs_time_current();
550
551         LASSERT(dev != NULL);
552
553         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
554                             interval_to_usec(alloced, started));
555         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
556                             interval_to_usec(started, closed));
557         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
558                             interval_to_usec(closed, now));
559
560         if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
561                 CWARN("transaction handle %p was open for too long: "
562                       "now "CFS_TIME_T" ,"
563                       "alloced "CFS_TIME_T" ,"
564                       "started "CFS_TIME_T" ,"
565                       "closed "CFS_TIME_T"\n",
566                       oth, now, alloced, started, closed);
567                 libcfs_debug_dumpstack(NULL);
568         }
569 }
570
571 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
572 {                                                                       \
573         cfs_time_t __closed = cfs_time_current();                       \
574         cfs_time_t __alloced = oth->oth_alloced;                        \
575         cfs_time_t __started = oth->oth_started;                        \
576                                                                         \
577         expr;                                                           \
578         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
579 }
580
581 #else /* OSD_THANDLE_STATS */
582
583 #define osd_th_alloced(h)                  do {} while(0)
584 #define osd_th_started(h)                  do {} while(0)
585 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
586
587 #endif /* OSD_THANDLE_STATS */
588
589 /*
590  * Concurrency: doesn't access mutable data.
591  */
592 static int osd_param_is_sane(const struct osd_device *dev,
593                              const struct thandle *th)
594 {
595         struct osd_thandle *oh;
596         oh = container_of0(th, struct osd_thandle, ot_super);
597         return oh->ot_credits <= osd_journal(dev)->j_max_transaction_buffers;
598 }
599
600 /*
601  * Concurrency: shouldn't matter.
602  */
603 #ifdef HAVE_LDISKFS_JOURNAL_CALLBACK_ADD
604 static void osd_trans_commit_cb(struct super_block *sb,
605                                 struct journal_callback *jcb, int error)
606 #else
607 static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
608 #endif
609 {
610         struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
611         struct thandle     *th  = &oh->ot_super;
612         struct lu_device   *lud = &th->th_dev->dd_lu_dev;
613         struct dt_txn_commit_cb *dcb, *tmp;
614
615         LASSERT(oh->ot_handle == NULL);
616
617         if (error)
618                 CERROR("transaction @0x%p commit error: %d\n", th, error);
619
620         dt_txn_hook_commit(th);
621
622         /* call per-transaction callbacks if any */
623         cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
624                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
625                          "commit callback entry: magic=%x name='%s'\n",
626                          dcb->dcb_magic, dcb->dcb_name);
627                 cfs_list_del_init(&dcb->dcb_linkage);
628                 dcb->dcb_func(NULL, th, dcb, error);
629         }
630
631         lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
632         lu_device_put(lud);
633         th->th_dev = NULL;
634
635         lu_context_exit(&th->th_ctx);
636         lu_context_fini(&th->th_ctx);
637         OBD_FREE_PTR(oh);
638 }
639
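/*
 * Allocate a thandle for a new transaction. Credits are accumulated later by
 * the per-operation declare methods; the journal handle itself is only
 * started in osd_trans_start().
 */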
640 static struct thandle *osd_trans_create(const struct lu_env *env,
641                                         struct dt_device *d)
642 {
643         struct osd_thread_info *oti = osd_oti_get(env);
644         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
645         struct osd_thandle     *oh;
646         struct thandle         *th;
647         ENTRY;
648
649         /* no pending IO in this thread should be left from a prev. request */
650         LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
651
652         th = ERR_PTR(-ENOMEM);
653         OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
654         if (oh != NULL) {
655                 oh->ot_quota_trans = &oti->oti_quota_trans;
656                 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
657                 th = &oh->ot_super;
658                 th->th_dev = d;
659                 th->th_result = 0;
660                 th->th_tags = LCT_TX_HANDLE;
661                 oh->ot_credits = 0;
662                 oti->oti_dev = osd_dt_dev(d);
663                 CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
664                 osd_th_alloced(oh);
665         }
666         RETURN(th);
667 }
668
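/*
 * Start the journal transaction with the credits accumulated during the
 * declare phase. If the estimate exceeds the journal's maximum, the credits
 * are clamped to j_max_transaction_buffers and a warning is printed.
 */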
669 /*
670  * Concurrency: shouldn't matter.
671  */
672 int osd_trans_start(const struct lu_env *env, struct dt_device *d,
673                     struct thandle *th)
674 {
675         struct osd_thread_info *oti = osd_oti_get(env);
676         struct osd_device  *dev = osd_dt_dev(d);
677         handle_t           *jh;
678         struct osd_thandle *oh;
679         int rc;
680
681         ENTRY;
682
683         LASSERT(current->journal_info == NULL);
684
685         oh = container_of0(th, struct osd_thandle, ot_super);
686         LASSERT(oh != NULL);
687         LASSERT(oh->ot_handle == NULL);
688
689         rc = dt_txn_hook_start(env, d, th);
690         if (rc != 0)
691                 GOTO(out, rc);
692
693         if (!osd_param_is_sane(dev, th)) {
694                 CWARN("%.16s: too many transaction credits (%d > %d)\n",
695                       LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
696                       oh->ot_credits,
697                       osd_journal(dev)->j_max_transaction_buffers);
698                 /* XXX Limit the credits to 'max_transaction_buffers', and
699                  *     let the underlying filesystem catch the error if
700                  *     we really need that many credits.
701                  *
702                  *     This should be removed when we can calculate the
703                  *     credits precisely. */
704                 oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
705 #ifdef OSD_TRACK_DECLARES
706                 CERROR("  attr_set: %d, punch: %d, xattr_set: %d,\n",
707                        oh->ot_declare_attr_set, oh->ot_declare_punch,
708                        oh->ot_declare_xattr_set);
709                 CERROR("  create: %d, ref_add: %d, ref_del: %d, write: %d\n",
710                        oh->ot_declare_create, oh->ot_declare_ref_add,
711                        oh->ot_declare_ref_del, oh->ot_declare_write);
712                 CERROR("  insert: %d, delete: %d, destroy: %d\n",
713                        oh->ot_declare_insert, oh->ot_declare_delete,
714                        oh->ot_declare_destroy);
715 #endif
716         }
717
718         /*
719          * XXX temporary stuff. Some abstraction layer should
720          * be used.
721          */
722         jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
723         osd_th_started(oh);
724         if (!IS_ERR(jh)) {
725                 oh->ot_handle = jh;
726                 LASSERT(oti->oti_txns == 0);
727                 lu_context_init(&th->th_ctx, th->th_tags);
728                 lu_context_enter(&th->th_ctx);
729
730                 lu_device_get(&d->dd_lu_dev);
731                 oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
732                                              "osd-tx", th);
733                 oti->oti_txns++;
734                 rc = 0;
735         } else {
736                 rc = PTR_ERR(jh);
737         }
738 out:
739         RETURN(rc);
740 }
741
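/*
 * Stop the transaction: notify the quota slave, register the commit callback,
 * run the transaction stop hooks, close the journal handle, and finally wait
 * for any data IO submitted by this thread so the per-thread iobuf can be
 * safely reused.
 */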
742 /*
743  * Concurrency: shouldn't matter.
744  */
745 static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
746 {
747         int                     rc = 0;
748         struct osd_thandle     *oh;
749         struct osd_thread_info *oti = osd_oti_get(env);
750         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
751         struct qsd_instance    *qsd = oti->oti_dev->od_quota_slave;
752         ENTRY;
753
754         oh = container_of0(th, struct osd_thandle, ot_super);
755
756         if (qsd != NULL)
757                 /* inform the quota slave device that the transaction is
758                  * stopping */
759                 qsd_op_end(env, qsd, oh->ot_quota_trans);
760         oh->ot_quota_trans = NULL;
761
762         if (oh->ot_handle != NULL) {
763                 handle_t *hdl = oh->ot_handle;
764
765                 /*
766                  * add the commit callback
767                  * notice we don't do this in osd_trans_start() because the
768                  * underlying transaction can change during truncate
769                  */
770                 osd_journal_callback_set(hdl, osd_trans_commit_cb,
771                                          &oh->ot_jcb);
772
773                 LASSERT(oti->oti_txns == 1);
774                 oti->oti_txns--;
775                 rc = dt_txn_hook_stop(env, th);
776                 if (rc != 0)
777                         CERROR("Failure in transaction hook: %d\n", rc);
778
779                 /* hook functions might modify th_sync */
780                 hdl->h_sync = th->th_sync;
781
782                 oh->ot_handle = NULL;
783                 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
784                                   rc = ldiskfs_journal_stop(hdl));
785                 if (rc != 0)
786                         CERROR("Failure to stop transaction: %d\n", rc);
787         } else {
788                 OBD_FREE_PTR(oh);
789         }
790
791         /* As we want journal IO and data IO to be concurrent, we don't block
792          * awaiting data IO completion in osd_do_bio(); instead we wait here
793          * once the transaction is submitted to the journal. Regular requests
794          * don't do direct IO (except read/write), so this wait_event becomes
795          * a no-op for them.
796          *
797          * IMPORTANT: we have to wait until any IO submitted by this thread
798          * has completed, otherwise the iobuf may be corrupted by a different
799          * request. */
800         cfs_wait_event(iobuf->dr_wait,
801                        cfs_atomic_read(&iobuf->dr_numreqs) == 0);
802         if (!rc)
803                 rc = iobuf->dr_error;
804
805         RETURN(rc);
806 }
807
808 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
809 {
810         struct osd_thandle *oh = container_of0(th, struct osd_thandle,
811                                                ot_super);
812
813         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
814         LASSERT(dcb->dcb_func != NULL);
815         cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
816
817         return 0;
818 }
819
820 /*
821  * Called just before object is freed. Releases all resources except for
822  * object itself (that is released by osd_object_free()).
823  *
824  * Concurrency: no concurrent access is possible that late in object
825  * life-cycle.
826  */
827 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
828 {
829         struct osd_object *obj   = osd_obj(l);
830         struct inode      *inode = obj->oo_inode;
831
832         LINVRNT(osd_invariant(obj));
833
834         /*
835          * If object is unlinked remove fid->ino mapping from object index.
836          */
837
838         osd_index_fini(obj);
839         if (inode != NULL) {
840                 iput(inode);
841                 obj->oo_inode = NULL;
842         }
843 }
844
845 /*
846  * Concurrency: ->loo_object_release() is called under site spin-lock.
847  */
848 static void osd_object_release(const struct lu_env *env,
849                                struct lu_object *l)
850 {
851 }
852
853 /*
854  * Concurrency: shouldn't matter.
855  */
856 static int osd_object_print(const struct lu_env *env, void *cookie,
857                             lu_printer_t p, const struct lu_object *l)
858 {
859         struct osd_object *o = osd_obj(l);
860         struct iam_descr  *d;
861
862         if (o->oo_dir != NULL)
863                 d = o->oo_dir->od_container.ic_descr;
864         else
865                 d = NULL;
866         return (*p)(env, cookie,
867                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
868                     o, o->oo_inode,
869                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
870                     o->oo_inode ? o->oo_inode->i_generation : 0,
871                     d ? d->id_ops->id_name : "plain");
872 }
873
874 /*
875  * Concurrency: shouldn't matter.
876  */
877 int osd_statfs(const struct lu_env *env, struct dt_device *d,
878                struct obd_statfs *sfs)
879 {
880         struct osd_device  *osd = osd_dt_dev(d);
881         struct super_block *sb = osd_sb(osd);
882         struct kstatfs     *ksfs;
883         int result = 0;
884
885         if (unlikely(osd->od_mnt == NULL))
886                 return -EINPROGRESS;
887
888         /* osd_lproc.c calls this without env, allocate ksfs for that case */
889         if (unlikely(env == NULL)) {
890                 OBD_ALLOC_PTR(ksfs);
891                 if (ksfs == NULL)
892                         return -ENOMEM;
893         } else {
894                 ksfs = &osd_oti_get(env)->oti_ksfs;
895         }
896
897         cfs_spin_lock(&osd->od_osfs_lock);
898         /* cache 1 second */
899         if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
900                 result = sb->s_op->statfs(sb->s_root, ksfs);
901                 if (likely(result == 0)) { /* N.B. statfs can't really fail */
902                         osd->od_osfs_age = cfs_time_current_64();
903                         statfs_pack(&osd->od_statfs, ksfs);
904                         if (sb->s_flags & MS_RDONLY)
905                                 osd->od_statfs.os_state = OS_STATE_READONLY;
906                 }
907         }
908
909         if (likely(result == 0))
910                 *sfs = osd->od_statfs;
911         cfs_spin_unlock(&osd->od_osfs_lock);
912
913         if (unlikely(env == NULL))
914                 OBD_FREE_PTR(ksfs);
915
916         return result;
917 }
918
919 /**
920  * Estimate space needed for file creations. We assume the largest filename,
921  * which is the object ID 2^64 - 1, hence a name of up to 20 characters.
922  * This is 28 bytes per object, i.e. 28MB for 1M objects ... not so bad.
923  */
924 #ifdef __LDISKFS_DIR_REC_LEN
925 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
926 #else
927 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
928 #endif
929
930 /*
931  * Concurrency: doesn't access mutable data.
932  */
933 static void osd_conf_get(const struct lu_env *env,
934                          const struct dt_device *dev,
935                          struct dt_device_param *param)
936 {
937         struct super_block *sb = osd_sb(osd_dt_dev(dev));
938
939         /*
940          * XXX should be taken from not-yet-existing fs abstraction layer.
941          */
942         param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
943         param->ddp_max_name_len = LDISKFS_NAME_LEN;
944         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
945         param->ddp_block_shift  = sb->s_blocksize_bits;
946         param->ddp_mount_type     = LDD_MT_LDISKFS;
947         param->ddp_maxbytes       = sb->s_maxbytes;
948         /* The overhead estimate should be fairly accurate, so we take only a tiny
949          * error margin, which also avoids fragmenting the filesystem too much */
950         param->ddp_grant_reserved = 2; /* ends up being 1.9% after conversion */
951         /* inodes are statically allocated, so per-inode space consumption
952          * is the space consumed by the directory entry */
953         param->ddp_inodespace     = PER_OBJ_USAGE;
954         /* per-fragment overhead to be used by the client code */
955         param->ddp_grant_frag     = 6 * LDISKFS_BLOCK_SIZE(sb);
956         param->ddp_mntopts      = 0;
957         if (test_opt(sb, XATTR_USER))
958                 param->ddp_mntopts |= MNTOPT_USERXATTR;
959         if (test_opt(sb, POSIX_ACL))
960                 param->ddp_mntopts |= MNTOPT_ACL;
961
962 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
963         if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
964                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
965         else
966 #endif
967                 param->ddp_max_ea_size = sb->s_blocksize;
968
969 }
970
971 /**
972  * Helper function to get and fill the buffer with input values.
973  */
974 static struct lu_buf *osd_buf_get(const struct lu_env *env, void *area, ssize_t len)
975 {
976         struct lu_buf *buf;
977
978         buf = &osd_oti_get(env)->oti_buf;
979         buf->lb_buf = area;
980         buf->lb_len = len;
981         return buf;
982 }
983
984 /*
985  * Concurrency: shouldn't matter.
986  */
987 static int osd_sync(const struct lu_env *env, struct dt_device *d)
988 {
989         CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
990         return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
991 }
992
993 /**
994  * Start commit for OSD device.
995  *
996  * An implementation of the dt_commit_async method for the OSD device.
997  * Asynchronously starts the underlying fs sync and thereby a transaction
998  * commit.
999  *
1000  * \param env environment
1001  * \param d dt device
1002  *
1003  * \see dt_device_operations
1004  */
1005 static int osd_commit_async(const struct lu_env *env,
1006                             struct dt_device *d)
1007 {
1008         struct super_block *s = osd_sb(osd_dt_dev(d));
1009         ENTRY;
1010
1011         CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1012         RETURN(s->s_op->sync_fs(s, 0));
1013 }
1014
1015 /*
1016  * Concurrency: shouldn't matter.
1017  */
1018
1019 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1020 {
1021         struct super_block *sb = osd_sb(osd_dt_dev(d));
1022         int rc;
1023         ENTRY;
1024
1025         CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1026
1027         rc = __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
1028         RETURN(rc);
1029 }
1030
1031 /*
1032  * Concurrency: serialization provided by callers.
1033  */
1034 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
1035                               int mode, unsigned long timeout, __u32 alg,
1036                               struct lustre_capa_key *keys)
1037 {
1038         struct osd_device *dev = osd_dt_dev(d);
1039         ENTRY;
1040
1041         dev->od_fl_capa = mode;
1042         dev->od_capa_timeout = timeout;
1043         dev->od_capa_alg = alg;
1044         dev->od_capa_keys = keys;
1045         RETURN(0);
1046 }
1047
1048 /**
1049  * Note: quota credits are not counted here.
1050  * If we mount with --data_journal we may need more.
1051  */
1052 const int osd_dto_credits_noquota[DTO_NR] = {
1053         /**
1054          * Insert/Delete.
1055          * INDEX_EXTRA_TRANS_BLOCKS(8) +
1056          * SINGLEDATA_TRANS_BLOCKS(8)
1057          * XXX Note: maybe IAM needs more, since IAM has more levels than
1058          *           the EXT3 htree.
1059          */
1060         [DTO_INDEX_INSERT]  = 16,
1061         [DTO_INDEX_DELETE]  = 16,
1062         /**
1063          * Used for OI scrub
1064          */
1065         [DTO_INDEX_UPDATE]  = 16,
1066         /**
1067          * Create an object. The same as creating an object in EXT3.
1068          * DATA_TRANS_BLOCKS(14) +
1069          * INDEX_EXTRA_BLOCKS(8) +
1070          * 3(inode bits, groups, GDT)
1071          */
1072         [DTO_OBJECT_CREATE] = 25,
1073         /**
1074          * XXX: real credits to be fixed
1075          */
1076         [DTO_OBJECT_DELETE] = 25,
1077         /**
1078          * Attr set credits (inode)
1079          */
1080         [DTO_ATTR_SET_BASE] = 1,
1081         /**
1082          * Xattr set. The same as xattr of EXT3.
1083          * DATA_TRANS_BLOCKS(14)
1084          * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
1085          * are also counted in; it is not clear why.
1086          */
1087         [DTO_XATTR_SET]     = 14,
1088         [DTO_LOG_REC]       = 14,
1089         /**
1090          * credits for inode change during write.
1091          */
1092         [DTO_WRITE_BASE]    = 3,
1093         /**
1094          * credits for single block write.
1095          */
1096         [DTO_WRITE_BLOCK]   = 14,
1097         /**
1098          * Attr set credits for chown.
1099          * This is extra credits for setattr, and it is null without quota
1100          */
1101         [DTO_ATTR_SET_CHOWN]= 0
1102 };
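/*
 * Rough illustration of how these values are meant to be combined (the exact
 * accounting lives in the individual osd_declare_* methods): declaring an
 * object creation plus a directory-index insertion would add about
 * DTO_OBJECT_CREATE (25) + DTO_INDEX_INSERT (16) = 41 credits to ot_credits,
 * which osd_trans_start() later passes to ldiskfs_journal_start_sb().
 */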
1103
1104 static const struct dt_device_operations osd_dt_ops = {
1105         .dt_root_get       = osd_root_get,
1106         .dt_statfs         = osd_statfs,
1107         .dt_trans_create   = osd_trans_create,
1108         .dt_trans_start    = osd_trans_start,
1109         .dt_trans_stop     = osd_trans_stop,
1110         .dt_trans_cb_add   = osd_trans_cb_add,
1111         .dt_conf_get       = osd_conf_get,
1112         .dt_sync           = osd_sync,
1113         .dt_ro             = osd_ro,
1114         .dt_commit_async   = osd_commit_async,
1115         .dt_init_capa_ctxt = osd_init_capa_ctxt,
1116 };
1117
1118 static void osd_object_read_lock(const struct lu_env *env,
1119                                  struct dt_object *dt, unsigned role)
1120 {
1121         struct osd_object *obj = osd_dt_obj(dt);
1122         struct osd_thread_info *oti = osd_oti_get(env);
1123
1124         LINVRNT(osd_invariant(obj));
1125
1126         LASSERT(obj->oo_owner != env);
1127         cfs_down_read_nested(&obj->oo_sem, role);
1128
1129         LASSERT(obj->oo_owner == NULL);
1130         oti->oti_r_locks++;
1131 }
1132
1133 static void osd_object_write_lock(const struct lu_env *env,
1134                                   struct dt_object *dt, unsigned role)
1135 {
1136         struct osd_object *obj = osd_dt_obj(dt);
1137         struct osd_thread_info *oti = osd_oti_get(env);
1138
1139         LINVRNT(osd_invariant(obj));
1140
1141         LASSERT(obj->oo_owner != env);
1142         cfs_down_write_nested(&obj->oo_sem, role);
1143
1144         LASSERT(obj->oo_owner == NULL);
1145         obj->oo_owner = env;
1146         oti->oti_w_locks++;
1147 }
1148
1149 static void osd_object_read_unlock(const struct lu_env *env,
1150                                    struct dt_object *dt)
1151 {
1152         struct osd_object *obj = osd_dt_obj(dt);
1153         struct osd_thread_info *oti = osd_oti_get(env);
1154
1155         LINVRNT(osd_invariant(obj));
1156
1157         LASSERT(oti->oti_r_locks > 0);
1158         oti->oti_r_locks--;
1159         cfs_up_read(&obj->oo_sem);
1160 }
1161
1162 static void osd_object_write_unlock(const struct lu_env *env,
1163                                     struct dt_object *dt)
1164 {
1165         struct osd_object *obj = osd_dt_obj(dt);
1166         struct osd_thread_info *oti = osd_oti_get(env);
1167
1168         LINVRNT(osd_invariant(obj));
1169
1170         LASSERT(obj->oo_owner == env);
1171         LASSERT(oti->oti_w_locks > 0);
1172         oti->oti_w_locks--;
1173         obj->oo_owner = NULL;
1174         cfs_up_write(&obj->oo_sem);
1175 }
1176
1177 static int osd_object_write_locked(const struct lu_env *env,
1178                                    struct dt_object *dt)
1179 {
1180         struct osd_object *obj = osd_dt_obj(dt);
1181
1182         LINVRNT(osd_invariant(obj));
1183
1184         return obj->oo_owner == env;
1185 }
1186
1187 static int capa_is_sane(const struct lu_env *env,
1188                         struct osd_device *dev,
1189                         struct lustre_capa *capa,
1190                         struct lustre_capa_key *keys)
1191 {
1192         struct osd_thread_info *oti = osd_oti_get(env);
1193         struct lustre_capa *tcapa = &oti->oti_capa;
1194         struct obd_capa *oc;
1195         int i, rc = 0;
1196         ENTRY;
1197
1198         oc = capa_lookup(dev->od_capa_hash, capa, 0);
1199         if (oc) {
1200                 if (capa_is_expired(oc)) {
1201                         DEBUG_CAPA(D_ERROR, capa, "expired");
1202                         rc = -ESTALE;
1203                 }
1204                 capa_put(oc);
1205                 RETURN(rc);
1206         }
1207
1208         if (capa_is_expired_sec(capa)) {
1209                 DEBUG_CAPA(D_ERROR, capa, "expired");
1210                 RETURN(-ESTALE);
1211         }
1212
1213         cfs_spin_lock(&capa_lock);
1214         for (i = 0; i < 2; i++) {
1215                 if (keys[i].lk_keyid == capa->lc_keyid) {
1216                         oti->oti_capa_key = keys[i];
1217                         break;
1218                 }
1219         }
1220         cfs_spin_unlock(&capa_lock);
1221
1222         if (i == 2) {
1223                 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1224                 RETURN(-ESTALE);
1225         }
1226
1227         rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1228         if (rc)
1229                 RETURN(rc);
1230
1231         if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1232                 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1233                 RETURN(-EACCES);
1234         }
1235
1236         oc = capa_add(dev->od_capa_hash, capa);
1237         capa_put(oc);
1238
1239         RETURN(0);
1240 }
1241
1242 int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1243                     struct lustre_capa *capa, __u64 opc)
1244 {
1245         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1246         struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1247         struct md_capainfo *ci;
1248         int rc;
1249
1250         if (!dev->od_fl_capa)
1251                 return 0;
1252
1253         if (capa == BYPASS_CAPA)
1254                 return 0;
1255
1256         ci = md_capainfo(env);
1257         if (unlikely(!ci))
1258                 return 0;
1259
1260         if (ci->mc_auth == LC_ID_NONE)
1261                 return 0;
1262
1263         if (!capa) {
1264                 CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
1265                 return -EACCES;
1266         }
1267
1268         if (!lu_fid_eq(fid, &capa->lc_fid)) {
1269                 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1270                            PFID(fid));
1271                 return -EACCES;
1272         }
1273
1274         if (!capa_opc_supported(capa, opc)) {
1275                 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1276                 return -EACCES;
1277         }
1278
1279         if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
1280                 DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
1281                 return -EACCES;
1282         }
1283
1284         return 0;
1285 }
1286
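/*
 * Convert a Lustre time in seconds into a timespec truncated to the
 * filesystem's time granularity. The result lives in per-thread scratch
 * space, so it is only valid until the next call within the same env.
 */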
1287 static struct timespec *osd_inode_time(const struct lu_env *env,
1288                                        struct inode *inode, __u64 seconds)
1289 {
1290         struct osd_thread_info  *oti = osd_oti_get(env);
1291         struct timespec         *t   = &oti->oti_time;
1292
1293         t->tv_sec = seconds;
1294         t->tv_nsec = 0;
1295         *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
1296         return t;
1297 }
1298
1299
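/*
 * Fill @attr from the in-core inode. The caller is expected to hold
 * oo_guard (see osd_attr_get()) so that the snapshot is consistent.
 */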
1300 static void osd_inode_getattr(const struct lu_env *env,
1301                               struct inode *inode, struct lu_attr *attr)
1302 {
1303         attr->la_valid      |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1304                                LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1305                                LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
1306
1307         attr->la_atime      = LTIME_S(inode->i_atime);
1308         attr->la_mtime      = LTIME_S(inode->i_mtime);
1309         attr->la_ctime      = LTIME_S(inode->i_ctime);
1310         attr->la_mode       = inode->i_mode;
1311         attr->la_size       = i_size_read(inode);
1312         attr->la_blocks     = inode->i_blocks;
1313         attr->la_uid        = inode->i_uid;
1314         attr->la_gid        = inode->i_gid;
1315         attr->la_flags      = LDISKFS_I(inode)->i_flags;
1316         attr->la_nlink      = inode->i_nlink;
1317         attr->la_rdev       = inode->i_rdev;
1318         attr->la_blksize    = 1 << inode->i_blkbits;
1319         attr->la_blkbits    = inode->i_blkbits;
1320 }
1321
1322 static int osd_attr_get(const struct lu_env *env,
1323                         struct dt_object *dt,
1324                         struct lu_attr *attr,
1325                         struct lustre_capa *capa)
1326 {
1327         struct osd_object *obj = osd_dt_obj(dt);
1328
1329         LASSERT(dt_object_exists(dt));
1330         LINVRNT(osd_invariant(obj));
1331
1332         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1333                 return -EACCES;
1334
1335         cfs_spin_lock(&obj->oo_guard);
1336         osd_inode_getattr(env, obj->oo_inode, attr);
1337         cfs_spin_unlock(&obj->oo_guard);
1338         return 0;
1339 }
1340
1341 static int osd_declare_attr_set(const struct lu_env *env,
1342                                 struct dt_object *dt,
1343                                 const struct lu_attr *attr,
1344                                 struct thandle *handle)
1345 {
1346         struct osd_thandle     *oh;
1347         struct osd_object      *obj;
1348         struct osd_thread_info *info = osd_oti_get(env);
1349         struct lquota_id_info  *qi = &info->oti_qi;
1350         long long               bspace;
1351         int                     rc = 0;
1352         bool                    allocated;
1353         ENTRY;
1354
1355         LASSERT(dt != NULL);
1356         LASSERT(handle != NULL);
1357
1358         obj = osd_dt_obj(dt);
1359         LASSERT(osd_invariant(obj));
1360
1361         oh = container_of0(handle, struct osd_thandle, ot_super);
1362         LASSERT(oh->ot_handle == NULL);
1363
1364         OSD_DECLARE_OP(oh, attr_set);
1365         oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
1366
1367         if (attr == NULL || obj->oo_inode == NULL)
1368                 RETURN(rc);
1369
1370         bspace   = obj->oo_inode->i_blocks;
1371         bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1372         bspace   = toqb(bspace);
1373
1374         /* Changing ownership is always performed by the superuser, so it
1375          * should not fail with EDQUOT.
1376          *
1377          * We still need to call osd_declare_qid() to calculate the journal
1378          * credits for updating the quota accounting files and to trigger
1379          * quota space adjustment once the operation is completed. */
1380         if ((attr->la_valid & LA_UID) != 0 &&
1381              attr->la_uid != obj->oo_inode->i_uid) {
1382                 qi->lqi_type = USRQUOTA;
1383
1384                 /* inode accounting */
1385                 qi->lqi_is_blk = false;
1386
1387                 /* one more inode for the new owner ... */
1388                 qi->lqi_id.qid_uid = attr->la_uid;
1389                 qi->lqi_space      = 1;
1390                 allocated = (attr->la_uid == 0) ? true : false;
1391                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1392                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1393                         rc = 0;
1394                 if (rc)
1395                         RETURN(rc);
1396
1397                 /* and one less inode for the current uid */
1398                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1399                 qi->lqi_space      = -1;
1400                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1401                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1402                         rc = 0;
1403                 if (rc)
1404                         RETURN(rc);
1405
1406                 /* block accounting */
1407                 qi->lqi_is_blk = true;
1408
1409                 /* more blocks for the new owner ... */
1410                 qi->lqi_id.qid_uid = attr->la_uid;
1411                 qi->lqi_space      = bspace;
1412                 allocated = (attr->la_uid == 0) ? true : false;
1413                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1414                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1415                         rc = 0;
1416                 if (rc)
1417                         RETURN(rc);
1418
1419                 /* and finally less blocks for the current owner */
1420                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1421                 qi->lqi_space      = -bspace;
1422                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1423                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1424                         rc = 0;
1425                 if (rc)
1426                         RETURN(rc);
1427         }
1428
1429         if (attr->la_valid & LA_GID &&
1430             attr->la_gid != obj->oo_inode->i_gid) {
1431                 qi->lqi_type = GRPQUOTA;
1432
1433                 /* inode accounting */
1434                 qi->lqi_is_blk = false;
1435
1436                 /* one more inode for the new group owner ... */
1437                 qi->lqi_id.qid_gid = attr->la_gid;
1438                 qi->lqi_space      = 1;
1439                 allocated = (attr->la_gid == 0) ? true : false;
1440                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1441                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1442                         rc = 0;
1443                 if (rc)
1444                         RETURN(rc);
1445
1446                 /* and one less inode for the current gid */
1447                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1448                 qi->lqi_space      = -1;
1449                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1450                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1451                         rc = 0;
1452                 if (rc)
1453                         RETURN(rc);
1454
1455                 /* block accounting */
1456                 qi->lqi_is_blk = true;
1457
1458                 /* more blocks for the new owner ... */
1459                 qi->lqi_id.qid_gid = attr->la_gid;
1460                 qi->lqi_space      = bspace;
1461                 allocated = (attr->la_gid == 0) ? true : false;
1462                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1463                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1464                         rc = 0;
1465                 if (rc)
1466                         RETURN(rc);
1467
1468                 /* and finally less blocks for the current owner */
1469                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1470                 qi->lqi_space      = -bspace;
1471                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1472                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1473                         rc = 0;
1474                 if (rc)
1475                         RETURN(rc);
1476         }
1477
1478         RETURN(rc);
1479 }
1480
1481 static int osd_inode_setattr(const struct lu_env *env,
1482                              struct inode *inode, const struct lu_attr *attr)
1483 {
1484         __u64 bits;
1485
1486         bits = attr->la_valid;
1487
1488         LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
1489
1490         if (bits & LA_ATIME)
1491                 inode->i_atime  = *osd_inode_time(env, inode, attr->la_atime);
1492         if (bits & LA_CTIME)
1493                 inode->i_ctime  = *osd_inode_time(env, inode, attr->la_ctime);
1494         if (bits & LA_MTIME)
1495                 inode->i_mtime  = *osd_inode_time(env, inode, attr->la_mtime);
1496         if (bits & LA_SIZE) {
1497                 LDISKFS_I(inode)->i_disksize = attr->la_size;
1498                 i_size_write(inode, attr->la_size);
1499         }
1500
1501 #if 0
1502         /* OSD should not change "i_blocks" which is used by quota.
1503          * "i_blocks" should be changed by ldiskfs only. */
1504         if (bits & LA_BLOCKS)
1505                 inode->i_blocks = attr->la_blocks;
1506 #endif
1507         if (bits & LA_MODE)
1508                 inode->i_mode   = (inode->i_mode & S_IFMT) |
1509                         (attr->la_mode & ~S_IFMT);
1510         if (bits & LA_UID)
1511                 inode->i_uid    = attr->la_uid;
1512         if (bits & LA_GID)
1513                 inode->i_gid    = attr->la_gid;
1514         if (bits & LA_NLINK)
1515                 inode->i_nlink  = attr->la_nlink;
1516         if (bits & LA_RDEV)
1517                 inode->i_rdev   = attr->la_rdev;
1518
1519         if (bits & LA_FLAGS) {
1520                 /* always keep S_NOCMTIME */
1521                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1522                                  S_NOCMTIME;
1523         }
1524         return 0;
1525 }
1526
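/*
 * A minimal illustration (not part of the driver) of how a caller is
 * expected to fill struct lu_attr before an attr_set: only the fields
 * whose LA_* bits are present in la_valid are applied by
 * osd_inode_setattr() above.  The values below are purely hypothetical.
 */
#if 0
        struct lu_attr attr = { 0 };

        attr.la_size  = 0;                      /* e.g. truncate to zero */
        attr.la_mtime = attr.la_ctime = cfs_time_current_sec();
        attr.la_valid = LA_SIZE | LA_MTIME | LA_CTIME;
#endif
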
1527 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1528 {
1529         if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
1530             (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
1531                 struct iattr    iattr;
1532                 int             rc;
1533
1534                 iattr.ia_valid = 0;
1535                 if (attr->la_valid & LA_UID)
1536                         iattr.ia_valid |= ATTR_UID;
1537                 if (attr->la_valid & LA_GID)
1538                         iattr.ia_valid |= ATTR_GID;
1539                 iattr.ia_uid = attr->la_uid;
1540                 iattr.ia_gid = attr->la_gid;
1541
1542                 rc = ll_vfs_dq_transfer(inode, &iattr);
1543                 if (rc) {
1544                         CERROR("%s: quota transfer failed: rc = %d. Is quota "
1545                                "enforcement enabled on the ldiskfs filesystem?\n",
1546                                inode->i_sb->s_id, rc);
1547                         return rc;
1548                 }
1549         }
1550         return 0;
1551 }
1552
1553 static int osd_attr_set(const struct lu_env *env,
1554                         struct dt_object *dt,
1555                         const struct lu_attr *attr,
1556                         struct thandle *handle,
1557                         struct lustre_capa *capa)
1558 {
1559         struct osd_object *obj = osd_dt_obj(dt);
1560         struct inode      *inode;
1561         int rc;
1562
1563         LASSERT(handle != NULL);
1564         LASSERT(dt_object_exists(dt));
1565         LASSERT(osd_invariant(obj));
1566
1567         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1568                 return -EACCES;
1569
1570         OSD_EXEC_OP(handle, attr_set);
1571
1572         inode = obj->oo_inode;
1573
1574         rc = osd_quota_transfer(inode, attr);
1575         if (rc)
1576                 return rc;
1577
1578         cfs_spin_lock(&obj->oo_guard);
1579         rc = osd_inode_setattr(env, inode, attr);
1580         cfs_spin_unlock(&obj->oo_guard);
1581
1582         if (!rc)
1583                 inode->i_sb->s_op->dirty_inode(inode);
1584         return rc;
1585 }
1586
1587 struct dentry *osd_child_dentry_get(const struct lu_env *env,
1588                                     struct osd_object *obj,
1589                                     const char *name, const int namelen)
1590 {
1591         return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
1592 }
1593
1594 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1595                       cfs_umode_t mode,
1596                       struct dt_allocation_hint *hint,
1597                       struct thandle *th)
1598 {
1599         int result;
1600         struct osd_device  *osd = osd_obj2dev(obj);
1601         struct osd_thandle *oth;
1602         struct dt_object   *parent = NULL;
1603         struct inode       *inode;
1604
1605         LINVRNT(osd_invariant(obj));
1606         LASSERT(obj->oo_inode == NULL);
1607         LASSERT(obj->oo_hl_head == NULL);
1608
1609         if (S_ISDIR(mode) && ldiskfs_pdo) {
1610                 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1611                 if (obj->oo_hl_head == NULL)
1612                         return -ENOMEM;
1613         }
1614
1615         oth = container_of(th, struct osd_thandle, ot_super);
1616         LASSERT(oth->ot_handle->h_transaction != NULL);
1617
1618         if (hint && hint->dah_parent)
1619                 parent = hint->dah_parent;
1620
1621         inode = ldiskfs_create_inode(oth->ot_handle,
1622                                      parent ? osd_dt_obj(parent)->oo_inode :
1623                                               osd_sb(osd)->s_root->d_inode,
1624                                      mode);
1625         if (!IS_ERR(inode)) {
1626                 /* Do not update file c/mtime in ldiskfs.
1627                  * NB: don't need any lock because no contention at this
1628                  * early stage */
1629                 inode->i_flags |= S_NOCMTIME;
1630                 inode->i_state |= I_LUSTRE_NOSCRUB;
1631                 obj->oo_inode = inode;
1632                 result = 0;
1633         } else {
1634                 if (obj->oo_hl_head != NULL) {
1635                         ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1636                         obj->oo_hl_head = NULL;
1637                 }
1638                 result = PTR_ERR(inode);
1639         }
1640         LINVRNT(osd_invariant(obj));
1641         return result;
1642 }
1643
1644 enum {
1645         OSD_NAME_LEN = 255
1646 };
1647
1648 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1649                      struct lu_attr *attr,
1650                      struct dt_allocation_hint *hint,
1651                      struct dt_object_format *dof,
1652                      struct thandle *th)
1653 {
1654         int result;
1655         struct osd_thandle *oth;
1656         struct osd_device *osd = osd_obj2dev(obj);
1657         __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1658
1659         LASSERT(S_ISDIR(attr->la_mode));
1660
1661         oth = container_of(th, struct osd_thandle, ot_super);
1662         LASSERT(oth->ot_handle->h_transaction != NULL);
1663         result = osd_mkfile(info, obj, mode, hint, th);
1664         if (result == 0 && osd->od_iop_mode == 0) {
1665                 LASSERT(obj->oo_inode != NULL);
1666                 /*
1667                  * XXX uh-oh... call low-level iam function directly.
1668                  */
1669
1670                 result = iam_lvar_create(obj->oo_inode, OSD_NAME_LEN, 4,
1671                                          sizeof (struct osd_fid_pack),
1672                                          oth->ot_handle);
1673         }
1674         return result;
1675 }
1676
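/*
 * Reader note: when the osd is not running in interoperability mode
 * (od_iop_mode == 0), a freshly created directory is immediately given
 * an IAM "lvar" container sized for OSD_NAME_LEN keys with packed fids
 * as records, instead of relying on plain ldiskfs directory entries.
 */
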
1677 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
1678                         struct lu_attr *attr,
1679                         struct dt_allocation_hint *hint,
1680                         struct dt_object_format *dof,
1681                         struct thandle *th)
1682 {
1683         int result;
1684         struct osd_thandle *oth;
1685         const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
1686
1687         __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
1688
1689         LASSERT(S_ISREG(attr->la_mode));
1690
1691         oth = container_of(th, struct osd_thandle, ot_super);
1692         LASSERT(oth->ot_handle->h_transaction != NULL);
1693
1694         result = osd_mkfile(info, obj, mode, hint, th);
1695         if (result == 0) {
1696                 LASSERT(obj->oo_inode != NULL);
1697                 if (feat->dif_flags & DT_IND_VARKEY)
1698                         result = iam_lvar_create(obj->oo_inode,
1699                                                  feat->dif_keysize_max,
1700                                                  feat->dif_ptrsize,
1701                                                  feat->dif_recsize_max,
1702                                                  oth->ot_handle);
1703                 else
1704                         result = iam_lfix_create(obj->oo_inode,
1705                                                  feat->dif_keysize_max,
1706                                                  feat->dif_ptrsize,
1707                                                  feat->dif_recsize_max,
1708                                                  oth->ot_handle);
1709
1710         }
1711         return result;
1712 }
1713
1714 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1715                      struct lu_attr *attr,
1716                      struct dt_allocation_hint *hint,
1717                      struct dt_object_format *dof,
1718                      struct thandle *th)
1719 {
1720         LASSERT(S_ISREG(attr->la_mode));
1721         return osd_mkfile(info, obj, (attr->la_mode &
1722                                (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1723 }
1724
1725 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1726                      struct lu_attr *attr,
1727                      struct dt_allocation_hint *hint,
1728                      struct dt_object_format *dof,
1729                      struct thandle *th)
1730 {
1731         LASSERT(S_ISLNK(attr->la_mode));
1732         return osd_mkfile(info, obj, (attr->la_mode &
1733                               (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1734 }
1735
1736 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1737                      struct lu_attr *attr,
1738                      struct dt_allocation_hint *hint,
1739                      struct dt_object_format *dof,
1740                      struct thandle *th)
1741 {
1742         cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
1743         int result;
1744
1745         LINVRNT(osd_invariant(obj));
1746         LASSERT(obj->oo_inode == NULL);
1747         LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1748                 S_ISFIFO(mode) || S_ISSOCK(mode));
1749
1750         result = osd_mkfile(info, obj, mode, hint, th);
1751         if (result == 0) {
1752                 LASSERT(obj->oo_inode != NULL);
1753                 /*
1754                  * This inode should be marked dirty for i_rdev.  Currently
1755                  * that is done in the osd_attr_init().
1756                  */
1757                 init_special_inode(obj->oo_inode, mode, attr->la_rdev);
1758         }
1759         LINVRNT(osd_invariant(obj));
1760         return result;
1761 }
1762
1763 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1764                               struct lu_attr *,
1765                               struct dt_allocation_hint *hint,
1766                               struct dt_object_format *dof,
1767                               struct thandle *);
1768
1769 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1770 {
1771         osd_obj_type_f result;
1772
1773         switch (type) {
1774         case DFT_DIR:
1775                 result = osd_mkdir;
1776                 break;
1777         case DFT_REGULAR:
1778                 result = osd_mkreg;
1779                 break;
1780         case DFT_SYM:
1781                 result = osd_mksym;
1782                 break;
1783         case DFT_NODE:
1784                 result = osd_mknod;
1785                 break;
1786         case DFT_INDEX:
1787                 result = osd_mk_index;
1788                 break;
1789
1790         default:
1791                 LBUG();
1792                 break;
1793         }
1794         return result;
1795 }
1796
1797
1798 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1799                         struct dt_object *parent, struct dt_object *child,
1800                         cfs_umode_t child_mode)
1801 {
1802         LASSERT(ah);
1803
1804         memset(ah, 0, sizeof(*ah));
1805         ah->dah_parent = parent;
1806         ah->dah_mode = child_mode;
1807 }
1808
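/*
 * Illustrative sketch only (hypothetical caller, not from this file):
 * the allocation hint is primed with the parent object so that the new
 * child inode can later be allocated close to the parent directory by
 * osd_mkfile().  "env", "parent" and "child" are assumed to exist in
 * the caller.
 */
#if 0
        struct dt_allocation_hint hint;

        /* "parent" and "child" are hypothetical dt_object pointers */
        child->do_ops->do_ah_init(env, &hint, parent, child, S_IFREG | 0644);
#endif
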
1809 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
1810                           struct lu_attr *attr, struct dt_object_format *dof)
1811 {
1812         struct inode   *inode = obj->oo_inode;
1813         __u64           valid = attr->la_valid;
1814         int             result;
1815
1816         attr->la_valid &= ~(LA_TYPE | LA_MODE);
1817
1818         if (dof->dof_type != DFT_NODE)
1819                 attr->la_valid &= ~LA_RDEV;
1820         if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
1821                 attr->la_valid &= ~LA_ATIME;
1822         if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
1823                 attr->la_valid &= ~LA_CTIME;
1824         if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
1825                 attr->la_valid &= ~LA_MTIME;
1826
1827         result = osd_quota_transfer(inode, attr);
1828         if (result)
1829                 return;
1830
1831         if (attr->la_valid != 0) {
1832                 result = osd_inode_setattr(info->oti_env, inode, attr);
1833                 /*
1834                  * The osd_inode_setattr() should always succeed here.  The
1835                  * only error that could be returned is EDQUOT when we are
1836                  * trying to change the UID or GID of the inode. However, this
1837                  * should not happen since quota enforcement is no longer
1838                  * enabled on ldiskfs (lquota takes care of it).
1839                  */
1840                 LASSERTF(result == 0, "%d", result);
1841                 inode->i_sb->s_op->dirty_inode(inode);
1842         }
1843
1844         attr->la_valid = valid;
1845 }
1846
1847 /**
1848  * Helper function for osd_object_create()
1849  *
1850  * \retval 0, on success
1851  */
1852 static int __osd_object_create(struct osd_thread_info *info,
1853                                struct osd_object *obj, struct lu_attr *attr,
1854                                struct dt_allocation_hint *hint,
1855                                struct dt_object_format *dof,
1856                                struct thandle *th)
1857 {
1858         int     result;
1859         __u32   umask;
1860
1861         /* we drop umask so that permissions we pass are not affected */
1862         umask = current->fs->umask;
1863         current->fs->umask = 0;
1864
1865         result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
1866                                                   th);
1867         if (result == 0) {
1868                 osd_attr_init(info, obj, attr, dof);
1869                 osd_object_init0(obj);
1870                 /* bz 24037 */
1871                 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
1872                         unlock_new_inode(obj->oo_inode);
1873         }
1874
1875         /* restore previous umask value */
1876         current->fs->umask = umask;
1877
1878         return result;
1879 }
1880
1881 /**
1882  * Helper function for osd_object_create()
1883  *
1884  * \retval 0, on success
1885  */
1886 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
1887                            const struct lu_fid *fid, struct thandle *th)
1888 {
1889         struct osd_thread_info *info = osd_oti_get(env);
1890         struct osd_inode_id    *id   = &info->oti_id;
1891         struct osd_device      *osd  = osd_obj2dev(obj);
1892
1893         LASSERT(obj->oo_inode != NULL);
1894
1895         osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
1896         return osd_oi_insert(info, osd, fid, id, th);
1897 }
1898
1899 static int osd_declare_object_create(const struct lu_env *env,
1900                                      struct dt_object *dt,
1901                                      struct lu_attr *attr,
1902                                      struct dt_allocation_hint *hint,
1903                                      struct dt_object_format *dof,
1904                                      struct thandle *handle)
1905 {
1906         struct osd_thandle      *oh;
1907         int                      rc;
1908         ENTRY;
1909
1910         LASSERT(handle != NULL);
1911
1912         oh = container_of0(handle, struct osd_thandle, ot_super);
1913         LASSERT(oh->ot_handle == NULL);
1914
1915         OSD_DECLARE_OP(oh, create);
1916         oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
1917         /* XXX: So far, only a normal fid needs to be inserted into the OI;
1918          *      things could change later. Revise the following code then. */
1919         if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
1920                 OSD_DECLARE_OP(oh, insert);
1921                 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
1922                 /* Reusing an idle OI block may cause one additional OI
1923                  * block to be changed. */
1924                 oh->ot_credits += 1;
1925         }
1926         /* If this is a directory, then we expect "." and ".." to be inserted
1927          * as well. The first directory block always needs to be created for
1928          * the directory, so we can use DTO_WRITE_BASE here (GDT, block bitmap,
1929          * block); there is no danger of needing a tree for the first block.
1930          */
1931         if (attr && S_ISDIR(attr->la_mode)) {
1932                 OSD_DECLARE_OP(oh, insert);
1933                 OSD_DECLARE_OP(oh, insert);
1934                 oh->ot_credits += osd_dto_credits_noquota[DTO_WRITE_BASE];
1935         }
1936
1937         if (!attr)
1938                 RETURN(0);
1939
1940         rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
1941                                    false, false, NULL, false);
1942         RETURN(rc);
1943 }
1944
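/*
 * Reader note on the credit arithmetic above: an object with a normal
 * fid reserves DTO_OBJECT_CREATE credits plus DTO_INDEX_INSERT + 1 for
 * the OI insertion; a directory additionally declares the "." and ".."
 * insertions plus DTO_WRITE_BASE credits for its first block.
 */
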
1945 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1946                              struct lu_attr *attr,
1947                              struct dt_allocation_hint *hint,
1948                              struct dt_object_format *dof,
1949                              struct thandle *th)
1950 {
1951         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
1952         struct osd_object      *obj    = osd_dt_obj(dt);
1953         struct osd_thread_info *info   = osd_oti_get(env);
1954         int result;
1955
1956         ENTRY;
1957
1958         LINVRNT(osd_invariant(obj));
1959         LASSERT(!dt_object_exists(dt));
1960         LASSERT(osd_write_locked(env, obj));
1961         LASSERT(th != NULL);
1962
1963         if (unlikely(fid_is_acct(fid)))
1964                 /* Quota files can't be created from the kernel any more,
1965                  * 'tune2fs -O quota' will take care of creating them */
1966                 RETURN(-EPERM);
1967
1968         OSD_EXEC_OP(th, create);
1969
1970         result = __osd_object_create(info, obj, attr, hint, dof, th);
1971         if (result == 0)
1972                 result = __osd_oi_insert(env, obj, fid, th);
1973
1974         LASSERT(ergo(result == 0, dt_object_exists(dt)));
1975         LASSERT(osd_invariant(obj));
1976         RETURN(result);
1977 }
1978
1979 /**
1980  * Called to destroy on-disk representation of the object
1981  *
1982  * Concurrency: must be locked
1983  */
1984 static int osd_declare_object_destroy(const struct lu_env *env,
1985                                       struct dt_object *dt,
1986                                       struct thandle *th)
1987 {
1988         struct osd_object  *obj = osd_dt_obj(dt);
1989         struct inode       *inode = obj->oo_inode;
1990         struct osd_thandle *oh;
1991         int                 rc;
1992         ENTRY;
1993
1994         oh = container_of0(th, struct osd_thandle, ot_super);
1995         LASSERT(oh->ot_handle == NULL);
1996         LASSERT(inode);
1997
1998         OSD_DECLARE_OP(oh, destroy);
1999         OSD_DECLARE_OP(oh, delete);
2000         oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_DELETE];
2001         /* XXX: So far, only normal fid needs to be inserted into the OI,
2002          *      so only normal fid needs to be removed from the OI also. */
2003         if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
2004                 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
2005                 /* Recycling an idle OI leaf may cause three additional OI
2006                  * blocks to be changed. */
2007                 oh->ot_credits += 3;
2008         }
2009
2010         /* one less inode */
2011         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
2012                                    false, true, NULL, false);
2013         if (rc)
2014                 RETURN(rc);
2015         /* data to be truncated */
2016         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh, true,
2017                                    true, NULL, false);
2018         RETURN(rc);
2019 }
2020
2021 static int osd_object_destroy(const struct lu_env *env,
2022                               struct dt_object *dt,
2023                               struct thandle *th)
2024 {
2025         const struct lu_fid    *fid = lu_object_fid(&dt->do_lu);
2026         struct osd_object      *obj = osd_dt_obj(dt);
2027         struct inode           *inode = obj->oo_inode;
2028         struct osd_device      *osd = osd_obj2dev(obj);
2029         struct osd_thandle     *oh;
2030         int                     result;
2031         ENTRY;
2032
2033         oh = container_of0(th, struct osd_thandle, ot_super);
2034         LASSERT(oh->ot_handle);
2035         LASSERT(inode);
2036         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2037
2038         if (unlikely(fid_is_acct(fid)))
2039                 RETURN(-EPERM);
2040
2041         /* Parallel control for OI scrub. In most cases there is no lock
2042          * contention, so it will not affect unlink performance. */
2043         cfs_mutex_lock(&inode->i_mutex);
2044         if (S_ISDIR(inode->i_mode)) {
2045                 LASSERT(osd_inode_unlinked(inode) ||
2046                         inode->i_nlink == 1);
2047                 cfs_spin_lock(&obj->oo_guard);
2048                 inode->i_nlink = 0;
2049                 cfs_spin_unlock(&obj->oo_guard);
2050                 inode->i_sb->s_op->dirty_inode(inode);
2051         } else {
2052                 LASSERT(osd_inode_unlinked(inode));
2053         }
2054
2055         OSD_EXEC_OP(th, destroy);
2056
2057         result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
2058         cfs_mutex_unlock(&inode->i_mutex);
2059
2060         /* XXX: add to ext3 orphan list */
2061         /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2062
2063         /* not needed in the cache anymore */
2064         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2065
2066         RETURN(0);
2067 }
2068
2069 /**
2070  * Helper function for osd_xattr_set()
2071  */
2072 static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2073                            const struct lu_buf *buf, const char *name, int fl)
2074 {
2075         struct osd_object      *obj      = osd_dt_obj(dt);
2076         struct inode           *inode    = obj->oo_inode;
2077         struct osd_thread_info *info     = osd_oti_get(env);
2078         struct dentry          *dentry   = &info->oti_child_dentry;
2079         int                     fs_flags = 0;
2080         int                     rc;
2081
2082         LASSERT(dt_object_exists(dt));
2083         LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
2084
2085         if (fl & LU_XATTR_REPLACE)
2086                 fs_flags |= XATTR_REPLACE;
2087
2088         if (fl & LU_XATTR_CREATE)
2089                 fs_flags |= XATTR_CREATE;
2090
2091         dentry->d_inode = inode;
2092         rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
2093                                    buf->lb_len, fs_flags);
2094         return rc;
2095 }
2096
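/*
 * Reader note: __osd_xattr_set() (like the other xattr helpers below)
 * has no real dentry to work with, so it borrows a per-thread dentry
 * from osd_thread_info, points d_inode at the target inode and calls
 * the generic inode_operations entry point directly.
 */
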
2097 /**
2098  * Put the fid into lustre_mdt_attrs, and then place the structure
2099  * in the inode's EA. This fid should not be altered during the
2100  * lifetime of the inode.
2101  *
2102  * \retval +ve, on success
2103  * \retval -ve, on error
2104  *
2105  * FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
2106  */
2107 static int osd_ea_fid_set(const struct lu_env *env, struct dt_object *dt,
2108                           const struct lu_fid *fid)
2109 {
2110         struct osd_thread_info  *info      = osd_oti_get(env);
2111         struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
2112
2113         lustre_lma_init(mdt_attrs, fid);
2114         lustre_lma_swab(mdt_attrs);
2115         return __osd_xattr_set(env, dt,
2116                                osd_buf_get(env, mdt_attrs, sizeof *mdt_attrs),
2117                                XATTR_NAME_LMA, LU_XATTR_CREATE);
2118
2119 }
2120
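/*
 * Reader note: the LMA EA above is written with LU_XATTR_CREATE, so a
 * second attempt to set it on the same inode fails with -EEXIST rather
 * than silently overwriting the fid; lustre_lma_swab() converts the
 * structure to its on-disk byte order before it is stored.
 */
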
2121 /**
2122  * ldiskfs supports a fid in the dirent; it is passed in dentry->d_fsdata.
2123  * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2124  * To remain compatible with the 1.8 ldiskfs driver we need to have a
2125  * magic number at the start of the fid data.
2126  * \ldiskfs_dentry_param is used only to pass the fid from osd to ldiskfs;
2127  * it is an in-memory API.
2128  */
2129 void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2130                                   const struct dt_rec *fid)
2131 {
2132         param->edp_magic = LDISKFS_LUFID_MAGIC;
2133         param->edp_len =  sizeof(struct lu_fid) + 1;
2134
2135         fid_cpu_to_be((struct lu_fid *)param->edp_data,
2136                       (struct lu_fid *)fid);
2137 }
2138
2139 /**
2140  * Try to read the fid from the inode EA into \a fid. If no fid is
2141  * stored there, an IGIF will have to be formed instead.
2142  *
2143  * \param fid object fid.
2144  *
2145  * \retval 0 on success
2146  */
2147 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2148                           __u32 ino, struct lu_fid *fid,
2149                           struct osd_inode_id *id)
2150 {
2151         struct osd_thread_info *info  = osd_oti_get(env);
2152         struct inode           *inode;
2153         ENTRY;
2154
2155         osd_id_gen(id, ino, OSD_OII_NOGEN);
2156         inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2157         if (IS_ERR(inode))
2158                 RETURN(PTR_ERR(inode));
2159
2160         iput(inode);
2161         RETURN(0);
2162 }
2163
2164 /**
2165  * OSD layer object create function for interoperability mode (b11826).
2166  * This is mostly similar to osd_object_create(); the only difference is
2167  * that the fid is also inserted into the inode EA here.
2168  *
2169  * \retval   0, on success
2170  * \retval -ve, on error
2171  */
2172 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2173                                 struct lu_attr *attr,
2174                                 struct dt_allocation_hint *hint,
2175                                 struct dt_object_format *dof,
2176                                 struct thandle *th)
2177 {
2178         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
2179         struct osd_object      *obj    = osd_dt_obj(dt);
2180         struct osd_thread_info *info   = osd_oti_get(env);
2181         int                     result;
2182
2183         ENTRY;
2184
2185         LASSERT(osd_invariant(obj));
2186         LASSERT(!dt_object_exists(dt));
2187         LASSERT(osd_write_locked(env, obj));
2188         LASSERT(th != NULL);
2189
2190         if (unlikely(fid_is_acct(fid)))
2191                 /* Quota files can't be created from the kernel any more,
2192                  * 'tune2fs -O quota' will take care of creating them */
2193                 RETURN(-EPERM);
2194
2195         OSD_EXEC_OP(th, create);
2196
2197         result = __osd_object_create(info, obj, attr, hint, dof, th);
2198         /* objects under osd root should have igif fid, so don't add fid EA */
2199         if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL)
2200                 result = osd_ea_fid_set(env, dt, fid);
2201
2202         if (result == 0)
2203                 result = __osd_oi_insert(env, obj, fid, th);
2204
2205         LASSERT(ergo(result == 0, dt_object_exists(dt)));
2206         LINVRNT(osd_invariant(obj));
2207         RETURN(result);
2208 }
2209
2210 static int osd_declare_object_ref_add(const struct lu_env *env,
2211                                       struct dt_object *dt,
2212                                       struct thandle *handle)
2213 {
2214         struct osd_thandle *oh;
2215
2216         /* it's possible that object doesn't exist yet */
2217         LASSERT(handle != NULL);
2218
2219         oh = container_of0(handle, struct osd_thandle, ot_super);
2220         LASSERT(oh->ot_handle == NULL);
2221
2222         OSD_DECLARE_OP(oh, ref_add);
2223         oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
2224
2225         return 0;
2226 }
2227
2228 /*
2229  * Concurrency: @dt is write locked.
2230  */
2231 static int osd_object_ref_add(const struct lu_env *env,
2232                               struct dt_object *dt, struct thandle *th)
2233 {
2234         struct osd_object *obj = osd_dt_obj(dt);
2235         struct inode      *inode = obj->oo_inode;
2236
2237         LINVRNT(osd_invariant(obj));
2238         LASSERT(dt_object_exists(dt));
2239         LASSERT(osd_write_locked(env, obj));
2240         LASSERT(th != NULL);
2241
2242         OSD_EXEC_OP(th, ref_add);
2243
2244         /*
2245          * DIR_NLINK feature is set for compatibility reasons if:
2246          * 1) nlinks > LDISKFS_LINK_MAX, or
2247          * 2) nlinks == 2, since this indicates i_nlink was previously 1.
2248          *
2249          * It is easier to always set this flag (rather than check and set),
2250          * since it has less overhead, and the superblock will be dirtied
2251          * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
2252          * do not actually care whether this flag is set or not.
2253          */
2254         cfs_spin_lock(&obj->oo_guard);
2255         inode->i_nlink++;
2256         if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
2257                 if (inode->i_nlink >= LDISKFS_LINK_MAX ||
2258                     inode->i_nlink == 2)
2259                         inode->i_nlink = 1;
2260         }
2261         LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2262         cfs_spin_unlock(&obj->oo_guard);
2263         inode->i_sb->s_op->dirty_inode(inode);
2264         LINVRNT(osd_invariant(obj));
2265
2266         return 0;
2267 }
2268
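/*
 * Reader note: for a directory the link count is pinned at 1 once it
 * would reach LDISKFS_LINK_MAX, and an increment that takes an already
 * pinned count from 1 to 2 is undone, so nlink == 1 keeps meaning
 * "many/unknown subdirectories".  osd_object_ref_del() below contains
 * the matching check that prevents such a directory from dropping to
 * nlink == 0.
 */
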
2269 static int osd_declare_object_ref_del(const struct lu_env *env,
2270                                       struct dt_object *dt,
2271                                       struct thandle *handle)
2272 {
2273         struct osd_thandle *oh;
2274
2275         LASSERT(dt_object_exists(dt));
2276         LASSERT(handle != NULL);
2277
2278         oh = container_of0(handle, struct osd_thandle, ot_super);
2279         LASSERT(oh->ot_handle == NULL);
2280
2281         OSD_DECLARE_OP(oh, ref_del);
2282         oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
2283
2284         return 0;
2285 }
2286
2287 /*
2288  * Concurrency: @dt is write locked.
2289  */
2290 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2291                               struct thandle *th)
2292 {
2293         struct osd_object *obj = osd_dt_obj(dt);
2294         struct inode      *inode = obj->oo_inode;
2295
2296         LINVRNT(osd_invariant(obj));
2297         LASSERT(dt_object_exists(dt));
2298         LASSERT(osd_write_locked(env, obj));
2299         LASSERT(th != NULL);
2300
2301         OSD_EXEC_OP(th, ref_del);
2302
2303         cfs_spin_lock(&obj->oo_guard);
2304         LASSERT(inode->i_nlink > 0);
2305         inode->i_nlink--;
2306         /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
2307          * then the nlink count is 1. Don't let it be set to 0 or the directory
2308          * inode will be deleted incorrectly. */
2309         if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
2310                 inode->i_nlink++;
2311         cfs_spin_unlock(&obj->oo_guard);
2312         inode->i_sb->s_op->dirty_inode(inode);
2313         LINVRNT(osd_invariant(obj));
2314
2315         return 0;
2316 }
2317
2318 /*
2319  * Get the 64-bit version for an inode.
2320  */
2321 static int osd_object_version_get(const struct lu_env *env,
2322                                   struct dt_object *dt, dt_obj_version_t *ver)
2323 {
2324         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2325
2326         CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2327                LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2328         *ver = LDISKFS_I(inode)->i_fs_version;
2329         return 0;
2330 }
2331
2332 /*
2333  * Concurrency: @dt is read locked.
2334  */
2335 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2336                          struct lu_buf *buf, const char *name,
2337                          struct lustre_capa *capa)
2338 {
2339         struct osd_object      *obj    = osd_dt_obj(dt);
2340         struct inode           *inode  = obj->oo_inode;
2341         struct osd_thread_info *info   = osd_oti_get(env);
2342         struct dentry          *dentry = &info->oti_obj_dentry;
2343
2344         /* version get is not real XATTR but uses xattr API */
2345         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2346                 /* for the version we just reuse the xattr API, but change
2347                  * an inode field instead */
2348                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2349                 osd_object_version_get(env, dt, buf->lb_buf);
2350                 return sizeof(dt_obj_version_t);
2351         }
2352
2353         LASSERT(dt_object_exists(dt));
2354         LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
2355
2356         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2357                 return -EACCES;
2358
2359         dentry->d_inode = inode;
2360         return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
2361 }
2362
2363
2364 static int osd_declare_xattr_set(const struct lu_env *env,
2365                                  struct dt_object *dt,
2366                                  const struct lu_buf *buf, const char *name,
2367                                  int fl, struct thandle *handle)
2368 {
2369         struct osd_thandle *oh;
2370
2371         LASSERT(handle != NULL);
2372
2373         oh = container_of0(handle, struct osd_thandle, ot_super);
2374         LASSERT(oh->ot_handle == NULL);
2375
2376         OSD_DECLARE_OP(oh, xattr_set);
2377         if (strcmp(name, XATTR_NAME_VERSION) == 0)
2378                 oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
2379         else
2380                 oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
2381
2382         return 0;
2383 }
2384
2385 /*
2386  * Set the 64-bit version for object
2387  */
2388 static void osd_object_version_set(const struct lu_env *env,
2389                                    struct dt_object *dt,
2390                                    dt_obj_version_t *new_version)
2391 {
2392         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2393
2394         CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
2395                *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2396
2397         LDISKFS_I(inode)->i_fs_version = *new_version;
2398         /** Version is set after all inode operations are finished,
2399          *  so we should mark it dirty here */
2400         inode->i_sb->s_op->dirty_inode(inode);
2401 }
2402
2403 /*
2404  * Concurrency: @dt is write locked.
2405  */
2406 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2407                          const struct lu_buf *buf, const char *name, int fl,
2408                          struct thandle *handle, struct lustre_capa *capa)
2409 {
2410         LASSERT(handle != NULL);
2411
2412         /* version set is not real XATTR */
2413         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2414                 /* for the version we just reuse the xattr API, but change
2415                  * an inode field instead */
2416                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2417                 osd_object_version_set(env, dt, buf->lb_buf);
2418                 return sizeof(dt_obj_version_t);
2419         }
2420
2421         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2422                 return -EACCES;
2423
2424         OSD_EXEC_OP(handle, xattr_set);
2425         return __osd_xattr_set(env, dt, buf, name, fl);
2426 }
2427
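/*
 * Illustrative sketch only (hypothetical caller): the object version is
 * not a real EA; a caller stores it through the xattr interface using
 * the reserved name XATTR_NAME_VERSION and a buffer of exactly
 * sizeof(dt_obj_version_t), which ends up in osd_object_version_set()
 * above.  "env", "dt", "th" and "capa" are assumed to exist in the
 * caller.
 */
#if 0
        dt_obj_version_t version = 1;
        struct lu_buf    vbuf = { .lb_buf = &version,
                                  .lb_len = sizeof(version) };

        /* "dt", "th" and "capa" come from the hypothetical caller */
        dt->do_ops->do_xattr_set(env, dt, &vbuf, XATTR_NAME_VERSION, 0,
                                 th, capa);
#endif
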
2428 /*
2429  * Concurrency: @dt is read locked.
2430  */
2431 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
2432                           struct lu_buf *buf, struct lustre_capa *capa)
2433 {
2434         struct osd_object      *obj    = osd_dt_obj(dt);
2435         struct inode           *inode  = obj->oo_inode;
2436         struct osd_thread_info *info   = osd_oti_get(env);
2437         struct dentry          *dentry = &info->oti_obj_dentry;
2438
2439         LASSERT(dt_object_exists(dt));
2440         LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
2441         LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2442
2443         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2444                 return -EACCES;
2445
2446         dentry->d_inode = inode;
2447         return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
2448 }
2449
2450 static int osd_declare_xattr_del(const struct lu_env *env,
2451                                  struct dt_object *dt, const char *name,
2452                                  struct thandle *handle)
2453 {
2454         struct osd_thandle *oh;
2455
2456         LASSERT(dt_object_exists(dt));
2457         LASSERT(handle != NULL);
2458
2459         oh = container_of0(handle, struct osd_thandle, ot_super);
2460         LASSERT(oh->ot_handle == NULL);
2461
2462         OSD_DECLARE_OP(oh, xattr_set);
2463         oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
2464
2465         return 0;
2466 }
2467
2468 /*
2469  * Concurrency: @dt is write locked.
2470  */
2471 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
2472                          const char *name, struct thandle *handle,
2473                          struct lustre_capa *capa)
2474 {
2475         struct osd_object      *obj    = osd_dt_obj(dt);
2476         struct inode           *inode  = obj->oo_inode;
2477         struct osd_thread_info *info   = osd_oti_get(env);
2478         struct dentry          *dentry = &info->oti_obj_dentry;
2479         int                     rc;
2480
2481         LASSERT(dt_object_exists(dt));
2482         LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
2483         LASSERT(osd_write_locked(env, obj));
2484         LASSERT(handle != NULL);
2485
2486         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2487                 return -EACCES;
2488
2489         OSD_EXEC_OP(handle, xattr_set);
2490
2491         dentry->d_inode = inode;
2492         rc = inode->i_op->removexattr(dentry, name);
2493         return rc;
2494 }
2495
2496 static struct obd_capa *osd_capa_get(const struct lu_env *env,
2497                                      struct dt_object *dt,
2498                                      struct lustre_capa *old,
2499                                      __u64 opc)
2500 {
2501         struct osd_thread_info *info = osd_oti_get(env);
2502         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2503         struct osd_object *obj = osd_dt_obj(dt);
2504         struct osd_device *dev = osd_obj2dev(obj);
2505         struct lustre_capa_key *key = &info->oti_capa_key;
2506         struct lustre_capa *capa = &info->oti_capa;
2507         struct obd_capa *oc;
2508         struct md_capainfo *ci;
2509         int rc;
2510         ENTRY;
2511
2512         if (!dev->od_fl_capa)
2513                 RETURN(ERR_PTR(-ENOENT));
2514
2515         LASSERT(dt_object_exists(dt));
2516         LINVRNT(osd_invariant(obj));
2517
2518         /* renewal sanity check */
2519         if (old && osd_object_auth(env, dt, old, opc))
2520                 RETURN(ERR_PTR(-EACCES));
2521
2522         ci = md_capainfo(env);
2523         if (unlikely(!ci))
2524                 RETURN(ERR_PTR(-ENOENT));
2525
2526         switch (ci->mc_auth) {
2527         case LC_ID_NONE:
2528                 RETURN(NULL);
2529         case LC_ID_PLAIN:
2530                 capa->lc_uid = obj->oo_inode->i_uid;
2531                 capa->lc_gid = obj->oo_inode->i_gid;
2532                 capa->lc_flags = LC_ID_PLAIN;
2533                 break;
2534         case LC_ID_CONVERT: {
2535                 __u32 d[4], s[4];
2536
2537                 s[0] = obj->oo_inode->i_uid;
2538                 cfs_get_random_bytes(&(s[1]), sizeof(__u32));
2539                 s[2] = obj->oo_inode->i_gid;
2540                 cfs_get_random_bytes(&(s[3]), sizeof(__u32));
2541                 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
2542                 if (unlikely(rc))
2543                         RETURN(ERR_PTR(rc));
2544
2545                 capa->lc_uid   = ((__u64)d[1] << 32) | d[0];
2546                 capa->lc_gid   = ((__u64)d[3] << 32) | d[2];
2547                 capa->lc_flags = LC_ID_CONVERT;
2548                 break;
2549         }
2550         default:
2551                 RETURN(ERR_PTR(-EINVAL));
2552         }
2553
2554         capa->lc_fid = *fid;
2555         capa->lc_opc = opc;
2556         capa->lc_flags |= dev->od_capa_alg << 24;
2557         capa->lc_timeout = dev->od_capa_timeout;
2558         capa->lc_expiry = 0;
2559
2560         oc = capa_lookup(dev->od_capa_hash, capa, 1);
2561         if (oc) {
2562                 LASSERT(!capa_is_expired(oc));
2563                 RETURN(oc);
2564         }
2565
2566         cfs_spin_lock(&capa_lock);
2567         *key = dev->od_capa_keys[1];
2568         cfs_spin_unlock(&capa_lock);
2569
2570         capa->lc_keyid = key->lk_keyid;
2571         capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
2572
2573         rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
2574         if (rc) {
2575                 DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
2576                 RETURN(ERR_PTR(rc));
2577         }
2578
2579         oc = capa_add(dev->od_capa_hash, capa);
2580         RETURN(oc);
2581 }
2582
2583 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
2584 {
2585         struct osd_object       *obj    = osd_dt_obj(dt);
2586         struct inode            *inode  = obj->oo_inode;
2587         struct osd_thread_info  *info   = osd_oti_get(env);
2588         struct dentry           *dentry = &info->oti_obj_dentry;
2589         struct file             *file   = &info->oti_file;
2590         int                     rc;
2591
2592         ENTRY;
2593
2594         dentry->d_inode = inode;
2595         file->f_dentry = dentry;
2596         file->f_mapping = inode->i_mapping;
2597         file->f_op = inode->i_fop;
2598         mutex_lock(&inode->i_mutex);
2599         rc = file->f_op->fsync(file, dentry, 0);
2600         mutex_unlock(&inode->i_mutex);
2601         RETURN(rc);
2602 }
2603
2604 static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
2605                         void **data)
2606 {
2607         struct osd_object *obj = osd_dt_obj(dt);
2608         ENTRY;
2609
2610         *data = (void *)obj->oo_inode;
2611         RETURN(0);
2612 }
2613
2614 /*
2615  * Index operations.
2616  */
2617
2618 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
2619                            const struct dt_index_features *feat)
2620 {
2621         struct iam_descr *descr;
2622
2623         if (osd_object_is_root(o))
2624                 return feat == &dt_directory_features;
2625
2626         LASSERT(o->oo_dir != NULL);
2627
2628         descr = o->oo_dir->od_container.ic_descr;
2629         if (feat == &dt_directory_features) {
2630                 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
2631                         return 1;
2632                 else
2633                         return 0;
2634         } else {
2635                 return
2636                         feat->dif_keysize_min <= descr->id_key_size &&
2637                         descr->id_key_size <= feat->dif_keysize_max &&
2638                         feat->dif_recsize_min <= descr->id_rec_size &&
2639                         descr->id_rec_size <= feat->dif_recsize_max &&
2640                         !(feat->dif_flags & (DT_IND_VARKEY |
2641                                              DT_IND_VARREC | DT_IND_NONUNQ)) &&
2642                         ergo(feat->dif_flags & DT_IND_UPDATE,
2643                              1 /* XXX check that object (and file system) is
2644                                 * writable */);
2645         }
2646 }
2647
2648 static int osd_iam_container_init(const struct lu_env *env,
2649                                   struct osd_object *obj,
2650                                   struct osd_directory *dir)
2651 {
2652         struct iam_container *bag = &dir->od_container;
2653         int result;
2654
2655         result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
2656         if (result != 0)
2657                 return result;
2658
2659         result = iam_container_setup(bag);
2660         if (result == 0)
2661                 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
2662         else
2663                 iam_container_fini(bag);
2664
2665         return result;
2666 }
2667
2668
2669 /*
2670  * Concurrency: no external locking is necessary.
2671  */
2672 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
2673                          const struct dt_index_features *feat)
2674 {
2675         int                      result;
2676         int                      skip_iam = 0;
2677         struct osd_object       *obj = osd_dt_obj(dt);
2678         struct osd_device       *osd = osd_obj2dev(obj);
2679
2680         LINVRNT(osd_invariant(obj));
2681         LASSERT(dt_object_exists(dt));
2682
2683         if (osd_object_is_root(obj)) {
2684                 dt->do_index_ops = &osd_index_ea_ops;
2685                 result = 0;
2686         } else if (feat == &dt_directory_features && osd->od_iop_mode) {
2687                 dt->do_index_ops = &osd_index_ea_ops;
2688                 if (S_ISDIR(obj->oo_inode->i_mode))
2689                         result = 0;
2690                 else
2691                         result = -ENOTDIR;
2692                 skip_iam = 1;
2693         } else if (unlikely(feat == &dt_otable_features)) {
2694                 dt->do_index_ops = &osd_otable_ops;
2695                 return 0;
2696         } else if (feat == &dt_acct_features) {
2697                 dt->do_index_ops = &osd_acct_index_ops;
2698                 result = 0;
2699                 skip_iam = 1;
2700         } else if (!osd_has_index(obj)) {
2701                 struct osd_directory *dir;
2702
2703                 OBD_ALLOC_PTR(dir);
2704                 if (dir != NULL) {
2705
2706                         cfs_spin_lock(&obj->oo_guard);
2707                         if (obj->oo_dir == NULL)
2708                                 obj->oo_dir = dir;
2709                         else
2710                                 /*
2711                                  * Concurrent thread allocated container data.
2712                                  */
2713                                 OBD_FREE_PTR(dir);
2714                         cfs_spin_unlock(&obj->oo_guard);
2715                         /*
2716                          * Now, that we have container data, serialize its
2717                          * initialization.
2718                          */
2719                         cfs_down_write(&obj->oo_ext_idx_sem);
2720                         /*
2721                          * recheck under lock.
2722                          */
2723                         if (!osd_has_index(obj))
2724                                 result = osd_iam_container_init(env, obj, dir);
2725                         else
2726                                 result = 0;
2727                         cfs_up_write(&obj->oo_ext_idx_sem);
2728                 } else {
2729                         result = -ENOMEM;
2730                 }
2731         } else {
2732                 result = 0;
2733         }
2734
2735         if (result == 0 && skip_iam == 0) {
2736                 if (!osd_iam_index_probe(env, obj, feat))
2737                         result = -ENOTDIR;
2738         }
2739         LINVRNT(osd_invariant(obj));
2740
2741         if (is_quota_glb_feat(feat))
2742                 result = osd_quota_migration(env, dt, feat);
2743
2744         return result;
2745 }
2746
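/*
 * Reader note (summary of osd_index_try() above): the root object and,
 * in interoperability mode, plain directories are served by the
 * EA-based index ops; the OI scrub "otable" and the accounting objects
 * get their dedicated ops; everything else is backed by an IAM
 * container that is lazily allocated and initialized under
 * oo_ext_idx_sem on first use.
 */
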
2747 static int osd_otable_it_attr_get(const struct lu_env *env,
2748                                  struct dt_object *dt,
2749                                  struct lu_attr *attr,
2750                                  struct lustre_capa *capa)
2751 {
2752         attr->la_valid = 0;
2753         return 0;
2754 }
2755
2756 static const struct dt_object_operations osd_obj_ops = {
2757         .do_read_lock         = osd_object_read_lock,
2758         .do_write_lock        = osd_object_write_lock,
2759         .do_read_unlock       = osd_object_read_unlock,
2760         .do_write_unlock      = osd_object_write_unlock,
2761         .do_write_locked      = osd_object_write_locked,
2762         .do_attr_get          = osd_attr_get,
2763         .do_declare_attr_set  = osd_declare_attr_set,
2764         .do_attr_set          = osd_attr_set,
2765         .do_ah_init           = osd_ah_init,
2766         .do_declare_create    = osd_declare_object_create,
2767         .do_create            = osd_object_create,
2768         .do_declare_destroy   = osd_declare_object_destroy,
2769         .do_destroy           = osd_object_destroy,
2770         .do_index_try         = osd_index_try,
2771         .do_declare_ref_add   = osd_declare_object_ref_add,
2772         .do_ref_add           = osd_object_ref_add,
2773         .do_declare_ref_del   = osd_declare_object_ref_del,
2774         .do_ref_del           = osd_object_ref_del,
2775         .do_xattr_get         = osd_xattr_get,
2776         .do_declare_xattr_set = osd_declare_xattr_set,
2777         .do_xattr_set         = osd_xattr_set,
2778         .do_declare_xattr_del = osd_declare_xattr_del,
2779         .do_xattr_del         = osd_xattr_del,
2780         .do_xattr_list        = osd_xattr_list,
2781         .do_capa_get          = osd_capa_get,
2782         .do_object_sync       = osd_object_sync,
2783         .do_data_get          = osd_data_get,
2784 };
2785
2786 /**
2787  * dt_object_operations for interoperability mode
2788  * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
2789  */
2790 static const struct dt_object_operations osd_obj_ea_ops = {
2791         .do_read_lock         = osd_object_read_lock,
2792         .do_write_lock        = osd_object_write_lock,
2793         .do_read_unlock       = osd_object_read_unlock,
2794         .do_write_unlock      = osd_object_write_unlock,
2795         .do_write_locked      = osd_object_write_locked,
2796         .do_attr_get          = osd_attr_get,
2797         .do_declare_attr_set  = osd_declare_attr_set,
2798         .do_attr_set          = osd_attr_set,
2799         .do_ah_init           = osd_ah_init,
2800         .do_declare_create    = osd_declare_object_create,
2801         .do_create            = osd_object_ea_create,
2802         .do_declare_destroy   = osd_declare_object_destroy,
2803         .do_destroy           = osd_object_destroy,
2804         .do_index_try         = osd_index_try,
2805         .do_declare_ref_add   = osd_declare_object_ref_add,
2806         .do_ref_add           = osd_object_ref_add,
2807         .do_declare_ref_del   = osd_declare_object_ref_del,
2808         .do_ref_del           = osd_object_ref_del,
2809         .do_xattr_get         = osd_xattr_get,
2810         .do_declare_xattr_set = osd_declare_xattr_set,
2811         .do_xattr_set         = osd_xattr_set,
2812         .do_declare_xattr_del = osd_declare_xattr_del,
2813         .do_xattr_del         = osd_xattr_del,
2814         .do_xattr_list        = osd_xattr_list,
2815         .do_capa_get          = osd_capa_get,
2816         .do_object_sync       = osd_object_sync,
2817         .do_data_get          = osd_data_get,
2818 };
2819
2820 static const struct dt_object_operations osd_obj_otable_it_ops = {
2821         .do_attr_get    = osd_otable_it_attr_get,
2822         .do_index_try   = osd_index_try,
2823 };
2824
2825 static int osd_index_declare_iam_delete(const struct lu_env *env,
2826                                         struct dt_object *dt,
2827                                         const struct dt_key *key,
2828                                         struct thandle *handle)
2829 {
2830         struct osd_thandle    *oh;
2831
2832         oh = container_of0(handle, struct osd_thandle, ot_super);
2833         LASSERT(oh->ot_handle == NULL);
2834
2835         OSD_DECLARE_OP(oh, delete);
2836         oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
2837
2838         return 0;
2839 }
2840
2841 /**
2842  *      delete a (key, value) pair from index \a dt specified by \a key
2843  *
2844  *      \param  dt      osd index object
2845  *      \param  key     key for index
2846  *      \param  capa    capability for the operation
2847  *      \param  handle  transaction handler
2848  *
2849  *      \retval  0  success
2850  *      \retval -ve   failure
2851  */
2852
2853 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
2854                                 const struct dt_key *key,
2855                                 struct thandle *handle,
2856                                 struct lustre_capa *capa)
2857 {
2858         struct osd_thread_info *oti = osd_oti_get(env);
2859         struct osd_object      *obj = osd_dt_obj(dt);
2860         struct osd_thandle     *oh;
2861         struct iam_path_descr  *ipd;
2862         struct iam_container   *bag = &obj->oo_dir->od_container;
2863         int                     rc;
2864
2865         ENTRY;
2866
2867         LINVRNT(osd_invariant(obj));
2868         LASSERT(dt_object_exists(dt));
2869         LASSERT(bag->ic_object == obj->oo_inode);
2870         LASSERT(handle != NULL);
2871
2872         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2873                 RETURN(-EACCES);
2874
2875         OSD_EXEC_OP(handle, delete);
2876
2877         ipd = osd_idx_ipd_get(env, bag);
2878         if (unlikely(ipd == NULL))
2879                 RETURN(-ENOMEM);
2880
2881         oh = container_of0(handle, struct osd_thandle, ot_super);
2882         LASSERT(oh->ot_handle != NULL);
2883         LASSERT(oh->ot_handle->h_transaction != NULL);
2884
2885         if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
2886                 /* swab quota uid/gid provided by caller */
2887                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
2888                 key = (const struct dt_key *)&oti->oti_quota_id;
2889         }
2890
2891         rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
2892         osd_ipd_put(env, bag, ipd);
2893         LINVRNT(osd_invariant(obj));
2894         RETURN(rc);
2895 }
2896
2897 static int osd_index_declare_ea_delete(const struct lu_env *env,
2898                                        struct dt_object *dt,
2899                                        const struct dt_key *key,
2900                                        struct thandle *handle)
2901 {
2902         struct osd_thandle *oh;
2903         struct inode       *inode;
2904         int                 rc;
2905         ENTRY;
2906
2907         LASSERT(dt_object_exists(dt));
2908         LASSERT(handle != NULL);
2909
2910         oh = container_of0(handle, struct osd_thandle, ot_super);
2911         LASSERT(oh->ot_handle == NULL);
2912
2913         OSD_DECLARE_OP(oh, delete);
2914         oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
2915
2916         inode = osd_dt_obj(dt)->oo_inode;
2917         LASSERT(inode);
2918
2919         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
2920                                    true, true, NULL, false);
2921         RETURN(rc);
2922 }
2923
2924 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
2925                                           struct dt_rec *fid)
2926 {
2927         struct osd_fid_pack *rec;
2928         int                  rc = -ENODATA;
2929
2930         if (de->file_type & LDISKFS_DIRENT_LUFID) {
2931                 rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
2932                 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
2933         }
2934         RETURN(rc);
2935 }
2936
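/*
 * Reader note: when LDISKFS_DIRENT_LUFID is set in the dirent file_type,
 * the packed fid is stored in the same directory entry immediately after
 * the name (de->name + de->name_len + 1), which is where the helper
 * above reads it from.
 */
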
2937 /**
2938  * Index delete function for interoperability mode (b11826).
2939  * It will remove the directory entry added by osd_index_ea_insert().
2940  * This entry is needed to maintain name->fid mapping.
2941  *
2942  * \param key   key, i.e. the name of the directory entry to be deleted
2943  *
2944  * \retval   0, on success
2945  * \retval -ve, on error
2946  */
2947 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
2948                                const struct dt_key *key,
2949                                struct thandle *handle,
2950                                struct lustre_capa *capa)
2951 {
2952         struct osd_object          *obj    = osd_dt_obj(dt);
2953         struct inode               *dir    = obj->oo_inode;
2954         struct dentry              *dentry;
2955         struct osd_thandle         *oh;
2956         struct ldiskfs_dir_entry_2 *de;
2957         struct buffer_head         *bh;
2958         struct htree_lock          *hlock = NULL;
2959         int                         rc;
2960
2961         ENTRY;
2962
2963         LINVRNT(osd_invariant(obj));
2964         LASSERT(dt_object_exists(dt));
2965         LASSERT(handle != NULL);
2966
2967         OSD_EXEC_OP(handle, delete);
2968
2969         oh = container_of(handle, struct osd_thandle, ot_super);
2970         LASSERT(oh->ot_handle != NULL);
2971         LASSERT(oh->ot_handle->h_transaction != NULL);
2972
2973         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2974                 RETURN(-EACCES);
2975
2976         dentry = osd_child_dentry_get(env, obj,
2977                                       (char *)key, strlen((char *)key));
2978
2979         if (obj->oo_hl_head != NULL) {
2980                 hlock = osd_oti_get(env)->oti_hlock;
2981                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
2982                                    dir, LDISKFS_HLOCK_DEL);
2983         } else {
2984                 cfs_down_write(&obj->oo_ext_idx_sem);
2985         }
2986
2987         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
2988         if (bh) {
2989                 rc = ldiskfs_delete_entry(oh->ot_handle,
2990                                           dir, de, bh);
2991                 brelse(bh);
2992         } else {
2993                 rc = -ENOENT;
2994         }
2995         if (hlock != NULL)
2996                 ldiskfs_htree_unlock(hlock);
2997         else
2998                 cfs_up_write(&obj->oo_ext_idx_sem);
2999
3000         LASSERT(osd_invariant(obj));
3001         RETURN(rc);
3002 }
3003
3004 /**
3005  *      Lookup index for \a key and copy record to \a rec.
3006  *
3007  *      \param  dt      osd index object
3008  *      \param  key     key for index
3009  *      \param  rec     record reference
3010  *
3011  *      \retval  +ve  success: exact match
3012  *      \retval  0    return record with key not greater than \a key
3013  *      \retval -ve   failure
3014  */
3015 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
3016                                 struct dt_rec *rec, const struct dt_key *key,
3017                                 struct lustre_capa *capa)
3018 {
3019         struct osd_object      *obj = osd_dt_obj(dt);
3020         struct iam_path_descr  *ipd;
3021         struct iam_container   *bag = &obj->oo_dir->od_container;
3022         struct osd_thread_info *oti = osd_oti_get(env);
3023         struct iam_iterator    *it = &oti->oti_idx_it;
3024         struct iam_rec         *iam_rec;
3025         int                     rc;
3026
3027         ENTRY;
3028
3029         LASSERT(osd_invariant(obj));
3030         LASSERT(dt_object_exists(dt));
3031         LASSERT(bag->ic_object == obj->oo_inode);
3032
3033         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
3034                 RETURN(-EACCES);
3035
3036         ipd = osd_idx_ipd_get(env, bag);
3037         if (unlikely(ipd == NULL))
3038                 RETURN(-ENOMEM);
3039
3040         /* got ipd now we can start iterator. */
3041         iam_it_init(it, bag, 0, ipd);
3042
3043         if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3044                 /* swab quota uid/gid provided by caller */
3045                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3046                 key = (const struct dt_key *)&oti->oti_quota_id;
3047         }
3048
3049         rc = iam_it_get(it, (struct iam_key *)key);
3050         if (rc >= 0) {
3051                 if (S_ISDIR(obj->oo_inode->i_mode))
3052                         iam_rec = (struct iam_rec *)oti->oti_ldp;
3053                 else
3054                         iam_rec = (struct iam_rec *) rec;
3055
3056                 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
3057
3058                 if (S_ISDIR(obj->oo_inode->i_mode))
3059                         osd_fid_unpack((struct lu_fid *) rec,
3060                                        (struct osd_fid_pack *)iam_rec);
3061                 else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
3062                         osd_quota_unpack(obj, rec);
3063         }
3064
3065         iam_it_put(it);
3066         iam_it_fini(it);
3067         osd_ipd_put(env, bag, ipd);
3068
3069         LINVRNT(osd_invariant(obj));
3070
3071         RETURN(rc);
3072 }
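
/*
 * Editorial usage sketch (hypothetical caller, not from the original code):
 * how the +ve/0/-ve convention of osd_index_iam_lookup() is typically
 * consumed through the generic dt index interface.  "dir", "name" and "fid"
 * are illustrative variables, and the capa argument is assumed to be NULL.
 *
 *	rc = dir->do_index_ops->dio_lookup(env, dir, (struct dt_rec *)fid,
 *					   (const struct dt_key *)name, NULL);
 *
 * rc > 0 means an exact match and *fid is valid, rc == 0 means the record
 * with the closest key not greater than "name" was copied, and rc < 0 is an
 * error such as -EACCES or -ENOMEM.
 */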
3073
3074 static int osd_index_declare_iam_insert(const struct lu_env *env,
3075                                         struct dt_object *dt,
3076                                         const struct dt_rec *rec,
3077                                         const struct dt_key *key,
3078                                         struct thandle *handle)
3079 {
3080         struct osd_thandle *oh;
3081
3082         LASSERT(dt_object_exists(dt));
3083         LASSERT(handle != NULL);
3084
3085         oh = container_of0(handle, struct osd_thandle, ot_super);
3086         LASSERT(oh->ot_handle == NULL);
3087
3088         OSD_DECLARE_OP(oh, insert);
3089         oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
3090
3091         return 0;
3092 }
3093
3094 /**
3095  *      Inserts (key, value) pair in \a dt index object.
3096  *
3097  *      \param  dt      osd index object
3098  *      \param  key     key for index
3099  *      \param  rec     record reference
3100  *      \param  th      transaction handler
3101  *
3102  *      \retval  0  success
3103  *      \retval -ve failure
3104  */
3105 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
3106                                 const struct dt_rec *rec,
3107                                 const struct dt_key *key, struct thandle *th,
3108                                 struct lustre_capa *capa, int ignore_quota)
3109 {
3110         struct osd_object     *obj = osd_dt_obj(dt);
3111         struct iam_path_descr *ipd;
3112         struct osd_thandle    *oh;
3113         struct iam_container  *bag = &obj->oo_dir->od_container;
3114         struct osd_thread_info *oti = osd_oti_get(env);
3115         struct iam_rec         *iam_rec;
3116         int                     rc;
3117
3118         ENTRY;
3119
3120         LINVRNT(osd_invariant(obj));
3121         LASSERT(dt_object_exists(dt));
3122         LASSERT(bag->ic_object == obj->oo_inode);
3123         LASSERT(th != NULL);
3124
3125         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3126                 RETURN(-EACCES);
3127
3128         OSD_EXEC_OP(th, insert);
3129
3130         ipd = osd_idx_ipd_get(env, bag);
3131         if (unlikely(ipd == NULL))
3132                 RETURN(-ENOMEM);
3133
3134         oh = container_of0(th, struct osd_thandle, ot_super);
3135         LASSERT(oh->ot_handle != NULL);
3136         LASSERT(oh->ot_handle->h_transaction != NULL);
3137         if (S_ISDIR(obj->oo_inode->i_mode)) {
3138                 iam_rec = (struct iam_rec *)oti->oti_ldp;
3139                 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
3140         } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3141                 /* pack quota uid/gid */
3142                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3143                 key = (const struct dt_key *)&oti->oti_quota_id;
3144                 /* pack quota record */
3145                 rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
3146                 iam_rec = (struct iam_rec *)rec;
3147         } else {
3148                 iam_rec = (struct iam_rec *)rec;
3149         }
3150
3151         rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
3152                         iam_rec, ipd);
3153         osd_ipd_put(env, bag, ipd);
3154         LINVRNT(osd_invariant(obj));
3155         RETURN(rc);
3156 }
3157
3158 /**
3159  * Calls ldiskfs_add_entry() to add a directory entry
3160  * into the directory. This is required for
3161  * interoperability mode (b11826).
3162  *
3163  * \retval   0, on success
3164  * \retval -ve, on error
3165  */
3166 static int __osd_ea_add_rec(struct osd_thread_info *info,
3167                             struct osd_object *pobj, struct inode  *cinode,
3168                             const char *name, const struct dt_rec *fid,
3169                             struct htree_lock *hlock, struct thandle *th)
3170 {
3171         struct ldiskfs_dentry_param *ldp;
3172         struct dentry               *child;
3173         struct osd_thandle          *oth;
3174         int                          rc;
3175
3176         oth = container_of(th, struct osd_thandle, ot_super);
3177         LASSERT(oth->ot_handle != NULL);
3178         LASSERT(oth->ot_handle->h_transaction != NULL);
3179
3180         child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
3181
3182         /* XXX: remove fid_is_igif() check here.
3183          * IGIF check is just to handle insertion of .. when it is 'ROOT',
3184          * it is IGIF now but needs FID in dir entry as well for readdir
3185          * to work.
3186          * LU-838 should fix that and remove fid_is_igif() check */
3187         if (fid_is_igif((struct lu_fid *)fid) ||
3188             fid_is_norm((struct lu_fid *)fid)) {
3189                 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3190                 osd_get_ldiskfs_dirent_param(ldp, fid);
3191                 child->d_fsdata = (void *)ldp;
3192         } else {
3193                 child->d_fsdata = NULL;
3194         }
3195         rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
3196
3197         RETURN(rc);
3198 }
3199
3200 /**
3201  * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
3202  * into the directory. Also sets flags in the osd object to
3203  * indicate dot and dotdot are created. This is required for
3204  * interoperability mode (b11826)
3205  *
3206  * \param dir   directory for dot and dotdot fixup.
3207  * \param obj   child object for linking
3208  *
3209  * \retval   0, on success
3210  * \retval -ve, on error
3211  */
3212 static int osd_add_dot_dotdot(struct osd_thread_info *info,
3213                               struct osd_object *dir,
3214                               struct inode  *parent_dir, const char *name,
3215                               const struct dt_rec *dot_fid,
3216                               const struct dt_rec *dot_dot_fid,
3217                               struct thandle *th)
3218 {
3219         struct inode                *inode = dir->oo_inode;
3220         struct ldiskfs_dentry_param *dot_ldp;
3221         struct ldiskfs_dentry_param *dot_dot_ldp;
3222         struct osd_thandle          *oth;
3223         int result = 0;
3224
3225         oth = container_of(th, struct osd_thandle, ot_super);
3226         LASSERT(oth->ot_handle->h_transaction != NULL);
3227         LASSERT(S_ISDIR(dir->oo_inode->i_mode));
3228
3229         if (strcmp(name, dot) == 0) {
3230                 if (dir->oo_compat_dot_created) {
3231                         result = -EEXIST;
3232                 } else {
3233                         LASSERT(inode == parent_dir);
3234                         dir->oo_compat_dot_created = 1;
3235                         result = 0;
3236                 }
3237         } else if (strcmp(name, dotdot) == 0) {
3238                 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3239                 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
3240
3241                 if (!dir->oo_compat_dot_created)
3242                         return -EINVAL;
3243                 if (!fid_is_igif((struct lu_fid *)dot_fid)) {
3244                         osd_get_ldiskfs_dirent_param(dot_ldp, dot_fid);
3245                         osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3246                 } else {
3247                         dot_ldp = NULL;
3248                         dot_dot_ldp = NULL;
3249                 }
3250                 /* in case of rename, dotdot is already created */
3251                 if (dir->oo_compat_dotdot_created) {
3252                         return __osd_ea_add_rec(info, dir, parent_dir, name,
3253                                                 dot_dot_fid, NULL, th);
3254                 }
3255
3256                 result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3257                                                 inode, dot_ldp, dot_dot_ldp);
3258                 if (result == 0)
3259                        dir->oo_compat_dotdot_created = 1;
3260         }
3261
3262         return result;
3263 }
3264
3265
3266 /**
3267  * Calls the appropriate osd_add* function and returns the
3268  * value returned by that function.
3269  */
3270 static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
3271                           struct inode *cinode, const char *name,
3272                           const struct dt_rec *fid, struct thandle *th)
3273 {
3274         struct osd_thread_info *info   = osd_oti_get(env);
3275         struct htree_lock      *hlock;
3276         int                     rc;
3277
3278         hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
3279
3280         if (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' &&
3281                                                    name[2] == '\0'))) {
3282                 if (hlock != NULL) {
3283                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3284                                            pobj->oo_inode, 0);
3285                 } else {
3286                         cfs_down_write(&pobj->oo_ext_idx_sem);
3287                 }
3288                 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
3289                      (struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
3290                                         fid, th);
3291         } else {
3292                 if (hlock != NULL) {
3293                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3294                                            pobj->oo_inode, LDISKFS_HLOCK_ADD);
3295                 } else {
3296                         cfs_down_write(&pobj->oo_ext_idx_sem);
3297                 }
3298
3299                 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
3300                                       hlock, th);
3301         }
3302         if (hlock != NULL)
3303                 ldiskfs_htree_unlock(hlock);
3304         else
3305                 cfs_up_write(&pobj->oo_ext_idx_sem);
3306
3307         return rc;
3308 }
3309
3310 static int
3311 osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
3312                       struct osd_idmap_cache *oic)
3313 {
3314         struct osd_scrub    *scrub = &dev->od_scrub;
3315         struct lu_fid       *fid   = &oic->oic_fid;
3316         struct osd_inode_id *id    = &oti->oti_id;
3317         int                  once  = 0;
3318         int                  rc;
3319         ENTRY;
3320
3321         if (!fid_is_norm(fid) && !fid_is_igif(fid))
3322                 RETURN(0);
3323
3324 again:
3325         rc = osd_oi_lookup(oti, dev, fid, id);
3326         if (rc != 0 && rc != -ENOENT)
3327                 RETURN(rc);
3328
3329         if (rc == 0 && osd_id_eq(id, &oic->oic_lid))
3330                 RETURN(0);
3331
3332         if (thread_is_running(&scrub->os_thread)) {
3333                 rc = osd_oii_insert(dev, oic, rc == -ENOENT);
3334                 /* There is a race between osd_oi_lookup() and the OI scrub:
3335                  * the scrub may have finished just after osd_oi_lookup()
3336                  * failed. In that case it is unnecessary to trigger the OI
3337                  * scrub again; just retry osd_oi_lookup(). */
3338                 if (unlikely(rc == -EAGAIN))
3339                         goto again;
3340
3341                 RETURN(rc);
3342         }
3343
3344         if (!dev->od_noscrub && ++once == 1) {
3345                 CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
3346                        PFID(fid));
3347                 rc = osd_scrub_start(dev);
3348                 LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC for "DFID
3349                                ", rc = %d [2]\n",
3350                                LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
3351                                PFID(fid), rc);
3352                 if (rc == 0)
3353                         goto again;
3354         }
3355
3356         RETURN(0);
3357 }
3358
3359 /**
3360  * Calls ->lookup() to find the dentry. From the dentry, gets the inode
3361  * and reads the inode's EA to get the fid. This is required for
3362  * interoperability mode (b11826).
3363  *
3364  * \retval   0, on success
3365  * \retval -ve, on error
3366  */
3367 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
3368                              struct dt_rec *rec, const struct dt_key *key)
3369 {
3370         struct inode               *dir    = obj->oo_inode;
3371         struct dentry              *dentry;
3372         struct ldiskfs_dir_entry_2 *de;
3373         struct buffer_head         *bh;
3374         struct lu_fid              *fid = (struct lu_fid *) rec;
3375         struct htree_lock          *hlock = NULL;
3376         int                         ino;
3377         int                         rc;
3378
3379         LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
3380
3381         dentry = osd_child_dentry_get(env, obj,
3382                                       (char *)key, strlen((char *)key));
3383
3384         if (obj->oo_hl_head != NULL) {
3385                 hlock = osd_oti_get(env)->oti_hlock;
3386                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3387                                    dir, LDISKFS_HLOCK_LOOKUP);
3388         } else {
3389                 cfs_down_read(&obj->oo_ext_idx_sem);
3390         }
3391
3392         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
3393         if (bh) {
3394                 struct osd_thread_info *oti = osd_oti_get(env);
3395                 struct osd_idmap_cache *oic = &oti->oti_cache;
3396                 struct osd_device *dev = osd_obj2dev(obj);
3397                 struct osd_scrub *scrub = &dev->od_scrub;
3398                 struct scrub_file *sf = &scrub->os_file;
3399
3400                 ino = le32_to_cpu(de->inode);
3401                 rc = osd_get_fid_from_dentry(de, rec);
3402
3403                 /* done with de, release bh */
3404                 brelse(bh);
3405                 if (rc != 0)
3406                         rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
3407                 else
3408                         osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
3409                 if (rc != 0) {
3410                         fid_zero(&oic->oic_fid);
3411                         GOTO(out, rc);
3412                 }
3413
3414                 oic->oic_fid = *fid;
3415                 if ((scrub->os_pos_current <= ino) &&
3416                     (sf->sf_flags & SF_INCONSISTENT ||
3417                      ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
3418                                       sf->sf_oi_bitmap)))
3419                         osd_consistency_check(oti, dev, oic);
3420         } else {
3421                 rc = -ENOENT;
3422         }
3423
3424         GOTO(out, rc);
3425
3426 out:
3427         if (hlock != NULL)
3428                 ldiskfs_htree_unlock(hlock);
3429         else
3430                 cfs_up_read(&obj->oo_ext_idx_sem);
3431         return rc;
3432 }
3433
3434 /**
3435  * Find the osd object for given fid.
3436  *
3437  * \param fid fid of the osd object to find
3438  *
3439  * \retval osd_object pointer on success
3440  * \retval ERR_PTR(-ve) on error
3441  */
3442 struct osd_object *osd_object_find(const struct lu_env *env,
3443                                    struct dt_object *dt,
3444                                    const struct lu_fid *fid)
3445 {
3446         struct lu_device  *ludev = dt->do_lu.lo_dev;
3447         struct osd_object *child = NULL;
3448         struct lu_object  *luch;
3449         struct lu_object  *lo;
3450
3451         /*
3452          * at this point topdev might not exist yet
3453          * (i.e. MGS is preparing profiles). so we can
3454          * not rely on topdev and instead lookup with
3455          * our device passed as topdev. this can't work
3456          * if the object isn't cached yet (as osd doesn't
3457          * allocate lu_header). IOW, the object must be
3458          * in the cache, otherwise lu_object_alloc() crashes
3459          * -bzzz
3460          */
3461         luch = lu_object_find_at(env, ludev, fid, NULL);
3462         if (!IS_ERR(luch)) {
3463                 if (lu_object_exists(luch)) {
3464                         lo = lu_object_locate(luch->lo_header, ludev->ld_type);
3465                         if (lo != NULL)
3466                                 child = osd_obj(lo);
3467                         else
3468                                 LU_OBJECT_DEBUG(D_ERROR, env, luch,
3469                                                 "lu_object can't be located "
3470                                                 DFID"\n", PFID(fid));
3471
3472                         if (child == NULL) {
3473                                 lu_object_put(env, luch);
3474                                 CERROR("Unable to get osd_object\n");
3475                                 child = ERR_PTR(-ENOENT);
3476                         }
3477                 } else {
3478                         LU_OBJECT_DEBUG(D_ERROR, env, luch,
3479                                         "lu_object does not exist "DFID"\n",
3480                                         PFID(fid));
3481                         lu_object_put(env, luch);
3482                         child = ERR_PTR(-ENOENT);
3483                 }
3484         } else
3485                 child = (void *)luch;
3486
3487         return child;
3488 }
3489
3490 /**
3491  * Put the osd object once done with it.
3492  *
3493  * \param obj osd object that needs to be put
3494  */
3495 static inline void osd_object_put(const struct lu_env *env,
3496                                   struct osd_object *obj)
3497 {
3498         lu_object_put(env, &obj->oo_dt.do_lu);
3499 }
3500
3501 static int osd_index_declare_ea_insert(const struct lu_env *env,
3502                                        struct dt_object *dt,
3503                                        const struct dt_rec *rec,
3504                                        const struct dt_key *key,
3505                                        struct thandle *handle)
3506 {
3507         struct osd_thandle *oh;
3508         struct inode       *inode;
3509         int                 rc;
3510         ENTRY;
3511
3512         LASSERT(dt_object_exists(dt));
3513         LASSERT(handle != NULL);
3514
3515         oh = container_of0(handle, struct osd_thandle, ot_super);
3516         LASSERT(oh->ot_handle == NULL);
3517
3518         OSD_DECLARE_OP(oh, insert);
3519         oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
3520
3521         inode = osd_dt_obj(dt)->oo_inode;
3522         LASSERT(inode);
3523
3524         /* We ignore block quota on the meta pool (MDTs), so we needn't
3525          * calculate how many blocks will be consumed by this index
3526          * insert */
3527         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
3528                                    true, true, NULL, false);
3529         RETURN(rc);
3530 }
3531
3532 /**
3533  * Index add function for interoperability mode (b11826).
3534  * It will add the directory entry. This entry is needed to
3535  * maintain the name->fid mapping.
3536  *
3537  * \param key the key, i.e. the file name to be inserted
3538  * \param rec the value for the given key, i.e. the fid
3539  *
3540  * \retval   0, on success
3541  * \retval -ve, on error
3542  */
3543 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
3544                                const struct dt_rec *rec,
3545                                const struct dt_key *key, struct thandle *th,
3546                                struct lustre_capa *capa, int ignore_quota)
3547 {
3548         struct osd_object *obj   = osd_dt_obj(dt);
3549         struct lu_fid     *fid   = (struct lu_fid *) rec;
3550         const char        *name  = (const char *)key;
3551         struct osd_object *child;
3552         int                rc;
3553
3554         ENTRY;
3555
3556         LASSERT(osd_invariant(obj));
3557         LASSERT(dt_object_exists(dt));
3558         LASSERT(th != NULL);
3559
3560         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3561                 RETURN(-EACCES);
3562
3563         child = osd_object_find(env, dt, fid);
3564         if (!IS_ERR(child)) {
3565                 rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
3566                 osd_object_put(env, child);
3567         } else {
3568                 rc = PTR_ERR(child);
3569         }
3570
3571         LASSERT(osd_invariant(obj));
3572         RETURN(rc);
3573 }
3574
3575 /**
3576  *  Initialize osd Iterator for given osd index object.
3577  *
3578  *  \param  dt      osd index object
3579  */
3580
3581 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
3582                                      struct dt_object *dt,
3583                                      __u32 unused,
3584                                      struct lustre_capa *capa)
3585 {
3586         struct osd_it_iam      *it;
3587         struct osd_thread_info *oti = osd_oti_get(env);
3588         struct osd_object      *obj = osd_dt_obj(dt);
3589         struct lu_object       *lo  = &dt->do_lu;
3590         struct iam_path_descr  *ipd;
3591         struct iam_container   *bag = &obj->oo_dir->od_container;
3592
3593         LASSERT(lu_object_exists(lo));
3594
3595         if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
3596                 return ERR_PTR(-EACCES);
3597
3598         it = &oti->oti_it;
3599         ipd = osd_it_ipd_get(env, bag);
3600         if (likely(ipd != NULL)) {
3601                 it->oi_obj = obj;
3602                 it->oi_ipd = ipd;
3603                 lu_object_get(lo);
3604                 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
3605                 return (struct dt_it *)it;
3606         }
3607         return ERR_PTR(-ENOMEM);
3608 }
3609
3610 /**
3611  * Free the given iterator.
3612  */
3613
3614 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
3615 {
3616         struct osd_it_iam *it = (struct osd_it_iam *)di;
3617         struct osd_object *obj = it->oi_obj;
3618
3619         iam_it_fini(&it->oi_it);
3620         osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
3621         lu_object_put(env, &obj->oo_dt.do_lu);
3622 }
3623
3624 /**
3625  *  Move Iterator to record specified by \a key
3626  *
3627  *  \param  di      osd iterator
3628  *  \param  key     key for index
3629  *
3630  *  \retval +ve  di points to record with least key not larger than key
3631  *  \retval  0   di points to exact matched key
3632  *  \retval -ve  failure
3633  */
3634
3635 static int osd_it_iam_get(const struct lu_env *env,
3636                           struct dt_it *di, const struct dt_key *key)
3637 {
3638         struct osd_thread_info  *oti = osd_oti_get(env);
3639         struct osd_it_iam       *it = (struct osd_it_iam *)di;
3640
3641         if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
3642                 /* swab quota uid/gid */
3643                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3644                 key = (struct dt_key *)&oti->oti_quota_id;
3645         }
3646
3647         return iam_it_get(&it->oi_it, (const struct iam_key *)key);
3648 }
3649
3650 /**
3651  *  Release Iterator
3652  *
3653  *  \param  di      osd iterator
3654  */
3655
3656 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
3657 {
3658         struct osd_it_iam *it = (struct osd_it_iam *)di;
3659
3660         iam_it_put(&it->oi_it);
3661 }
3662
3663 /**
3664  *  Move iterator by one record
3665  *
3666  *  \param  di      osd iterator
3667  *
3668  *  \retval +1   end of container reached
3669  *  \retval  0   success
3670  *  \retval -ve  failure
3671  */
3672
3673 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
3674 {
3675         struct osd_it_iam *it = (struct osd_it_iam *)di;
3676
3677         return iam_it_next(&it->oi_it);
3678 }
3679
3680 /**
3681  * Return pointer to the key under iterator.
3682  */
3683
3684 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
3685                                  const struct dt_it *di)
3686 {
3687         struct osd_thread_info *oti = osd_oti_get(env);
3688         struct osd_it_iam      *it = (struct osd_it_iam *)di;
3689         struct osd_object      *obj = it->oi_obj;
3690         struct dt_key          *key;
3691
3692         key = (struct dt_key *)iam_it_key_get(&it->oi_it);
3693
3694         if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
3695                 /* swab quota uid/gid */
3696                 oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
3697                 key = (struct dt_key *)&oti->oti_quota_id;
3698         }
3699
3700         return key;
3701 }
3702
3703 /**
3704  * Return size of key under iterator (in bytes)
3705  */
3706
3707 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
3708 {
3709         struct osd_it_iam *it = (struct osd_it_iam *)di;
3710
3711         return iam_it_key_size(&it->oi_it);
3712 }
3713
3714 static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
3715                                        int len, __u16 type)
3716 {
3717         struct luda_type *lt;
3718         const unsigned    align = sizeof(struct luda_type) - 1;
3719
3720         /* check if file type is required */
3721         if (attr & LUDA_TYPE) {
3722                         len = (len + align) & ~align;
3723
3724                         lt = (void *) ent->lde_name + len;
3725                         lt->lt_type = cpu_to_le16(CFS_DTTOIF(type));
3726                         ent->lde_attrs |= LUDA_TYPE;
3727         }
3728
3729         ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
3730 }
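
/*
 * Editorial note (worked example, not from the original source, assuming
 * sizeof(struct luda_type) == 2 since it only holds a __u16): "align" above
 * is then 1, so a name length of 5 is rounded up before luda_type is
 * appended:
 *
 *	len = (5 + 1) & ~1;	which yields 6, i.e. 2-byte aligned
 */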
3731
3732 /**
3733  * Build lu_dirent from a backend fs dirent.
3734  */
3735
3736 static inline void osd_it_pack_dirent(struct lu_dirent *ent,
3737                                       struct lu_fid *fid, __u64 offset,
3738                                       char *name, __u16 namelen,
3739                                       __u16 type, __u32 attr)
3740 {
3741         fid_cpu_to_le(&ent->lde_fid, fid);
3742         ent->lde_attrs = LUDA_FID;
3743
3744         ent->lde_hash = cpu_to_le64(offset);
3745         ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
3746
3747         strncpy(ent->lde_name, name, namelen);
3748         ent->lde_namelen = cpu_to_le16(namelen);
3749
3750         /* append lustre attributes */
3751         osd_it_append_attrs(ent, attr, namelen, type);
3752 }
3753
3754 /**
3755  * Copy the record under the iterator into \a dtrec.
3756  */
3757 static int osd_it_iam_rec(const struct lu_env *env,
3758                           const struct dt_it *di,
3759                           struct dt_rec *dtrec, __u32 attr)
3760 {
3761         struct osd_it_iam      *it   = (struct osd_it_iam *)di;
3762         struct osd_thread_info *info = osd_oti_get(env);
3763         ENTRY;
3764
3765         if (S_ISDIR(it->oi_obj->oo_inode->i_mode)) {
3766                 const struct osd_fid_pack *rec;
3767                 struct lu_fid             *fid = &info->oti_fid;
3768                 struct lu_dirent          *lde = (struct lu_dirent *)dtrec;
3769                 char                      *name;
3770                 int                        namelen;
3771                 __u64                      hash;
3772                 int                        rc;
3773
3774                 name = (char *)iam_it_key_get(&it->oi_it);
3775                 if (IS_ERR(name))
3776                         RETURN(PTR_ERR(name));
3777
3778                 namelen = iam_it_key_size(&it->oi_it);
3779
3780                 rec = (const struct osd_fid_pack *)iam_it_rec_get(&it->oi_it);
3781                 if (IS_ERR(rec))
3782                         RETURN(PTR_ERR(rec));
3783
3784                 rc = osd_fid_unpack(fid, rec);
3785                 if (rc)
3786                         RETURN(rc);
3787
3788                 hash = iam_it_store(&it->oi_it);
3789
3790                 /* IAM does not store object type in IAM index (dir) */
3791                 osd_it_pack_dirent(lde, fid, hash, name, namelen,
3792                                    0, LUDA_FID);
3793         } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
3794                 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
3795                            (struct iam_rec *)dtrec);
3796                 osd_quota_unpack(it->oi_obj, dtrec);
3797         } else {
3798                 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
3799                            (struct iam_rec *)dtrec);
3800         }
3801
3802         RETURN(0);
3803 }
3804
3805 /**
3806  * Returns cookie for current Iterator position.
3807  */
3808 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
3809 {
3810         struct osd_it_iam *it = (struct osd_it_iam *)di;
3811
3812         return iam_it_store(&it->oi_it);
3813 }
3814
3815 /**
3816  * Restore iterator from cookie.
3817  *
3818  * \param  di      osd iterator
3819  * \param  hash    Iterator location cookie
3820  *
3821  * \retval +ve  di points to record with least key not larger than key.
3822  * \retval  0   di points to exact matched key
3823  * \retval -ve  failure
3824  */
3825
3826 static int osd_it_iam_load(const struct lu_env *env,
3827                            const struct dt_it *di, __u64 hash)
3828 {
3829         struct osd_it_iam *it = (struct osd_it_iam *)di;
3830
3831         return iam_it_load(&it->oi_it, hash);
3832 }
3833
3834 static const struct dt_index_operations osd_index_iam_ops = {
3835         .dio_lookup         = osd_index_iam_lookup,
3836         .dio_declare_insert = osd_index_declare_iam_insert,
3837         .dio_insert         = osd_index_iam_insert,
3838         .dio_declare_delete = osd_index_declare_iam_delete,
3839         .dio_delete         = osd_index_iam_delete,
3840         .dio_it     = {
3841                 .init     = osd_it_iam_init,
3842                 .fini     = osd_it_iam_fini,
3843                 .get      = osd_it_iam_get,
3844                 .put      = osd_it_iam_put,
3845                 .next     = osd_it_iam_next,
3846                 .key      = osd_it_iam_key,
3847                 .key_size = osd_it_iam_key_size,
3848                 .rec      = osd_it_iam_rec,
3849                 .store    = osd_it_iam_store,
3850                 .load     = osd_it_iam_load
3851         }
3852 };
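
/*
 * Editorial usage sketch (hypothetical caller, not from the original code):
 * a typical walk over an IAM index through the dio_it operations above.
 * "iops", "key" and "rec" are illustrative variables, error handling is
 * abbreviated, and the capa argument is assumed to be NULL.  A return of +1
 * from .next signals the end of the index.
 *
 *	const struct dt_index_operations *iops = dt->do_index_ops;
 *	struct dt_it *it;
 *	int rc;
 *
 *	it = iops->dio_it.init(env, dt, 0, NULL);
 *	if (IS_ERR(it))
 *		return PTR_ERR(it);
 *	rc = iops->dio_it.get(env, it, (const struct dt_key *)key);
 *	while (rc == 0) {
 *		iops->dio_it.rec(env, it, (struct dt_rec *)rec, 0);
 *		rc = iops->dio_it.next(env, it);
 *	}
 *	iops->dio_it.put(env, it);
 *	iops->dio_it.fini(env, it);
 */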
3853
3854 /**
3855  * Creates or initializes iterator context.
3856  *
3857  * \retval struct osd_it_ea, iterator structure on success
3858  *
3859  */
3860 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
3861                                     struct dt_object *dt,
3862                                     __u32 attr,
3863                                     struct lustre_capa *capa)
3864 {
3865         struct osd_object       *obj  = osd_dt_obj(dt);
3866         struct osd_thread_info  *info = osd_oti_get(env);
3867         struct osd_it_ea        *it   = &info->oti_it_ea;
3868         struct lu_object        *lo   = &dt->do_lu;
3869         struct dentry           *obj_dentry = &info->oti_it_dentry;
3870         ENTRY;
3871         LASSERT(lu_object_exists(lo));
3872
3873         obj_dentry->d_inode = obj->oo_inode;
3874         obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
3875         obj_dentry->d_name.hash = 0;
3876
3877         it->oie_rd_dirent       = 0;
3878         it->oie_it_dirent       = 0;
3879         it->oie_dirent          = NULL;
3880         it->oie_buf             = info->oti_it_ea_buf;
3881         it->oie_obj             = obj;
3882         it->oie_file.f_pos      = 0;
3883         it->oie_file.f_dentry   = obj_dentry;
3884         if (attr & LUDA_64BITHASH)
3885                 it->oie_file.f_mode |= FMODE_64BITHASH;
3886         else
3887                 it->oie_file.f_mode |= FMODE_32BITHASH;
3888         it->oie_file.f_mapping    = obj->oo_inode->i_mapping;
3889         it->oie_file.f_op         = obj->oo_inode->i_fop;
3890         it->oie_file.private_data = NULL;
3891         lu_object_get(lo);
3892         RETURN((struct dt_it *) it);
3893 }
3894
3895 /**
3896  * Destroys or finishes the iterator context.
3897  *
3898  * \param di iterator structure to be destroyed
3899  */
3900 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
3901 {
3902         struct osd_it_ea     *it   = (struct osd_it_ea *)di;
3903         struct osd_object    *obj  = it->oie_obj;
3904         struct inode       *inode  = obj->oo_inode;
3905
3906         ENTRY;
3907         it->oie_file.f_op->release(inode, &it->oie_file);
3908         lu_object_put(env, &obj->oo_dt.do_lu);
3909         EXIT;
3910 }
3911
3912 /**
3913  * Positions the iterator at the given key, so that the next lookup
3914  * continues from that key. It is similar to dio_it->load(), but based
3915  * on a key rather than a file position.
3916  *
3917  * As a special convention, osd_it_ea_get(env, di, "") has to rewind iterator
3918  * to the beginning.
3919  *
3920  * TODO: Presently returns +1, since it is only used by mdd_dir_is_empty().
3921  */
3922 static int osd_it_ea_get(const struct lu_env *env,
3923                          struct dt_it *di, const struct dt_key *key)
3924 {
3925         struct osd_it_ea     *it   = (struct osd_it_ea *)di;
3926
3927         ENTRY;
3928         LASSERT(((const char *)key)[0] == '\0');
3929         it->oie_file.f_pos      = 0;
3930         it->oie_rd_dirent       = 0;
3931         it->oie_it_dirent       = 0;
3932         it->oie_dirent          = NULL;
3933
3934         RETURN(+1);
3935 }
3936
3937 /**
3938  * Does nothing
3939  */
3940 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
3941 {
3942 }
3943
3944 /**
3945  * It is called internally by ->readdir(). It fills the
3946  * iterator's in-memory data structure with required
3947  * information, i.e. name, namelen, rec_size, etc.
3948  *
3949  * \param buf buffer in which the information is to be filled
3950  * \param name name of the file in the given directory
3951  *
3952  * \retval 0 on success
3953  * \retval 1 on buffer full
3954  */
3955 static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
3956                                loff_t offset, __u64 ino,
3957                                unsigned d_type)
3958 {
3959         struct osd_it_ea        *it   = (struct osd_it_ea *)buf;
3960         struct osd_it_ea_dirent *ent  = it->oie_dirent;
3961         struct lu_fid           *fid  = &ent->oied_fid;
3962         struct osd_fid_pack     *rec;
3963         ENTRY;
3964
3965         /* this should never happen */
3966         if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
3967                 CERROR("ldiskfs return invalid namelen %d\n", namelen);
3968                 RETURN(-EIO);
3969         }
3970
3971         if ((void *) ent - it->oie_buf + sizeof(*ent) + namelen >
3972             OSD_IT_EA_BUFSIZE)
3973                 RETURN(1);
3974
3975         if (d_type & LDISKFS_DIRENT_LUFID) {
3976                 rec = (struct osd_fid_pack*) (name + namelen + 1);
3977
3978                 if (osd_fid_unpack(fid, rec) != 0)
3979                         fid_zero(fid);
3980
3981                 d_type &= ~LDISKFS_DIRENT_LUFID;
3982         } else {
3983                 fid_zero(fid);
3984         }
3985
3986         ent->oied_ino     = ino;
3987         ent->oied_off     = offset;
3988         ent->oied_namelen = namelen;
3989         ent->oied_type    = d_type;
3990
3991         memcpy(ent->oied_name, name, namelen);
3992
3993         it->oie_rd_dirent++;
3994         it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
3995         RETURN(0);
3996 }
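
/*
 * Editorial sketch (not part of the original source): layout of the
 * OSD_IT_EA_BUFSIZE buffer filled above.  Each entry is a
 * struct osd_it_ea_dirent immediately followed by the (unterminated) name,
 * and the next entry starts at the cfs_size_round()ed offset:
 *
 *	oie_buf: [osd_it_ea_dirent|name][pad][osd_it_ea_dirent|name][pad]...
 *
 * oie_rd_dirent counts how many entries one ->readdir() pass stored;
 * osd_it_ea_next() later steps oie_dirent through them one by one.
 */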
3997
3998 /**
3999  * Calls ->readdir() to load a directory entry at a time
4000  * and stores it in the iterator's in-memory data structure.
4001  *
4002  * \param di iterator's in memory structure
4003  *
4004  * \retval   0 on success
4005  * \retval -ve on error
4006  */
4007 static int osd_ldiskfs_it_fill(const struct lu_env *env,
4008                                const struct dt_it *di)
4009 {
4010         struct osd_it_ea   *it    = (struct osd_it_ea *)di;
4011         struct osd_object  *obj   = it->oie_obj;
4012         struct inode       *inode = obj->oo_inode;
4013         struct htree_lock  *hlock = NULL;
4014         int                 result = 0;
4015
4016         ENTRY;
4017         it->oie_dirent = it->oie_buf;
4018         it->oie_rd_dirent = 0;
4019
4020         if (obj->oo_hl_head != NULL) {
4021                 hlock = osd_oti_get(env)->oti_hlock;
4022                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
4023                                    inode, LDISKFS_HLOCK_READDIR);
4024         } else {
4025                 cfs_down_read(&obj->oo_ext_idx_sem);
4026         }
4027
4028         result = inode->i_fop->readdir(&it->oie_file, it,
4029                                        (filldir_t) osd_ldiskfs_filldir);
4030
4031         if (hlock != NULL)
4032                 ldiskfs_htree_unlock(hlock);
4033         else
4034                 cfs_up_read(&obj->oo_ext_idx_sem);
4035
4036         if (it->oie_rd_dirent == 0) {
4037                 result = -EIO;
4038         } else {
4039                 it->oie_dirent = it->oie_buf;
4040                 it->oie_it_dirent = 1;
4041         }
4042
4043         RETURN(result);
4044 }
4045
4046 /**
4047  * It calls osd_ldiskfs_it_fill(), which uses ->readdir()
4048  * to load a directory entry at a time and stores it in the
4049  * iterator's in-memory data structure.
4050  *
4051  * \param di iterator's in memory structure
4052  *
4053  * \retval +ve iterator has reached the end
4054  * \retval   0 iterator has not reached the end
4055  * \retval -ve on error
4056  */
4057 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
4058 {
4059         struct osd_it_ea *it = (struct osd_it_ea *)di;
4060         int rc;
4061
4062         ENTRY;
4063
4064         if (it->oie_it_dirent < it->oie_rd_dirent) {
4065                 it->oie_dirent =
4066                         (void *) it->oie_dirent +
4067                         cfs_size_round(sizeof(struct osd_it_ea_dirent) +
4068                                        it->oie_dirent->oied_namelen);
4069                 it->oie_it_dirent++;
4070                 RETURN(0);
4071         } else {
4072                 if (it->oie_file.f_pos == ldiskfs_get_htree_eof(&it->oie_file))
4073                         rc = +1;
4074                 else
4075                         rc = osd_ldiskfs_it_fill(env, di);
4076         }
4077
4078         RETURN(rc);
4079 }
4080
4081 /**
4082  * Returns the key at current position from iterator's in memory structure.
4083  *
4084  * \param di iterator's in memory structure
4085  *
4086  * \retval key i.e. struct dt_key on success
4087  */
4088 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
4089                                     const struct dt_it *di)
4090 {
4091         struct osd_it_ea *it = (struct osd_it_ea *)di;
4092
4093         return (struct dt_key *)it->oie_dirent->oied_name;
4094 }
4095
4096 /**
4097  * Returns the key's size at current position from iterator's in memory structure.
4098  *
4099  * \param di iterator's in memory structure
4100  *
4101  * \retval key size in bytes, on success
4102  */
4103 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
4104 {
4105         struct osd_it_ea *it = (struct osd_it_ea *)di;
4106
4107         return it->oie_dirent->oied_namelen;
4108 }
4109
4110
4111 /**
4112  * Returns the value (i.e. fid/igif) at current position from iterator's
4113  * in memory structure.
4114  *
4115  * \param di struct osd_it_ea, iterator's in memory structure
4116  * \param attr attr requested for dirent.
4117  * \param lde lustre dirent
4118  *
4119  * \retval   0 no error; \a lde contains the correct lustre dirent.
4120  * \retval -ve on error
4121  */
4122 static inline int osd_it_ea_rec(const struct lu_env *env,
4123                                 const struct dt_it *di,
4124                                 struct dt_rec *dtrec, __u32 attr)
4125 {
4126         struct osd_it_ea       *it    = (struct osd_it_ea *)di;
4127         struct osd_object      *obj   = it->oie_obj;
4128         struct osd_device      *dev   = osd_obj2dev(obj);
4129         struct osd_scrub       *scrub = &dev->od_scrub;
4130         struct scrub_file      *sf    = &scrub->os_file;
4131         struct osd_thread_info *oti   = osd_oti_get(env);
4132         struct osd_idmap_cache *oic   = &oti->oti_cache;
4133         struct lu_fid          *fid   = &it->oie_dirent->oied_fid;
4134         struct lu_dirent       *lde   = (struct lu_dirent *)dtrec;
4135         __u32                   ino   = it->oie_dirent->oied_ino;
4136         int                     rc    = 0;
4137         ENTRY;
4138
4139         if (!fid_is_sane(fid)) {
4140                 rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
4141                 if (rc != 0)
4142                         RETURN(rc);
4143         } else {
4144                 osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
4145         }
4146
4147         osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
4148                            it->oie_dirent->oied_name,
4149                            it->oie_dirent->oied_namelen,
4150                            it->oie_dirent->oied_type, attr);
4151         oic->oic_fid = *fid;
4152         if ((scrub->os_pos_current <= ino) &&
4153             (sf->sf_flags & SF_INCONSISTENT ||
4154              ldiskfs_test_bit(osd_oi_fid2idx(dev, fid), sf->sf_oi_bitmap)))
4155                 osd_consistency_check(oti, dev, oic);
4156
4157         RETURN(rc);
4158 }
4159
4160 /**
4161  * Returns a cookie for current position of the iterator head, so that
4162  * user can use this cookie to load/start the iterator next time.
4163  *
4164  * \param di iterator's in memory structure
4165  *
4166  * \retval cookie for current position, on success
4167  */
4168 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
4169 {
4170         struct osd_it_ea *it = (struct osd_it_ea *)di;
4171
4172         return it->oie_dirent->oied_off;
4173 }
4174
4175 /**
4176  * It calls osd_ldiskfs_it_fill() which will use ->readdir()
4177  * to load a directory entry at a time and stores it
4178  * in the iterator's in-memory data structure.
4179  *
4180  * \param di struct osd_it_ea, iterator's in memory structure
4181  *
4182  * \retval +ve on success
4183  * \retval -ve on error
4184  */
4185 static int osd_it_ea_load(const struct lu_env *env,
4186                           const struct dt_it *di, __u64 hash)
4187 {
4188         struct osd_it_ea *it = (struct osd_it_ea *)di;
4189         int rc;
4190
4191         ENTRY;
4192         it->oie_file.f_pos = hash;
4193
4194         rc =  osd_ldiskfs_it_fill(env, di);
4195         if (rc == 0)
4196                 rc = +1;
4197
4198         RETURN(rc);
4199 }
4200
4201 /**
4202  * Index lookup function for interoperability mode (b11826).
4203  *
4204  * \param key   key, i.e. the file name to be searched
4205  *
4206  * \retval +ve, on success
4207  * \retval -ve, on error
4208  */
4209 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
4210                                struct dt_rec *rec, const struct dt_key *key,
4211                                struct lustre_capa *capa)
4212 {
4213         struct osd_object *obj = osd_dt_obj(dt);
4214         int rc = 0;
4215
4216         ENTRY;
4217
4218         LASSERT(S_ISDIR(obj->oo_inode->i_mode));
4219         LINVRNT(osd_invariant(obj));
4220
4221         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
4222                 return -EACCES;
4223
4224         rc = osd_ea_lookup_rec(env, obj, rec, key);
4225         if (rc == 0)
4226                 rc = +1;
4227         RETURN(rc);
4228 }
4229
4230 /**
4231  * Index and Iterator operations for interoperability
4232  * mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
4233  */
4234 static const struct dt_index_operations osd_index_ea_ops = {
4235         .dio_lookup         = osd_index_ea_lookup,
4236         .dio_declare_insert = osd_index_declare_ea_insert,
4237         .dio_insert         = osd_index_ea_insert,
4238         .dio_declare_delete = osd_index_declare_ea_delete,
4239         .dio_delete         = osd_index_ea_delete,
4240         .dio_it     = {
4241                 .init     = osd_it_ea_init,
4242                 .fini     = osd_it_ea_fini,
4243                 .get      = osd_it_ea_get,
4244                 .put      = osd_it_ea_put,
4245                 .next     = osd_it_ea_next,
4246                 .key      = osd_it_ea_key,
4247                 .key_size = osd_it_ea_key_size,
4248                 .rec      = osd_it_ea_rec,
4249                 .store    = osd_it_ea_store,
4250                 .load     = osd_it_ea_load
4251         }
4252 };
4253
4254 static void *osd_key_init(const struct lu_context *ctx,
4255                           struct lu_context_key *key)
4256 {
4257         struct osd_thread_info *info;
4258
4259         OBD_ALLOC_PTR(info);
4260         if (info == NULL)
4261                 return ERR_PTR(-ENOMEM);
4262
4263         OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4264         if (info->oti_it_ea_buf == NULL)
4265                 goto out_free_info;
4266
4267         info->oti_env = container_of(ctx, struct lu_env, le_ctx);
4268
4269         info->oti_hlock = ldiskfs_htree_lock_alloc();
4270         if (info->oti_hlock == NULL)
4271                 goto out_free_ea;
4272
4273         return info;
4274
4275  out_free_ea:
4276         OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4277  out_free_info:
4278         OBD_FREE_PTR(info);
4279         return ERR_PTR(-ENOMEM);
4280 }
4281
4282 static void osd_key_fini(const struct lu_context *ctx,
4283                          struct lu_context_key *key, void* data)
4284 {
4285         struct osd_thread_info *info = data;
4286
4287         if (info->oti_hlock != NULL)
4288                 ldiskfs_htree_lock_free(info->oti_hlock);
4289         OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4290         OBD_FREE_PTR(info);
4291 }
4292
4293 static void osd_key_exit(const struct lu_context *ctx,
4294                          struct lu_context_key *key, void *data)
4295 {
4296         struct osd_thread_info *info = data;
4297
4298         LASSERT(info->oti_r_locks == 0);
4299         LASSERT(info->oti_w_locks == 0);
4300         LASSERT(info->oti_txns    == 0);
4301 }
4302
4303 /* type constructor/destructor: osd_type_init, osd_type_fini */
4304 LU_TYPE_INIT_FINI(osd, &osd_key);
4305
4306 struct lu_context_key osd_key = {
4307         .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
4308         .lct_init = osd_key_init,
4309         .lct_fini = osd_key_fini,
4310         .lct_exit = osd_key_exit
4311 };
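
/*
 * Editorial note (sketch, assuming osd_oti_get() is the usual
 * lu_context_key_get() wrapper defined in osd_internal.h): the per-thread
 * info allocated by osd_key_init() above is fetched from the environment
 * roughly as follows:
 *
 *	struct osd_thread_info *info;
 *
 *	info = lu_context_key_get(&env->le_ctx, &osd_key);
 */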
4312
4313
4314 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
4315                            const char *name, struct lu_device *next)
4316 {
4317         struct osd_device *osd = osd_dev(d);
4318
4319         strncpy(osd->od_svname, name, MAX_OBD_NAME);
4320         return osd_procfs_init(osd, name);
4321 }
4322
4323 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
4324 {
4325         ENTRY;
4326
4327         osd_scrub_cleanup(env, o);
4328
4329         if (o->od_fsops) {
4330                 fsfilt_put_ops(o->od_fsops);
4331                 o->od_fsops = NULL;
4332         }
4333
4334         /* shutdown quota slave instance associated with the device */
4335         if (o->od_quota_slave != NULL) {
4336                 qsd_fini(env, o->od_quota_slave);
4337                 o->od_quota_slave = NULL;
4338         }
4339
4340         RETURN(0);
4341 }
4342
4343 static int osd_mount(const struct lu_env *env,
4344                      struct osd_device *o, struct lustre_cfg *cfg)
4345 {
4346         const char              *name  = lustre_cfg_string(cfg, 0);
4347         const char              *dev  = lustre_cfg_string(cfg, 1);
4348         const char              *opts;
4349         unsigned long            page, s_flags, lmd_flags = 0;
4350         struct page             *__page;
4351         struct file_system_type *type;
4352         char                    *options = NULL;
4353         char                    *str;
4354         int                       rc = 0;
4355         ENTRY;
4356
4357         if (o->od_mnt != NULL)
4358                 RETURN(0);
4359
4360         o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
4361         if (o->od_fsops == NULL) {
4362                 CERROR("Can't find fsfilt_ldiskfs\n");
4363                 RETURN(-ENOTSUPP);
4364         }
4365
4366         OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
4367         if (__page == NULL)
4368                 GOTO(out, rc = -ENOMEM);
4369
4370         str = lustre_cfg_string(cfg, 2);
4371         s_flags = simple_strtoul(str, NULL, 0);
4372         str = strstr(str, ":");
4373         if (str)
4374                 lmd_flags = simple_strtoul(str + 1, NULL, 0);
4375         opts = lustre_cfg_string(cfg, 3);
4376         page = (unsigned long)cfs_page_address(__page);
4377         options = (char *)page;
4378         *options = '\0';
4379         if (opts == NULL)
4380                 strcat(options, "user_xattr,acl");
4381         else
4382                 strcat(options, opts);
4383
4384         /* Glom up mount options */
4385         if (*options != '\0')
4386                 strcat(options, ",");
4387         strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
4388
4389         type = get_fs_type("ldiskfs");
4390         if (!type) {
4391                 CERROR("%s: cannot find ldiskfs module\n", name);
4392                 GOTO(out, rc = -ENODEV);
4393         }
4394
4395         o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
4396         cfs_module_put(type->owner);
4397
4398         if (IS_ERR(o->od_mnt)) {
4399                 rc = PTR_ERR(o->od_mnt);
4400                 CERROR("%s: can't mount %s: %d\n", name, dev, rc);
4401                 o->od_mnt = NULL;
4402                 GOTO(out, rc);
4403         }
4404
4405         if (lvfs_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
4406                 CERROR("%s: underlying device %s is marked as read-only. "
4407                        "Setup failed\n", name, dev);
4408                 mntput(o->od_mnt);
4409                 o->od_mnt = NULL;
4410                 GOTO(out, rc = -EROFS);
4411         }
4412
4413         if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
4414             LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
4415                 CERROR("%s: device %s is mounted w/o journal\n", name, dev);
4416                 mntput(o->od_mnt);
4417                 o->od_mnt = NULL;
4418                 GOTO(out, rc = -EINVAL);
4419         }
4420
4421         if (lmd_flags & LMD_FLG_IAM) {
4422                 o->od_iop_mode = 0;
4423                 LCONSOLE_WARN("%s: OSD: IAM mode enabled\n", name);
4424         } else
4425                 o->od_iop_mode = 1;
4426         if (lmd_flags & LMD_FLG_NOSCRUB)
4427                 o->od_noscrub = 1;
4428
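/*
 * Cleanup: the temporary options page is always freed, while the fsfilt
 * ops reference is dropped only on failure.
 */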
4429 out:
4430         if (__page)
4431                 OBD_PAGE_FREE(__page);
4432         if (rc)
4433                 fsfilt_put_ops(o->od_fsops);
4434
4435         RETURN(rc);
4436 }
4437
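/*
 * Release the device resources: run osd_shutdown(), drop the compatibility
 * objects, shrink the dentry cache, sync the filesystem, remove the procfs
 * entries and finally put the ldiskfs mount.
 */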
4438 static struct lu_device *osd_device_fini(const struct lu_env *env,
4439                                          struct lu_device *d)
4440 {
4441         int rc;
4442         ENTRY;
4443
4444         rc = osd_shutdown(env, osd_dev(d));
4445
4446         osd_compat_fini(osd_dev(d));
4447
4448         shrink_dcache_sb(osd_sb(osd_dev(d)));
4449         osd_sync(env, lu2dt_dev(d));
4450
4451         rc = osd_procfs_fini(osd_dev(d));
4452         if (rc) {
4453                 CERROR("proc fini error %d\n", rc);
4454                 RETURN(ERR_PTR(rc));
4455         }
4456
4457         if (osd_dev(d)->od_mnt) {
4458                 mntput(osd_dev(d)->od_mnt);
4459                 osd_dev(d)->od_mnt = NULL;
4460         }
4461
4462         RETURN(NULL);
4463 }
4464
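/*
 * Core setup of a newly allocated OSD device: install the operation vectors,
 * mount ldiskfs, set up the scrub/OI files, the compatibility objects, the
 * lu_site, procfs and the quota slave.  Any failure unwinds the steps that
 * were completed so far.
 */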
4465 static int osd_device_init0(const struct lu_env *env,
4466                             struct osd_device *o,
4467                             struct lustre_cfg *cfg)
4468 {
4469         struct lu_device        *l = osd2lu_dev(o);
4470         struct osd_thread_info *info;
4471         int                     rc;
4472
4473         /* if the module was re-loaded, the env can lose its keys */
4474         rc = lu_env_refill((struct lu_env *) env);
4475         if (rc)
4476                 GOTO(out, rc);
4477         info = osd_oti_get(env);
4478         LASSERT(info);
4479
4480         l->ld_ops = &osd_lu_ops;
4481         o->od_dt_dev.dd_ops = &osd_dt_ops;
4482
4483         cfs_spin_lock_init(&o->od_osfs_lock);
4484         cfs_mutex_init(&o->od_otable_mutex);
4485         o->od_osfs_age = cfs_time_shift_64(-1000);
4486
4487         o->od_capa_hash = init_capa_hash();
4488         if (o->od_capa_hash == NULL)
4489                 GOTO(out, rc = -ENOMEM);
4490
4491         o->od_read_cache = 1;
4492         o->od_writethrough_cache = 1;
4493         o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
4494
4495         rc = osd_mount(env, o, cfg);
4496         if (rc)
4497                 GOTO(out_capa, rc);
4498
4499         /* set up scrub, including initialization of the OI files */
4500         rc = osd_scrub_setup(env, o);
4501         if (rc < 0)
4502                 GOTO(out_mnt, rc);
4503
4504         strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
4505                         sizeof(o->od_svname) - 1);
4506
4507         rc = osd_compat_init(o);
4508         if (rc != 0)
4509                 GOTO(out_scrub, rc);
4510
4511         rc = lu_site_init(&o->od_site, l);
4512         if (rc)
4513                 GOTO(out_compat, rc);
4514         o->od_site.ls_bottom_dev = l;
4515
4516         rc = lu_site_init_finish(&o->od_site);
4517         if (rc)
4518                 GOTO(out_site, rc);
4519
4520         rc = osd_procfs_init(o, o->od_svname);
4521         if (rc != 0) {
4522                 CERROR("%s: can't initialize procfs: rc = %d\n",
4523                        o->od_svname, rc);
4524                 GOTO(out_site, rc);
4525         }
4526
4527         LASSERT(l->ld_site->ls_linkage.next && l->ld_site->ls_linkage.prev);
4528
4529         /* initialize quota slave instance */
4530         o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
4531                                      o->od_proc_entry);
4532         if (IS_ERR(o->od_quota_slave)) {
4533                 rc = PTR_ERR(o->od_quota_slave);
4534                 o->od_quota_slave = NULL;
4535                 GOTO(out_procfs, rc);
4536         }
4537
4538         RETURN(0);
4539 out_procfs:
4540         osd_procfs_fini(o);
4541 out_site:
4542         lu_site_fini(&o->od_site);
4543 out_compat:
4544         osd_compat_fini(o);
4545 out_scrub:
4546         osd_scrub_cleanup(env, o);
4547 out_mnt:
4548         osd_oi_fini(info, o);
4549         osd_shutdown(env, o);
4550         mntput(o->od_mnt);
4551         o->od_mnt = NULL;
4552 out_capa:
4553         cleanup_capa_hash(o->od_capa_hash);
4554 out:
4555         RETURN(rc);
4556 }
4557
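/*
 * Allocate an OSD device and run the full initialization; on failure the
 * partially set up device is torn down and freed again.
 */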
4558 static struct lu_device *osd_device_alloc(const struct lu_env *env,
4559                                           struct lu_device_type *t,
4560                                           struct lustre_cfg *cfg)
4561 {
4562         struct osd_device *o;
4563         int                rc;
4564
4565         OBD_ALLOC_PTR(o);
4566         if (o == NULL)
4567                 return ERR_PTR(-ENOMEM);
4568
4569         rc = dt_device_init(&o->od_dt_dev, t);
4570         if (rc == 0) {
4571                 rc = osd_device_init0(env, o, cfg);
4572                 if (rc)
4573                         dt_device_fini(&o->od_dt_dev);
4574         }
4575
4576         if (unlikely(rc != 0))
4577                 OBD_FREE_PTR(o);
4578
4579         return rc == 0 ? osd2lu_dev(o) : ERR_PTR(rc);
4580 }
4581
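/*
 * Final release of the device: drop the capability hash, purge any objects
 * still cached in the lu_site (printing them if the hash is not empty),
 * then free the site, the dt_device and the osd_device itself.
 */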
4582 static struct lu_device *osd_device_free(const struct lu_env *env,
4583                                          struct lu_device *d)
4584 {
4585         struct osd_device *o = osd_dev(d);
4586         ENTRY;
4587
4588         cleanup_capa_hash(o->od_capa_hash);
4589         /* XXX: make osd the top device in order to release the reference */
4590         d->ld_site->ls_top_dev = d;
4591         lu_site_purge(env, d->ld_site, -1);
4592         if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
4593                 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
4594                 lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
4595         }
4596         lu_site_fini(&o->od_site);
4597         dt_device_fini(&o->od_dt_dev);
4598         OBD_FREE_PTR(o);
4599         RETURN(NULL);
4600 }
4601
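/*
 * Handle configuration commands: LCFG_SETUP (re)mounts the backing
 * filesystem, LCFG_CLEANUP detaches the device from the site and shuts it
 * down; anything else is not supported.
 */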
4602 static int osd_process_config(const struct lu_env *env,
4603                               struct lu_device *d, struct lustre_cfg *cfg)
4604 {
4605         struct osd_device *o = osd_dev(d);
4606         int err;
4607         ENTRY;
4608
4609         switch (cfg->lcfg_command) {
4610         case LCFG_SETUP:
4611                 err = osd_mount(env, o, cfg);
4612                 break;
4613         case LCFG_CLEANUP:
4614                 lu_dev_del_linkage(d->ld_site, d);
4615                 err = osd_shutdown(env, o);
4616                 break;
4617         default:
4618                 err = -ENOSYS;
4619         }
4620
4621         RETURN(err);
4622 }
4623
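/* No OSD-specific work is needed once recovery has completed. */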
4624 static int osd_recovery_complete(const struct lu_env *env,
4625                                  struct lu_device *d)
4626 {
4627         RETURN(0);
4628 }
4629
4630 /*
4631  * We use exports to track all OSD users.
4632  */
4633 static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
4634                            struct obd_device *obd, struct obd_uuid *cluuid,
4635                            struct obd_connect_data *data, void *localdata)
4636 {
4637         struct osd_device    *osd = osd_dev(obd->obd_lu_dev);
4638         struct lustre_handle  conn;
4639         int                   rc;
4640         ENTRY;
4641
4642         CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);
4643
4644         rc = class_connect(&conn, obd, cluuid);
4645         if (rc)
4646                 RETURN(rc);
4647
4648         *exp = class_conn2export(&conn);
4649
4650         cfs_spin_lock(&osd->od_osfs_lock);
4651         osd->od_connects++;
4652         cfs_spin_unlock(&osd->od_osfs_lock);
4653
4654         RETURN(0);
4655 }
4656
4657 /*
4658  * Once the last export (the self-export is not counted) has disappeared,
4659  * the OSD can be released.
4660  */
4661 static int osd_obd_disconnect(struct obd_export *exp)
4662 {
4663         struct obd_device *obd = exp->exp_obd;
4664         struct osd_device *osd = osd_dev(obd->obd_lu_dev);
4665         int                rc, release = 0;
4666         ENTRY;
4667
4668         /* Only disconnect the underlying layers on the final disconnect. */
4669         cfs_spin_lock(&osd->od_osfs_lock);
4670         osd->od_connects--;
4671         if (osd->od_connects == 0)
4672                 release = 1;
4673         cfs_spin_unlock(&osd->od_osfs_lock);
4674
4675         rc = class_disconnect(exp); /* bz 9811 */
4676
4677         if (rc == 0 && release)
4678                 class_manual_cleanup(obd);
4679         RETURN(rc);
4680 }
4681
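/*
 * Prepare the device for use: when an MD device sits on top, create the
 * special local objects through the legacy llo interface, then set up the
 * quota slave objects.
 */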
4682 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
4683                        struct lu_device *dev)
4684 {
4685         struct osd_device *osd = osd_dev(dev);
4686         int                result = 0;
4687         ENTRY;
4688
4689         if (dev->ld_site && lu_device_is_md(dev->ld_site->ls_top_dev)) {
4690                 /* MDT/MDD still use the old infrastructure to create
4691                  * special files */
4692                 result = llo_local_objects_setup(env, lu2md_dev(pdev),
4693                                                  lu2dt_dev(dev));
4694                 if (result)
4695                         RETURN(result);
4696         }
4697
4698         if (osd->od_quota_slave != NULL)
4699                 /* set up quota slave objects */
4700                 result = qsd_prepare(env, osd->od_quota_slave);
4701
4702         RETURN(result);
4703 }
4704
4705 static const struct lu_object_operations osd_lu_obj_ops = {
4706         .loo_object_init      = osd_object_init,
4707         .loo_object_delete    = osd_object_delete,
4708         .loo_object_release   = osd_object_release,
4709         .loo_object_free      = osd_object_free,
4710         .loo_object_print     = osd_object_print,
4711         .loo_object_invariant = osd_object_invariant
4712 };
4713
4714 const struct lu_device_operations osd_lu_ops = {
4715         .ldo_object_alloc      = osd_object_alloc,
4716         .ldo_process_config    = osd_process_config,
4717         .ldo_recovery_complete = osd_recovery_complete,
4718         .ldo_prepare           = osd_prepare,
4719 };
4720
4721 static const struct lu_device_type_operations osd_device_type_ops = {
4722         .ldto_init = osd_type_init,
4723         .ldto_fini = osd_type_fini,
4724
4725         .ldto_start = osd_type_start,
4726         .ldto_stop  = osd_type_stop,
4727
4728         .ldto_device_alloc = osd_device_alloc,
4729         .ldto_device_free  = osd_device_free,
4730
4731         .ldto_device_init    = osd_device_init,
4732         .ldto_device_fini    = osd_device_fini
4733 };
4734
4735 struct lu_device_type osd_device_type = {
4736         .ldt_tags     = LU_DEVICE_DT,
4737         .ldt_name     = LUSTRE_OSD_LDISKFS_NAME,
4738         .ldt_ops      = &osd_device_type_ops,
4739         .ldt_ctx_tags = LCT_LOCAL,
4740 };
4741
4742 /*
4743  * lprocfs legacy support.
4744  */
4745 static struct obd_ops osd_obd_device_ops = {
4746         .o_owner = THIS_MODULE,
4747         .o_connect      = osd_obd_connect,
4748         .o_disconnect   = osd_obd_disconnect
4749 };
4750
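/*
 * Module entry point: initialize the OI machinery, set up the lprocfs
 * variables and register the ldiskfs OSD device type with the class driver.
 */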
4751 static int __init osd_mod_init(void)
4752 {
4753         struct lprocfs_static_vars lvars;
4754
4755         osd_oi_mod_init();
4756         lprocfs_osd_init_vars(&lvars);
4757         return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
4758                                    LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
4759 }
4760
4761 static void __exit osd_mod_exit(void)
4762 {
4763         class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
4764 }
4765
4766 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4767 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
4768 MODULE_LICENSE("GPL");
4769
4770 cfs_module(osd, "0.1.0", osd_mod_init, osd_mod_exit);