LU-2441 mdd: Implement volatile file
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_handler.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osd/osd_handler.c
37  *
38  * Top-level entry points into osd module
39  *
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
42  */
43
44 #define DEBUG_SUBSYSTEM S_MDS
45
46 #include <linux/module.h>
47
48 /* LUSTRE_VERSION_CODE */
49 #include <lustre_ver.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/types.h>
52 /* prerequisite for linux/xattr.h */
53 #include <linux/fs.h>
54 /* XATTR_{REPLACE,CREATE} */
55 #include <linux/xattr.h>
56 /* simple_mkdir() */
57 #include <lvfs.h>
58
59 /*
60  * struct OBD_{ALLOC,FREE}*()
61  * OBD_FAIL_CHECK
62  */
63 #include <obd_support.h>
64 /* struct ptlrpc_thread */
65 #include <lustre_net.h>
66
67 /* fid_is_local() */
68 #include <lustre_fid.h>
69
70 #include "osd_internal.h"
71 #include "osd_igif.h"
72
73 /* llo_* api support */
74 #include <md_object.h>
75 #include <lustre_quota.h>
76
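/*
 * When ldiskfs_pdo is non-zero, directory objects get an htree lock head
 * allocated in osd_fid_lookup() so operations within a single directory
 * can proceed in parallel (see the ldiskfs_htree_lock_head_alloc() call).
 */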
77 int ldiskfs_pdo = 1;
78 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
79                 "ldiskfs with parallel directory operations");
80
81 static const char dot[] = ".";
82 static const char dotdot[] = "..";
83 static const char remote_obj_dir[] = "REM_OBJ_DIR";
84
85 static const struct lu_object_operations      osd_lu_obj_ops;
86 static const struct dt_object_operations      osd_obj_ops;
87 static const struct dt_object_operations      osd_obj_ea_ops;
88 static const struct dt_object_operations      osd_obj_otable_it_ops;
89 static const struct dt_index_operations       osd_index_iam_ops;
90 static const struct dt_index_operations       osd_index_ea_ops;
91
92 static int osd_has_index(const struct osd_object *obj)
93 {
94         return obj->oo_dt.do_index_ops != NULL;
95 }
96
97 static int osd_object_invariant(const struct lu_object *l)
98 {
99         return osd_invariant(osd_obj(l));
100 }
101
102 /*
103  * Concurrency: doesn't matter
104  */
105 static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
106 {
107         return osd_oti_get(env)->oti_r_locks > 0;
108 }
109
110 /*
111  * Concurrency: doesn't matter
112  */
113 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
114 {
115         struct osd_thread_info *oti = osd_oti_get(env);
116         return oti->oti_w_locks > 0 && o->oo_owner == env;
117 }
118
119 /*
120  * Concurrency: doesn't access mutable data
121  */
122 static int osd_root_get(const struct lu_env *env,
123                         struct dt_device *dev, struct lu_fid *f)
124 {
125         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
126         return 0;
127 }
128
129 /*
130  * OSD object methods.
131  */
132
133 /*
134  * Concurrency: no concurrent access is possible that early in object
135  * life-cycle.
136  */
137 static struct lu_object *osd_object_alloc(const struct lu_env *env,
138                                           const struct lu_object_header *hdr,
139                                           struct lu_device *d)
140 {
141         struct osd_object *mo;
142
143         OBD_ALLOC_PTR(mo);
144         if (mo != NULL) {
145                 struct lu_object *l;
146
147                 l = &mo->oo_dt.do_lu;
148                 dt_object_init(&mo->oo_dt, NULL, d);
149                 mo->oo_dt.do_ops = &osd_obj_ea_ops;
150                 l->lo_ops = &osd_lu_obj_ops;
151                 init_rwsem(&mo->oo_sem);
152                 init_rwsem(&mo->oo_ext_idx_sem);
153                 spin_lock_init(&mo->oo_guard);
154                 return l;
155         } else {
156                 return NULL;
157         }
158 }
159
160 static int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
161                        struct dentry *dentry, struct lustre_mdt_attrs *lma)
162 {
163         int rc;
164
165         dentry->d_inode = inode;
166         rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA, (void *)lma,
167                                    sizeof(*lma));
168         if (rc == -ERANGE) {
169                 /* try with old lma size */
170                 rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA,
171                                            info->oti_mdt_attrs_old,
172                                            LMA_OLD_SIZE);
173                 if (rc > 0)
174                         memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
175         }
176         if (rc > 0) {
177                 /* Check LMA compatibility */
178                 if (lma->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP)) {
179                         CWARN("%.16s: unsupported incompat LMA feature(s) "
180                               "%lx/%#x\n",
181                               LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
182                               inode->i_ino, le32_to_cpu(lma->lma_incompat) &
183                                                         ~LMA_INCOMPAT_SUPP);
184                         rc = -ENOSYS;
185                 } else {
186                         lustre_lma_swab(lma);
187                         rc = 0;
188                 }
189         } else if (rc == 0) {
190                 rc = -ENODATA;
191         }
192
193         return rc;
194 }
195
196 /*
197  * Retrieve an inode from the backing ldiskfs filesystem by osd_inode_id.
198  */
199 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
200                        struct osd_inode_id *id)
201 {
202         struct inode *inode = NULL;
203
204         inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
205         if (IS_ERR(inode)) {
206                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
207                        id->oii_ino, PTR_ERR(inode));
208         } else if (id->oii_gen != OSD_OII_NOGEN &&
209                    inode->i_generation != id->oii_gen) {
210                 CDEBUG(D_INODE, "unmatched inode: ino = %u, gen0 = %u, "
211                        "gen1 = %u\n",
212                        id->oii_ino, id->oii_gen, inode->i_generation);
213                 iput(inode);
214                 inode = ERR_PTR(-ESTALE);
215         } else if (inode->i_nlink == 0) {
216                 /* due to a parallel readdir and unlink,
217                  * we can find a dead inode here. */
218                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
219                 make_bad_inode(inode);
220                 iput(inode);
221                 inode = ERR_PTR(-ESTALE);
222         } else if (is_bad_inode(inode)) {
223                 CWARN("%.16s: bad inode: ino = %u\n",
224                 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
225                 iput(inode);
226                 inode = ERR_PTR(-ENOENT);
227         } else {
228                 if (id->oii_gen == OSD_OII_NOGEN)
229                         osd_id_gen(id, inode->i_ino, inode->i_generation);
230
231                 /* Do not update file c/mtime in ldiskfs.
232                  * NB: we don't have any lock to protect this because we don't
233                  * hold a reference on the osd_object yet, but a race with
234                  * another lookup + attr_set can't happen in the tiny window
235                  * between the check below and setting S_NOCMTIME. */
236                 if (!(inode->i_flags & S_NOCMTIME))
237                         inode->i_flags |= S_NOCMTIME;
238         }
239         return inode;
240 }
241
242 struct inode *osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
243                            struct osd_inode_id *id, struct lu_fid *fid)
244 {
245         struct lustre_mdt_attrs *lma   = &info->oti_mdt_attrs;
246         struct inode            *inode;
247         int                      rc;
248
249         inode = osd_iget(info, dev, id);
250         if (IS_ERR(inode))
251                 return inode;
252
253         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
254         if (rc == 0) {
255                 *fid = lma->lma_self_fid;
256         } else if (rc == -ENODATA) {
257                 LU_IGIF_BUILD(fid, inode->i_ino, inode->i_generation);
258         } else {
259                 iput(inode);
260                 inode = ERR_PTR(rc);
261         }
262         return inode;
263 }
264
265 static struct inode *
266 osd_iget_verify(struct osd_thread_info *info, struct osd_device *dev,
267                 struct osd_inode_id *id, const struct lu_fid *fid)
268 {
269         struct lustre_mdt_attrs *lma   = &info->oti_mdt_attrs;
270         struct inode            *inode;
271         int                      rc;
272
273         inode = osd_iget(info, dev, id);
274         if (IS_ERR(inode))
275                 return inode;
276
277         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
278         if (rc == -ENODATA)
279                 return inode;
280
281         if (rc != 0) {
282                 iput(inode);
283                 return ERR_PTR(rc);
284         }
285
286         if (!lu_fid_eq(fid, &lma->lma_self_fid)) {
287                 CDEBUG(D_LFSCK, "inconsistent obj: "DFID", %lu, "DFID"\n",
288                        PFID(&lma->lma_self_fid), inode->i_ino, PFID(fid));
289                 iput(inode);
290                 return ERR_PTR(-EREMCHG);
291         }
292
293         return inode;
294 }
295
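/*
 * Resolve a FID to an inode. The mapping is searched in order: the
 * per-thread OI cache, the OI scrub pending list, and finally the OI
 * files. When the OI files are flagged SF_INCONSISTENT, the LMA self-FID
 * stored in the inode is verified as well; a mismatch (-EREMCHG) may
 * trigger an OI scrub.
 */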
296 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
297                           const struct lu_fid *fid,
298                           const struct lu_object_conf *conf)
299 {
300         struct osd_thread_info *info;
301         struct lu_device       *ldev   = obj->oo_dt.do_lu.lo_dev;
302         struct osd_device      *dev;
303         struct osd_idmap_cache *oic;
304         struct osd_inode_id    *id;
305         struct inode           *inode;
306         struct osd_scrub       *scrub;
307         struct scrub_file      *sf;
308         int                     result;
309         int                     verify = 0;
310         ENTRY;
311
312         LINVRNT(osd_invariant(obj));
313         LASSERT(obj->oo_inode == NULL);
314         LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID, PFID(fid));
315
316         dev = osd_dev(ldev);
317         scrub = &dev->od_scrub;
318         sf = &scrub->os_file;
319         info = osd_oti_get(env);
320         LASSERT(info);
321         oic = &info->oti_cache;
322
323         if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
324                 RETURN(-ENOENT);
325
326         /* Search order: 1. per-thread cache. */
327         if (lu_fid_eq(fid, &oic->oic_fid)) {
328                 id = &oic->oic_lid;
329                 goto iget;
330         }
331
332         id = &info->oti_id;
333         if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
334                 /* Search order: 2. OI scrub pending list. */
335                 result = osd_oii_lookup(dev, fid, id);
336                 if (result == 0)
337                         goto iget;
338         }
339
340         if (sf->sf_flags & SF_INCONSISTENT)
341                 verify = 1;
342
343         /*
344          * Objects are created as locking anchors or placeholders for objects
345          * yet to be created. There is no need to call osd_oi_lookup() here
346          * because a FID should never be re-used; if it really is a duplicate
347          * FID for some unexpected reason, we should be able to detect it
348          * later via do_create->osd_oi_insert().
349          */
350         if (conf != NULL && (conf->loc_flags & LOC_F_NEW) != 0)
351                 GOTO(out, result = 0);
352
353         /* Search order: 3. OI files. */
354         result = osd_oi_lookup(info, dev, fid, id);
355         if (result == -ENOENT) {
356                 if (!fid_is_norm(fid) ||
357                     !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
358                                       sf->sf_oi_bitmap))
359                         GOTO(out, result = 0);
360
361                 goto trigger;
362         }
363
364         if (result != 0)
365                 GOTO(out, result);
366
367 iget:
368         if (verify == 0)
369                 inode = osd_iget(info, dev, id);
370         else
371                 inode = osd_iget_verify(info, dev, id, fid);
372         if (IS_ERR(inode)) {
373                 result = PTR_ERR(inode);
374                 if (result == -ENOENT || result == -ESTALE) {
375                         fid_zero(&oic->oic_fid);
376                         result = 0;
377                 } else if (result == -EREMCHG) {
378
379 trigger:
380                         if (thread_is_running(&scrub->os_thread)) {
381                                 result = -EINPROGRESS;
382                         } else if (!dev->od_noscrub) {
383                                 result = osd_scrub_start(dev);
384                                 LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
385                                                "for "DFID", rc = %d [1]\n",
386                                                LDISKFS_SB(osd_sb(dev))->s_es->\
387                                                s_volume_name,PFID(fid), result);
388                                 if (result == 0 || result == -EALREADY)
389                                         result = -EINPROGRESS;
390                                 else
391                                         result = -EREMCHG;
392                         }
393                 }
394
395                 GOTO(out, result);
396         }
397
398         obj->oo_inode = inode;
399         LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
400
401         obj->oo_compat_dot_created = 1;
402         obj->oo_compat_dotdot_created = 1;
403
404         if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
405                 GOTO(out, result = 0);
406
407         LASSERT(obj->oo_hl_head == NULL);
408         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
409         if (obj->oo_hl_head == NULL) {
410                 obj->oo_inode = NULL;
411                 iput(inode);
412                 GOTO(out, result = -ENOMEM);
413         }
414         GOTO(out, result = 0);
415
416 out:
417         LINVRNT(osd_invariant(obj));
418         return result;
419 }
420
421 /*
422  * Concurrency: shouldn't matter.
423  */
424 static void osd_object_init0(struct osd_object *obj)
425 {
426         LASSERT(obj->oo_inode != NULL);
427         obj->oo_dt.do_body_ops = &osd_body_ops;
428         obj->oo_dt.do_lu.lo_header->loh_attr |=
429                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
430 }
431
432 /*
433  * Concurrency: no concurrent access is possible that early in object
434  * life-cycle.
435  */
436 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
437                            const struct lu_object_conf *conf)
438 {
439         struct osd_object *obj = osd_obj(l);
440         int result;
441
442         LINVRNT(osd_invariant(obj));
443
444         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
445         obj->oo_dt.do_body_ops = &osd_body_ops_new;
446         if (result == 0) {
447                 if (obj->oo_inode != NULL) {
448                         osd_object_init0(obj);
449                 } else if (fid_is_otable_it(&l->lo_header->loh_fid)) {
450                         obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
451                         /* the LFSCK iterator object is special: it has no inode */
452                         l->lo_header->loh_attr |= LOHA_EXISTS;
453                 }
454         }
455         LINVRNT(osd_invariant(obj));
456         return result;
457 }
458
459 /*
460  * Concurrency: no concurrent access is possible that late in object
461  * life-cycle.
462  */
463 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
464 {
465         struct osd_object *obj = osd_obj(l);
466
467         LINVRNT(osd_invariant(obj));
468
469         dt_object_fini(&obj->oo_dt);
470         if (obj->oo_hl_head != NULL)
471                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
472         OBD_FREE_PTR(obj);
473 }
474
475 /*
476  * Concurrency: no concurrent access is possible that late in object
477  * life-cycle.
478  */
479 static void osd_index_fini(struct osd_object *o)
480 {
481         struct iam_container *bag;
482
483         if (o->oo_dir != NULL) {
484                 bag = &o->oo_dir->od_container;
485                 if (o->oo_inode != NULL) {
486                         if (bag->ic_object == o->oo_inode)
487                                 iam_container_fini(bag);
488                 }
489                 OBD_FREE_PTR(o->oo_dir);
490                 o->oo_dir = NULL;
491         }
492 }
493
494 /*
495  * Concurrency: no concurrent access is possible that late in object
496  * life-cycle (for all existing callers, that is. New callers have to provide
497  * their own locking.)
498  */
499 static int osd_inode_unlinked(const struct inode *inode)
500 {
501         return inode->i_nlink == 0;
502 }
503
504 enum {
505         OSD_TXN_OI_DELETE_CREDITS    = 20,
506         OSD_TXN_INODE_DELETE_CREDITS = 20
507 };
508
509 /*
510  * Journal
511  */
512
513 #if OSD_THANDLE_STATS
514 /**
515  * Set time when the handle is allocated
516  */
517 static void osd_th_alloced(struct osd_thandle *oth)
518 {
519         oth->oth_alloced = cfs_time_current();
520 }
521
522 /**
523  * Set time when the handle started
524  */
525 static void osd_th_started(struct osd_thandle *oth)
526 {
527         oth->oth_started = cfs_time_current();
528 }
529
530 /**
531  * Helper function to convert time interval to microseconds packed in
532  * long int.
533  */
534 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
535 {
536         struct timeval val;
537
538         cfs_duration_usec(cfs_time_sub(end, start), &val);
539         return val.tv_sec * 1000000 + val.tv_usec;
540 }
541
542 /**
543  * Check whether we have been dealing with this handle for too long.
544  */
545 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
546                                 cfs_time_t alloced, cfs_time_t started,
547                                 cfs_time_t closed)
548 {
549         cfs_time_t now = cfs_time_current();
550
551         LASSERT(dev != NULL);
552
553         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
554                             interval_to_usec(alloced, started));
555         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
556                             interval_to_usec(started, closed));
557         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
558                             interval_to_usec(closed, now));
559
560         if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
561                 CWARN("transaction handle %p was open for too long: "
562                       "now "CFS_TIME_T" ,"
563                       "alloced "CFS_TIME_T" ,"
564                       "started "CFS_TIME_T" ,"
565                       "closed "CFS_TIME_T"\n",
566                       oth, now, alloced, started, closed);
567                 libcfs_debug_dumpstack(NULL);
568         }
569 }
570
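/*
 * OSD_CHECK_SLOW_TH() evaluates "expr" (normally the journal stop call)
 * and feeds the handle's alloced/started/closed timestamps to
 * __osd_th_check_slow(), which updates the thandle lprocfs counters and
 * warns when a handle stayed open for more than 30 seconds. Without
 * OSD_THANDLE_STATS it expands to just "expr".
 */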
571 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
572 {                                                                       \
573         cfs_time_t __closed = cfs_time_current();                       \
574         cfs_time_t __alloced = oth->oth_alloced;                        \
575         cfs_time_t __started = oth->oth_started;                        \
576                                                                         \
577         expr;                                                           \
578         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
579 }
580
581 #else /* OSD_THANDLE_STATS */
582
583 #define osd_th_alloced(h)                  do {} while(0)
584 #define osd_th_started(h)                  do {} while(0)
585 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
586
587 #endif /* OSD_THANDLE_STATS */
588
589 /*
590  * Concurrency: doesn't access mutable data.
591  */
592 static int osd_param_is_not_sane(const struct osd_device *dev,
593                                  const struct thandle *th)
594 {
595         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
596
597         return oh->ot_credits > osd_journal(dev)->j_max_transaction_buffers;
598 }
599
600 /*
601  * Concurrency: shouldn't matter.
602  */
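/*
 * Journal commit callback, invoked once the transaction has committed:
 * it runs dt_txn_hook_commit(), fires the per-transaction callbacks
 * registered via osd_trans_cb_add(), drops the device reference and
 * frees the osd_thandle.
 */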
603 #ifdef HAVE_LDISKFS_JOURNAL_CALLBACK_ADD
604 static void osd_trans_commit_cb(struct super_block *sb,
605                                 struct journal_callback *jcb, int error)
606 #else
607 static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
608 #endif
609 {
610         struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
611         struct thandle     *th  = &oh->ot_super;
612         struct lu_device   *lud = &th->th_dev->dd_lu_dev;
613         struct dt_txn_commit_cb *dcb, *tmp;
614
615         LASSERT(oh->ot_handle == NULL);
616
617         if (error)
618                 CERROR("transaction @0x%p commit error: %d\n", th, error);
619
620         dt_txn_hook_commit(th);
621
622         /* call per-transaction callbacks if any */
623         cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
624                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
625                          "commit callback entry: magic=%x name='%s'\n",
626                          dcb->dcb_magic, dcb->dcb_name);
627                 cfs_list_del_init(&dcb->dcb_linkage);
628                 dcb->dcb_func(NULL, th, dcb, error);
629         }
630
631         lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
632         lu_device_put(lud);
633         th->th_dev = NULL;
634
635         lu_context_exit(&th->th_ctx);
636         lu_context_fini(&th->th_ctx);
637         OBD_FREE_PTR(oh);
638 }
639
640 static struct thandle *osd_trans_create(const struct lu_env *env,
641                                         struct dt_device *d)
642 {
643         struct osd_thread_info *oti = osd_oti_get(env);
644         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
645         struct osd_thandle     *oh;
646         struct thandle         *th;
647         ENTRY;
648
649         /* no pending IO in this thread should be left over from a previous request */
650         LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
651
652         th = ERR_PTR(-ENOMEM);
653         OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
654         if (oh != NULL) {
655                 oh->ot_quota_trans = &oti->oti_quota_trans;
656                 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
657                 th = &oh->ot_super;
658                 th->th_dev = d;
659                 th->th_result = 0;
660                 th->th_tags = LCT_TX_HANDLE;
661                 oh->ot_credits = 0;
662                 oti->oti_dev = osd_dt_dev(d);
663                 CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
664                 osd_th_alloced(oh);
665         }
666         RETURN(th);
667 }
668
669 /*
670  * Concurrency: shouldn't matter.
671  */
672 int osd_trans_start(const struct lu_env *env, struct dt_device *d,
673                     struct thandle *th)
674 {
675         struct osd_thread_info *oti = osd_oti_get(env);
676         struct osd_device  *dev = osd_dt_dev(d);
677         handle_t           *jh;
678         struct osd_thandle *oh;
679         int rc;
680
681         ENTRY;
682
683         LASSERT(current->journal_info == NULL);
684
685         oh = container_of0(th, struct osd_thandle, ot_super);
686         LASSERT(oh != NULL);
687         LASSERT(oh->ot_handle == NULL);
688
689         rc = dt_txn_hook_start(env, d, th);
690         if (rc != 0)
691                 GOTO(out, rc);
692
693         if (unlikely(osd_param_is_not_sane(dev, th))) {
694                 static unsigned long last_printed;
695                 static int last_credits;
696
697                 CWARN("%.16s: too many transaction credits (%d > %d)\n",
698                       LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
699                       oh->ot_credits,
700                       osd_journal(dev)->j_max_transaction_buffers);
701 #ifdef OSD_TRACK_DECLARES
702                 CWARN("  create: %u/%u, delete: %u/%u, destroy: %u/%u\n",
703                       oh->ot_declare_create, oh->ot_declare_create_cred,
704                       oh->ot_declare_delete, oh->ot_declare_delete_cred,
705                       oh->ot_declare_destroy, oh->ot_declare_destroy_cred);
706                 CWARN("  attr_set: %u/%u, xattr_set: %u/%u\n",
707                       oh->ot_declare_attr_set, oh->ot_declare_attr_set_cred,
708                       oh->ot_declare_xattr_set, oh->ot_declare_xattr_set_cred);
709                 CWARN("  write: %u/%u, punch: %u/%u, quota %u/%u\n",
710                       oh->ot_declare_write, oh->ot_declare_write_cred,
711                       oh->ot_declare_punch, oh->ot_declare_punch_cred,
712                       oh->ot_declare_quota, oh->ot_declare_quota_cred);
713                 CWARN("  insert: %u/%u, delete: %u/%u\n",
714                       oh->ot_declare_insert, oh->ot_declare_insert_cred,
715                       oh->ot_declare_delete, oh->ot_declare_destroy_cred);
716                 CWARN("  ref_add: %u/%u, ref_del: %u/%u\n",
717                       oh->ot_declare_ref_add, oh->ot_declare_ref_add_cred,
718                       oh->ot_declare_ref_del, oh->ot_declare_ref_del_cred);
719
720                 if (last_credits != oh->ot_credits &&
721                     time_after(jiffies, last_printed + 60 * HZ)) {
722                         libcfs_debug_dumpstack(NULL);
723                         last_credits = oh->ot_credits;
724                         last_printed = jiffies;
725                 }
726 #endif
727                 /* XXX Limit the credits to 'max_transaction_buffers', and
728                  *     let the underlying filesystem catch the error if
729                  *     we really need that many credits.
730                  *
731                  *     This should be removed when we can calculate the
732                  *     credits precisely. */
733                 oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
734         }
735
736         /*
737          * XXX temporary stuff. Some abstraction layer should
738          * be used.
739          */
740         jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
741         osd_th_started(oh);
742         if (!IS_ERR(jh)) {
743                 oh->ot_handle = jh;
744                 LASSERT(oti->oti_txns == 0);
745                 lu_context_init(&th->th_ctx, th->th_tags);
746                 lu_context_enter(&th->th_ctx);
747
748                 lu_device_get(&d->dd_lu_dev);
749                 oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
750                                              "osd-tx", th);
751                 oti->oti_txns++;
752                 rc = 0;
753         } else {
754                 rc = PTR_ERR(jh);
755         }
756 out:
757         RETURN(rc);
758 }
759
760 /*
761  * Concurrency: shouldn't matter.
762  */
763 static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
764 {
765         int                     rc = 0;
766         struct osd_thandle     *oh;
767         struct osd_thread_info *oti = osd_oti_get(env);
768         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
769         struct qsd_instance    *qsd = oti->oti_dev->od_quota_slave;
770         ENTRY;
771
772         oh = container_of0(th, struct osd_thandle, ot_super);
773
774         if (qsd != NULL)
775                 /* inform the quota slave device that the transaction is
776                  * stopping */
777                 qsd_op_end(env, qsd, oh->ot_quota_trans);
778         oh->ot_quota_trans = NULL;
779
780         if (oh->ot_handle != NULL) {
781                 handle_t *hdl = oh->ot_handle;
782
783                 /*
784                  * add commit callback
785                  * notice we don't do this in osd_trans_start()
786                  * as underlying transaction can change during truncate
787                  */
788                 osd_journal_callback_set(hdl, osd_trans_commit_cb,
789                                          &oh->ot_jcb);
790
791                 LASSERT(oti->oti_txns == 1);
792                 oti->oti_txns--;
793                 rc = dt_txn_hook_stop(env, th);
794                 if (rc != 0)
795                         CERROR("Failure in transaction hook: %d\n", rc);
796
797                 /* hook functions might modify th_sync */
798                 hdl->h_sync = th->th_sync;
799
800                 oh->ot_handle = NULL;
801                 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
802                                   rc = ldiskfs_journal_stop(hdl));
803                 if (rc != 0)
804                         CERROR("Failure to stop transaction: %d\n", rc);
805         } else {
806                 OBD_FREE_PTR(oh);
807         }
808
809         /* As we want journal IO and data IO to be concurrent, we don't block
810          * awaiting data IO completion in osd_do_bio(); instead we wait here
811          * once the transaction is submitted to the journal. Regular requests
812          * don't do direct IO (except read/write), so this wait_event becomes
813          * a no-op for them.
814          *
815          * IMPORTANT: we have to wait until any IO submitted by the thread is
816          * completed, otherwise the iobuf may be corrupted by a different
817          * request. */
818         cfs_wait_event(iobuf->dr_wait,
819                        cfs_atomic_read(&iobuf->dr_numreqs) == 0);
820         if (!rc)
821                 rc = iobuf->dr_error;
822
823         RETURN(rc);
824 }
825
826 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
827 {
828         struct osd_thandle *oh = container_of0(th, struct osd_thandle,
829                                                ot_super);
830
831         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
832         LASSERT(dcb->dcb_func != NULL);
833         cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
834
835         return 0;
836 }
837
838 /*
839  * Called just before object is freed. Releases all resources except for
840  * object itself (that is released by osd_object_free()).
841  *
842  * Concurrency: no concurrent access is possible that late in object
843  * life-cycle.
844  */
845 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
846 {
847         struct osd_object *obj   = osd_obj(l);
848         struct inode      *inode = obj->oo_inode;
849
850         LINVRNT(osd_invariant(obj));
851
852         /*
853          * If object is unlinked remove fid->ino mapping from object index.
854          */
855
856         osd_index_fini(obj);
857         if (inode != NULL) {
858                 struct qsd_instance     *qsd = osd_obj2dev(obj)->od_quota_slave;
859                 qid_t                    uid = inode->i_uid;
860                 qid_t                    gid = inode->i_gid;
861
862                 iput(inode);
863                 obj->oo_inode = NULL;
864
865                 if (qsd != NULL) {
866                         struct osd_thread_info  *info = osd_oti_get(env);
867                         struct lquota_id_info   *qi = &info->oti_qi;
868
869                         /* Release granted quota to master if necessary */
870                         qi->lqi_id.qid_uid = uid;
871                         qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
872
873                         qi->lqi_id.qid_uid = gid;
874                         qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
875                 }
876         }
877 }
878
879 /*
880  * Concurrency: ->loo_object_release() is called under site spin-lock.
881  */
882 static void osd_object_release(const struct lu_env *env,
883                                struct lu_object *l)
884 {
885 }
886
887 /*
888  * Concurrency: shouldn't matter.
889  */
890 static int osd_object_print(const struct lu_env *env, void *cookie,
891                             lu_printer_t p, const struct lu_object *l)
892 {
893         struct osd_object *o = osd_obj(l);
894         struct iam_descr  *d;
895
896         if (o->oo_dir != NULL)
897                 d = o->oo_dir->od_container.ic_descr;
898         else
899                 d = NULL;
900         return (*p)(env, cookie,
901                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
902                     o, o->oo_inode,
903                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
904                     o->oo_inode ? o->oo_inode->i_generation : 0,
905                     d ? d->id_ops->id_name : "plain");
906 }
907
908 /*
909  * Concurrency: shouldn't matter.
910  */
911 int osd_statfs(const struct lu_env *env, struct dt_device *d,
912                struct obd_statfs *sfs)
913 {
914         struct osd_device  *osd = osd_dt_dev(d);
915         struct super_block *sb = osd_sb(osd);
916         struct kstatfs     *ksfs;
917         int result = 0;
918
919         if (unlikely(osd->od_mnt == NULL))
920                 return -EINPROGRESS;
921
922         /* osd_lproc.c calls this without an env; allocate ksfs for that case */
923         if (unlikely(env == NULL)) {
924                 OBD_ALLOC_PTR(ksfs);
925                 if (ksfs == NULL)
926                         return -ENOMEM;
927         } else {
928                 ksfs = &osd_oti_get(env)->oti_ksfs;
929         }
930
931         spin_lock(&osd->od_osfs_lock);
932         /* cache 1 second */
933         if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
934                 result = sb->s_op->statfs(sb->s_root, ksfs);
935                 if (likely(result == 0)) { /* N.B. statfs can't really fail */
936                         osd->od_osfs_age = cfs_time_current_64();
937                         statfs_pack(&osd->od_statfs, ksfs);
938                         if (sb->s_flags & MS_RDONLY)
939                                 sfs->os_state = OS_STATE_READONLY;
940                 }
941         }
942
943         if (likely(result == 0))
944                 *sfs = osd->od_statfs;
945         spin_unlock(&osd->od_osfs_lock);
946
947         if (unlikely(env == NULL))
948                 OBD_FREE_PTR(ksfs);
949
950         return result;
951 }
952
953 /**
954  * Estimate space needed for file creations. We assume the largest object ID,
955  * 2^64 - 1, hence a filename of 20 characters.
956  * This is 28 bytes per object, i.e. 28MB for 1M objects ... not so bad.
957  */
958 #ifdef __LDISKFS_DIR_REC_LEN
959 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
960 #else
961 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
962 #endif
963
964 /*
965  * Concurrency: doesn't access mutable data.
966  */
967 static void osd_conf_get(const struct lu_env *env,
968                          const struct dt_device *dev,
969                          struct dt_device_param *param)
970 {
971         struct super_block *sb = osd_sb(osd_dt_dev(dev));
972
973         /*
974          * XXX should be taken from not-yet-existing fs abstraction layer.
975          */
976         param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
977         param->ddp_max_name_len = LDISKFS_NAME_LEN;
978         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
979         param->ddp_block_shift  = sb->s_blocksize_bits;
980         param->ddp_mount_type     = LDD_MT_LDISKFS;
981         param->ddp_maxbytes       = sb->s_maxbytes;
982         /* The overhead estimate should be fairly accurate, so we take only a
983          * tiny error margin, which also avoids fragmenting the filesystem too much */
984         param->ddp_grant_reserved = 2; /* ends up being 1.9% after conversion */
985         /* inodes are statically allocated, so per-inode space consumption
986          * is the space consumed by the directory entry */
987         param->ddp_inodespace     = PER_OBJ_USAGE;
988         /* per-fragment overhead to be used by the client code */
989         param->ddp_grant_frag     = 6 * LDISKFS_BLOCK_SIZE(sb);
990         param->ddp_mntopts      = 0;
991         if (test_opt(sb, XATTR_USER))
992                 param->ddp_mntopts |= MNTOPT_USERXATTR;
993         if (test_opt(sb, POSIX_ACL))
994                 param->ddp_mntopts |= MNTOPT_ACL;
995
996 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
997         if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
998                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
999         else
1000 #endif
1001                 param->ddp_max_ea_size = sb->s_blocksize;
1002
1003 }
1004
1005 /**
1006  * Helper function to get and fill the buffer with input values.
1007  */
1008 static struct lu_buf *osd_buf_get(const struct lu_env *env, void *area, ssize_t len)
1009 {
1010         struct lu_buf *buf;
1011
1012         buf = &osd_oti_get(env)->oti_buf;
1013         buf->lb_buf = area;
1014         buf->lb_len = len;
1015         return buf;
1016 }
1017
1018 /*
1019  * Concurrency: shouldn't matter.
1020  */
1021 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1022 {
1023         CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1024         return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1025 }
1026
1027 /**
1028  * Start commit for OSD device.
1029  *
1030  * An implementation of dt_commit_async method for OSD device.
1031  * Asynchronously starts the underlying fs sync and thereby a transaction
1032  * commit.
1033  *
1034  * \param env environment
1035  * \param d dt device
1036  *
1037  * \see dt_device_operations
1038  */
1039 static int osd_commit_async(const struct lu_env *env,
1040                             struct dt_device *d)
1041 {
1042         struct super_block *s = osd_sb(osd_dt_dev(d));
1043         ENTRY;
1044
1045         CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1046         RETURN(s->s_op->sync_fs(s, 0));
1047 }
1048
1049 /*
1050  * Concurrency: shouldn't matter.
1051  */
1052
1053 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1054 {
1055         struct super_block *sb = osd_sb(osd_dt_dev(d));
1056         int rc;
1057         ENTRY;
1058
1059         CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1060
1061         rc = __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
1062         RETURN(rc);
1063 }
1064
1065 /*
1066  * Concurrency: serialization provided by callers.
1067  */
1068 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
1069                               int mode, unsigned long timeout, __u32 alg,
1070                               struct lustre_capa_key *keys)
1071 {
1072         struct osd_device *dev = osd_dt_dev(d);
1073         ENTRY;
1074
1075         dev->od_fl_capa = mode;
1076         dev->od_capa_timeout = timeout;
1077         dev->od_capa_alg = alg;
1078         dev->od_capa_keys = keys;
1079         RETURN(0);
1080 }
1081
1082 /**
1083  * Note: quota credits are not counted here.
1084  * If we mount with --data_journal we may need more.
1085  */
1086 const int osd_dto_credits_noquota[DTO_NR] = {
1087         /**
1088          * Insert/Delete.
1089          * INDEX_EXTRA_TRANS_BLOCKS(8) +
1090          * SINGLEDATA_TRANS_BLOCKS(8)
1091  * XXX Note: maybe IAM needs more, since IAM has more levels than the
1092  *           EXT3 htree.
1093          */
1094         [DTO_INDEX_INSERT]  = 16,
1095         [DTO_INDEX_DELETE]  = 16,
1096         /**
1097          * Used for OI scrub
1098          */
1099         [DTO_INDEX_UPDATE]  = 16,
1100         /**
1101  * Create an object. The same as creating an object in EXT3.
1102          * DATA_TRANS_BLOCKS(14) +
1103          * INDEX_EXTRA_BLOCKS(8) +
1104          * 3(inode bits, groups, GDT)
1105          */
1106         [DTO_OBJECT_CREATE] = 25,
1107         /**
1108          * XXX: real credits to be fixed
1109          */
1110         [DTO_OBJECT_DELETE] = 25,
1111         /**
1112          * Attr set credits (inode)
1113          */
1114         [DTO_ATTR_SET_BASE] = 1,
1115         /**
1116          * Xattr set. The same as xattr of EXT3.
1117          * DATA_TRANS_BLOCKS(14)
1118  * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
1119  * is also counted in; we do not know why.
1120          */
1121         [DTO_XATTR_SET]     = 14,
1122         [DTO_LOG_REC]       = 14,
1123         /**
1124          * credits for inode change during write.
1125          */
1126         [DTO_WRITE_BASE]    = 3,
1127         /**
1128          * credits for single block write.
1129          */
1130         [DTO_WRITE_BLOCK]   = 14,
1131         /**
1132          * Attr set credits for chown.
1133  * These are extra credits for setattr, and they are zero without quota.
1134          */
1135         [DTO_ATTR_SET_CHOWN]= 0
1136 };
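/*
 * These per-operation estimates are accumulated into oh->ot_credits by
 * the osd_declare_*() methods (e.g. via OSD_DECLARE_OP); osd_trans_start()
 * then caps the total at the journal's j_max_transaction_buffers if the
 * estimate turns out to be too large.
 */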
1137
1138 static const struct dt_device_operations osd_dt_ops = {
1139         .dt_root_get       = osd_root_get,
1140         .dt_statfs         = osd_statfs,
1141         .dt_trans_create   = osd_trans_create,
1142         .dt_trans_start    = osd_trans_start,
1143         .dt_trans_stop     = osd_trans_stop,
1144         .dt_trans_cb_add   = osd_trans_cb_add,
1145         .dt_conf_get       = osd_conf_get,
1146         .dt_sync           = osd_sync,
1147         .dt_ro             = osd_ro,
1148         .dt_commit_async   = osd_commit_async,
1149         .dt_init_capa_ctxt = osd_init_capa_ctxt,
1150 };
1151
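/*
 * Object lock methods: each osd_object carries an rw-semaphore (oo_sem).
 * The per-thread counters oti_r_locks/oti_w_locks and the oo_owner field
 * are maintained alongside it so osd_{read,write}_locked() and
 * osd_object_write_locked() can assert the current lock state.
 */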
1152 static void osd_object_read_lock(const struct lu_env *env,
1153                                  struct dt_object *dt, unsigned role)
1154 {
1155         struct osd_object *obj = osd_dt_obj(dt);
1156         struct osd_thread_info *oti = osd_oti_get(env);
1157
1158         LINVRNT(osd_invariant(obj));
1159
1160         LASSERT(obj->oo_owner != env);
1161         down_read_nested(&obj->oo_sem, role);
1162
1163         LASSERT(obj->oo_owner == NULL);
1164         oti->oti_r_locks++;
1165 }
1166
1167 static void osd_object_write_lock(const struct lu_env *env,
1168                                   struct dt_object *dt, unsigned role)
1169 {
1170         struct osd_object *obj = osd_dt_obj(dt);
1171         struct osd_thread_info *oti = osd_oti_get(env);
1172
1173         LINVRNT(osd_invariant(obj));
1174
1175         LASSERT(obj->oo_owner != env);
1176         down_write_nested(&obj->oo_sem, role);
1177
1178         LASSERT(obj->oo_owner == NULL);
1179         obj->oo_owner = env;
1180         oti->oti_w_locks++;
1181 }
1182
1183 static void osd_object_read_unlock(const struct lu_env *env,
1184                                    struct dt_object *dt)
1185 {
1186         struct osd_object *obj = osd_dt_obj(dt);
1187         struct osd_thread_info *oti = osd_oti_get(env);
1188
1189         LINVRNT(osd_invariant(obj));
1190
1191         LASSERT(oti->oti_r_locks > 0);
1192         oti->oti_r_locks--;
1193         up_read(&obj->oo_sem);
1194 }
1195
1196 static void osd_object_write_unlock(const struct lu_env *env,
1197                                     struct dt_object *dt)
1198 {
1199         struct osd_object *obj = osd_dt_obj(dt);
1200         struct osd_thread_info *oti = osd_oti_get(env);
1201
1202         LINVRNT(osd_invariant(obj));
1203
1204         LASSERT(obj->oo_owner == env);
1205         LASSERT(oti->oti_w_locks > 0);
1206         oti->oti_w_locks--;
1207         obj->oo_owner = NULL;
1208         up_write(&obj->oo_sem);
1209 }
1210
1211 static int osd_object_write_locked(const struct lu_env *env,
1212                                    struct dt_object *dt)
1213 {
1214         struct osd_object *obj = osd_dt_obj(dt);
1215
1216         LINVRNT(osd_invariant(obj));
1217
1218         return obj->oo_owner == env;
1219 }
1220
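/*
 * Verify a client-supplied capability: check the capa cache first, then
 * the expiry time, then find the matching key by keyid and compare the
 * HMAC; a successfully verified capability is added to the cache so that
 * subsequent checks are cheap.
 */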
1221 static int capa_is_sane(const struct lu_env *env,
1222                         struct osd_device *dev,
1223                         struct lustre_capa *capa,
1224                         struct lustre_capa_key *keys)
1225 {
1226         struct osd_thread_info *oti = osd_oti_get(env);
1227         struct lustre_capa *tcapa = &oti->oti_capa;
1228         struct obd_capa *oc;
1229         int i, rc = 0;
1230         ENTRY;
1231
1232         oc = capa_lookup(dev->od_capa_hash, capa, 0);
1233         if (oc) {
1234                 if (capa_is_expired(oc)) {
1235                         DEBUG_CAPA(D_ERROR, capa, "expired");
1236                         rc = -ESTALE;
1237                 }
1238                 capa_put(oc);
1239                 RETURN(rc);
1240         }
1241
1242         if (capa_is_expired_sec(capa)) {
1243                 DEBUG_CAPA(D_ERROR, capa, "expired");
1244                 RETURN(-ESTALE);
1245         }
1246
1247         spin_lock(&capa_lock);
1248         for (i = 0; i < 2; i++) {
1249                 if (keys[i].lk_keyid == capa->lc_keyid) {
1250                         oti->oti_capa_key = keys[i];
1251                         break;
1252                 }
1253         }
1254         spin_unlock(&capa_lock);
1255
1256         if (i == 2) {
1257                 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1258                 RETURN(-ESTALE);
1259         }
1260
1261         rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1262         if (rc)
1263                 RETURN(rc);
1264
1265         if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1266                 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1267                 RETURN(-EACCES);
1268         }
1269
1270         oc = capa_add(dev->od_capa_hash, capa);
1271         capa_put(oc);
1272
1273         RETURN(0);
1274 }
1275
1276 int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1277                     struct lustre_capa *capa, __u64 opc)
1278 {
1279         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1280         struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1281         struct md_capainfo *ci;
1282         int rc;
1283
1284         if (!dev->od_fl_capa)
1285                 return 0;
1286
1287         if (capa == BYPASS_CAPA)
1288                 return 0;
1289
1290         ci = md_capainfo(env);
1291         if (unlikely(!ci))
1292                 return 0;
1293
1294         if (ci->mc_auth == LC_ID_NONE)
1295                 return 0;
1296
1297         if (!capa) {
1298                 CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
1299                 return -EACCES;
1300         }
1301
1302         if (!lu_fid_eq(fid, &capa->lc_fid)) {
1303                 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1304                            PFID(fid));
1305                 return -EACCES;
1306         }
1307
1308         if (!capa_opc_supported(capa, opc)) {
1309                 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1310                 return -EACCES;
1311         }
1312
1313         if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
1314                 DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
1315                 return -EACCES;
1316         }
1317
1318         return 0;
1319 }
1320
1321 static struct timespec *osd_inode_time(const struct lu_env *env,
1322                                        struct inode *inode, __u64 seconds)
1323 {
1324         struct osd_thread_info  *oti = osd_oti_get(env);
1325         struct timespec         *t   = &oti->oti_time;
1326
1327         t->tv_sec = seconds;
1328         t->tv_nsec = 0;
1329         *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
1330         return t;
1331 }
1332
1333
1334 static void osd_inode_getattr(const struct lu_env *env,
1335                               struct inode *inode, struct lu_attr *attr)
1336 {
1337         attr->la_valid      |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1338                                LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1339                                LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
1340
1341         attr->la_atime      = LTIME_S(inode->i_atime);
1342         attr->la_mtime      = LTIME_S(inode->i_mtime);
1343         attr->la_ctime      = LTIME_S(inode->i_ctime);
1344         attr->la_mode       = inode->i_mode;
1345         attr->la_size       = i_size_read(inode);
1346         attr->la_blocks     = inode->i_blocks;
1347         attr->la_uid        = inode->i_uid;
1348         attr->la_gid        = inode->i_gid;
1349         attr->la_flags      = LDISKFS_I(inode)->i_flags;
1350         attr->la_nlink      = inode->i_nlink;
1351         attr->la_rdev       = inode->i_rdev;
1352         attr->la_blksize    = 1 << inode->i_blkbits;
1353         attr->la_blkbits    = inode->i_blkbits;
1354 }
1355
1356 static int osd_attr_get(const struct lu_env *env,
1357                         struct dt_object *dt,
1358                         struct lu_attr *attr,
1359                         struct lustre_capa *capa)
1360 {
1361         struct osd_object *obj = osd_dt_obj(dt);
1362
1363         LASSERT(dt_object_exists(dt));
1364         LINVRNT(osd_invariant(obj));
1365
1366         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1367                 return -EACCES;
1368
1369         spin_lock(&obj->oo_guard);
1370         osd_inode_getattr(env, obj->oo_inode, attr);
1371         spin_unlock(&obj->oo_guard);
1372         return 0;
1373 }
1374
1375 static int osd_declare_attr_set(const struct lu_env *env,
1376                                 struct dt_object *dt,
1377                                 const struct lu_attr *attr,
1378                                 struct thandle *handle)
1379 {
1380         struct osd_thandle     *oh;
1381         struct osd_object      *obj;
1382         struct osd_thread_info *info = osd_oti_get(env);
1383         struct lquota_id_info  *qi = &info->oti_qi;
1384         long long               bspace;
1385         int                     rc = 0;
1386         bool                    allocated;
1387         ENTRY;
1388
1389         LASSERT(dt != NULL);
1390         LASSERT(handle != NULL);
1391
1392         obj = osd_dt_obj(dt);
1393         LASSERT(osd_invariant(obj));
1394
1395         oh = container_of0(handle, struct osd_thandle, ot_super);
1396         LASSERT(oh->ot_handle == NULL);
1397
1398         OSD_DECLARE_OP(oh, attr_set,
1399                        osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1400
1401         if (attr == NULL || obj->oo_inode == NULL)
1402                 RETURN(rc);
1403
1404         bspace   = obj->oo_inode->i_blocks;
1405         bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1406         bspace   = toqb(bspace);
1407
1408         /* Changing ownership is always performed by the super user, so it
1409          * should not fail with EDQUOT.
1410          *
1411          * We still need to call osd_declare_qid() to calculate the journal
1412          * credits for updating the quota accounting files and to trigger quota
1413          * space adjustment once the operation is completed. */
1414         if ((attr->la_valid & LA_UID) != 0 &&
1415              attr->la_uid != obj->oo_inode->i_uid) {
1416                 qi->lqi_type = USRQUOTA;
1417
1418                 /* inode accounting */
1419                 qi->lqi_is_blk = false;
1420
1421                 /* one more inode for the new owner ... */
1422                 qi->lqi_id.qid_uid = attr->la_uid;
1423                 qi->lqi_space      = 1;
1424                 allocated = (attr->la_uid == 0) ? true : false;
1425                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1426                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1427                         rc = 0;
1428                 if (rc)
1429                         RETURN(rc);
1430
1431                 /* and one less inode for the current uid */
1432                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1433                 qi->lqi_space      = -1;
1434                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1435                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1436                         rc = 0;
1437                 if (rc)
1438                         RETURN(rc);
1439
1440                 /* block accounting */
1441                 qi->lqi_is_blk = true;
1442
1443                 /* more blocks for the new owner ... */
1444                 qi->lqi_id.qid_uid = attr->la_uid;
1445                 qi->lqi_space      = bspace;
1446                 allocated = (attr->la_uid == 0) ? true : false;
1447                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1448                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1449                         rc = 0;
1450                 if (rc)
1451                         RETURN(rc);
1452
1453                 /* and finally less blocks for the current owner */
1454                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1455                 qi->lqi_space      = -bspace;
1456                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1457                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1458                         rc = 0;
1459                 if (rc)
1460                         RETURN(rc);
1461         }
1462
1463         if (attr->la_valid & LA_GID &&
1464             attr->la_gid != obj->oo_inode->i_gid) {
1465                 qi->lqi_type = GRPQUOTA;
1466
1467                 /* inode accounting */
1468                 qi->lqi_is_blk = false;
1469
1470                 /* one more inode for the new group owner ... */
1471                 qi->lqi_id.qid_gid = attr->la_gid;
1472                 qi->lqi_space      = 1;
1473                 allocated = (attr->la_gid == 0) ? true : false;
1474                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1475                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1476                         rc = 0;
1477                 if (rc)
1478                         RETURN(rc);
1479
1480                 /* and one less inode for the current gid */
1481                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1482                 qi->lqi_space      = -1;
1483                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1484                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1485                         rc = 0;
1486                 if (rc)
1487                         RETURN(rc);
1488
1489                 /* block accounting */
1490                 qi->lqi_is_blk = true;
1491
1492                 /* more blocks for the new owner ... */
1493                 qi->lqi_id.qid_gid = attr->la_gid;
1494                 qi->lqi_space      = bspace;
1495                 allocated = (attr->la_gid == 0) ? true : false;
1496                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1497                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1498                         rc = 0;
1499                 if (rc)
1500                         RETURN(rc);
1501
1502                 /* and finally less blocks for the current owner */
1503                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1504                 qi->lqi_space      = -bspace;
1505                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1506                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1507                         rc = 0;
1508                 if (rc)
1509                         RETURN(rc);
1510         }
1511
1512         RETURN(rc);
1513 }
1514
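     /* Copy the attributes selected by attr->la_valid into the in-core
      * inode; the caller is responsible for marking the inode dirty. */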
1515 static int osd_inode_setattr(const struct lu_env *env,
1516                              struct inode *inode, const struct lu_attr *attr)
1517 {
1518         __u64 bits;
1519
1520         bits = attr->la_valid;
1521
1522         LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
1523
1524         if (bits & LA_ATIME)
1525                 inode->i_atime  = *osd_inode_time(env, inode, attr->la_atime);
1526         if (bits & LA_CTIME)
1527                 inode->i_ctime  = *osd_inode_time(env, inode, attr->la_ctime);
1528         if (bits & LA_MTIME)
1529                 inode->i_mtime  = *osd_inode_time(env, inode, attr->la_mtime);
1530         if (bits & LA_SIZE) {
1531                 LDISKFS_I(inode)->i_disksize = attr->la_size;
1532                 i_size_write(inode, attr->la_size);
1533         }
1534
1535 #if 0
1536         /* OSD should not change "i_blocks" which is used by quota.
1537          * "i_blocks" should be changed by ldiskfs only. */
1538         if (bits & LA_BLOCKS)
1539                 inode->i_blocks = attr->la_blocks;
1540 #endif
1541         if (bits & LA_MODE)
1542                 inode->i_mode   = (inode->i_mode & S_IFMT) |
1543                         (attr->la_mode & ~S_IFMT);
1544         if (bits & LA_UID)
1545                 inode->i_uid    = attr->la_uid;
1546         if (bits & LA_GID)
1547                 inode->i_gid    = attr->la_gid;
1548         if (bits & LA_NLINK)
1549                 set_nlink(inode, attr->la_nlink);
1550         if (bits & LA_RDEV)
1551                 inode->i_rdev   = attr->la_rdev;
1552
1553         if (bits & LA_FLAGS) {
1554                 /* always keep S_NOCMTIME */
1555                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1556                                  S_NOCMTIME;
1557         }
1558         return 0;
1559 }
1560
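     /* When ownership is about to change, transfer the inode's quota usage
      * to the new uid/gid through the VFS quota hooks. */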
1561 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1562 {
1563         if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
1564             (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
1565                 struct iattr    iattr;
1566                 int             rc;
1567
1568                 iattr.ia_valid = 0;
1569                 if (attr->la_valid & LA_UID)
1570                         iattr.ia_valid |= ATTR_UID;
1571                 if (attr->la_valid & LA_GID)
1572                         iattr.ia_valid |= ATTR_GID;
1573                 iattr.ia_uid = attr->la_uid;
1574                 iattr.ia_gid = attr->la_gid;
1575
1576                 rc = ll_vfs_dq_transfer(inode, &iattr);
1577                 if (rc) {
1578                         CERROR("%s: quota transfer failed: rc = %d. Is quota "
1579                                "enforcement enabled on the ldiskfs filesystem?\n",
1580                                inode->i_sb->s_id, rc);
1581                         return rc;
1582                 }
1583         }
1584         return 0;
1585 }
1586
1587 static int osd_attr_set(const struct lu_env *env,
1588                         struct dt_object *dt,
1589                         const struct lu_attr *attr,
1590                         struct thandle *handle,
1591                         struct lustre_capa *capa)
1592 {
1593         struct osd_object *obj = osd_dt_obj(dt);
1594         struct inode      *inode;
1595         int rc;
1596
1597         LASSERT(handle != NULL);
1598         LASSERT(dt_object_exists(dt));
1599         LASSERT(osd_invariant(obj));
1600
1601         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1602                 return -EACCES;
1603
1604         OSD_EXEC_OP(handle, attr_set);
1605
1606         inode = obj->oo_inode;
1607         ll_vfs_dq_init(inode);
1608
1609         rc = osd_quota_transfer(inode, attr);
1610         if (rc)
1611                 return rc;
1612
1613         spin_lock(&obj->oo_guard);
1614         rc = osd_inode_setattr(env, inode, attr);
1615         spin_unlock(&obj->oo_guard);
1616
1617         if (!rc)
1618                 inode->i_sb->s_op->dirty_inode(inode);
1619         return rc;
1620 }
1621
1622 struct dentry *osd_child_dentry_get(const struct lu_env *env,
1623                                     struct osd_object *obj,
1624                                     const char *name, const int namelen)
1625 {
1626         return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
1627 }
1628
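     /* Allocate a new inode of the given mode inside the running transaction
      * and attach it to the object. For directories a htree lock head is
      * pre-allocated when parallel directory operations are enabled. */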
1629 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1630                       cfs_umode_t mode,
1631                       struct dt_allocation_hint *hint,
1632                       struct thandle *th)
1633 {
1634         int result;
1635         struct osd_device  *osd = osd_obj2dev(obj);
1636         struct osd_thandle *oth;
1637         struct dt_object   *parent = NULL;
1638         struct inode       *inode;
1639
1640         LINVRNT(osd_invariant(obj));
1641         LASSERT(obj->oo_inode == NULL);
1642         LASSERT(obj->oo_hl_head == NULL);
1643
1644         if (S_ISDIR(mode) && ldiskfs_pdo) {
1645                 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1646                 if (obj->oo_hl_head == NULL)
1647                         return -ENOMEM;
1648         }
1649
1650         oth = container_of(th, struct osd_thandle, ot_super);
1651         LASSERT(oth->ot_handle->h_transaction != NULL);
1652
1653         if (hint && hint->dah_parent)
1654                 parent = hint->dah_parent;
1655
1656         inode = ldiskfs_create_inode(oth->ot_handle,
1657                                      parent ? osd_dt_obj(parent)->oo_inode :
1658                                               osd_sb(osd)->s_root->d_inode,
1659                                      mode);
1660         if (!IS_ERR(inode)) {
1661                 /* Do not update file c/mtime in ldiskfs.
1662                  * NB: don't need any lock because no contention at this
1663                  * early stage */
1664                 inode->i_flags |= S_NOCMTIME;
1665                 inode->i_state |= I_LUSTRE_NOSCRUB;
1666                 obj->oo_inode = inode;
1667                 result = 0;
1668         } else {
1669                 if (obj->oo_hl_head != NULL) {
1670                         ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1671                         obj->oo_hl_head = NULL;
1672                 }
1673                 result = PTR_ERR(inode);
1674         }
1675         LINVRNT(osd_invariant(obj));
1676         return result;
1677 }
1678
1679 enum {
1680         OSD_NAME_LEN = 255
1681 };
1682
1683 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1684                      struct lu_attr *attr,
1685                      struct dt_allocation_hint *hint,
1686                      struct dt_object_format *dof,
1687                      struct thandle *th)
1688 {
1689         int result;
1690         struct osd_thandle *oth;
1691         __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1692
1693         LASSERT(S_ISDIR(attr->la_mode));
1694
1695         oth = container_of(th, struct osd_thandle, ot_super);
1696         LASSERT(oth->ot_handle->h_transaction != NULL);
1697         result = osd_mkfile(info, obj, mode, hint, th);
1698
1699         return result;
1700 }
1701
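     /* Create a regular file and set up an IAM container in it: indices with
      * variable-size keys use the lvar format, all others use lfix. */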
1702 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
1703                         struct lu_attr *attr,
1704                         struct dt_allocation_hint *hint,
1705                         struct dt_object_format *dof,
1706                         struct thandle *th)
1707 {
1708         int result;
1709         struct osd_thandle *oth;
1710         const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
1711
1712         __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
1713
1714         LASSERT(S_ISREG(attr->la_mode));
1715
1716         oth = container_of(th, struct osd_thandle, ot_super);
1717         LASSERT(oth->ot_handle->h_transaction != NULL);
1718
1719         result = osd_mkfile(info, obj, mode, hint, th);
1720         if (result == 0) {
1721                 LASSERT(obj->oo_inode != NULL);
1722                 if (feat->dif_flags & DT_IND_VARKEY)
1723                         result = iam_lvar_create(obj->oo_inode,
1724                                                  feat->dif_keysize_max,
1725                                                  feat->dif_ptrsize,
1726                                                  feat->dif_recsize_max,
1727                                                  oth->ot_handle);
1728                 else
1729                         result = iam_lfix_create(obj->oo_inode,
1730                                                  feat->dif_keysize_max,
1731                                                  feat->dif_ptrsize,
1732                                                  feat->dif_recsize_max,
1733                                                  oth->ot_handle);
1734
1735         }
1736         return result;
1737 }
1738
1739 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1740                      struct lu_attr *attr,
1741                      struct dt_allocation_hint *hint,
1742                      struct dt_object_format *dof,
1743                      struct thandle *th)
1744 {
1745         LASSERT(S_ISREG(attr->la_mode));
1746         return osd_mkfile(info, obj, (attr->la_mode &
1747                                (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1748 }
1749
1750 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1751                      struct lu_attr *attr,
1752                      struct dt_allocation_hint *hint,
1753                      struct dt_object_format *dof,
1754                      struct thandle *th)
1755 {
1756         LASSERT(S_ISLNK(attr->la_mode));
1757         return osd_mkfile(info, obj, (attr->la_mode &
1758                               (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1759 }
1760
1761 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1762                      struct lu_attr *attr,
1763                      struct dt_allocation_hint *hint,
1764                      struct dt_object_format *dof,
1765                      struct thandle *th)
1766 {
1767         cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
1768         int result;
1769
1770         LINVRNT(osd_invariant(obj));
1771         LASSERT(obj->oo_inode == NULL);
1772         LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1773                 S_ISFIFO(mode) || S_ISSOCK(mode));
1774
1775         result = osd_mkfile(info, obj, mode, hint, th);
1776         if (result == 0) {
1777                 LASSERT(obj->oo_inode != NULL);
1778                 /*
1779                  * This inode should be marked dirty for i_rdev.  Currently
1780                  * that is done in the osd_attr_init().
1781                  */
1782                 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
1783                                    attr->la_rdev);
1784         }
1785         LINVRNT(osd_invariant(obj));
1786         return result;
1787 }
1788
1789 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1790                               struct lu_attr *,
1791                               struct dt_allocation_hint *hint,
1792                               struct dt_object_format *dof,
1793                               struct thandle *);
1794
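     /* Map a dt_object_format type to the matching creation helper above. */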
1795 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1796 {
1797         osd_obj_type_f result;
1798
1799         switch (type) {
1800         case DFT_DIR:
1801                 result = osd_mkdir;
1802                 break;
1803         case DFT_REGULAR:
1804                 result = osd_mkreg;
1805                 break;
1806         case DFT_SYM:
1807                 result = osd_mksym;
1808                 break;
1809         case DFT_NODE:
1810                 result = osd_mknod;
1811                 break;
1812         case DFT_INDEX:
1813                 result = osd_mk_index;
1814                 break;
1815
1816         default:
1817                 LBUG();
1818                 break;
1819         }
1820         return result;
1821 }
1822
1823
1824 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1825                         struct dt_object *parent, struct dt_object *child,
1826                         cfs_umode_t child_mode)
1827 {
1828         LASSERT(ah);
1829
1830         memset(ah, 0, sizeof(*ah));
1831         ah->dah_parent = parent;
1832         ah->dah_mode = child_mode;
1833 }
1834
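     /* Drop attributes the freshly created inode already carries (type, mode
      * and times equal to the current values), transfer quota to the new
      * owner and apply whatever remains. */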
1835 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
1836                           struct lu_attr *attr, struct dt_object_format *dof)
1837 {
1838         struct inode   *inode = obj->oo_inode;
1839         __u64           valid = attr->la_valid;
1840         int             result;
1841
1842         attr->la_valid &= ~(LA_TYPE | LA_MODE);
1843
1844         if (dof->dof_type != DFT_NODE)
1845                 attr->la_valid &= ~LA_RDEV;
1846         if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
1847                 attr->la_valid &= ~LA_ATIME;
1848         if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
1849                 attr->la_valid &= ~LA_CTIME;
1850         if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
1851                 attr->la_valid &= ~LA_MTIME;
1852
1853         result = osd_quota_transfer(inode, attr);
1854         if (result)
1855                 return;
1856
1857         if (attr->la_valid != 0) {
1858                 result = osd_inode_setattr(info->oti_env, inode, attr);
1859                 /*
1860                  * The osd_inode_setattr() should always succeed here.  The
1861                  * only error that could be returned is EDQUOT when we are
1862                  * trying to change the UID or GID of the inode. However, this
1863                  * should not happen since quota enforcement is no longer
1864                  * enabled on ldiskfs (lquota takes care of it).
1865                  */
1866                 LASSERTF(result == 0, "%d", result);
1867                 inode->i_sb->s_op->dirty_inode(inode);
1868         }
1869
1870         attr->la_valid = valid;
1871 }
1872
1873 /**
1874  * Helper function for osd_object_create()
1875  *
1876  * \retval 0, on success
1877  */
1878 static int __osd_object_create(struct osd_thread_info *info,
1879                                struct osd_object *obj, struct lu_attr *attr,
1880                                struct dt_allocation_hint *hint,
1881                                struct dt_object_format *dof,
1882                                struct thandle *th)
1883 {
1884         int     result;
1885
1886         result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
1887                                                   th);
1888         if (result == 0) {
1889                 osd_attr_init(info, obj, attr, dof);
1890                 osd_object_init0(obj);
1891                 /* bz 24037 */
1892                 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
1893                         unlock_new_inode(obj->oo_inode);
1894         }
1895
1896         return result;
1897 }
1898
1899 /**
1900  * Helper function for osd_object_create()
1901  *
1902  * \retval 0, on success
1903  */
1904 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
1905                            const struct lu_fid *fid, struct thandle *th)
1906 {
1907         struct osd_thread_info *info = osd_oti_get(env);
1908         struct osd_inode_id    *id   = &info->oti_id;
1909         struct osd_device      *osd  = osd_obj2dev(obj);
1910
1911         LASSERT(obj->oo_inode != NULL);
1912
1913         osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
1914         return osd_oi_insert(info, osd, fid, id, th);
1915 }
1916
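     /* Resolve which server a fid belongs to: igif and other non-normal fids
      * are local to this MDT, idif fids encode the OST index directly, and
      * normal fids are looked up in the FLD. */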
1917 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
1918                    const struct lu_fid *fid, struct lu_seq_range *range)
1919 {
1920         struct seq_server_site  *ss = osd_seq_site(osd);
1921         int                     rc;
1922
1923         if (fid_is_igif(fid)) {
1924                 range->lsr_flags = LU_SEQ_RANGE_MDT;
1925                 range->lsr_index = 0;
1926                 return 0;
1927         }
1928
1929         if (fid_is_idif(fid)) {
1930                 range->lsr_flags = LU_SEQ_RANGE_OST;
1931                 range->lsr_index = fid_idif_ost_idx(fid);
1932                 return 0;
1933         }
1934
1935         if (!fid_is_norm(fid)) {
1936                 range->lsr_flags = LU_SEQ_RANGE_MDT;
1937                 if (ss != NULL)
1938                         /* FIXME: if ss is NULL, the caller is not supposed
1939                          * to use lsr_index at all */
1940                         range->lsr_index = ss->ss_node_id;
1941                 return 0;
1942         }
1943
1944         LASSERT(ss != NULL);
1945         range->lsr_flags = -1;
1946         rc = fld_server_lookup(env, ss->ss_server_fld, fid_seq(fid), range);
1947         if (rc != 0) {
1948                 CERROR("%s: cannot find "DFID": rc = %d\n",
1949                        osd2lu_dev(osd)->ld_obd->obd_name, PFID(fid), rc);
1950         }
1951         return rc;
1952 }
1953
1954
1955 static int osd_declare_object_create(const struct lu_env *env,
1956                                      struct dt_object *dt,
1957                                      struct lu_attr *attr,
1958                                      struct dt_allocation_hint *hint,
1959                                      struct dt_object_format *dof,
1960                                      struct thandle *handle)
1961 {
1962         struct lu_seq_range     *range = &osd_oti_get(env)->oti_seq_range;
1963         struct osd_thandle      *oh;
1964         int                      rc;
1965         ENTRY;
1966
1967         LASSERT(handle != NULL);
1968
1969         oh = container_of0(handle, struct osd_thandle, ot_super);
1970         LASSERT(oh->ot_handle == NULL);
1971
1972         OSD_DECLARE_OP(oh, create, osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
1973         /* XXX: So far, only a normal fid needs to be inserted into the OI;
1974          *      this could change later, so revise the following code then. */
1975         if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
1976             !fid_is_on_ost(osd_oti_get(env), osd_dt_dev(handle->th_dev),
1977                            lu_object_fid(&dt->do_lu))) {
1978                 /* Reusing an idle OI block may cause one additional OI
1979                  * block to be changed. */
1980                 OSD_DECLARE_OP(oh, insert,
1981                                osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
1982         }
1983         /* If this is a directory, then we expect . and .. to be inserted
1984          * as well. The one directory block always needs to be created for
1985          * the directory, so we can use DTO_WRITE_BASE here (GDT, block
1986          * bitmap, block); there is no danger of needing a tree for the
1987          * first block. */
1988         if (attr && S_ISDIR(attr->la_mode)) {
1989                 OSD_DECLARE_OP(oh, insert,
1990                                osd_dto_credits_noquota[DTO_WRITE_BASE]);
1991                 OSD_DECLARE_OP(oh, insert, 0);
1992         }
1993
1994         if (!attr)
1995                 RETURN(0);
1996
1997         rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
1998                                    false, false, NULL, false);
1999         if (rc != 0)
2000                 RETURN(rc);
2001
2002         /* Do the FLD lookup here in declare so the result is added to the
2003          * FLD cache; then the lookup done later inside insert does not need
2004          * to send an RPC, which avoids sending an RPC while holding the
2005          * transaction open. */
2006         if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
2007                 !fid_is_last_id(lu_object_fid(&dt->do_lu)))
2008                 osd_fld_lookup(env, osd_dt_dev(handle->th_dev),
2009                                lu_object_fid(&dt->do_lu), range);
2010
2011
2012         RETURN(rc);
2013 }
2014
2015 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
2016                              struct lu_attr *attr,
2017                              struct dt_allocation_hint *hint,
2018                              struct dt_object_format *dof,
2019                              struct thandle *th)
2020 {
2021         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
2022         struct osd_object      *obj    = osd_dt_obj(dt);
2023         struct osd_thread_info *info   = osd_oti_get(env);
2024         int result;
2025
2026         ENTRY;
2027
2028         LINVRNT(osd_invariant(obj));
2029         LASSERT(!dt_object_exists(dt));
2030         LASSERT(osd_write_locked(env, obj));
2031         LASSERT(th != NULL);
2032
2033         if (unlikely(fid_is_acct(fid)))
2034                 /* Quota files can't be created from the kernel any more,
2035                  * 'tune2fs -O quota' will take care of creating them */
2036                 RETURN(-EPERM);
2037
2038         OSD_EXEC_OP(th, create);
2039
2040         result = __osd_object_create(info, obj, attr, hint, dof, th);
2041         if (result == 0)
2042                 result = __osd_oi_insert(env, obj, fid, th);
2043
2044         LASSERT(ergo(result == 0, dt_object_exists(dt)));
2045         LASSERT(osd_invariant(obj));
2046         RETURN(result);
2047 }
2048
2049 /**
2050  * Called to destroy on-disk representation of the object
2051  *
2052  * Concurrency: must be locked
2053  */
2054 static int osd_declare_object_destroy(const struct lu_env *env,
2055                                       struct dt_object *dt,
2056                                       struct thandle *th)
2057 {
2058         struct osd_object  *obj = osd_dt_obj(dt);
2059         struct inode       *inode = obj->oo_inode;
2060         struct osd_thandle *oh;
2061         int                 rc;
2062         ENTRY;
2063
2064         oh = container_of0(th, struct osd_thandle, ot_super);
2065         LASSERT(oh->ot_handle == NULL);
2066         LASSERT(inode);
2067
2068         OSD_DECLARE_OP(oh, delete, osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
2069         /* XXX: So far, only normal fid needs to be inserted into the OI,
2070          *      so only normal fid needs to be removed from the OI also.
2071          * Recycling an idle OI leaf may cause three additional OI blocks
2072          * to be changed. */
2073         OSD_DECLARE_OP(oh, destroy, fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2074                         osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3 : 0);
2075
2076         /* one less inode */
2077         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
2078                                    false, true, NULL, false);
2079         if (rc)
2080                 RETURN(rc);
2081         /* data to be truncated */
2082         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
2083                                    true, true, NULL, false);
2084         RETURN(rc);
2085 }
2086
2087 static int osd_object_destroy(const struct lu_env *env,
2088                               struct dt_object *dt,
2089                               struct thandle *th)
2090 {
2091         const struct lu_fid    *fid = lu_object_fid(&dt->do_lu);
2092         struct osd_object      *obj = osd_dt_obj(dt);
2093         struct inode           *inode = obj->oo_inode;
2094         struct osd_device      *osd = osd_obj2dev(obj);
2095         struct osd_thandle     *oh;
2096         int                     result;
2097         ENTRY;
2098
2099         oh = container_of0(th, struct osd_thandle, ot_super);
2100         LASSERT(oh->ot_handle);
2101         LASSERT(inode);
2102         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2103
2104         if (unlikely(fid_is_acct(fid)))
2105                 RETURN(-EPERM);
2106
2107         /* Parallel control for OI scrub. For most of cases, there is no
2108          * lock contention. So it will not affect unlink performance. */
2109         mutex_lock(&inode->i_mutex);
2110         if (S_ISDIR(inode->i_mode)) {
2111                 LASSERT(osd_inode_unlinked(inode) ||
2112                         inode->i_nlink == 1);
2113                 spin_lock(&obj->oo_guard);
2114                 clear_nlink(inode);
2115                 spin_unlock(&obj->oo_guard);
2116                 inode->i_sb->s_op->dirty_inode(inode);
2117         }
2118
2119         OSD_EXEC_OP(th, destroy);
2120
2121         result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
2122         mutex_unlock(&inode->i_mutex);
2123
2124         /* XXX: add to ext3 orphan list */
2125         /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2126
2127         /* not needed in the cache anymore */
2128         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2129
2130         RETURN(0);
2131 }
2132
2133 /**
2134  * Helper function for osd_xattr_set()
2135  */
2136 static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2137                            const struct lu_buf *buf, const char *name, int fl)
2138 {
2139         struct osd_object      *obj      = osd_dt_obj(dt);
2140         struct inode           *inode    = obj->oo_inode;
2141         struct osd_thread_info *info     = osd_oti_get(env);
2142         struct dentry          *dentry   = &info->oti_child_dentry;
2143         int                     fs_flags = 0;
2144         int                     rc;
2145
2146         LASSERT(dt_object_exists(dt));
2147         LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
2148
2149         if (fl & LU_XATTR_REPLACE)
2150                 fs_flags |= XATTR_REPLACE;
2151
2152         if (fl & LU_XATTR_CREATE)
2153                 fs_flags |= XATTR_CREATE;
2154
2155         ll_vfs_dq_init(inode);
2156         dentry->d_inode = inode;
2157         rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
2158                                    buf->lb_len, fs_flags);
2159         return rc;
2160 }
2161
2162 /**
2163  * Put the fid into lustre_mdt_attrs, and then place the structure in the
2164  * inode's EA. This fid should not be altered during the lifetime
2165  * of the inode.
2166  *
2167  * \retval +ve, on success
2168  * \retval -ve, on error
2169  *
2170  * FIXME: it would be good to use ldiskfs_xattr_set_handle() here
2171  */
2172 static int osd_ea_fid_set(const struct lu_env *env, struct dt_object *dt,
2173                           const struct lu_fid *fid)
2174 {
2175         struct osd_thread_info  *info      = osd_oti_get(env);
2176         struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
2177
2178         lustre_lma_init(mdt_attrs, fid);
2179         lustre_lma_swab(mdt_attrs);
2180         return __osd_xattr_set(env, dt,
2181                                osd_buf_get(env, mdt_attrs, sizeof *mdt_attrs),
2182                                XATTR_NAME_LMA, LU_XATTR_CREATE);
2183
2184 }
2185
2186 /**
2187  * ldiskfs supports a fid in the dirent; it is passed in dentry->d_fsdata.
2188  * Lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2189  * To stay compatible with the 1.8 ldiskfs driver we need a magic number
2190  * at the start of the fid data.
2191  * \a ldiskfs_dentry_param is used only to pass the fid from osd to
2192  * ldiskfs; it is an in-memory API.
2193  */
2194 void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2195                                   const struct dt_rec *fid)
2196 {
2197         param->edp_magic = LDISKFS_LUFID_MAGIC;
2198         param->edp_len =  sizeof(struct lu_fid) + 1;
2199
2200         fid_cpu_to_be((struct lu_fid *)param->edp_data,
2201                       (struct lu_fid *)fid);
2202 }
2203
2204 /**
2205  * Try to read the fid from the inode EA into \a fid; if the fid is found
2206  * there we use it, otherwise the caller will have to form an igif.
2207  *
2208  * \param fid object fid.
2209  *
2210  * \retval 0 on success
2211  */
2212 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2213                           __u32 ino, struct lu_fid *fid,
2214                           struct osd_inode_id *id)
2215 {
2216         struct osd_thread_info *info  = osd_oti_get(env);
2217         struct inode           *inode;
2218         ENTRY;
2219
2220         osd_id_gen(id, ino, OSD_OII_NOGEN);
2221         inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2222         if (IS_ERR(inode))
2223                 RETURN(PTR_ERR(inode));
2224
2225         iput(inode);
2226         RETURN(0);
2227 }
2228
2229 /**
2230  * OSD layer object create function for interoperability mode (b11826).
2231  * This is mostly similar to osd_object_create(); the only difference is
2232  * that the fid is inserted into the inode EA here.
2233  *
2234  * \retval   0, on success
2235  * \retval -ve, on error
2236  */
2237 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2238                                 struct lu_attr *attr,
2239                                 struct dt_allocation_hint *hint,
2240                                 struct dt_object_format *dof,
2241                                 struct thandle *th)
2242 {
2243         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
2244         struct osd_object      *obj    = osd_dt_obj(dt);
2245         struct osd_thread_info *info   = osd_oti_get(env);
2246         int                     result;
2247
2248         ENTRY;
2249
2250         LASSERT(osd_invariant(obj));
2251         LASSERT(!dt_object_exists(dt));
2252         LASSERT(osd_write_locked(env, obj));
2253         LASSERT(th != NULL);
2254
2255         if (unlikely(fid_is_acct(fid)))
2256                 /* Quota files can't be created from the kernel any more,
2257                  * 'tune2fs -O quota' will take care of creating them */
2258                 RETURN(-EPERM);
2259
2260         OSD_EXEC_OP(th, create);
2261
2262         result = __osd_object_create(info, obj, attr, hint, dof, th);
2263         /* objects under osd root should have igif fid, so don't add fid EA */
2264         /* For OST objects, the fid will be stored during the first write */
2265         if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL &&
2266             !fid_is_on_ost(info, osd_dt_dev(th->th_dev), fid))
2267                 result = osd_ea_fid_set(env, dt, fid);
2268
2269         if (result == 0)
2270                 result = __osd_oi_insert(env, obj, fid, th);
2271
2272         LASSERT(ergo(result == 0, dt_object_exists(dt)));
2273         LINVRNT(osd_invariant(obj));
2274         RETURN(result);
2275 }
2276
2277 static int osd_declare_object_ref_add(const struct lu_env *env,
2278                                       struct dt_object *dt,
2279                                       struct thandle *handle)
2280 {
2281         struct osd_thandle       *oh;
2282
2283         /* it's possible that object doesn't exist yet */
2284         LASSERT(handle != NULL);
2285
2286         oh = container_of0(handle, struct osd_thandle, ot_super);
2287         LASSERT(oh->ot_handle == NULL);
2288
2289         OSD_DECLARE_OP(oh, ref_add, osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2290
2291         return 0;
2292 }
2293
2294 /*
2295  * Concurrency: @dt is write locked.
2296  */
2297 static int osd_object_ref_add(const struct lu_env *env,
2298                               struct dt_object *dt, struct thandle *th)
2299 {
2300         struct osd_object *obj = osd_dt_obj(dt);
2301         struct inode      *inode = obj->oo_inode;
2302
2303         LINVRNT(osd_invariant(obj));
2304         LASSERT(dt_object_exists(dt));
2305         LASSERT(osd_write_locked(env, obj));
2306         LASSERT(th != NULL);
2307
2308         OSD_EXEC_OP(th, ref_add);
2309
2310         /*
2311          * DIR_NLINK feature is set for compatibility reasons if:
2312          * 1) nlinks > LDISKFS_LINK_MAX, or
2313          * 2) nlinks == 2, since this indicates i_nlink was previously 1.
2314          *
2315          * It is easier to always set this flag (rather than check and set),
2316          * since it has less overhead, and the superblock will be dirtied
2317          * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
2318          * do not actually care whether this flag is set or not.
2319          */
2320         spin_lock(&obj->oo_guard);
2321         /* inc_nlink from 0 may cause WARN_ON */
2322         if (inode->i_nlink == 0)
2323                 set_nlink(inode, 1);
2324         else
2325                 inc_nlink(inode);
2326         if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
2327                 if (inode->i_nlink >= LDISKFS_LINK_MAX ||
2328                     inode->i_nlink == 2)
2329                         set_nlink(inode, 1);
2330         }
2331         LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2332         spin_unlock(&obj->oo_guard);
2333         inode->i_sb->s_op->dirty_inode(inode);
2334         LINVRNT(osd_invariant(obj));
2335
2336         return 0;
2337 }
2338
2339 static int osd_declare_object_ref_del(const struct lu_env *env,
2340                                       struct dt_object *dt,
2341                                       struct thandle *handle)
2342 {
2343         struct osd_thandle *oh;
2344
2345         LASSERT(dt_object_exists(dt));
2346         LASSERT(handle != NULL);
2347
2348         oh = container_of0(handle, struct osd_thandle, ot_super);
2349         LASSERT(oh->ot_handle == NULL);
2350
2351         OSD_DECLARE_OP(oh, ref_del, osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2352
2353         return 0;
2354 }
2355
2356 /*
2357  * Concurrency: @dt is write locked.
2358  */
2359 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2360                               struct thandle *th)
2361 {
2362         struct osd_object *obj = osd_dt_obj(dt);
2363         struct inode      *inode = obj->oo_inode;
2364
2365         LINVRNT(osd_invariant(obj));
2366         LASSERT(dt_object_exists(dt));
2367         LASSERT(osd_write_locked(env, obj));
2368         LASSERT(th != NULL);
2369
2370         OSD_EXEC_OP(th, ref_del);
2371
2372         spin_lock(&obj->oo_guard);
2373         LASSERT(inode->i_nlink > 0);
2374         drop_nlink(inode);
2375         /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
2376          * then the nlink count is 1. Don't let it be set to 0 or the directory
2377          * inode will be deleted incorrectly. */
2378         if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
2379                 set_nlink(inode, 1);
2380         spin_unlock(&obj->oo_guard);
2381         inode->i_sb->s_op->dirty_inode(inode);
2382         LINVRNT(osd_invariant(obj));
2383
2384         return 0;
2385 }
2386
2387 /*
2388  * Get the 64-bit version for an inode.
2389  */
2390 static int osd_object_version_get(const struct lu_env *env,
2391                                   struct dt_object *dt, dt_obj_version_t *ver)
2392 {
2393         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2394
2395         CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2396                LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2397         *ver = LDISKFS_I(inode)->i_fs_version;
2398         return 0;
2399 }
2400
2401 /*
2402  * Concurrency: @dt is read locked.
2403  */
2404 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2405                          struct lu_buf *buf, const char *name,
2406                          struct lustre_capa *capa)
2407 {
2408         struct osd_object      *obj    = osd_dt_obj(dt);
2409         struct inode           *inode  = obj->oo_inode;
2410         struct osd_thread_info *info   = osd_oti_get(env);
2411         struct dentry          *dentry = &info->oti_obj_dentry;
2412
2413         /* version get is not real XATTR but uses xattr API */
2414         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2415                 /* for version we are just using the xattr API but changing
2416                  * an inode field instead */
2417                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2418                 osd_object_version_get(env, dt, buf->lb_buf);
2419                 return sizeof(dt_obj_version_t);
2420         }
2421
2422         LASSERT(dt_object_exists(dt));
2423         LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
2424
2425         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2426                 return -EACCES;
2427
2428         dentry->d_inode = inode;
2429         return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
2430 }
2431
2432
2433 static int osd_declare_xattr_set(const struct lu_env *env,
2434                                  struct dt_object *dt,
2435                                  const struct lu_buf *buf, const char *name,
2436                                  int fl, struct thandle *handle)
2437 {
2438         struct osd_thandle *oh;
2439
2440         LASSERT(handle != NULL);
2441
2442         oh = container_of0(handle, struct osd_thandle, ot_super);
2443         LASSERT(oh->ot_handle == NULL);
2444
2445         OSD_DECLARE_OP(oh, xattr_set, strcmp(name, XATTR_NAME_VERSION) == 0 ?
2446                        osd_dto_credits_noquota[DTO_ATTR_SET_BASE] :
2447                        osd_dto_credits_noquota[DTO_XATTR_SET]);
2448
2449         return 0;
2450 }
2451
2452 /*
2453  * Set the 64-bit version for object
2454  */
2455 static void osd_object_version_set(const struct lu_env *env,
2456                                    struct dt_object *dt,
2457                                    dt_obj_version_t *new_version)
2458 {
2459         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2460
2461         CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
2462                *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2463
2464         LDISKFS_I(inode)->i_fs_version = *new_version;
2465         /** Version is set after all inode operations are finished,
2466          *  so we should mark it dirty here */
2467         inode->i_sb->s_op->dirty_inode(inode);
2468 }
2469
2470 /*
2471  * Concurrency: @dt is write locked.
2472  */
2473 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2474                          const struct lu_buf *buf, const char *name, int fl,
2475                          struct thandle *handle, struct lustre_capa *capa)
2476 {
2477         LASSERT(handle != NULL);
2478
2479         /* version set is not real XATTR */
2480         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2481                 /* for version we are just using the xattr API but changing
2482                  * an inode field instead */
2483                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2484                 osd_object_version_set(env, dt, buf->lb_buf);
2485                 return sizeof(dt_obj_version_t);
2486         }
2487
2488         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2489                 return -EACCES;
2490
2491         OSD_EXEC_OP(handle, xattr_set);
2492         return __osd_xattr_set(env, dt, buf, name, fl);
2493 }
2494
2495 /*
2496  * Concurrency: @dt is read locked.
2497  */
2498 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
2499                           struct lu_buf *buf, struct lustre_capa *capa)
2500 {
2501         struct osd_object      *obj    = osd_dt_obj(dt);
2502         struct inode           *inode  = obj->oo_inode;
2503         struct osd_thread_info *info   = osd_oti_get(env);
2504         struct dentry          *dentry = &info->oti_obj_dentry;
2505
2506         LASSERT(dt_object_exists(dt));
2507         LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
2508         LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2509
2510         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2511                 return -EACCES;
2512
2513         dentry->d_inode = inode;
2514         return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
2515 }
2516
2517 static int osd_declare_xattr_del(const struct lu_env *env,
2518                                  struct dt_object *dt, const char *name,
2519                                  struct thandle *handle)
2520 {
2521         struct osd_thandle *oh;
2522
2523         LASSERT(dt_object_exists(dt));
2524         LASSERT(handle != NULL);
2525
2526         oh = container_of0(handle, struct osd_thandle, ot_super);
2527         LASSERT(oh->ot_handle == NULL);
2528
2529         OSD_DECLARE_OP(oh, xattr_set, osd_dto_credits_noquota[DTO_XATTR_SET]);
2530
2531         return 0;
2532 }
2533
2534 /*
2535  * Concurrency: @dt is write locked.
2536  */
2537 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
2538                          const char *name, struct thandle *handle,
2539                          struct lustre_capa *capa)
2540 {
2541         struct osd_object      *obj    = osd_dt_obj(dt);
2542         struct inode           *inode  = obj->oo_inode;
2543         struct osd_thread_info *info   = osd_oti_get(env);
2544         struct dentry          *dentry = &info->oti_obj_dentry;
2545         int                     rc;
2546
2547         LASSERT(dt_object_exists(dt));
2548         LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
2549         LASSERT(osd_write_locked(env, obj));
2550         LASSERT(handle != NULL);
2551
2552         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2553                 return -EACCES;
2554
2555         OSD_EXEC_OP(handle, xattr_set);
2556
2557         ll_vfs_dq_init(inode);
2558         dentry->d_inode = inode;
2559         rc = inode->i_op->removexattr(dentry, name);
2560         return rc;
2561 }
2562
2563 static struct obd_capa *osd_capa_get(const struct lu_env *env,
2564                                      struct dt_object *dt,
2565                                      struct lustre_capa *old,
2566                                      __u64 opc)
2567 {
2568         struct osd_thread_info *info = osd_oti_get(env);
2569         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2570         struct osd_object *obj = osd_dt_obj(dt);
2571         struct osd_device *dev = osd_obj2dev(obj);
2572         struct lustre_capa_key *key = &info->oti_capa_key;
2573         struct lustre_capa *capa = &info->oti_capa;
2574         struct obd_capa *oc;
2575         struct md_capainfo *ci;
2576         int rc;
2577         ENTRY;
2578
2579         if (!dev->od_fl_capa)
2580                 RETURN(ERR_PTR(-ENOENT));
2581
2582         LASSERT(dt_object_exists(dt));
2583         LINVRNT(osd_invariant(obj));
2584
2585         /* renewal sanity check */
2586         if (old && osd_object_auth(env, dt, old, opc))
2587                 RETURN(ERR_PTR(-EACCES));
2588
2589         ci = md_capainfo(env);
2590         if (unlikely(!ci))
2591                 RETURN(ERR_PTR(-ENOENT));
2592
2593         switch (ci->mc_auth) {
2594         case LC_ID_NONE:
2595                 RETURN(NULL);
2596         case LC_ID_PLAIN:
2597                 capa->lc_uid = obj->oo_inode->i_uid;
2598                 capa->lc_gid = obj->oo_inode->i_gid;
2599                 capa->lc_flags = LC_ID_PLAIN;
2600                 break;
2601         case LC_ID_CONVERT: {
2602                 __u32 d[4], s[4];
2603
2604                 s[0] = obj->oo_inode->i_uid;
2605                 cfs_get_random_bytes(&(s[1]), sizeof(__u32));
2606                 s[2] = obj->oo_inode->i_gid;
2607                 cfs_get_random_bytes(&(s[3]), sizeof(__u32));
2608                 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
2609                 if (unlikely(rc))
2610                         RETURN(ERR_PTR(rc));
2611
2612                 capa->lc_uid   = ((__u64)d[1] << 32) | d[0];
2613                 capa->lc_gid   = ((__u64)d[3] << 32) | d[2];
2614                 capa->lc_flags = LC_ID_CONVERT;
2615                 break;
2616         }
2617         default:
2618                 RETURN(ERR_PTR(-EINVAL));
2619         }
2620
2621         capa->lc_fid = *fid;
2622         capa->lc_opc = opc;
2623         capa->lc_flags |= dev->od_capa_alg << 24;
2624         capa->lc_timeout = dev->od_capa_timeout;
2625         capa->lc_expiry = 0;
2626
2627         oc = capa_lookup(dev->od_capa_hash, capa, 1);
2628         if (oc) {
2629                 LASSERT(!capa_is_expired(oc));
2630                 RETURN(oc);
2631         }
2632
2633         spin_lock(&capa_lock);
2634         *key = dev->od_capa_keys[1];
2635         spin_unlock(&capa_lock);
2636
2637         capa->lc_keyid = key->lk_keyid;
2638         capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
2639
2640         rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
2641         if (rc) {
2642                 DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
2643                 RETURN(ERR_PTR(rc));
2644         }
2645
2646         oc = capa_add(dev->od_capa_hash, capa);
2647         RETURN(oc);
2648 }
2649
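     /* Flush a single object to disk by calling the inode's fsync method
      * through a temporary file/dentry pair from the thread context. */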
2650 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
2651 {
2652         struct osd_object       *obj    = osd_dt_obj(dt);
2653         struct inode            *inode  = obj->oo_inode;
2654         struct osd_thread_info  *info   = osd_oti_get(env);
2655         struct dentry           *dentry = &info->oti_obj_dentry;
2656         struct file             *file   = &info->oti_file;
2657         int                     rc;
2658
2659         ENTRY;
2660
2661         dentry->d_inode = inode;
2662         file->f_dentry = dentry;
2663         file->f_mapping = inode->i_mapping;
2664         file->f_op = inode->i_fop;
2665         mutex_lock(&inode->i_mutex);
2666         rc = file->f_op->fsync(file, dentry, 0);
2667         mutex_unlock(&inode->i_mutex);
2668         RETURN(rc);
2669 }
2670
2671 static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
2672                         void **data)
2673 {
2674         struct osd_object *obj = osd_dt_obj(dt);
2675         ENTRY;
2676
2677         *data = (void *)obj->oo_inode;
2678         RETURN(0);
2679 }
2680
2681 /*
2682  * Index operations.
2683  */
2684
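     /* Check whether the object's existing IAM container is compatible with
      * the requested index features (key/record sizes and flags). */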
2685 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
2686                            const struct dt_index_features *feat)
2687 {
2688         struct iam_descr *descr;
2689
2690         if (osd_object_is_root(o))
2691                 return feat == &dt_directory_features;
2692
2693         LASSERT(o->oo_dir != NULL);
2694
2695         descr = o->oo_dir->od_container.ic_descr;
2696         if (feat == &dt_directory_features) {
2697                 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
2698                         return 1;
2699                 else
2700                         return 0;
2701         } else {
2702                 return
2703                         feat->dif_keysize_min <= descr->id_key_size &&
2704                         descr->id_key_size <= feat->dif_keysize_max &&
2705                         feat->dif_recsize_min <= descr->id_rec_size &&
2706                         descr->id_rec_size <= feat->dif_recsize_max &&
2707                         !(feat->dif_flags & (DT_IND_VARKEY |
2708                                              DT_IND_VARREC | DT_IND_NONUNQ)) &&
2709                         ergo(feat->dif_flags & DT_IND_UPDATE,
2710                              1 /* XXX check that object (and file system) is
2711                                 * writable */);
2712         }
2713 }
2714
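     /* Bind the IAM container embedded in the directory data to the object's
      * inode; on success the object starts using the IAM index operations. */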
2715 static int osd_iam_container_init(const struct lu_env *env,
2716                                   struct osd_object *obj,
2717                                   struct osd_directory *dir)
2718 {
2719         struct iam_container *bag = &dir->od_container;
2720         int result;
2721
2722         result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
2723         if (result != 0)
2724                 return result;
2725
2726         result = iam_container_setup(bag);
2727         if (result == 0)
2728                 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
2729         else
2730                 iam_container_fini(bag);
2731
2732         return result;
2733 }
2734
2735
2736 /*
2737  * Concurrency: no external locking is necessary.
2738  */
2739 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
2740                          const struct dt_index_features *feat)
2741 {
2742         int                      result;
2743         int                      skip_iam = 0;
2744         struct osd_object       *obj = osd_dt_obj(dt);
2745
2746         LINVRNT(osd_invariant(obj));
2747         LASSERT(dt_object_exists(dt));
2748
2749         if (osd_object_is_root(obj)) {
2750                 dt->do_index_ops = &osd_index_ea_ops;
2751                 result = 0;
2752         } else if (feat == &dt_directory_features) {
2753                 dt->do_index_ops = &osd_index_ea_ops;
2754                 if (S_ISDIR(obj->oo_inode->i_mode))
2755                         result = 0;
2756                 else
2757                         result = -ENOTDIR;
2758                 skip_iam = 1;
2759         } else if (unlikely(feat == &dt_otable_features)) {
2760                 dt->do_index_ops = &osd_otable_ops;
2761                 return 0;
2762         } else if (feat == &dt_acct_features) {
2763                 dt->do_index_ops = &osd_acct_index_ops;
2764                 result = 0;
2765                 skip_iam = 1;
2766         } else if (!osd_has_index(obj)) {
2767                 struct osd_directory *dir;
2768
2769                 OBD_ALLOC_PTR(dir);
2770                 if (dir != NULL) {
2771
2772                         spin_lock(&obj->oo_guard);
2773                         if (obj->oo_dir == NULL)
2774                                 obj->oo_dir = dir;
2775                         else
2776                                 /*
2777                                  * Concurrent thread allocated container data.
2778                                  */
2779                                 OBD_FREE_PTR(dir);
2780                         spin_unlock(&obj->oo_guard);
2781                         /*
2782                          * Now, that we have container data, serialize its
2783                          * initialization.
2784                          */
2785                         down_write(&obj->oo_ext_idx_sem);
2786                         /*
2787                          * recheck under lock.
2788                          */
2789                         if (!osd_has_index(obj))
2790                                 result = osd_iam_container_init(env, obj, dir);
2791                         else
2792                                 result = 0;
2793                         up_write(&obj->oo_ext_idx_sem);
2794                 } else {
2795                         result = -ENOMEM;
2796                 }
2797         } else {
2798                 result = 0;
2799         }
2800
2801         if (result == 0 && skip_iam == 0) {
2802                 if (!osd_iam_index_probe(env, obj, feat))
2803                         result = -ENOTDIR;
2804         }
2805         LINVRNT(osd_invariant(obj));
2806
2807         if (is_quota_glb_feat(feat))
2808                 result = osd_quota_migration(env, dt, feat);
2809
2810         return result;
2811 }
2812
2813 static int osd_otable_it_attr_get(const struct lu_env *env,
2814                                  struct dt_object *dt,
2815                                  struct lu_attr *attr,
2816                                  struct lustre_capa *capa)
2817 {
2818         attr->la_valid = 0;
2819         return 0;
2820 }
2821
2822 static const struct dt_object_operations osd_obj_ops = {
2823         .do_read_lock         = osd_object_read_lock,
2824         .do_write_lock        = osd_object_write_lock,
2825         .do_read_unlock       = osd_object_read_unlock,
2826         .do_write_unlock      = osd_object_write_unlock,
2827         .do_write_locked      = osd_object_write_locked,
2828         .do_attr_get          = osd_attr_get,
2829         .do_declare_attr_set  = osd_declare_attr_set,
2830         .do_attr_set          = osd_attr_set,
2831         .do_ah_init           = osd_ah_init,
2832         .do_declare_create    = osd_declare_object_create,
2833         .do_create            = osd_object_create,
2834         .do_declare_destroy   = osd_declare_object_destroy,
2835         .do_destroy           = osd_object_destroy,
2836         .do_index_try         = osd_index_try,
2837         .do_declare_ref_add   = osd_declare_object_ref_add,
2838         .do_ref_add           = osd_object_ref_add,
2839         .do_declare_ref_del   = osd_declare_object_ref_del,
2840         .do_ref_del           = osd_object_ref_del,
2841         .do_xattr_get         = osd_xattr_get,
2842         .do_declare_xattr_set = osd_declare_xattr_set,
2843         .do_xattr_set         = osd_xattr_set,
2844         .do_declare_xattr_del = osd_declare_xattr_del,
2845         .do_xattr_del         = osd_xattr_del,
2846         .do_xattr_list        = osd_xattr_list,
2847         .do_capa_get          = osd_capa_get,
2848         .do_object_sync       = osd_object_sync,
2849         .do_data_get          = osd_data_get,
2850 };
2851
2852 /**
2853  * dt_object_operations for interoperability mode
2854  * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
2855  */
2856 static const struct dt_object_operations osd_obj_ea_ops = {
2857         .do_read_lock         = osd_object_read_lock,
2858         .do_write_lock        = osd_object_write_lock,
2859         .do_read_unlock       = osd_object_read_unlock,
2860         .do_write_unlock      = osd_object_write_unlock,
2861         .do_write_locked      = osd_object_write_locked,
2862         .do_attr_get          = osd_attr_get,
2863         .do_declare_attr_set  = osd_declare_attr_set,
2864         .do_attr_set          = osd_attr_set,
2865         .do_ah_init           = osd_ah_init,
2866         .do_declare_create    = osd_declare_object_create,
2867         .do_create            = osd_object_ea_create,
2868         .do_declare_destroy   = osd_declare_object_destroy,
2869         .do_destroy           = osd_object_destroy,
2870         .do_index_try         = osd_index_try,
2871         .do_declare_ref_add   = osd_declare_object_ref_add,
2872         .do_ref_add           = osd_object_ref_add,
2873         .do_declare_ref_del   = osd_declare_object_ref_del,
2874         .do_ref_del           = osd_object_ref_del,
2875         .do_xattr_get         = osd_xattr_get,
2876         .do_declare_xattr_set = osd_declare_xattr_set,
2877         .do_xattr_set         = osd_xattr_set,
2878         .do_declare_xattr_del = osd_declare_xattr_del,
2879         .do_xattr_del         = osd_xattr_del,
2880         .do_xattr_list        = osd_xattr_list,
2881         .do_capa_get          = osd_capa_get,
2882         .do_object_sync       = osd_object_sync,
2883         .do_data_get          = osd_data_get,
2884 };
2885
2886 static const struct dt_object_operations osd_obj_otable_it_ops = {
2887         .do_attr_get    = osd_otable_it_attr_get,
2888         .do_index_try   = osd_index_try,
2889 };
2890
2891 static int osd_index_declare_iam_delete(const struct lu_env *env,
2892                                         struct dt_object *dt,
2893                                         const struct dt_key *key,
2894                                         struct thandle *handle)
2895 {
2896         struct osd_thandle    *oh;
2897
2898         oh = container_of0(handle, struct osd_thandle, ot_super);
2899         LASSERT(oh->ot_handle == NULL);
2900
2901         OSD_DECLARE_OP(oh, delete, osd_dto_credits_noquota[DTO_INDEX_DELETE]);
2902
2903         return 0;
2904 }
2905
2906 /**
2907  *      delete a (key, value) pair from index \a dt specified by \a key
2908  *
2909  *      \param  dt      osd index object
2910  *      \param  key     key for index
2911  *      \param  handle  transaction handle
2912  *      \param  capa    capability for this operation
2913  *
2914  *      \retval  0  success
2915  *      \retval -ve   failure
2916  */
2917
2918 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
2919                                 const struct dt_key *key,
2920                                 struct thandle *handle,
2921                                 struct lustre_capa *capa)
2922 {
2923         struct osd_thread_info *oti = osd_oti_get(env);
2924         struct osd_object      *obj = osd_dt_obj(dt);
2925         struct osd_thandle     *oh;
2926         struct iam_path_descr  *ipd;
2927         struct iam_container   *bag = &obj->oo_dir->od_container;
2928         int                     rc;
2929
2930         ENTRY;
2931
2932         LINVRNT(osd_invariant(obj));
2933         LASSERT(dt_object_exists(dt));
2934         LASSERT(bag->ic_object == obj->oo_inode);
2935         LASSERT(handle != NULL);
2936
2937         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2938                 RETURN(-EACCES);
2939
2940         OSD_EXEC_OP(handle, delete);
2941
2942         ipd = osd_idx_ipd_get(env, bag);
2943         if (unlikely(ipd == NULL))
2944                 RETURN(-ENOMEM);
2945
2946         oh = container_of0(handle, struct osd_thandle, ot_super);
2947         LASSERT(oh->ot_handle != NULL);
2948         LASSERT(oh->ot_handle->h_transaction != NULL);
2949
2950         if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
2951                 /* swab quota uid/gid provided by caller */
2952                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
2953                 key = (const struct dt_key *)&oti->oti_quota_id;
2954         }
2955
2956         rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
2957         osd_ipd_put(env, bag, ipd);
2958         LINVRNT(osd_invariant(obj));
2959         RETURN(rc);
2960 }
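
/*
 * A minimal caller sketch of the declare/execute pairing used above
 * (hypothetical, not part of this file): credits for the delete are
 * reserved through ->dio_declare_delete() before the transaction is
 * started, and ->dio_delete() then runs with the transaction open.
 * dt_dev, th and rc are caller placeholders; the dt_trans_*() helpers and
 * BYPASS_CAPA are assumed to be the usual dt API wrappers.
 *
 *      th = dt_trans_create(env, dt_dev);
 *      if (IS_ERR(th))
 *              return PTR_ERR(th);
 *      rc = dt->do_index_ops->dio_declare_delete(env, dt, key, th);
 *      if (rc == 0)
 *              rc = dt_trans_start(env, dt_dev, th);
 *      if (rc == 0)
 *              rc = dt->do_index_ops->dio_delete(env, dt, key, th,
 *                                                BYPASS_CAPA);
 *      dt_trans_stop(env, dt_dev, th);
 */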
2961
2962 static int osd_index_declare_ea_delete(const struct lu_env *env,
2963                                        struct dt_object *dt,
2964                                        const struct dt_key *key,
2965                                        struct thandle *handle)
2966 {
2967         struct osd_thandle *oh;
2968         struct inode       *inode;
2969         int                 rc;
2970         ENTRY;
2971
2972         LASSERT(dt_object_exists(dt));
2973         LASSERT(handle != NULL);
2974
2975         oh = container_of0(handle, struct osd_thandle, ot_super);
2976         LASSERT(oh->ot_handle == NULL);
2977
2978         OSD_DECLARE_OP(oh, delete, osd_dto_credits_noquota[DTO_INDEX_DELETE]);
2979
2980         inode = osd_dt_obj(dt)->oo_inode;
2981         LASSERT(inode);
2982
2983         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
2984                                    true, true, NULL, false);
2985         RETURN(rc);
2986 }
2987
2988 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
2989                                           struct dt_rec *fid)
2990 {
2991         struct osd_fid_pack *rec;
2992         int                  rc = -ENODATA;
2993
2994         if (de->file_type & LDISKFS_DIRENT_LUFID) {
2995                 rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
2996                 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
2997         }
2998         RETURN(rc);
2999 }
3000
3001 /**
3002  * Index delete function for interoperability mode (b11826).
3003  * It will remove the directory entry added by osd_index_ea_insert().
3004  * This entry is needed to maintain name->fid mapping.
3005  *
3006  * \param key   key, i.e. the file entry (name) to be deleted
3007  *
3008  * \retval   0, on success
3009  * \retval -ve, on error
3010  */
3011 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
3012                                const struct dt_key *key,
3013                                struct thandle *handle,
3014                                struct lustre_capa *capa)
3015 {
3016         struct osd_object          *obj    = osd_dt_obj(dt);
3017         struct inode               *dir    = obj->oo_inode;
3018         struct dentry              *dentry;
3019         struct osd_thandle         *oh;
3020         struct ldiskfs_dir_entry_2 *de;
3021         struct buffer_head         *bh;
3022         struct htree_lock          *hlock = NULL;
3023         int                         rc;
3024
3025         ENTRY;
3026
3027         LINVRNT(osd_invariant(obj));
3028         LASSERT(dt_object_exists(dt));
3029         LASSERT(handle != NULL);
3030
3031         OSD_EXEC_OP(handle, delete);
3032
3033         oh = container_of(handle, struct osd_thandle, ot_super);
3034         LASSERT(oh->ot_handle != NULL);
3035         LASSERT(oh->ot_handle->h_transaction != NULL);
3036
3037         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
3038                 RETURN(-EACCES);
3039
3040         ll_vfs_dq_init(dir);
3041         dentry = osd_child_dentry_get(env, obj,
3042                                       (char *)key, strlen((char *)key));
3043
3044         if (obj->oo_hl_head != NULL) {
3045                 hlock = osd_oti_get(env)->oti_hlock;
3046                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3047                                    dir, LDISKFS_HLOCK_DEL);
3048         } else {
3049                 down_write(&obj->oo_ext_idx_sem);
3050         }
3051
3052         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
3053         if (bh) {
3054                 rc = ldiskfs_delete_entry(oh->ot_handle,
3055                                           dir, de, bh);
3056                 brelse(bh);
3057         } else {
3058                 rc = -ENOENT;
3059         }
3060         if (hlock != NULL)
3061                 ldiskfs_htree_unlock(hlock);
3062         else
3063                 up_write(&obj->oo_ext_idx_sem);
3064
3065         LASSERT(osd_invariant(obj));
3066         RETURN(rc);
3067 }
3068
3069 /**
3070  *      Lookup index for \a key and copy record to \a rec.
3071  *
3072  *      \param  dt      osd index object
3073  *      \param  key     key for index
3074  *      \param  rec     record reference
3075  *
3076  *      \retval  +ve  success: exact match
3077  *      \retval  0    return record with key not greater than \a key
3078  *      \retval -ve   failure
3079  */
3080 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
3081                                 struct dt_rec *rec, const struct dt_key *key,
3082                                 struct lustre_capa *capa)
3083 {
3084         struct osd_object      *obj = osd_dt_obj(dt);
3085         struct iam_path_descr  *ipd;
3086         struct iam_container   *bag = &obj->oo_dir->od_container;
3087         struct osd_thread_info *oti = osd_oti_get(env);
3088         struct iam_iterator    *it = &oti->oti_idx_it;
3089         struct iam_rec         *iam_rec;
3090         int                     rc;
3091
3092         ENTRY;
3093
3094         LASSERT(osd_invariant(obj));
3095         LASSERT(dt_object_exists(dt));
3096         LASSERT(bag->ic_object == obj->oo_inode);
3097
3098         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
3099                 RETURN(-EACCES);
3100
3101         ipd = osd_idx_ipd_get(env, bag);
3102         if (unlikely(ipd == NULL))
3103                 RETURN(-ENOMEM);
3104
3105         /* got ipd now we can start iterator. */
3106         iam_it_init(it, bag, 0, ipd);
3107
3108         if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3109                 /* swab quota uid/gid provided by caller */
3110                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3111                 key = (const struct dt_key *)&oti->oti_quota_id;
3112         }
3113
3114         rc = iam_it_get(it, (struct iam_key *)key);
3115         if (rc >= 0) {
3116                 if (S_ISDIR(obj->oo_inode->i_mode))
3117                         iam_rec = (struct iam_rec *)oti->oti_ldp;
3118                 else
3119                         iam_rec = (struct iam_rec *) rec;
3120
3121                 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
3122
3123                 if (S_ISDIR(obj->oo_inode->i_mode))
3124                         osd_fid_unpack((struct lu_fid *) rec,
3125                                        (struct osd_fid_pack *)iam_rec);
3126                 else if (fid_is_quota(lu_object_fid(&dt->do_lu)))
3127                         osd_quota_unpack(obj, rec);
3128         }
3129
3130         iam_it_put(it);
3131         iam_it_fini(it);
3132         osd_ipd_put(env, bag, ipd);
3133
3134         LINVRNT(osd_invariant(obj));
3135
3136         RETURN(rc);
3137 }
3138
3139 static int osd_index_declare_iam_insert(const struct lu_env *env,
3140                                         struct dt_object *dt,
3141                                         const struct dt_rec *rec,
3142                                         const struct dt_key *key,
3143                                         struct thandle *handle)
3144 {
3145         struct osd_thandle *oh;
3146
3147         LASSERT(dt_object_exists(dt));
3148         LASSERT(handle != NULL);
3149
3150         oh = container_of0(handle, struct osd_thandle, ot_super);
3151         LASSERT(oh->ot_handle == NULL);
3152
3153         OSD_DECLARE_OP(oh, insert, osd_dto_credits_noquota[DTO_INDEX_INSERT]);
3154
3155         return 0;
3156 }
3157
3158 /**
3159  *      Inserts (key, value) pair in \a dt index object.
3160  *
3161  *      \param  dt      osd index object
3162  *      \param  key     key for index
3163  *      \param  rec     record reference
3164  *      \param  th      transaction handle
3165  *
3166  *      \retval  0  success
3167  *      \retval -ve failure
3168  */
3169 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
3170                                 const struct dt_rec *rec,
3171                                 const struct dt_key *key, struct thandle *th,
3172                                 struct lustre_capa *capa, int ignore_quota)
3173 {
3174         struct osd_object     *obj = osd_dt_obj(dt);
3175         struct iam_path_descr *ipd;
3176         struct osd_thandle    *oh;
3177         struct iam_container  *bag = &obj->oo_dir->od_container;
3178         struct osd_thread_info *oti = osd_oti_get(env);
3179         struct iam_rec         *iam_rec;
3180         int                     rc;
3181
3182         ENTRY;
3183
3184         LINVRNT(osd_invariant(obj));
3185         LASSERT(dt_object_exists(dt));
3186         LASSERT(bag->ic_object == obj->oo_inode);
3187         LASSERT(th != NULL);
3188
3189         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3190                 RETURN(-EACCES);
3191
3192         OSD_EXEC_OP(th, insert);
3193
3194         ipd = osd_idx_ipd_get(env, bag);
3195         if (unlikely(ipd == NULL))
3196                 RETURN(-ENOMEM);
3197
3198         oh = container_of0(th, struct osd_thandle, ot_super);
3199         LASSERT(oh->ot_handle != NULL);
3200         LASSERT(oh->ot_handle->h_transaction != NULL);
3201         if (S_ISDIR(obj->oo_inode->i_mode)) {
3202                 iam_rec = (struct iam_rec *)oti->oti_ldp;
3203                 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
3204         } else if (fid_is_quota(lu_object_fid(&dt->do_lu))) {
3205                 /* pack quota uid/gid */
3206                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3207                 key = (const struct dt_key *)&oti->oti_quota_id;
3208                 /* pack quota record */
3209                 rec = osd_quota_pack(obj, rec, &oti->oti_quota_rec);
3210                 iam_rec = (struct iam_rec *)rec;
3211         } else {
3212                 iam_rec = (struct iam_rec *)rec;
3213         }
3214
3215         rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
3216                         iam_rec, ipd);
3217         osd_ipd_put(env, bag, ipd);
3218         LINVRNT(osd_invariant(obj));
3219         RETURN(rc);
3220 }
3221
3222 /**
3223  * Calls ldiskfs_add_entry() to add directory entry
3224  * into the directory. This is required for
3225  * interoperability mode (b11826)
3226  *
3227  * \retval   0, on success
3228  * \retval -ve, on error
3229  */
3230 static int __osd_ea_add_rec(struct osd_thread_info *info,
3231                             struct osd_object *pobj, struct inode  *cinode,
3232                             const char *name, const struct dt_rec *fid,
3233                             struct htree_lock *hlock, struct thandle *th)
3234 {
3235         struct ldiskfs_dentry_param *ldp;
3236         struct dentry               *child;
3237         struct osd_thandle          *oth;
3238         int                          rc;
3239
3240         oth = container_of(th, struct osd_thandle, ot_super);
3241         LASSERT(oth->ot_handle != NULL);
3242         LASSERT(oth->ot_handle->h_transaction != NULL);
3243
3244         child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
3245
3246         /* XXX: remove fid_is_igif() check here.
3247          * IGIF check is just to handle insertion of .. when it is 'ROOT',
3248          * it is IGIF now but needs FID in dir entry as well for readdir
3249          * to work.
3250          * LU-838 should fix that and remove fid_is_igif() check */
3251         if (fid_is_igif((struct lu_fid *)fid) ||
3252             fid_is_norm((struct lu_fid *)fid)) {
3253                 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3254                 osd_get_ldiskfs_dirent_param(ldp, fid);
3255                 child->d_fsdata = (void *)ldp;
3256         } else {
3257                 child->d_fsdata = NULL;
3258         }
3259         LASSERT(pobj->oo_inode);
3260         ll_vfs_dq_init(pobj->oo_inode);
3261         rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
3262
3263         RETURN(rc);
3264 }
3265
3266 /**
3267  * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
3268  * into the directory. Also sets flags in the osd object to
3269  * indicate that dot and dotdot have been created. This is required for
3270  * interoperability mode (b11826)
3271  *
3272  * \param dir   directory for dot and dotdot fixup.
3273  * \param obj   child object for linking
3274  *
3275  * \retval   0, on success
3276  * \retval -ve, on error
3277  */
3278 static int osd_add_dot_dotdot(struct osd_thread_info *info,
3279                               struct osd_object *dir,
3280                               struct inode  *parent_dir, const char *name,
3281                               const struct dt_rec *dot_fid,
3282                               const struct dt_rec *dot_dot_fid,
3283                               struct thandle *th)
3284 {
3285         struct inode                *inode = dir->oo_inode;
3286         struct ldiskfs_dentry_param *dot_ldp;
3287         struct ldiskfs_dentry_param *dot_dot_ldp;
3288         struct osd_thandle          *oth;
3289         int result = 0;
3290
3291         oth = container_of(th, struct osd_thandle, ot_super);
3292         LASSERT(oth->ot_handle->h_transaction != NULL);
3293         LASSERT(S_ISDIR(dir->oo_inode->i_mode));
3294
3295         if (strcmp(name, dot) == 0) {
3296                 if (dir->oo_compat_dot_created) {
3297                         result = -EEXIST;
3298                 } else {
3299                         LASSERT(inode == parent_dir);
3300                         dir->oo_compat_dot_created = 1;
3301                         result = 0;
3302                 }
3303         } else if (strcmp(name, dotdot) == 0) {
3304                 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3305                 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
3306
3307                 if (!dir->oo_compat_dot_created)
3308                         return -EINVAL;
3309                 if (!fid_is_igif((struct lu_fid *)dot_fid)) {
3310                         osd_get_ldiskfs_dirent_param(dot_ldp, dot_fid);
3311                         osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3312                 } else {
3313                         dot_ldp = NULL;
3314                         dot_dot_ldp = NULL;
3315                 }
3316                 /* in case of rename, dotdot is already created */
3317                 if (dir->oo_compat_dotdot_created) {
3318                         return __osd_ea_add_rec(info, dir, parent_dir, name,
3319                                                 dot_dot_fid, NULL, th);
3320                 }
3321
3322                 result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3323                                                 inode, dot_ldp, dot_dot_ldp);
3324                 if (result == 0)
3325                        dir->oo_compat_dotdot_created = 1;
3326         }
3327
3328         return result;
3329 }
3330
3331
3332 /**
3333  * It calls the appropriate osd_add* function and returns the
3334  * value returned by that function.
3335  */
3336 static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
3337                           struct inode *cinode, const char *name,
3338                           const struct dt_rec *fid, struct thandle *th)
3339 {
3340         struct osd_thread_info *info   = osd_oti_get(env);
3341         struct htree_lock      *hlock;
3342         int                     rc;
3343
3344         hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
3345
3346         if (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' &&
3347                                                    name[2] =='\0'))) {
3348                 if (hlock != NULL) {
3349                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3350                                            pobj->oo_inode, 0);
3351                 } else {
3352                         down_write(&pobj->oo_ext_idx_sem);
3353                 }
3354                 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
3355                      (struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
3356                                         fid, th);
3357         } else {
3358                 if (hlock != NULL) {
3359                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3360                                            pobj->oo_inode, LDISKFS_HLOCK_ADD);
3361                 } else {
3362                         down_write(&pobj->oo_ext_idx_sem);
3363                 }
3364
3365                 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
3366                                       hlock, th);
3367         }
3368         if (hlock != NULL)
3369                 ldiskfs_htree_unlock(hlock);
3370         else
3371                 up_write(&pobj->oo_ext_idx_sem);
3372
3373         return rc;
3374 }
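
/*
 * Ordering sketch (hypothetical, illustration only): the compat flags in
 * osd_add_dot_dotdot() force "." to be inserted before "..".  Through
 * osd_ea_add_rec() that means, for a freshly created directory object
 * "dir" (dot/dotdot are the constants defined at the top of this file;
 * dir_fid, parent_inode and parent_fid are caller-supplied placeholders):
 *
 *      rc = osd_ea_add_rec(env, dir, dir->oo_inode, dot,
 *                          (const struct dt_rec *)dir_fid, th);
 *      if (rc == 0)
 *              rc = osd_ea_add_rec(env, dir, parent_inode, dotdot,
 *                                  (const struct dt_rec *)parent_fid, th);
 *
 * Inserting ".." first hits the -EINVAL check in osd_add_dot_dotdot(), and
 * a duplicate "." returns -EEXIST.
 */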
3375
3376 static void
3377 osd_consistency_check(struct osd_thread_info *oti, struct osd_device *dev,
3378                       struct osd_idmap_cache *oic)
3379 {
3380         struct osd_scrub    *scrub = &dev->od_scrub;
3381         struct lu_fid       *fid   = &oic->oic_fid;
3382         struct osd_inode_id *id    = &oti->oti_id;
3383         int                  once  = 0;
3384         int                  rc;
3385         ENTRY;
3386
3387         if (!fid_is_norm(fid) && !fid_is_igif(fid))
3388                 RETURN_EXIT;
3389
3390 again:
3391         rc = osd_oi_lookup(oti, dev, fid, id);
3392         if (rc != 0 && rc != -ENOENT)
3393                 RETURN_EXIT;
3394
3395         if (rc == 0 && osd_id_eq(id, &oic->oic_lid))
3396                 RETURN_EXIT;
3397
3398         if (thread_is_running(&scrub->os_thread)) {
3399                 rc = osd_oii_insert(dev, oic, rc == -ENOENT);
3400                 /* There is a race between osd_oi_lookup() and the OI scrub:
3401                  * the scrub may have finished just after the osd_oi_lookup()
3402                  * failure. In that case there is no need to trigger the OI
3403                  * scrub again; just retry osd_oi_lookup(). */
3404                 if (unlikely(rc == -EAGAIN))
3405                         goto again;
3406
3407                 RETURN_EXIT;
3408         }
3409
3410         if (!dev->od_noscrub && ++once == 1) {
3411                 CDEBUG(D_LFSCK, "Trigger OI scrub by RPC for "DFID"\n",
3412                        PFID(fid));
3413                 rc = osd_scrub_start(dev);
3414                 LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC for "DFID
3415                                ", rc = %d [2]\n",
3416                                LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
3417                                PFID(fid), rc);
3418                 if (rc == 0)
3419                         goto again;
3420         }
3421
3422         EXIT;
3423 }
3424
3425 /**
3426  * Calls ->lookup() to find the dentry. From the dentry get the inode and
3427  * read the inode's EA to get the fid. This is required for interoperability
3428  * mode (b11826).
3429  *
3430  * \retval   0, on success
3431  * \retval -ve, on error
3432  */
3433 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
3434                              struct dt_rec *rec, const struct dt_key *key)
3435 {
3436         struct inode               *dir    = obj->oo_inode;
3437         struct dentry              *dentry;
3438         struct ldiskfs_dir_entry_2 *de;
3439         struct buffer_head         *bh;
3440         struct lu_fid              *fid = (struct lu_fid *) rec;
3441         struct htree_lock          *hlock = NULL;
3442         int                         ino;
3443         int                         rc;
3444
3445         LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
3446
3447         dentry = osd_child_dentry_get(env, obj,
3448                                       (char *)key, strlen((char *)key));
3449
3450         if (obj->oo_hl_head != NULL) {
3451                 hlock = osd_oti_get(env)->oti_hlock;
3452                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3453                                    dir, LDISKFS_HLOCK_LOOKUP);
3454         } else {
3455                 down_read(&obj->oo_ext_idx_sem);
3456         }
3457
3458         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
3459         if (bh) {
3460                 struct osd_thread_info *oti = osd_oti_get(env);
3461                 struct osd_idmap_cache *oic = &oti->oti_cache;
3462                 struct osd_device *dev = osd_obj2dev(obj);
3463                 struct osd_scrub *scrub = &dev->od_scrub;
3464                 struct scrub_file *sf = &scrub->os_file;
3465
3466                 ino = le32_to_cpu(de->inode);
3467                 rc = osd_get_fid_from_dentry(de, rec);
3468
3469                 /* done with de, release bh */
3470                 brelse(bh);
3471                 if (rc != 0)
3472                         rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
3473                 else
3474                         osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
3475                 if (rc != 0) {
3476                         fid_zero(&oic->oic_fid);
3477                         GOTO(out, rc);
3478                 }
3479
3480                 oic->oic_fid = *fid;
3481                 if ((scrub->os_pos_current <= ino) &&
3482                     (sf->sf_flags & SF_INCONSISTENT ||
3483                      ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
3484                                       sf->sf_oi_bitmap)))
3485                         osd_consistency_check(oti, dev, oic);
3486         } else {
3487                 rc = -ENOENT;
3488         }
3489
3490         GOTO(out, rc);
3491
3492 out:
3493         if (hlock != NULL)
3494                 ldiskfs_htree_unlock(hlock);
3495         else
3496                 up_read(&obj->oo_ext_idx_sem);
3497         return rc;
3498 }
3499
3500 /**
3501  * Find the osd object for given fid.
3502  *
3503  * \param fid fid of the osd object to find
3504  *
3505  * \retval osd_object on success
3506  * \retval        ERR_PTR(-ve) on error
3507  */
3508 struct osd_object *osd_object_find(const struct lu_env *env,
3509                                    struct dt_object *dt,
3510                                    const struct lu_fid *fid)
3511 {
3512         struct lu_device  *ludev = dt->do_lu.lo_dev;
3513         struct osd_object *child = NULL;
3514         struct lu_object  *luch;
3515         struct lu_object  *lo;
3516
3517         /*
3518          * at this point topdev might not exist yet
3519          * (i.e. MGS is preparing profiles). so we can
3520          * not rely on topdev and instead lookup with
3521          * our device passed as topdev. this can't work
3522          * if the object isn't cached yet (as osd doesn't
3523          * allocate lu_header). IOW, the object must be
3524          * in the cache, otherwise lu_object_alloc() crashes
3525          * -bzzz
3526          */
3527         luch = lu_object_find_at(env, ludev, fid, NULL);
3528         if (!IS_ERR(luch)) {
3529                 if (lu_object_exists(luch)) {
3530                         lo = lu_object_locate(luch->lo_header, ludev->ld_type);
3531                         if (lo != NULL)
3532                                 child = osd_obj(lo);
3533                         else
3534                                 LU_OBJECT_DEBUG(D_ERROR, env, luch,
3535                                                 "lu_object can't be located"
3536                                                 DFID"\n", PFID(fid));
3537
3538                         if (child == NULL) {
3539                                 lu_object_put(env, luch);
3540                                 CERROR("Unable to get osd_object\n");
3541                                 child = ERR_PTR(-ENOENT);
3542                         }
3543                 } else {
3544                         LU_OBJECT_DEBUG(D_ERROR, env, luch,
3545                                         "lu_object does not exist "DFID"\n",
3546                                         PFID(fid));
3547                         lu_object_put(env, luch);
3548                         child = ERR_PTR(-ENOENT);
3549                 }
3550         } else
3551                 child = (void *)luch;
3552
3553         return child;
3554 }
3555
3556 /**
3557  * Put the osd object once done with it.
3558  *
3559  * \param obj osd object that needs to be put
3560  */
3561 static inline void osd_object_put(const struct lu_env *env,
3562                                   struct osd_object *obj)
3563 {
3564         lu_object_put(env, &obj->oo_dt.do_lu);
3565 }
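
/*
 * A minimal usage sketch (hypothetical) of the find/put pair above, matching
 * what osd_index_ea_insert() below does: every successful osd_object_find()
 * must be balanced by osd_object_put() once the child's inode is no longer
 * needed, otherwise the lu_object reference is leaked.
 *
 *      child = osd_object_find(env, dt, fid);
 *      if (IS_ERR(child))
 *              return PTR_ERR(child);
 *      ... use child->oo_inode ...
 *      osd_object_put(env, child);
 */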
3566
3567 static int osd_index_declare_ea_insert(const struct lu_env *env,
3568                                        struct dt_object *dt,
3569                                        const struct dt_rec *rec,
3570                                        const struct dt_key *key,
3571                                        struct thandle *handle)
3572 {
3573         struct osd_thandle      *oh;
3574         struct inode            *inode;
3575         struct lu_fid           *fid = (struct lu_fid *)rec;
3576         int                     rc;
3577         ENTRY;
3578
3579         LASSERT(dt_object_exists(dt));
3580         LASSERT(handle != NULL);
3581
3582         oh = container_of0(handle, struct osd_thandle, ot_super);
3583         LASSERT(oh->ot_handle == NULL);
3584
3585         OSD_DECLARE_OP(oh, insert, osd_dto_credits_noquota[DTO_INDEX_INSERT]);
3586
3587         inode = osd_dt_obj(dt)->oo_inode;
3588         LASSERT(inode);
3589
3590         /* We ignore block quota on the meta pool (MDTs), so we needn't
3591          * calculate how many blocks will be consumed by this index
3592          * insert */
3593         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
3594                                    true, true, NULL, false);
3595         if (fid == NULL)
3596                 RETURN(0);
3597
3598         /* The fld lookup is done here at declare time and the result is
3599          * added to the fld cache, so the fld lookup inside the insert does
3600          * not need to send an RPC; this avoids sending an RPC while
3601          * holding the transaction open. */
3602         LASSERTF(fid_is_sane(fid), "fid is insane "DFID"\n", PFID(fid));
3603         osd_fld_lookup(env, osd_dt_dev(handle->th_dev), fid,
3604                         &osd_oti_get(env)->oti_seq_range);
3605
3606         RETURN(rc);
3607 }
3608
3609 /**
3610  * Index add function for interoperability mode (b11826).
3611  * It will add the directory entry. This entry is needed to
3612  * maintain name->fid mapping.
3613  *
3614  * \param key it is key i.e. file entry to be inserted
3615  * \param rec it is value of given key i.e. fid
3616  *
3617  * \retval   0, on success
3618  * \retval -ve, on error
3619  */
3620 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
3621                                const struct dt_rec *rec,
3622                                const struct dt_key *key, struct thandle *th,
3623                                struct lustre_capa *capa, int ignore_quota)
3624 {
3625         struct osd_object *obj   = osd_dt_obj(dt);
3626         struct lu_fid     *fid   = (struct lu_fid *) rec;
3627         const char        *name  = (const char *)key;
3628         struct osd_object *child;
3629         int                rc;
3630
3631         ENTRY;
3632
3633         LASSERT(osd_invariant(obj));
3634         LASSERT(dt_object_exists(dt));
3635         LASSERT(th != NULL);
3636
3637         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3638                 RETURN(-EACCES);
3639
3640         child = osd_object_find(env, dt, fid);
3641         if (!IS_ERR(child)) {
3642                 rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
3643                 osd_object_put(env, child);
3644         } else {
3645                 rc = PTR_ERR(child);
3646         }
3647
3648         LASSERT(osd_invariant(obj));
3649         RETURN(rc);
3650 }
3651
3652 /**
3653  *  Initialize osd Iterator for given osd index object.
3654  *
3655  *  \param  dt      osd index object
3656  */
3657
3658 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
3659                                      struct dt_object *dt,
3660                                      __u32 unused,
3661                                      struct lustre_capa *capa)
3662 {
3663         struct osd_it_iam      *it;
3664         struct osd_thread_info *oti = osd_oti_get(env);
3665         struct osd_object      *obj = osd_dt_obj(dt);
3666         struct lu_object       *lo  = &dt->do_lu;
3667         struct iam_path_descr  *ipd;
3668         struct iam_container   *bag = &obj->oo_dir->od_container;
3669
3670         LASSERT(lu_object_exists(lo));
3671
3672         if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
3673                 return ERR_PTR(-EACCES);
3674
3675         it = &oti->oti_it;
3676         ipd = osd_it_ipd_get(env, bag);
3677         if (likely(ipd != NULL)) {
3678                 it->oi_obj = obj;
3679                 it->oi_ipd = ipd;
3680                 lu_object_get(lo);
3681                 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
3682                 return (struct dt_it *)it;
3683         }
3684         return ERR_PTR(-ENOMEM);
3685 }
3686
3687 /**
3688  * free given Iterator.
3689  */
3690
3691 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
3692 {
3693         struct osd_it_iam *it = (struct osd_it_iam *)di;
3694         struct osd_object *obj = it->oi_obj;
3695
3696         iam_it_fini(&it->oi_it);
3697         osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
3698         lu_object_put(env, &obj->oo_dt.do_lu);
3699 }
3700
3701 /**
3702  *  Move Iterator to record specified by \a key
3703  *
3704  *  \param  di      osd iterator
3705  *  \param  key     key for index
3706  *
3707  *  \retval +ve  di points to record with least key not larger than key
3708  *  \retval  0   di points to exact matched key
3709  *  \retval -ve  failure
3710  */
3711
3712 static int osd_it_iam_get(const struct lu_env *env,
3713                           struct dt_it *di, const struct dt_key *key)
3714 {
3715         struct osd_thread_info  *oti = osd_oti_get(env);
3716         struct osd_it_iam       *it = (struct osd_it_iam *)di;
3717
3718         if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
3719                 /* swab quota uid/gid */
3720                 oti->oti_quota_id = cpu_to_le64(*((__u64 *)key));
3721                 key = (struct dt_key *)&oti->oti_quota_id;
3722         }
3723
3724         return iam_it_get(&it->oi_it, (const struct iam_key *)key);
3725 }
3726
3727 /**
3728  *  Release Iterator
3729  *
3730  *  \param  di      osd iterator
3731  */
3732
3733 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
3734 {
3735         struct osd_it_iam *it = (struct osd_it_iam *)di;
3736
3737         iam_it_put(&it->oi_it);
3738 }
3739
3740 /**
3741  *  Move iterator by one record
3742  *
3743  *  \param  di      osd iterator
3744  *
3745  *  \retval +1   end of container reached
3746  *  \retval  0   success
3747  *  \retval -ve  failure
3748  */
3749
3750 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
3751 {
3752         struct osd_it_iam *it = (struct osd_it_iam *)di;
3753
3754         return iam_it_next(&it->oi_it);
3755 }
3756
3757 /**
3758  * Return pointer to the key under iterator.
3759  */
3760
3761 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
3762                                  const struct dt_it *di)
3763 {
3764         struct osd_thread_info *oti = osd_oti_get(env);
3765         struct osd_it_iam      *it = (struct osd_it_iam *)di;
3766         struct osd_object      *obj = it->oi_obj;
3767         struct dt_key          *key;
3768
3769         key = (struct dt_key *)iam_it_key_get(&it->oi_it);
3770
3771         if (!IS_ERR(key) && fid_is_quota(lu_object_fid(&obj->oo_dt.do_lu))) {
3772                 /* swab quota uid/gid */
3773                 oti->oti_quota_id = le64_to_cpu(*((__u64 *)key));
3774                 key = (struct dt_key *)&oti->oti_quota_id;
3775         }
3776
3777         return key;
3778 }
3779
3780 /**
3781  * Return size of key under iterator (in bytes)
3782  */
3783
3784 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
3785 {
3786         struct osd_it_iam *it = (struct osd_it_iam *)di;
3787
3788         return iam_it_key_size(&it->oi_it);
3789 }
3790
3791 static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
3792                                        int len, __u16 type)
3793 {
3794         struct luda_type *lt;
3795         const unsigned    align = sizeof(struct luda_type) - 1;
3796
3797         /* check if file type is required */
3798         if (attr & LUDA_TYPE) {
3799                 len = (len + align) & ~align;
3800
3801                 lt = (void *) ent->lde_name + len;
3802                 lt->lt_type = cpu_to_le16(CFS_DTTOIF(type));
3803                 ent->lde_attrs |= LUDA_TYPE;
3804         }
3805
3806         ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
3807 }
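
/*
 * Worked example of the alignment above (illustrative; assumes struct
 * luda_type holds only the __u16 lt_type set here, so align == 1): a
 * 5-byte name gives len = (5 + 1) & ~1 = 6, and the luda_type record is
 * stored 2-byte aligned immediately after the name inside the lu_dirent
 * buffer.
 */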
3808
3809 /**
3810  * Build a lu_dirent from a backend fs dirent.
3811  */
3812
3813 static inline void osd_it_pack_dirent(struct lu_dirent *ent,
3814                                       struct lu_fid *fid, __u64 offset,
3815                                       char *name, __u16 namelen,
3816                                       __u16 type, __u32 attr)
3817 {
3818         fid_cpu_to_le(&ent->lde_fid, fid);
3819         ent->lde_attrs = LUDA_FID;
3820
3821         ent->lde_hash = cpu_to_le64(offset);
3822         ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
3823
3824         strncpy(ent->lde_name, name, namelen);
3825         ent->lde_namelen = cpu_to_le16(namelen);
3826
3827         /* append lustre attributes */
3828         osd_it_append_attrs(ent, attr, namelen, type);
3829 }
3830
3831 /**
3832  * Return pointer to the record under iterator.
3833  */
3834 static int osd_it_iam_rec(const struct lu_env *env,
3835                           const struct dt_it *di,
3836                           struct dt_rec *dtrec, __u32 attr)
3837 {
3838         struct osd_it_iam      *it   = (struct osd_it_iam *)di;
3839         struct osd_thread_info *info = osd_oti_get(env);
3840         ENTRY;
3841
3842         if (S_ISDIR(it->oi_obj->oo_inode->i_mode)) {
3843                 const struct osd_fid_pack *rec;
3844                 struct lu_fid             *fid = &info->oti_fid;
3845                 struct lu_dirent          *lde = (struct lu_dirent *)dtrec;
3846                 char                      *name;
3847                 int                        namelen;
3848                 __u64                      hash;
3849                 int                        rc;
3850
3851                 name = (char *)iam_it_key_get(&it->oi_it);
3852                 if (IS_ERR(name))
3853                         RETURN(PTR_ERR(name));
3854
3855                 namelen = iam_it_key_size(&it->oi_it);
3856
3857                 rec = (const struct osd_fid_pack *)iam_it_rec_get(&it->oi_it);
3858                 if (IS_ERR(rec))
3859                         RETURN(PTR_ERR(rec));
3860
3861                 rc = osd_fid_unpack(fid, rec);
3862                 if (rc)
3863                         RETURN(rc);
3864
3865                 hash = iam_it_store(&it->oi_it);
3866
3867                 /* IAM does not store object type in IAM index (dir) */
3868                 osd_it_pack_dirent(lde, fid, hash, name, namelen,
3869                                    0, LUDA_FID);
3870         } else if (fid_is_quota(lu_object_fid(&it->oi_obj->oo_dt.do_lu))) {
3871                 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
3872                            (struct iam_rec *)dtrec);
3873                 osd_quota_unpack(it->oi_obj, dtrec);
3874         } else {
3875                 iam_reccpy(&it->oi_it.ii_path.ip_leaf,
3876                            (struct iam_rec *)dtrec);
3877         }
3878
3879         RETURN(0);
3880 }
3881
3882 /**
3883  * Returns cookie for current Iterator position.
3884  */
3885 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
3886 {
3887         struct osd_it_iam *it = (struct osd_it_iam *)di;
3888
3889         return iam_it_store(&it->oi_it);
3890 }
3891
3892 /**
3893  * Restore iterator from cookie.
3894  *
3895  * \param  di      osd iterator
3896  * \param  hash    Iterator location cookie
3897  *
3898  * \retval +ve  di points to record with least key not larger than key.
3899  * \retval  0   di points to exact matched key
3900  * \retval -ve  failure
3901  */
3902
3903 static int osd_it_iam_load(const struct lu_env *env,
3904                            const struct dt_it *di, __u64 hash)
3905 {
3906         struct osd_it_iam *it = (struct osd_it_iam *)di;
3907
3908         return iam_it_load(&it->oi_it, hash);
3909 }
3910
3911 static const struct dt_index_operations osd_index_iam_ops = {
3912         .dio_lookup         = osd_index_iam_lookup,
3913         .dio_declare_insert = osd_index_declare_iam_insert,
3914         .dio_insert         = osd_index_iam_insert,
3915         .dio_declare_delete = osd_index_declare_iam_delete,
3916         .dio_delete         = osd_index_iam_delete,
3917         .dio_it     = {
3918                 .init     = osd_it_iam_init,
3919                 .fini     = osd_it_iam_fini,
3920                 .get      = osd_it_iam_get,
3921                 .put      = osd_it_iam_put,
3922                 .next     = osd_it_iam_next,
3923                 .key      = osd_it_iam_key,
3924                 .key_size = osd_it_iam_key_size,
3925                 .rec      = osd_it_iam_rec,
3926                 .store    = osd_it_iam_store,
3927                 .load     = osd_it_iam_load
3928         }
3929 };
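
/*
 * A hypothetical dispatch sketch: once ->do_index_try() (see the object
 * operation tables above) has installed osd_index_iam_ops on a dt object,
 * a generic caller reaches osd_index_iam_lookup() through the method table
 * rather than by direct call.  BYPASS_CAPA is assumed as the usual "no
 * capability check" argument; a real caller passes its own capa.
 *
 *      rc = dt->do_index_ops->dio_lookup(env, dt, rec, key, BYPASS_CAPA);
 *      if (rc > 0)
 *              ... exact match, rec was filled in ...
 *      else if (rc == 0)
 *              ... positioned on the record whose key is not greater
 *                  than the requested key ...
 */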
3930
3931
3932 /**
3933  * Creates or initializes iterator context.
3934  *
3935  * \retval struct osd_it_ea, iterator structure on success
3936  *
3937  */
3938 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
3939                                     struct dt_object *dt,
3940                                     __u32 attr,
3941                                     struct lustre_capa *capa)
3942 {
3943         struct osd_object       *obj  = osd_dt_obj(dt);
3944         struct osd_thread_info  *info = osd_oti_get(env);
3945         struct osd_it_ea        *it   = &info->oti_it_ea;
3946         struct lu_object        *lo   = &dt->do_lu;
3947         struct dentry           *obj_dentry = &info->oti_it_dentry;
3948         ENTRY;
3949         LASSERT(lu_object_exists(lo));
3950
3951         obj_dentry->d_inode = obj->oo_inode;
3952         obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
3953         obj_dentry->d_name.hash = 0;
3954
3955         it->oie_rd_dirent       = 0;
3956         it->oie_it_dirent       = 0;
3957         it->oie_dirent          = NULL;
3958         it->oie_buf             = info->oti_it_ea_buf;
3959         it->oie_obj             = obj;
3960         it->oie_file.f_pos      = 0;
3961         it->oie_file.f_dentry   = obj_dentry;
3962         if (attr & LUDA_64BITHASH)
3963                 it->oie_file.f_mode |= FMODE_64BITHASH;
3964         else
3965                 it->oie_file.f_mode |= FMODE_32BITHASH;
3966         it->oie_file.f_mapping    = obj->oo_inode->i_mapping;
3967         it->oie_file.f_op         = obj->oo_inode->i_fop;
3968         it->oie_file.private_data = NULL;
3969         lu_object_get(lo);
3970         RETURN((struct dt_it *) it);
3971 }
3972
3973 /**
3974  * Destroy or finishes iterator context.
3975  *
3976  * \param di iterator structure to be destroyed
3977  */
3978 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
3979 {
3980         struct osd_it_ea     *it   = (struct osd_it_ea *)di;
3981         struct osd_object    *obj  = it->oie_obj;
3982         struct inode       *inode  = obj->oo_inode;
3983
3984         ENTRY;
3985         it->oie_file.f_op->release(inode, &it->oie_file);
3986         lu_object_put(env, &obj->oo_dt.do_lu);
3987         EXIT;
3988 }
3989
3990 /**
3991  * It positions the iterator at the given key, so that the next lookup
3992  * continues from that key. It is similar to dio_it->load(), but based on
3993  * a key rather than a file position.
3994  *
3995  * As a special convention, osd_it_ea_get(env, di, "") has to rewind iterator
3996  * to the beginning.
3997  *
3998  * TODO: Presently returns +1, since it is only used by mdd_dir_is_empty().
3999  */
4000 static int osd_it_ea_get(const struct lu_env *env,
4001                          struct dt_it *di, const struct dt_key *key)
4002 {
4003         struct osd_it_ea     *it   = (struct osd_it_ea *)di;
4004
4005         ENTRY;
4006         LASSERT(((const char *)key)[0] == '\0');
4007         it->oie_file.f_pos      = 0;
4008         it->oie_rd_dirent       = 0;
4009         it->oie_it_dirent       = 0;
4010         it->oie_dirent          = NULL;
4011
4012         RETURN(+1);
4013 }
4014
4015 /**
4016  * Does nothing
4017  */
4018 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
4019 {
4020 }
4021
4022 /**
4023  * It is called internally by ->readdir(). It fills the
4024  * iterator's in-memory data structure with required
4025  * information, i.e. name, namelen, rec_size, etc.
4026  *
4027  * \param buf in which information to be filled in.
4028  * \param name name of the file in given dir
4029  *
4030  * \retval 0 on success
4031  * \retval 1 on buffer full
4032  */
4033 static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
4034                                loff_t offset, __u64 ino,
4035                                unsigned d_type)
4036 {
4037         struct osd_it_ea        *it   = (struct osd_it_ea *)buf;
4038         struct osd_it_ea_dirent *ent  = it->oie_dirent;
4039         struct lu_fid           *fid  = &ent->oied_fid;
4040         struct osd_fid_pack     *rec;
4041         ENTRY;
4042
4043         /* this should never happen */
4044         if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
4045                 CERROR("ldiskfs returned invalid namelen %d\n", namelen);
4046                 RETURN(-EIO);
4047         }
4048
4049         if ((void *) ent - it->oie_buf + sizeof(*ent) + namelen >
4050             OSD_IT_EA_BUFSIZE)
4051                 RETURN(1);
4052
4053         if (d_type & LDISKFS_DIRENT_LUFID) {
4054                 rec = (struct osd_fid_pack*) (name + namelen + 1);
4055
4056                 if (osd_fid_unpack(fid, rec) != 0)
4057                         fid_zero(fid);
4058
4059                 d_type &= ~LDISKFS_DIRENT_LUFID;
4060         } else {
4061                 fid_zero(fid);
4062         }
4063
4064         ent->oied_ino     = ino;
4065         ent->oied_off     = offset;
4066         ent->oied_namelen = namelen;
4067         ent->oied_type    = d_type;
4068
4069         memcpy(ent->oied_name, name, namelen);
4070
4071         it->oie_rd_dirent++;
4072         it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
4073         RETURN(0);
4074 }
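
/*
 * Buffer layout sketch (illustrative): each entry packed by
 * osd_ldiskfs_filldir() occupies
 * cfs_size_round(sizeof(struct osd_it_ea_dirent) + namelen) bytes of
 * it->oie_buf, i.e. the header plus the name, rounded up so that the next
 * entry starts suitably aligned.  The size check above returns 1 ("buffer
 * full") before an entry would run past OSD_IT_EA_BUFSIZE; the iterator
 * then serves the buffered entries via osd_it_ea_next() before refilling.
 */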
4075
4076 /**
4077  * Calls ->readdir() to load a directory entry at a time
4078  * and store it in the iterator's in-memory data structure.
4079  *
4080  * \param di iterator's in memory structure
4081  *
4082  * \retval   0 on success
4083  * \retval -ve on error
4084  */
4085 static int osd_ldiskfs_it_fill(const struct lu_env *env,
4086                                const struct dt_it *di)
4087 {
4088         struct osd_it_ea   *it    = (struct osd_it_ea *)di;
4089         struct osd_object  *obj   = it->oie_obj;
4090         struct inode       *inode = obj->oo_inode;
4091         struct htree_lock  *hlock = NULL;
4092         int                 result = 0;
4093
4094         ENTRY;
4095         it->oie_dirent = it->oie_buf;
4096         it->oie_rd_dirent = 0;
4097
4098         if (obj->oo_hl_head != NULL) {
4099                 hlock = osd_oti_get(env)->oti_hlock;
4100                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
4101                                    inode, LDISKFS_HLOCK_READDIR);
4102         } else {
4103                 down_read(&obj->oo_ext_idx_sem);
4104         }
4105
4106         result = inode->i_fop->readdir(&it->oie_file, it,
4107                                        (filldir_t) osd_ldiskfs_filldir);
4108
4109         if (hlock != NULL)
4110                 ldiskfs_htree_unlock(hlock);
4111         else
4112                 up_read(&obj->oo_ext_idx_sem);
4113
4114         if (it->oie_rd_dirent == 0) {
4115                 result = -EIO;
4116         } else {
4117                 it->oie_dirent = it->oie_buf;
4118                 it->oie_it_dirent = 1;
4119         }
4120
4121         RETURN(result);
4122 }
4123
4124 /**
4125  * It calls osd_ldiskfs_it_fill() which will use ->readdir()
4126  * to load a directory entry at a time and stored it in
4127  * iterator's in-memory data structure.
4128  *
4129  * \param di iterator's in memory structure
4130  *
4131  * \retval +ve iterator reached the end
4132  * \retval   0 iterator has not reached the end
4133  * \retval -ve on error
4134  */
4135 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
4136 {
4137         struct osd_it_ea *it = (struct osd_it_ea *)di;
4138         int rc;
4139
4140         ENTRY;
4141
4142         if (it->oie_it_dirent < it->oie_rd_dirent) {
4143                 it->oie_dirent =
4144                         (void *) it->oie_dirent +
4145                         cfs_size_round(sizeof(struct osd_it_ea_dirent) +
4146                                        it->oie_dirent->oied_namelen);
4147                 it->oie_it_dirent++;
4148                 RETURN(0);
4149         } else {
4150                 if (it->oie_file.f_pos == ldiskfs_get_htree_eof(&it->oie_file))
4151                         rc = +1;
4152                 else
4153                         rc = osd_ldiskfs_it_fill(env, di);
4154         }
4155
4156         RETURN(rc);
4157 }
4158
4159 /**
4160  * Returns the key at current position from iterator's in memory structure.
4161  *
4162  * \param di iterator's in memory structure
4163  *
4164  * \retval key i.e. struct dt_key on success
4165  */
4166 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
4167                                     const struct dt_it *di)
4168 {
4169         struct osd_it_ea *it = (struct osd_it_ea *)di;
4170
4171         return (struct dt_key *)it->oie_dirent->oied_name;
4172 }
4173
4174 /**
4175  * Returns the key's size at current position from iterator's in memory structure.
4176  *
4177  * \param di iterator's in memory structure
4178  *
4179  * \retval key_size size of the key (name length) in bytes, on success
4180  */
4181 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
4182 {
4183         struct osd_it_ea *it = (struct osd_it_ea *)di;
4184
4185         return it->oie_dirent->oied_namelen;
4186 }
4187
4188
4189 /**
4190  * Returns the value (i.e. fid/igif) at current position from iterator's
4191  * in memory structure.
4192  *
4193  * \param di struct osd_it_ea, iterator's in memory structure
4194  * \param attr attr requested for dirent.
4195  * \param lde lustre dirent
4196  *
4197  * \retval   0 on success; \a lde holds the lustre dirent.
4198  * \retval -ve on error
4199  */
4200 static inline int osd_it_ea_rec(const struct lu_env *env,
4201                                 const struct dt_it *di,
4202                                 struct dt_rec *dtrec, __u32 attr)
4203 {
4204         struct osd_it_ea       *it    = (struct osd_it_ea *)di;
4205         struct osd_object      *obj   = it->oie_obj;
4206         struct osd_device      *dev   = osd_obj2dev(obj);
4207         struct osd_scrub       *scrub = &dev->od_scrub;
4208         struct scrub_file      *sf    = &scrub->os_file;
4209         struct osd_thread_info *oti   = osd_oti_get(env);
4210         struct osd_idmap_cache *oic   = &oti->oti_cache;
4211         struct lu_fid          *fid   = &it->oie_dirent->oied_fid;
4212         struct lu_dirent       *lde   = (struct lu_dirent *)dtrec;
4213         __u32                   ino   = it->oie_dirent->oied_ino;
4214         int                     rc    = 0;
4215         ENTRY;
4216
4217         if (!fid_is_sane(fid)) {
4218                 rc = osd_ea_fid_get(env, obj, ino, fid, &oic->oic_lid);
4219                 if (rc != 0) {
4220                         fid_zero(&oic->oic_fid);
4221                         RETURN(rc);
4222                 }
4223         } else {
4224                 osd_id_gen(&oic->oic_lid, ino, OSD_OII_NOGEN);
4225         }
4226
4227         osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
4228                            it->oie_dirent->oied_name,
4229                            it->oie_dirent->oied_namelen,
4230                            it->oie_dirent->oied_type, attr);
4231         oic->oic_fid = *fid;
4232         if ((scrub->os_pos_current <= ino) &&
4233             (sf->sf_flags & SF_INCONSISTENT ||
4234              ldiskfs_test_bit(osd_oi_fid2idx(dev, fid), sf->sf_oi_bitmap)))
4235                 osd_consistency_check(oti, dev, oic);
4236
4237         RETURN(rc);
4238 }
4239
4240 /**
4241  * Returns a cookie for current position of the iterator head, so that
4242  * user can use this cookie to load/start the iterator next time.
4243  *
4244  * \param di iterator's in memory structure
4245  *
4246  * \retval cookie for current position, on success
4247  */
4248 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
4249 {
4250         struct osd_it_ea *it = (struct osd_it_ea *)di;
4251
4252         return it->oie_dirent->oied_off;
4253 }
4254
4255 /**
4256  * It calls osd_ldiskfs_it_fill() which will use ->readdir()
4257  * to load a directory entry at a time and store it in the
4258  * iterator's in-memory data structure.
4259  *
4260  * \param di struct osd_it_ea, iterator's in memory structure
4261  *
4262  * \retval +ve on success
4263  * \retval -ve on error
4264  */
4265 static int osd_it_ea_load(const struct lu_env *env,
4266                           const struct dt_it *di, __u64 hash)
4267 {
4268         struct osd_it_ea *it = (struct osd_it_ea *)di;
4269         int rc;
4270
4271         ENTRY;
4272         it->oie_file.f_pos = hash;
4273
4274         rc =  osd_ldiskfs_it_fill(env, di);
4275         if (rc == 0)
4276                 rc = +1;
4277
4278         RETURN(rc);
4279 }
4280
4281 /**
4282  * Index lookup function for interoperability mode (b11826).
4283  *
4284  * \param key   key, i.e. the file name to be searched
4285  *
4286  * \retval +ve, on success
4287  * \retval -ve, on error
4288  */
4289 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
4290                                struct dt_rec *rec, const struct dt_key *key,
4291                                struct lustre_capa *capa)
4292 {
4293         struct osd_object *obj = osd_dt_obj(dt);
4294         int rc = 0;
4295
4296         ENTRY;
4297
4298         LASSERT(S_ISDIR(obj->oo_inode->i_mode));
4299         LINVRNT(osd_invariant(obj));
4300
4301         if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
4302                 RETURN(-EACCES);
4303
4304         rc = osd_ea_lookup_rec(env, obj, rec, key);
4305         if (rc == 0)
4306                 rc = +1;
4307         RETURN(rc);
4308 }
4309
4310 /**
4311  * Index and Iterator operations for interoperability
4312  * mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
4313  */
4314 static const struct dt_index_operations osd_index_ea_ops = {
4315         .dio_lookup         = osd_index_ea_lookup,
4316         .dio_declare_insert = osd_index_declare_ea_insert,
4317         .dio_insert         = osd_index_ea_insert,
4318         .dio_declare_delete = osd_index_declare_ea_delete,
4319         .dio_delete         = osd_index_ea_delete,
4320         .dio_it     = {
4321                 .init     = osd_it_ea_init,
4322                 .fini     = osd_it_ea_fini,
4323                 .get      = osd_it_ea_get,
4324                 .put      = osd_it_ea_put,
4325                 .next     = osd_it_ea_next,
4326                 .key      = osd_it_ea_key,
4327                 .key_size = osd_it_ea_key_size,
4328                 .rec      = osd_it_ea_rec,
4329                 .store    = osd_it_ea_store,
4330                 .load     = osd_it_ea_load
4331         }
4332 };
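
/*
 * A hypothetical readdir-style walk over the ->dio_it methods above (error
 * handling trimmed); this mirrors how an upper layer is expected to drive
 * the EA iterator, following the get("")/next/rec/fini contract documented
 * on the individual functions.  lde, attr and BYPASS_CAPA are caller-side
 * placeholders/assumptions.
 *
 *      const struct dt_it_ops *iops = &dt->do_index_ops->dio_it;
 *      struct dt_it *it;
 *
 *      it = iops->init(env, dt, attr, BYPASS_CAPA);
 *      rc = iops->get(env, it, (const struct dt_key *)"");  /+ rewind, +1 +/
 *      do {
 *              rc = iops->next(env, it);    /+ 0: have entry, +1: end +/
 *              if (rc == 0)
 *                      rc = iops->rec(env, it, (struct dt_rec *)lde, attr);
 *      } while (rc == 0);
 *      iops->put(env, it);
 *      iops->fini(env, it);
 *
 * (the "/+ +/" markers stand in for nested comments, which C forbids)
 */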
4333
4334 static void *osd_key_init(const struct lu_context *ctx,
4335                           struct lu_context_key *key)
4336 {
4337         struct osd_thread_info *info;
4338
4339         OBD_ALLOC_PTR(info);
4340         if (info == NULL)
4341                 return ERR_PTR(-ENOMEM);
4342
4343         OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4344         if (info->oti_it_ea_buf == NULL)
4345                 goto out_free_info;
4346
4347         info->oti_env = container_of(ctx, struct lu_env, le_ctx);
4348
4349         info->oti_hlock = ldiskfs_htree_lock_alloc();
4350         if (info->oti_hlock == NULL)
4351                 goto out_free_ea;
4352
4353         return info;
4354
4355  out_free_ea:
4356         OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4357  out_free_info:
4358         OBD_FREE_PTR(info);
4359         return ERR_PTR(-ENOMEM);
4360 }
4361
4362 static void osd_key_fini(const struct lu_context *ctx,
4363                          struct lu_context_key *key, void* data)
4364 {
4365         struct osd_thread_info *info = data;
4366
4367         if (info->oti_hlock != NULL)
4368                 ldiskfs_htree_lock_free(info->oti_hlock);
4369         OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4370         OBD_FREE_PTR(info);
4371 }
4372
4373 static void osd_key_exit(const struct lu_context *ctx,
4374                          struct lu_context_key *key, void *data)
4375 {
4376         struct osd_thread_info *info = data;
4377
4378         LASSERT(info->oti_r_locks == 0);
4379         LASSERT(info->oti_w_locks == 0);
4380         LASSERT(info->oti_txns    == 0);
4381 }
4382
4383 /* type constructor/destructor: osd_type_init, osd_type_fini */
4384 LU_TYPE_INIT_FINI(osd, &osd_key);
4385
4386 struct lu_context_key osd_key = {
4387         .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL,
4388         .lct_init = osd_key_init,
4389         .lct_fini = osd_key_fini,
4390         .lct_exit = osd_key_exit
4391 };
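
/*
 * Sketch of how the per-thread info registered above is reached elsewhere
 * in this file (hypothetical illustration; osd_oti_get() is assumed to be
 * the usual lu_context_key_get() wrapper defined in osd_internal.h):
 *
 *      struct osd_thread_info *info = osd_oti_get(env);
 *
 * info->oti_it_ea_buf, info->oti_hlock, etc. are preallocated in
 * osd_key_init() and released in osd_key_fini().
 */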
4392
4393
4394 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
4395                            const char *name, struct lu_device *next)
4396 {
4397         struct osd_device *osd = osd_dev(d);
4398
4399         strncpy(osd->od_svname, name, MAX_OBD_NAME);
4400         return osd_procfs_init(osd, name);
4401 }
4402
4403 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
4404 {
4405         ENTRY;
4406
4407         osd_scrub_cleanup(env, o);
4408
4409         if (o->od_fsops) {
4410                 fsfilt_put_ops(o->od_fsops);
4411                 o->od_fsops = NULL;
4412         }
4413
4414         /* shutdown quota slave instance associated with the device */
4415         if (o->od_quota_slave != NULL) {
4416                 qsd_fini(env, o->od_quota_slave);
4417                 o->od_quota_slave = NULL;
4418         }
4419
4420         RETURN(0);
4421 }
4422
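/*
 * Added note (not in the original source): osd_mount() consumes a lustre_cfg
 * whose string buffers are laid out as the lustre_cfg_string() calls below
 * assume:
 *
 *      index 0: obd/service name, e.g. "lustre-MDT0000"  (illustrative value)
 *      index 1: backing block device, e.g. "/dev/sdb"    (illustrative value)
 *      index 2: "<s_flags>:<lmd_flags>" as numeric strings
 *      index 3: ldiskfs mount options; when NULL, "user_xattr,acl" is used
 *
 * "no_mbcache" is always appended, so with no explicit options the string
 * passed to vfs_kern_mount() ends up as "user_xattr,acl,no_mbcache".
 */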
4423 static int osd_mount(const struct lu_env *env,
4424                      struct osd_device *o, struct lustre_cfg *cfg)
4425 {
4426         const char              *name  = lustre_cfg_string(cfg, 0);
4427         const char              *dev  = lustre_cfg_string(cfg, 1);
4428         const char              *opts;
4429         unsigned long            page, s_flags, lmd_flags = 0;
4430         struct page             *__page;
4431         struct file_system_type *type;
4432         char                    *options = NULL;
4433         char                    *str;
4434         int                       rc = 0;
4435         ENTRY;
4436
4437         if (o->od_mnt != NULL)
4438                 RETURN(0);
4439
4440         if (strlen(dev) >= sizeof(o->od_mntdev))
4441                 RETURN(-E2BIG);
4442         strcpy(o->od_mntdev, dev);
4443
4444         o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
4445         if (o->od_fsops == NULL) {
4446                 CERROR("Can't find fsfilt_ldiskfs\n");
4447                 RETURN(-ENOTSUPP);
4448         }
4449
4450         OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
4451         if (__page == NULL)
4452                 GOTO(out, rc = -ENOMEM);
4453
4454         str = lustre_cfg_string(cfg, 2);
4455         s_flags = simple_strtoul(str, NULL, 0);
4456         str = strstr(str, ":");
4457         if (str)
4458                 lmd_flags = simple_strtoul(str + 1, NULL, 0);
4459         opts = lustre_cfg_string(cfg, 3);
4460         page = (unsigned long)cfs_page_address(__page);
4461         options = (char *)page;
4462         *options = '\0';
4463         if (opts == NULL)
4464                 strcat(options, "user_xattr,acl");
4465         else
4466                 strlcat(options, opts, CFS_PAGE_SIZE);
4467
4468         /* Glom up mount options */
4469         if (*options != '\0')
4470                 strcat(options, ",");
4471         strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
4472
4473         type = get_fs_type("ldiskfs");
4474         if (!type) {
4475                 CERROR("%s: cannot find ldiskfs module\n", name);
4476                 GOTO(out, rc = -ENODEV);
4477         }
4478
4479         o->od_mnt = vfs_kern_mount(type, s_flags, dev, options);
4480         cfs_module_put(type->owner);
4481
4482         if (IS_ERR(o->od_mnt)) {
4483                 rc = PTR_ERR(o->od_mnt);
4484                 CERROR("%s: can't mount %s: %d\n", name, dev, rc);
4485                 o->od_mnt = NULL;
4486                 GOTO(out, rc);
4487         }
4488
4489         if (lvfs_check_rdonly(o->od_mnt->mnt_sb->s_bdev)) {
4490                 CERROR("%s: underlying device %s is marked as read-only. "
4491                        "Setup failed\n", name, dev);
4492                 mntput(o->od_mnt);
4493                 o->od_mnt = NULL;
4494                 GOTO(out, rc = -EROFS);
4495         }
4496
4497         if (!LDISKFS_HAS_COMPAT_FEATURE(o->od_mnt->mnt_sb,
4498             LDISKFS_FEATURE_COMPAT_HAS_JOURNAL)) {
4499                 CERROR("%s: device %s is mounted w/o journal\n", name, dev);
4500                 mntput(o->od_mnt);
4501                 o->od_mnt = NULL;
4502                 GOTO(out, rc = -EINVAL);
4503         }
4504
4505         if (lmd_flags & LMD_FLG_NOSCRUB)
4506                 o->od_noscrub = 1;
4507
4508 out:
4509         if (__page)
4510                 OBD_PAGE_FREE(__page);
4511         if (rc)
4512                 fsfilt_put_ops(o->od_fsops);
4513
4514         RETURN(rc);
4515 }
4516
4517 static struct lu_device *osd_device_fini(const struct lu_env *env,
4518                                          struct lu_device *d)
4519 {
4520         int rc;
4521         ENTRY;
4522
4523         rc = osd_shutdown(env, osd_dev(d));
4524
4525         osd_obj_map_fini(osd_dev(d));
4526
4527         shrink_dcache_sb(osd_sb(osd_dev(d)));
4528         osd_sync(env, lu2dt_dev(d));
4529
4530         rc = osd_procfs_fini(osd_dev(d));
4531         if (rc) {
4532                 CERROR("proc fini error: rc = %d\n", rc);
4533                 RETURN(ERR_PTR(rc));
4534         }
4535
4536         if (osd_dev(d)->od_mnt) {
4537                 mntput(osd_dev(d)->od_mnt);
4538                 osd_dev(d)->od_mnt = NULL;
4539         }
4540
4541         RETURN(NULL);
4542 }
4543
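/*
 * Added note (not in the original source): osd_device_init0() sets the device
 * up in the following order -- capa hash, mount, scrub/OI files, object map,
 * lu_site, procfs, quota slave -- and the error labels at the end of the
 * function (out_procfs ... out_capa) unwind those steps in reverse order.
 */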
4544 static int osd_device_init0(const struct lu_env *env,
4545                             struct osd_device *o,
4546                             struct lustre_cfg *cfg)
4547 {
4548         struct lu_device        *l = osd2lu_dev(o);
4549         struct osd_thread_info *info;
4550         int                     rc;
4551
4552         /* if the module was re-loaded, env can lose its keys */
4553         rc = lu_env_refill((struct lu_env *) env);
4554         if (rc)
4555                 GOTO(out, rc);
4556         info = osd_oti_get(env);
4557         LASSERT(info);
4558
4559         l->ld_ops = &osd_lu_ops;
4560         o->od_dt_dev.dd_ops = &osd_dt_ops;
4561
4562         spin_lock_init(&o->od_osfs_lock);
4563         mutex_init(&o->od_otable_mutex);
4564         o->od_osfs_age = cfs_time_shift_64(-1000);
4565
4566         o->od_capa_hash = init_capa_hash();
4567         if (o->od_capa_hash == NULL)
4568                 GOTO(out, rc = -ENOMEM);
4569
4570         o->od_read_cache = 1;
4571         o->od_writethrough_cache = 1;
4572         o->od_readcache_max_filesize = OSD_MAX_CACHE_SIZE;
4573
4574         rc = osd_mount(env, o, cfg);
4575         if (rc)
4576                 GOTO(out_capa, rc);
4577
4578         /* setup scrub, including OI files initialization */
4579         rc = osd_scrub_setup(env, o);
4580         if (rc < 0)
4581                 GOTO(out_mnt, rc);
4582
4583         strncpy(o->od_svname, lustre_cfg_string(cfg, 4),
4584                         sizeof(o->od_svname) - 1);
4585
4586         rc = osd_obj_map_init(o);
4587         if (rc != 0)
4588                 GOTO(out_scrub, rc);
4589
4590         rc = lu_site_init(&o->od_site, l);
4591         if (rc)
4592                 GOTO(out_compat, rc);
4593         o->od_site.ls_bottom_dev = l;
4594
4595         rc = lu_site_init_finish(&o->od_site);
4596         if (rc)
4597                 GOTO(out_site, rc);
4598
4599         rc = osd_procfs_init(o, o->od_svname);
4600         if (rc != 0) {
4601                 CERROR("%s: can't initialize procfs: rc = %d\n",
4602                        o->od_svname, rc);
4603                 GOTO(out_site, rc);
4604         }
4605
4606         LASSERT(l->ld_site->ls_linkage.next && l->ld_site->ls_linkage.prev);
4607
4608         /* initialize quota slave instance */
4609         o->od_quota_slave = qsd_init(env, o->od_svname, &o->od_dt_dev,
4610                                      o->od_proc_entry);
4611         if (IS_ERR(o->od_quota_slave)) {
4612                 rc = PTR_ERR(o->od_quota_slave);
4613                 o->od_quota_slave = NULL;
4614                 GOTO(out_procfs, rc);
4615         }
4616
4617         RETURN(0);
4618 out_procfs:
4619         osd_procfs_fini(o);
4620 out_site:
4621         lu_site_fini(&o->od_site);
4622 out_compat:
4623         osd_obj_map_fini(o);
4624 out_scrub:
4625         osd_scrub_cleanup(env, o);
4626 out_mnt:
4627         osd_oi_fini(info, o);
4628         osd_shutdown(env, o);
4629         mntput(o->od_mnt);
4630         o->od_mnt = NULL;
4631 out_capa:
4632         cleanup_capa_hash(o->od_capa_hash);
4633 out:
4634         RETURN(rc);
4635 }
4636
4637 static struct lu_device *osd_device_alloc(const struct lu_env *env,
4638                                           struct lu_device_type *t,
4639                                           struct lustre_cfg *cfg)
4640 {
4641         struct osd_device *o;
4642         int                rc;
4643
4644         OBD_ALLOC_PTR(o);
4645         if (o == NULL)
4646                 return ERR_PTR(-ENOMEM);
4647
4648         rc = dt_device_init(&o->od_dt_dev, t);
4649         if (rc == 0) {
4650                 /* Because the ctx might be revived in dt_device_init,
4651                  * refill the env here */
4652                 lu_env_refill((struct lu_env *)env);
4653                 rc = osd_device_init0(env, o, cfg);
4654                 if (rc)
4655                         dt_device_fini(&o->od_dt_dev);
4656         }
4657
4658         if (unlikely(rc != 0))
4659                 OBD_FREE_PTR(o);
4660
4661         return rc == 0 ? osd2lu_dev(o) : ERR_PTR(rc);
4662 }
4663
4664 static struct lu_device *osd_device_free(const struct lu_env *env,
4665                                          struct lu_device *d)
4666 {
4667         struct osd_device *o = osd_dev(d);
4668         ENTRY;
4669
4670         cleanup_capa_hash(o->od_capa_hash);
4671         /* XXX: make osd top device in order to release reference */
4672         d->ld_site->ls_top_dev = d;
4673         lu_site_purge(env, d->ld_site, -1);
4674         if (!cfs_hash_is_empty(d->ld_site->ls_obj_hash)) {
4675                 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
4676                 lu_site_print(env, d->ld_site, &msgdata, lu_cdebug_printer);
4677         }
4678         lu_site_fini(&o->od_site);
4679         dt_device_fini(&o->od_dt_dev);
4680         OBD_FREE_PTR(o);
4681         RETURN(NULL);
4682 }
4683
4684 static int osd_process_config(const struct lu_env *env,
4685                               struct lu_device *d, struct lustre_cfg *cfg)
4686 {
4687         struct osd_device *o = osd_dev(d);
4688         int err;
4689         ENTRY;
4690
4691         switch (cfg->lcfg_command) {
4692         case LCFG_SETUP:
4693                 err = osd_mount(env, o, cfg);
4694                 break;
4695         case LCFG_CLEANUP:
4696                 lu_dev_del_linkage(d->ld_site, d);
4697                 err = osd_shutdown(env, o);
4698                 break;
4699         default:
4700                 err = -ENOSYS;
4701         }
4702
4703         RETURN(err);
4704 }
4705
4706 static int osd_recovery_complete(const struct lu_env *env,
4707                                  struct lu_device *d)
4708 {
4709         struct osd_device       *osd = osd_dev(d);
4710         int                      rc = 0;
4711         ENTRY;
4712
4713         if (osd->od_quota_slave == NULL)
4714                 RETURN(0);
4715
4716         /* start qsd instance on recovery completion; this notifies the quota
4717          * slave code that we are about to start processing new requests */
4718         rc = qsd_start(env, osd->od_quota_slave);
4719         RETURN(rc);
4720 }
4721
4722 /*
4723  * we use exports to track all osd users
4724  */
4725 static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
4726                            struct obd_device *obd, struct obd_uuid *cluuid,
4727                            struct obd_connect_data *data, void *localdata)
4728 {
4729         struct osd_device    *osd = osd_dev(obd->obd_lu_dev);
4730         struct lustre_handle  conn;
4731         int                   rc;
4732         ENTRY;
4733
4734         CDEBUG(D_CONFIG, "connect #%d\n", osd->od_connects);
4735
4736         rc = class_connect(&conn, obd, cluuid);
4737         if (rc)
4738                 RETURN(rc);
4739
4740         *exp = class_conn2export(&conn);
4741
4742         spin_lock(&osd->od_osfs_lock);
4743         osd->od_connects++;
4744         spin_unlock(&osd->od_osfs_lock);
4745
4746         RETURN(0);
4747 }
4748
4749 /*
4750  * once the last export (the self-export is not counted) disappears,
4751  * the osd can be released
4752  */
4753 static int osd_obd_disconnect(struct obd_export *exp)
4754 {
4755         struct obd_device *obd = exp->exp_obd;
4756         struct osd_device *osd = osd_dev(obd->obd_lu_dev);
4757         int                rc, release = 0;
4758         ENTRY;
4759
4760         /* Only disconnect the underlying layers on the final disconnect. */
4761         spin_lock(&osd->od_osfs_lock);
4762         osd->od_connects--;
4763         if (osd->od_connects == 0)
4764                 release = 1;
4765         spin_unlock(&osd->od_osfs_lock);
4766
4767         rc = class_disconnect(exp); /* bz 9811 */
4768
4769         if (rc == 0 && release)
4770                 class_manual_cleanup(obd);
4771         RETURN(rc);
4772 }
4773
4774 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
4775                        struct lu_device *dev)
4776 {
4777         struct osd_device *osd = osd_dev(dev);
4778         int                result = 0;
4779         ENTRY;
4780
4781         if (dev->ld_site && lu_device_is_md(dev->ld_site->ls_top_dev)) {
4782                 /* MDT/MDD still use old infrastructure to create
4783                  * special files */
4784                 result = llo_local_objects_setup(env, lu2md_dev(pdev),
4785                                                  lu2dt_dev(dev));
4786                 if (result)
4787                         RETURN(result);
4788         }
4789
4790         if (osd->od_quota_slave != NULL)
4791                 /* set up quota slave objects */
4792                 result = qsd_prepare(env, osd->od_quota_slave);
4793
4794         RETURN(result);
4795 }
4796
4797 static const struct lu_object_operations osd_lu_obj_ops = {
4798         .loo_object_init      = osd_object_init,
4799         .loo_object_delete    = osd_object_delete,
4800         .loo_object_release   = osd_object_release,
4801         .loo_object_free      = osd_object_free,
4802         .loo_object_print     = osd_object_print,
4803         .loo_object_invariant = osd_object_invariant
4804 };
4805
4806 const struct lu_device_operations osd_lu_ops = {
4807         .ldo_object_alloc      = osd_object_alloc,
4808         .ldo_process_config    = osd_process_config,
4809         .ldo_recovery_complete = osd_recovery_complete,
4810         .ldo_prepare           = osd_prepare,
4811 };
4812
4813 static const struct lu_device_type_operations osd_device_type_ops = {
4814         .ldto_init = osd_type_init,
4815         .ldto_fini = osd_type_fini,
4816
4817         .ldto_start = osd_type_start,
4818         .ldto_stop  = osd_type_stop,
4819
4820         .ldto_device_alloc = osd_device_alloc,
4821         .ldto_device_free  = osd_device_free,
4822
4823         .ldto_device_init    = osd_device_init,
4824         .ldto_device_fini    = osd_device_fini
4825 };
4826
4827 struct lu_device_type osd_device_type = {
4828         .ldt_tags     = LU_DEVICE_DT,
4829         .ldt_name     = LUSTRE_OSD_LDISKFS_NAME,
4830         .ldt_ops      = &osd_device_type_ops,
4831         .ldt_ctx_tags = LCT_LOCAL,
4832 };
4833
4834 /*
4835  * lprocfs legacy support.
4836  */
4837 static struct obd_ops osd_obd_device_ops = {
4838         .o_owner = THIS_MODULE,
4839         .o_connect      = osd_obd_connect,
4840         .o_disconnect   = osd_obd_disconnect
4841 };
4842
4843 static int __init osd_mod_init(void)
4844 {
4845         struct lprocfs_static_vars lvars;
4846
4847         osd_oi_mod_init();
4848         lprocfs_osd_init_vars(&lvars);
4849         return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
4850                                    LUSTRE_OSD_LDISKFS_NAME, &osd_device_type);
4851 }
4852
4853 static void __exit osd_mod_exit(void)
4854 {
4855         class_unregister_type(LUSTRE_OSD_LDISKFS_NAME);
4856 }
4857
4858 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4859 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_LDISKFS_NAME")");
4860 MODULE_LICENSE("GPL");
4861
4862 cfs_module(osd, "0.1.0", osd_mod_init, osd_mod_exit);
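
/*
 * Added note (not in the original source): the registration in osd_mod_init()
 * above makes this driver known to obdclass under the LUSTRE_OSD_LDISKFS_NAME
 * device type (nominally "osd-ldiskfs"), so the generic configuration code can
 * allocate an OSD device (osd_device_alloc()) and then drive it through
 * osd_process_config() with LCFG_SETUP / LCFG_CLEANUP for an ldiskfs-backed
 * target.
 */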