1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd/osd_handler.c
33  *
34  * Top-level entry points into osd module
35  *
36  * Author: Nikita Danilov <nikita@clusterfs.com>
37  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
38  */
39
40 #define DEBUG_SUBSYSTEM S_OSD
41
42 #include <linux/kallsyms.h>
43 #include <linux/module.h>
44 #include <linux/user_namespace.h>
45 #include <linux/uidgid.h>
46
47 /* prerequisite for linux/xattr.h */
48 #include <linux/types.h>
49 /* prerequisite for linux/xattr.h */
50 #include <linux/fs.h>
51 /* XATTR_{REPLACE,CREATE} */
52 #include <linux/xattr.h>
53
54 #include <ldiskfs/ldiskfs.h>
55 #include <ldiskfs/xattr.h>
56 #include <ldiskfs/ldiskfs_extents.h>
57 #undef ENTRY
58 /*
59  * struct OBD_{ALLOC,FREE}*()
60  * OBD_FAIL_CHECK
61  */
62 #include <obd_support.h>
63 /* struct ptlrpc_thread */
64 #include <lustre_net.h>
65 #include <lustre_fid.h>
66 /* process_config */
67 #include <uapi/linux/lustre/lustre_param.h>
68
69 #include "osd_internal.h"
70 #include "osd_dynlocks.h"
71
72 /* llo_* api support */
73 #include <md_object.h>
74 #include <lustre_quota.h>
75
76 #include <lustre_linkea.h>
77
78 /* Maximum EA size is limited by LNET_MTU for remote objects */
79 #define OSD_MAX_EA_SIZE 1048364
80
81 int ldiskfs_pdo = 1;
82 module_param(ldiskfs_pdo, int, 0644);
83 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
84
85 int ldiskfs_track_declares_assert;
86 module_param(ldiskfs_track_declares_assert, int, 0644);
87 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
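/*
 * Both parameters above are declared with mode 0644, so they can be read
 * and tuned at runtime via sysfs.  A minimal sketch, assuming the module is
 * loaded under its usual name "osd_ldiskfs":
 *
 *	echo 0 > /sys/module/osd_ldiskfs/parameters/ldiskfs_pdo
 */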
88
89 /* Slab to allocate dynlocks */
90 struct kmem_cache *dynlock_cachep;
91
92 /* Slab to allocate osd_it_ea */
93 struct kmem_cache *osd_itea_cachep;
94
95 static struct lu_kmem_descr ldiskfs_caches[] = {
96         {
97                 .ckd_cache = &dynlock_cachep,
98                 .ckd_name  = "dynlock_cache",
99                 .ckd_size  = sizeof(struct dynlock_handle)
100         },
101         {
102                 .ckd_cache = &osd_itea_cachep,
103                 .ckd_name  = "osd_itea_cache",
104                 .ckd_size  = sizeof(struct osd_it_ea)
105         },
106         {
107                 .ckd_cache = NULL
108         }
109 };
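/*
 * These descriptors are expected to be registered and released with the
 * generic lu_kmem_init()/lu_kmem_fini() helpers from the module init/exit
 * paths; a minimal sketch (error handling elided):
 *
 *	rc = lu_kmem_init(ldiskfs_caches);
 *	...
 *	lu_kmem_fini(ldiskfs_caches);
 */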
110
111 static const char dot[] = ".";
112 static const char dotdot[] = "..";
113
114 static const struct lu_object_operations      osd_lu_obj_ops;
115 static const struct dt_object_operations      osd_obj_ops;
116 static const struct dt_object_operations      osd_obj_otable_it_ops;
117 static const struct dt_index_operations       osd_index_iam_ops;
118 static const struct dt_index_operations       osd_index_ea_ops;
119
120 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
121                           const struct lu_fid *fid);
122 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
123                                                 struct osd_device *osd);
124
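/*
 * Map each operation type to the operation that undoes it (its "rollback"):
 * e.g. OSD_OT_CREATE is undone by OSD_OT_DESTROY and OSD_OT_INSERT by
 * OSD_OT_DELETE, self-inverse operations such as OSD_OT_ATTR_SET map to
 * themselves, and operations with no defined rollback (OSD_OT_PUNCH,
 * OSD_OT_QUOTA) map to OSD_OT_MAX.
 */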
125 int osd_trans_declare_op2rb[] = {
126         [OSD_OT_ATTR_SET]       = OSD_OT_ATTR_SET,
127         [OSD_OT_PUNCH]          = OSD_OT_MAX,
128         [OSD_OT_XATTR_SET]      = OSD_OT_XATTR_SET,
129         [OSD_OT_CREATE]         = OSD_OT_DESTROY,
130         [OSD_OT_DESTROY]        = OSD_OT_CREATE,
131         [OSD_OT_REF_ADD]        = OSD_OT_REF_DEL,
132         [OSD_OT_REF_DEL]        = OSD_OT_REF_ADD,
133         [OSD_OT_WRITE]          = OSD_OT_WRITE,
134         [OSD_OT_INSERT]         = OSD_OT_DELETE,
135         [OSD_OT_DELETE]         = OSD_OT_INSERT,
136         [OSD_OT_QUOTA]          = OSD_OT_MAX,
137 };
138
139 static int osd_has_index(const struct osd_object *obj)
140 {
141         return obj->oo_dt.do_index_ops != NULL;
142 }
143
144 static int osd_object_invariant(const struct lu_object *l)
145 {
146         return osd_invariant(osd_obj(l));
147 }
148
149 /*
150  * Concurrency: doesn't matter
151  */
152 static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
153 {
154         struct osd_thread_info *oti = osd_oti_get(env);
155
156         return oti->oti_w_locks > 0 && o->oo_owner == env;
157 }
158
159 /*
160  * Concurrency: doesn't access mutable data
161  */
162 static int osd_root_get(const struct lu_env *env,
163                         struct dt_device *dev, struct lu_fid *f)
164 {
165         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
166         return 0;
167 }
168
169 /*
170  * The following set of functions is used to maintain a per-thread
171  * cache of FID->ino mappings. This mechanism is needed to resolve a
172  * FID to an inode at dt_insert() time, which in turn stores the ino
173  * in directory entries to keep ldiskfs compatible with ext[34].
174  * Due to locking-originated restrictions we can't look up the ino
175  * using the LU cache (a deadlock is possible) and an OI lookup is
176  * quite expensive, so instead we maintain this cache; methods like
177  * dt_create() fill it, so in the majority of cases dt_insert() can
178  * find the needed mapping locklessly (see the sketch after osd_idc_add()).
179  */
180 static struct osd_idmap_cache *
181 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
182              const struct lu_fid *fid)
183 {
184         struct osd_thread_info *oti = osd_oti_get(env);
185         struct osd_idmap_cache *idc = oti->oti_ins_cache;
186         int i;
187
188         for (i = 0; i < oti->oti_ins_cache_used; i++) {
189                 if (!lu_fid_eq(&idc[i].oic_fid, fid))
190                         continue;
191                 if (idc[i].oic_dev != osd)
192                         continue;
193
194                 return idc + i;
195         }
196
197         return NULL;
198 }
199
200 static struct osd_idmap_cache *
201 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
202             const struct lu_fid *fid)
203 {
204         struct osd_thread_info *oti   = osd_oti_get(env);
205         struct osd_idmap_cache *idc;
206         int i;
207
208         if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
209                 i = oti->oti_ins_cache_size * 2;
210                 if (i == 0)
211                         i = OSD_INS_CACHE_SIZE;
212                 OBD_ALLOC_PTR_ARRAY(idc, i);
213                 if (idc == NULL)
214                         return ERR_PTR(-ENOMEM);
215                 if (oti->oti_ins_cache != NULL) {
216                         memcpy(idc, oti->oti_ins_cache,
217                                oti->oti_ins_cache_used * sizeof(*idc));
218                         OBD_FREE_PTR_ARRAY(oti->oti_ins_cache,
219                                            oti->oti_ins_cache_used);
220                 }
221                 oti->oti_ins_cache = idc;
222                 oti->oti_ins_cache_size = i;
223         }
224
225         idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
226         idc->oic_fid = *fid;
227         idc->oic_dev = osd;
228         idc->oic_lid.oii_ino = 0;
229         idc->oic_lid.oii_gen = 0;
230         idc->oic_remote = 0;
231
232         return idc;
233 }
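/*
 * A minimal usage sketch of the per-thread idmap cache described above
 * (illustrative only; dt_insert() callers follow this pattern through
 * osd_idc_find_or_init() below):
 *
 *	struct osd_idmap_cache *idc;
 *
 *	idc = osd_idc_find_or_init(env, osd, fid);
 *	if (IS_ERR(idc))
 *		return PTR_ERR(idc);
 *	if (idc->oic_remote) {
 *		... the object lives on another target ...
 *	} else {
 *		... idc->oic_lid holds the ino/generation to store in the
 *		... ldiskfs directory entry ...
 *	}
 */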
234
235 /*
236  * Look up the mapping for the given FID in the cache and initialize
237  * a new one if it is not found. The initialization checks whether the
238  * object is local or remote; for local objects, the OI is used to
239  * learn the ino/generation. The function is used when the caller
240  * has no information about the object, e.g. at dt_insert() time.
241  */
242 static struct osd_idmap_cache *
243 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
244                      const struct lu_fid *fid)
245 {
246         struct osd_idmap_cache *idc;
247         int rc;
248
249         idc = osd_idc_find(env, osd, fid);
250         LASSERT(!IS_ERR(idc));
251         if (idc != NULL)
252                 return idc;
253
254         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
255                osd->od_svname, PFID(fid));
256
257         /* new mapping is needed */
258         idc = osd_idc_add(env, osd, fid);
259         if (IS_ERR(idc)) {
260                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
261                        osd->od_svname, PFID(fid), PTR_ERR(idc));
262                 return idc;
263         }
264
265         /* initialize it */
266         rc = osd_remote_fid(env, osd, fid);
267         if (unlikely(rc < 0))
268                 return ERR_PTR(rc);
269
270         if (rc == 0) {
271                 /* the object is local, lookup in OI */
272                 /* XXX: probably cheaper to lookup in LU first? */
273                 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
274                                    &idc->oic_lid, 0);
275                 if (unlikely(rc < 0)) {
276                         CERROR("can't lookup: rc = %d\n", rc);
277                         return ERR_PTR(rc);
278                 }
279         } else {
280                 /* the object is remote */
281                 idc->oic_remote = 1;
282         }
283
284         return idc;
285 }
286
287 /*
288  * Look up the mapping for the given FID and fill it from the given
289  * object. The object is local by definition.
290  */
291 static int osd_idc_find_and_init(const struct lu_env *env,
292                                  struct osd_device *osd,
293                                  struct osd_object *obj)
294 {
295         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
296         struct osd_idmap_cache *idc;
297
298         idc = osd_idc_find(env, osd, fid);
299         LASSERT(!IS_ERR(idc));
300         if (idc != NULL) {
301                 if (obj->oo_inode == NULL)
302                         return 0;
303                 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
304                         LASSERT(idc->oic_lid.oii_ino == 0);
305                         idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
306                         idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
307                 }
308                 return 0;
309         }
310
311         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
312                osd->od_svname, PFID(fid));
313
314         /* new mapping is needed */
315         idc = osd_idc_add(env, osd, fid);
316         if (IS_ERR(idc)) {
317                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
318                        osd->od_svname, PFID(fid), PTR_ERR(idc));
319                 return PTR_ERR(idc);
320         }
321
322         if (obj->oo_inode != NULL) {
323                 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
324                 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
325         }
326         return 0;
327 }
328
329 /*
330  * OSD object methods.
331  */
332
333 /*
334  * Concurrency: no concurrent access is possible that early in object
335  * life-cycle.
336  */
337 static struct lu_object *osd_object_alloc(const struct lu_env *env,
338                                           const struct lu_object_header *hdr,
339                                           struct lu_device *d)
340 {
341         struct osd_object *mo;
342
343         OBD_ALLOC_PTR(mo);
344         if (mo != NULL) {
345                 struct lu_object *l;
346                 struct lu_object_header *h;
347                 struct osd_device *o = osd_dev(d);
348
349                 l = &mo->oo_dt.do_lu;
350                 if (unlikely(o->od_in_init)) {
351                         OBD_ALLOC_PTR(h);
352                         if (!h) {
353                                 OBD_FREE_PTR(mo);
354                                 return NULL;
355                         }
356
357                         lu_object_header_init(h);
358                         lu_object_init(l, h, d);
359                         lu_object_add_top(h, l);
360                         mo->oo_header = h;
361                 } else {
362                         dt_object_init(&mo->oo_dt, NULL, d);
363                         mo->oo_header = NULL;
364                 }
365
366                 mo->oo_dt.do_ops = &osd_obj_ops;
367                 l->lo_ops = &osd_lu_obj_ops;
368                 init_rwsem(&mo->oo_sem);
369                 init_rwsem(&mo->oo_ext_idx_sem);
370                 spin_lock_init(&mo->oo_guard);
371                 INIT_LIST_HEAD(&mo->oo_xattr_list);
372                 return l;
373         }
374         return NULL;
375 }
376
377 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
378                 struct dentry *dentry, struct lustre_ost_attrs *loa)
379 {
380         int rc;
381
382         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
383                              (void *)loa, sizeof(*loa));
384         if (rc > 0) {
385                 struct lustre_mdt_attrs *lma = &loa->loa_lma;
386
387                 if (rc < sizeof(*lma))
388                         return -EINVAL;
389
390                 rc = 0;
391                 lustre_loa_swab(loa, true);
392                 /* Check LMA compatibility */
393                 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
394                         CWARN("%s: unsupported incompat LMA feature(s) %#x "
395                               "for fid = "DFID", ino = %lu\n",
396                               osd_ino2name(inode),
397                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
398                               PFID(&lma->lma_self_fid), inode->i_ino);
399                         rc = -EOPNOTSUPP;
400                 }
401         } else if (rc == 0) {
402                 rc = -ENODATA;
403         }
404
405         return rc;
406 }
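/*
 * A minimal sketch of a typical caller (variables as in osd_iget_fid()
 * below): read the LMA EA and fall back to an IGIF when the inode has no
 * LMA at all:
 *
 *	rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
 *	if (rc == 0)
 *		*fid = loa->loa_lma.lma_self_fid;
 *	else if (rc == -ENODATA)
 *		lu_igif_build(fid, inode->i_ino, inode->i_generation);
 */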
407
408 /*
409  * Retrieve an object from the backing ext filesystem.
410  */
411 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
412                        struct osd_inode_id *id)
413 {
414         int rc;
415         struct inode *inode = NULL;
416
417         /*
418          * If we look for an inode within a running
419          * transaction, then we risk a deadlock;
420          * osd_dirent_check_repair() breaks this assumption.
421          */
422         /* LASSERT(current->journal_info == NULL); */
423
424         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
425         if (IS_ERR(inode)) {
426                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
427                        id->oii_ino, PTR_ERR(inode));
428         } else if (id->oii_gen != OSD_OII_NOGEN &&
429                    inode->i_generation != id->oii_gen) {
430                 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
431                        "i_generation = %u\n",
432                        id->oii_ino, id->oii_gen, inode->i_generation);
433                 iput(inode);
434                 inode = ERR_PTR(-ESTALE);
435         } else if (inode->i_nlink == 0) {
436                 /*
437                  * due to parallel readdir and unlink,
438                  * we can have dead inode here.
439                  */
440                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
441                 iput(inode);
442                 inode = ERR_PTR(-ESTALE);
443         } else if (is_bad_inode(inode)) {
444                 CWARN("%s: bad inode: ino = %u\n",
445                       osd_dev2name(dev), id->oii_ino);
446                 iput(inode);
447                 inode = ERR_PTR(-ENOENT);
448         } else if ((rc = osd_attach_jinode(inode))) {
449                 iput(inode);
450                 inode = ERR_PTR(rc);
451         } else {
452                 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
453                 if (id->oii_gen == OSD_OII_NOGEN)
454                         osd_id_gen(id, inode->i_ino, inode->i_generation);
455
456                 /*
457                  * Do not update file c/mtime in ldiskfs.
458                  * NB: we don't have any lock to protect this because we don't
459                  * have a reference on the osd_object yet, but contention with
460                  * another lookup + attr_set can't happen in the tiny window
461                  * between the if (...) check and setting S_NOCMTIME.
462                  */
463                 if (!(inode->i_flags & S_NOCMTIME))
464                         inode->i_flags |= S_NOCMTIME;
465         }
466         return inode;
467 }
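/*
 * A minimal calling sketch (illustrative only; see osd_stripe_dir_filldir()
 * below for a real user): build an id from a raw ino and take a reference:
 *
 *	struct osd_inode_id *id = &info->oti_id;
 *	struct inode *inode;
 *
 *	osd_id_gen(id, ino, OSD_OII_NOGEN);
 *	inode = osd_iget(info, dev, id);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	...
 *	iput(inode);
 */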
468
469 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
470                           handle_t *handle, struct dentry *child,
471                           struct inode *inode, struct htree_lock *hlock)
472 {
473         int rc, rc2;
474
475         rc = __ldiskfs_add_entry(handle, child, inode, hlock);
476         if (rc == -ENOBUFS || rc == -ENOSPC) {
477                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
478                 struct inode *parent = child->d_parent->d_inode;
479                 struct lu_fid *fid = NULL;
480                 char fidstr[FID_LEN + 1] = "unknown";
481
482                 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
483                 if (!rc2) {
484                         fid = &loa->loa_lma.lma_self_fid;
485                 } else if (rc2 == -ENODATA) {
486                         if (unlikely(parent == inode->i_sb->s_root->d_inode)) {
487                                 fid = &info->oti_fid3;
488                                 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
489                         } else if (!osd->od_is_ost && osd->od_index == 0) {
490                                 fid = &info->oti_fid3;
491                                 lu_igif_build(fid, parent->i_ino,
492                                               parent->i_generation);
493                         }
494                 }
495
496                 if (fid != NULL)
497                         snprintf(fidstr, sizeof(fidstr), DFID, PFID(fid));
498
499                 /* below message is checked in sanity.sh test_129 */
500                 if (rc == -ENOSPC) {
501                         CWARN("%s: directory (inode: %lu, FID: %s) has reached max size limit\n",
502                               osd_name(osd), parent->i_ino, fidstr);
503                 } else {
504                         rc = 0; /* ignore such error now */
505                         CWARN("%s: directory (inode: %lu, FID: %s) is approaching max size limit\n",
506                               osd_name(osd), parent->i_ino, fidstr);
507                 }
508
509         }
510
511         return rc;
512 }
513
514
515 struct inode *
516 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
517              struct osd_inode_id *id, struct lu_fid *fid)
518 {
519         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
520         struct inode *inode;
521         int rc;
522
523         inode = osd_iget(info, dev, id);
524         if (IS_ERR(inode))
525                 return inode;
526
527         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
528         if (!rc) {
529                 *fid = loa->loa_lma.lma_self_fid;
530         } else if (rc == -ENODATA) {
531                 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
532                         lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
533                 else
534                         lu_igif_build(fid, inode->i_ino, inode->i_generation);
535         } else {
536                 iput(inode);
537                 inode = ERR_PTR(rc);
538         }
539         return inode;
540 }
541
542 static struct inode *osd_iget_check(struct osd_thread_info *info,
543                                     struct osd_device *dev,
544                                     const struct lu_fid *fid,
545                                     struct osd_inode_id *id,
546                                     bool trusted)
547 {
548         struct inode *inode;
549         int rc = 0;
550
551         ENTRY;
552
553         /*
554          * If the cached OI mapping is trusted and we cannot locate the inode
555          * via it, then return the failure to the caller directly without
556          * further OI checking.
557          */
558
559 again:
560         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
561         if (IS_ERR(inode)) {
562                 rc = PTR_ERR(inode);
563                 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
564                         goto check_oi;
565
566                 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
567                        PFID(fid), id->oii_ino, rc);
568                 GOTO(put, rc);
569         }
570
571         if (is_bad_inode(inode)) {
572                 rc = -ENOENT;
573                 if (!trusted)
574                         goto check_oi;
575
576                 CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
577                        PFID(fid), id->oii_ino);
578                 GOTO(put, rc);
579         }
580
581         if (id->oii_gen != OSD_OII_NOGEN &&
582             inode->i_generation != id->oii_gen) {
583                 rc = -ESTALE;
584                 if (!trusted)
585                         goto check_oi;
586
587                 CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
588                        "oii_gen = %u, i_generation = %u\n", PFID(fid),
589                        id->oii_ino, id->oii_gen, inode->i_generation);
590                 GOTO(put, rc);
591         }
592
593         if (inode->i_nlink == 0) {
594                 rc = -ENOENT;
595                 if (!trusted)
596                         goto check_oi;
597
598                 CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
599                        PFID(fid), id->oii_ino);
600                 GOTO(put, rc);
601         }
602
603         ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
604
605 check_oi:
606         if (rc != 0) {
607                 __u32 saved_ino = id->oii_ino;
608                 __u32 saved_gen = id->oii_gen;
609
610                 LASSERT(!trusted);
611                 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
612
613                 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
614                 /*
615                  * XXX: There are four possible cases:
616                  *      1. rc = 0.
617                  *         Backup/restore made the OI mapping invalid.
618                  *      2. rc = 0.
619                  *         Someone unlinked the object but did NOT remove
620                  *         the OI mapping, e.g. by mounting the target
621                  *         device as ldiskfs and modifying it directly.
622                  *      3. rc = -ENOENT.
623                  *         Someone just removed the object between the
624                  *         former oi_lookup and the iget. That is normal.
625                  *      4. Other failure cases.
626                  *
627                  *      Generally, when the device is mounted, it will
628                  *      automatically check whether the system has been
629                  *      restored from a file-level backup. We trust that
630                  *      detection to distinguish the 1st case from the 2nd:
631                  *      if the OI files are consistent (but may contain
632                  *      stale OI mappings because of case 2) and iget()
633                  *      returns -ENOENT or -ESTALE, then it should be
634                  *      case 2.
635                  */
636                 if (rc != 0)
637                         /*
638                          * If the OI mapping was in the OI file before the
639                          * osd_iget_check() but has now disappeared, it
640                          * must have been removed by a racing thread.
641                          * That is a normal race case.
642                          */
643                         GOTO(put, rc);
644
645                 /*
646                  * The OI scrub updated the OI mapping by race.
647                  * The new OI mapping must be valid.
648                  */
649                 if (saved_ino != id->oii_ino ||
650                     (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
651                         if (!IS_ERR(inode))
652                                 iput(inode);
653
654                         trusted = true;
655                         goto again;
656                 }
657
658                 if (IS_ERR(inode)) {
659                         if (dev->od_scrub.os_scrub.os_file.sf_flags &
660                             SF_INCONSISTENT)
661                                 /*
662                                  * It still can be the case 2, but we cannot
663                                  * distinguish it from the case 1. So return
664                                  * -EREMCHG to block current operation until
665                                  *  OI scrub rebuilt the OI mappings.
666                                  */
667                                 rc = -EREMCHG;
668                         else
669                                 rc = -ENOENT;
670
671                         GOTO(put, rc);
672                 }
673
674                 if (inode->i_generation == id->oii_gen)
675                         rc = -ENOENT;
676                 else
677                         rc = -EREMCHG;
678         } else {
679                 if (id->oii_gen == OSD_OII_NOGEN)
680                         osd_id_gen(id, inode->i_ino, inode->i_generation);
681
682                 /*
683                  * Do not update file c/mtime in ldiskfs.
684                  * NB: we don't have any lock to protect this because we don't
685                  * have a reference on the osd_object yet, but contention with
686                  * another lookup + attr_set can't happen in the tiny window
687                  * between the if (...) check and setting S_NOCMTIME.
688                  */
689                 if (!(inode->i_flags & S_NOCMTIME))
690                         inode->i_flags |= S_NOCMTIME;
691         }
692
693         GOTO(put, rc);
694
695 put:
696         if (rc != 0) {
697                 if (!IS_ERR(inode))
698                         iput(inode);
699
700                 inode = ERR_PTR(rc);
701         }
702
703         return inode;
704 }
705
706 /**
707  * \retval +v: new filter_fid does not contain self-fid
708  * \retval 0:  filter_fid_18_23, contains self-fid
709  * \retval -v: other failure cases
710  */
711 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
712                  struct dentry *dentry, struct lu_fid *fid)
713 {
714         struct filter_fid *ff = &info->oti_ff;
715         struct ost_id *ostid = &info->oti_ostid;
716         int rc;
717
718         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
719         if (rc == sizeof(struct filter_fid_18_23)) {
720                 struct filter_fid_18_23 *ff_old = (void *)ff;
721
722                 ostid_set_seq(ostid, le64_to_cpu(ff_old->ff_seq));
723                 rc = ostid_set_id(ostid, le64_to_cpu(ff_old->ff_objid));
724                 /*
725                  * XXX: use 0 as the index for compatibility, the caller will
726                  * handle index related issues when necessary.
727                  */
728                 if (!rc)
729                         ostid_to_fid(fid, ostid, 0);
730         } else if (rc >= (int)sizeof(struct filter_fid_24_29)) {
731                 rc = 1;
732         } else if (rc >= 0) {
733                 rc = -EINVAL;
734         }
735
736         return rc;
737 }
738
739 static int osd_lma_self_repair(struct osd_thread_info *info,
740                                struct osd_device *osd, struct inode *inode,
741                                const struct lu_fid *fid, __u32 compat)
742 {
743         handle_t *jh;
744         int rc;
745
746         LASSERT(current->journal_info == NULL);
747
748         jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
749                                   osd_dto_credits_noquota[DTO_XATTR_SET]);
750         if (IS_ERR(jh)) {
751                 rc = PTR_ERR(jh);
752                 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
753                       osd_name(osd), rc);
754                 return rc;
755         }
756
757         rc = osd_ea_fid_set(info, inode, fid, compat, 0);
758         if (rc != 0)
759                 CWARN("%s: cannot self repair the LMA: rc = %d\n",
760                       osd_name(osd), rc);
761         ldiskfs_journal_stop(jh);
762         return rc;
763 }
764
765 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
766 {
767         struct osd_thread_info *info = osd_oti_get(env);
768         struct osd_device *osd = osd_obj2dev(obj);
769         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
770         struct lustre_mdt_attrs *lma = &loa->loa_lma;
771         struct inode *inode = obj->oo_inode;
772         struct dentry *dentry = &info->oti_obj_dentry;
773         struct lu_fid *fid = NULL;
774         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
775         int rc;
776
777         ENTRY;
778
779         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
780                              (void *)loa, sizeof(*loa));
781         if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
782                 fid = &lma->lma_self_fid;
783                 rc = osd_get_idif(info, inode, dentry, fid);
784                 if (rc > 0 || (rc == -ENODATA && osd->od_index_in_idif)) {
785                         /*
786                          * For the given OST-object, if it has neither LMA nor
787                          * FID in XATTR_NAME_FID, then the given FID (which is
788                          * contained in the @obj, from client RPC for locating
789                          * the OST-object) is trusted. We use it to generate
790                          * the LMA.
791                          */
792                         osd_lma_self_repair(info, osd, inode, rfid,
793                                             LMAC_FID_ON_OST);
794                         RETURN(0);
795                 }
796         }
797
798         if (rc < 0)
799                 RETURN(rc);
800
801         if (rc > 0) {
802                 rc = 0;
803                 lustre_lma_swab(lma);
804                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
805                              (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
806                               S_ISREG(inode->i_mode)))) {
807                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
808                               "fid = "DFID", ino = %lu\n", osd_name(osd),
809                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
810                               PFID(rfid), inode->i_ino);
811                         rc = -EOPNOTSUPP;
812                 } else {
813                         fid = &lma->lma_self_fid;
814                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
815                             osd->od_is_ost)
816                                 obj->oo_pfid_in_lma = 1;
817                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
818                             !osd->od_is_ost)
819                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
820                 }
821         }
822
823         if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
824                 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
825                         struct ost_id   *oi   = &info->oti_ostid;
826                         struct lu_fid   *fid1 = &info->oti_fid3;
827                         __u32            idx  = fid_idif_ost_idx(rfid);
828
829                         /*
830                          * For old IDIF, the OST index is not part of the IDIF,
831                          * which means that different OSTs may have the same
832                          * IDIFs. In such a case, we need an extra compatibility
833                          * check to make sure the OI scrub is triggered properly.
834                          */
835                         if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
836                                 /* Given @rfid is new, LMA is old. */
837                                 fid_to_ostid(fid, oi);
838                                 ostid_to_fid(fid1, oi, idx);
839                                 if (lu_fid_eq(fid1, rfid)) {
840                                         if (osd->od_index_in_idif)
841                                                 osd_lma_self_repair(info, osd,
842                                                         inode, rfid,
843                                                         LMAC_FID_ON_OST);
844                                         RETURN(0);
845                                 }
846                         }
847                 }
848
849                 rc = -EREMCHG;
850         }
851
852         RETURN(rc);
853 }
854
855 struct osd_check_lmv_buf {
856         /* please keep it as first member */
857         struct dir_context ctx;
858         struct osd_thread_info *oclb_info;
859         struct osd_device *oclb_dev;
860         struct osd_idmap_cache *oclb_oic;
861         int oclb_items;
862         bool oclb_found;
863 };
864
865 /**
866  * Called internally by ->iterate*() to pick out the FID of the
867  * striped directory's local slave object.
868  *
869  * \retval      1 found the local slave's FID
870  * \retval      0 continue to check next item
871  * \retval      -ve for failure
872  */
873 #ifdef HAVE_FILLDIR_USE_CTX
874 static int osd_stripe_dir_filldir(struct dir_context *buf,
875 #else
876 static int osd_stripe_dir_filldir(void *buf,
877 #endif
878                                   const char *name, int namelen,
879                                   loff_t offset, __u64 ino, unsigned int d_type)
880 {
881         struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
882         struct osd_thread_info *oti = oclb->oclb_info;
883         struct lu_fid *fid = &oti->oti_fid3;
884         struct osd_inode_id *id = &oti->oti_id3;
885         struct osd_device *dev = oclb->oclb_dev;
886         struct osd_idmap_cache *oic = oclb->oclb_oic;
887         struct inode *inode;
888
889         oclb->oclb_items++;
890
891         if (name[0] == '.')
892                 return 0;
893
894         fid_zero(fid);
895         sscanf(name + 1, SFID, RFID(fid));
896         if (!fid_is_sane(fid))
897                 return 0;
898
899         if (osd_remote_fid(oti->oti_env, dev, fid))
900                 return 0;
901
902         osd_id_gen(id, ino, OSD_OII_NOGEN);
903         inode = osd_iget(oti, dev, id);
904         if (IS_ERR(inode))
905                 return PTR_ERR(inode);
906
907         iput(inode);
908         osd_add_oi_cache(oti, dev, id, fid);
909         oic->oic_fid = *fid;
910         oic->oic_lid = *id;
911         oic->oic_dev = dev;
912         osd_oii_insert(dev, oic, true);
913         oclb->oclb_found = true;
914
915         return 1;
916 }
917
918 /*
919  * When looking up an item under a striped directory, we need to locate the
920  * master MDT-object of the striped directory first; the client then sends a
921  * lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
922  * and the item's name. If the system is restored from an MDT file-level
923  * backup, then before the OI scrub has completely rebuilt the OI files, the
924  * OI mappings of the master MDT-object and slave MDT-object may be invalid.
925  * Usually this is not a problem for the master MDT-object: when locating the
926  * master MDT-object we first do a name-based lookup (for the striped
927  * directory itself), and during that process we can set up the correct OI
928  * mapping for the master MDT-object. But it is trouble for the slave
929  * MDT-object, because the client does not trigger a name-based lookup on the
930  * MDT to locate the slave MDT-object before looking up items under the
931  * striped directory. osd_fid_lookup() then finds that the OI mapping for the
932  * slave MDT-object is invalid without knowing what the right mapping is, so
933  * the MDT has to return -EINPROGRESS to the client to notify it that the OI
934  * scrub is rebuilding the OI file and the related OI mapping is unknown yet,
935  * please try again later. The client then retries the RPC again and again
936  * until the related OI mapping has been updated. That is quite inefficient.
937  *
938  * To resolve this trouble, we handle the following two cases:
939  *
940  * 1) The slave MDT-object and the master MDT-object are on different MDTs.
941  *    This is relatively easy. Being a remote MDT-object, the slave MDT-object
942  *    is linked under /REMOTE_PARENT_DIR with its FID string as the name.
943  *    We can locate the slave MDT-object via a lookup in /REMOTE_PARENT_DIR
944  *    directly. Please check osd_fid_lookup().
945  *
946  * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
947  *    In such a case, while looking up the master MDT-object, we look up the
948  *    slave MDT-object via readdir against the master MDT-object, because the
949  *    slave MDT-objects' information is stored as sub-directories with the name
950  *    "${FID}:${index}". Then when the local slave MDT-object is found, its OI
951  *    mapping is recorded, so a subsequent osd_fid_lookup() will know
952  *    the correct OI mapping for the slave MDT-object.
953  */
954 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
955                          struct inode *inode, struct osd_idmap_cache *oic)
956 {
957         struct lu_buf *buf = &oti->oti_big_buf;
958         struct dentry *dentry = &oti->oti_obj_dentry;
959         struct file *filp = &oti->oti_file;
960         const struct file_operations *fops;
961         struct lmv_mds_md_v1 *lmv1;
962         struct osd_check_lmv_buf oclb = {
963                 .ctx.actor = osd_stripe_dir_filldir,
964                 .oclb_info = oti,
965                 .oclb_dev = dev,
966                 .oclb_oic = oic,
967                 .oclb_found = false,
968         };
969         int rc = 0;
970
971         ENTRY;
972
973 again:
974         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
975                              buf->lb_len);
976         if (rc == -ERANGE) {
977                 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
978                 if (rc > 0) {
979                         lu_buf_realloc(buf, rc);
980                         if (buf->lb_buf == NULL)
981                                 GOTO(out, rc = -ENOMEM);
982
983                         goto again;
984                 }
985         }
986
987         if (unlikely(rc == 0 || rc == -ENODATA))
988                 GOTO(out, rc = 0);
989
990         if (rc < 0)
991                 GOTO(out, rc);
992
993         if (unlikely(buf->lb_buf == NULL)) {
994                 lu_buf_realloc(buf, rc);
995                 if (buf->lb_buf == NULL)
996                         GOTO(out, rc = -ENOMEM);
997
998                 goto again;
999         }
1000
1001         lmv1 = buf->lb_buf;
1002         if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
1003                 GOTO(out, rc = 0);
1004
1005         fops = inode->i_fop;
1006         dentry->d_inode = inode;
1007         dentry->d_sb = inode->i_sb;
1008         filp->f_pos = 0;
1009         filp->f_path.dentry = dentry;
1010         filp->f_flags |= O_NOATIME;
1011         filp->f_mode = FMODE_64BITHASH | FMODE_NONOTIFY;
1012         filp->f_mapping = inode->i_mapping;
1013         filp->f_op = fops;
1014         filp->private_data = NULL;
1015         filp->f_cred = current_cred();
1016         filp->f_inode = inode;
1017         rc = osd_security_file_alloc(filp);
1018         if (rc)
1019                 goto out;
1020
1021         do {
1022                 oclb.oclb_items = 0;
1023                 rc = iterate_dir(filp, &oclb.ctx);
1024         } while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
1025                  filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
1026         fops->release(inode, filp);
1027
1028 out:
1029         if (rc < 0)
1030                 CDEBUG(D_LFSCK, "%s: fail to check LMV EA, inode = %lu/%u,"
1031                        DFID": rc = %d\n", osd_ino2name(inode),
1032                        inode->i_ino, inode->i_generation,
1033                        PFID(&oic->oic_fid), rc);
1034         else
1035                 rc = 0;
1036
1037         RETURN(rc);
1038 }
1039
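/*
 * Resolve the FID of @obj to an inode and attach it to obj->oo_inode.
 *
 * The search order implemented below is: 1) the per-thread idmap cache,
 * 2) the OI scrub pending list, 3) the OI files.  If the mapping turns out
 * to be stale, the object is additionally looked up under
 * /REMOTE_PARENT_DIR (it may be referenced by a remote name entry), and as
 * a last resort the OI scrub is joined or triggered, returning -EINPROGRESS
 * or -EREMCHG to the caller until the OI mapping has been rebuilt.
 */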
1040 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
1041                           const struct lu_fid *fid,
1042                           const struct lu_object_conf *conf)
1043 {
1044         struct osd_thread_info *info;
1045         struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1046         struct osd_device *dev;
1047         struct osd_idmap_cache *oic;
1048         struct osd_inode_id *id;
1049         struct inode *inode = NULL;
1050         struct lustre_scrub *scrub;
1051         struct scrub_file *sf;
1052         __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
1053         __u32 saved_ino;
1054         __u32 saved_gen;
1055         int result = 0;
1056         int rc1 = 0;
1057         bool remote = false;
1058         bool trusted = true;
1059         bool updated = false;
1060         bool checked = false;
1061
1062         ENTRY;
1063
1064         LINVRNT(osd_invariant(obj));
1065         LASSERT(obj->oo_inode == NULL);
1066         LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
1067
1068         dev = osd_dev(ldev);
1069         scrub = &dev->od_scrub.os_scrub;
1070         sf = &scrub->os_file;
1071         info = osd_oti_get(env);
1072         LASSERT(info);
1073         oic = &info->oti_cache;
1074
1075         if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
1076                 RETURN(-ENOENT);
1077
1078         /*
1079          * The object is either created as a locking anchor or is about to
1080          * be created on disk. There is no need for osd_oi_lookup() here
1081          * because the FID should never be re-used; if it really is a
1082          * duplicate FID for some unexpected reason, we should be able to
1083          * detect it later via do_create->osd_oi_insert().
1084          */
1085         if (conf && conf->loc_flags & LOC_F_NEW)
1086                 GOTO(out, result = 0);
1087
1088         /* Search order: 1. per-thread cache. */
1089         if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
1090                 id = &oic->oic_lid;
1091                 goto iget;
1092         }
1093
1094         id = &info->oti_id;
1095         if (!list_empty(&scrub->os_inconsistent_items)) {
1096                 /* Search order: 2. OI scrub pending list. */
1097                 result = osd_oii_lookup(dev, fid, id);
1098                 if (!result)
1099                         goto iget;
1100         }
1101
1102         /*
1103          * The OI mapping in the OI file can be updated by the OI scrub
1104          * when we locate the inode via FID, so it may not be trustworthy.
1105          */
1106         trusted = false;
1107
1108         /* Search order: 3. OI files. */
1109         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1110         if (result == -ENOENT) {
1111                 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1112                     fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1113                     !ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
1114                                       sf->sf_oi_bitmap))
1115                         GOTO(out, result = 0);
1116
1117                 goto trigger;
1118         }
1119
1120         /* -ESTALE is returned if inode of OST object doesn't exist */
1121         if (result == -ESTALE &&
1122             fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1123                 GOTO(out, result = 0);
1124         }
1125
1126         if (result)
1127                 GOTO(out, result);
1128
1129 iget:
1130         obj->oo_inode = NULL;
1131         /* for later passes through checks, not true on first pass */
1132         if (!IS_ERR_OR_NULL(inode))
1133                 iput(inode);
1134
1135         inode = osd_iget_check(info, dev, fid, id, trusted);
1136         if (!IS_ERR(inode)) {
1137                 obj->oo_inode = inode;
1138                 result = 0;
1139                 if (remote)
1140                         goto trigger;
1141
1142                 goto check_lma;
1143         }
1144
1145         result = PTR_ERR(inode);
1146         if (result == -ENOENT || result == -ESTALE)
1147                 GOTO(out, result = 0);
1148
1149         if (result != -EREMCHG)
1150                 GOTO(out, result);
1151
1152 trigger:
1153         /*
1154          * We still have a chance to get a valid inode: for an
1155          * object which is referenced by a remote name entry, the
1156          * object on the local MDT will be linked under the
1157          * "/REMOTE_PARENT_DIR" directory with its FID string as name.
1158          *
1159          * We do not know whether the object for the given FID
1160          * is referenced by some remote name entry or not, and
1161          * especially for DNE II, a multiply-linked object may
1162          * have many name entries residing on many MDTs.
1163          *
1164          * To simplify the operation, OSD does not distinguish
1165          * further, but just looks up "/REMOTE_PARENT_DIR". Usually this
1166          * only happens for an RPC from another MDT during the
1167          * OI scrub, or for a client-side RPC with FID only,
1168          * such as FID to path, or from an old connected client.
1169          */
1170         if (!remote) {
1171                 rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
1172                 if (!rc1) {
1173                         remote = true;
1174                         trusted = true;
1175                         flags |= SS_AUTO_PARTIAL;
1176                         flags &= ~SS_AUTO_FULL;
1177                         goto iget;
1178                 }
1179         }
1180
1181         if (thread_is_running(&scrub->os_thread)) {
1182                 if (scrub->os_partial_scan && !scrub->os_in_join)
1183                         goto join;
1184
1185                 if (IS_ERR_OR_NULL(inode) || result)
1186                         GOTO(out, result = -EINPROGRESS);
1187
1188                 LASSERT(remote);
1189                 LASSERT(obj->oo_inode == inode);
1190
1191                 osd_add_oi_cache(info, dev, id, fid);
1192                 osd_oii_insert(dev, oic, true);
1193                 goto found;
1194         }
1195
1196         if (dev->od_auto_scrub_interval == AS_NEVER) {
1197                 if (!remote)
1198                         GOTO(out, result = -EREMCHG);
1199
1200                 LASSERT(!result);
1201                 LASSERT(obj->oo_inode == inode);
1202
1203                 osd_add_oi_cache(info, dev, id, fid);
1204                 goto found;
1205         }
1206
1207 join:
1208         rc1 = osd_scrub_start(env, dev, flags);
1209         LCONSOLE_WARN("%s: trigger OI scrub by RPC for the " DFID" with flags "
1210                       "0x%x, rc = %d\n", osd_name(dev), PFID(fid), flags, rc1);
1211         if (rc1 && rc1 != -EALREADY)
1212                 GOTO(out, result = -EREMCHG);
1213
1214         if (IS_ERR_OR_NULL(inode) || result)
1215                 GOTO(out, result = -EINPROGRESS);
1216
1217         LASSERT(remote);
1218         LASSERT(obj->oo_inode == inode);
1219
1220         osd_add_oi_cache(info, dev, id, fid);
1221         osd_oii_insert(dev, oic, true);
1222         goto found;
1223
1224 check_lma:
1225         checked = true;
1226         if (unlikely(obj->oo_header))
1227                 goto found;
1228
1229         result = osd_check_lma(env, obj);
1230         if (!result)
1231                 goto found;
1232
1233         LASSERTF(id->oii_ino == inode->i_ino &&
1234                  id->oii_gen == inode->i_generation,
1235                  "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1236                  PFID(fid), id->oii_ino, id->oii_gen,
1237                  inode->i_ino, inode->i_generation);
1238
1239         saved_ino = inode->i_ino;
1240         saved_gen = inode->i_generation;
1241
1242         if (unlikely(result == -ENODATA)) {
1243                 /*
1244                  * If the OI scrub updated the OI mapping by race, it
1245                  * must be valid. Trust the inode that has no LMA EA.
1246                  */
1247                 if (updated)
1248                         goto found;
1249
1250                 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1251                 if (!result) {
1252                         /*
1253                          * The OI mapping is still there, the inode is still
1254                          * valid. It is just because the inode has no LMA EA.
1255                          */
1256                         if (saved_ino == id->oii_ino &&
1257                             saved_gen == id->oii_gen)
1258                                 goto found;
1259
1260                         /*
1261                          * The OI scrub updated the OI mapping by race.
1262                          * The new OI mapping must be valid.
1263                          */
1264                         trusted = true;
1265                         updated = true;
1266                         goto iget;
1267                 }
1268
1269                 /*
1270                  * "result == -ENOENT" means that the OI mapping has been
1271                  * removed by race, so the inode belongs to another object.
1272                  *
1273                  * Other errors can be returned directly.
1274                  */
1275                 if (result == -ENOENT) {
1276                         LASSERT(trusted);
1277
1278                         obj->oo_inode = NULL;
1279                         result = 0;
1280                 }
1281         }
1282
1283         if (result != -EREMCHG)
1284                 GOTO(out, result);
1285
1286         LASSERT(!updated);
1287
1288         /*
1289          * If two OST objects map to the same inode, and the inode mode is
1290          * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), this means it is
1291          * reserved by precreate and not written yet. In this case, don't
1292          * set the inode for the object whose FID mismatches, so that it can
1293          * create its own inode and not block precreate.
1294          */
1295         if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
1296             inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
1297                 obj->oo_inode = NULL;
1298                 GOTO(out, result = 0);
1299         }
1300
1301         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1302         /*
1303          * "result == -ENOENT" means the cached OI mapping has been removed
1304          * from the OI file by race; the above inode belongs to another object.
1305          */
1306         if (result == -ENOENT) {
1307                 LASSERT(trusted);
1308
1309                 obj->oo_inode = NULL;
1310                 GOTO(out, result = 0);
1311         }
1312
1313         if (result)
1314                 GOTO(out, result);
1315
1316         if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
1317                 result = -EREMCHG;
1318                 goto trigger;
1319         }
1320
1321         /*
1322          * The OI scrub updated the OI mapping by race.
1323          * The new OI mapping must be valid.
1324          */
1325         trusted = true;
1326         updated = true;
1327         goto iget;
1328
1329 found:
1330         if (!checked) {
1331                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
1332                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
1333
1334                 result = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
1335                 if (!result) {
1336                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
1337                             dev->od_is_ost)
1338                                 obj->oo_pfid_in_lma = 1;
1339                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
1340                             !dev->od_is_ost)
1341                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
1342                 } else if (result != -ENODATA) {
1343                         GOTO(out, result);
1344                 }
1345         }
1346
1347         obj->oo_compat_dot_created = 1;
1348         obj->oo_compat_dotdot_created = 1;
1349
1350         if (S_ISDIR(inode->i_mode) &&
1351             (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1352                 osd_check_lmv(info, dev, inode, oic);
1353
1354         result = osd_attach_jinode(inode);
1355         if (result)
1356                 GOTO(out, result);
1357
1358         if (!ldiskfs_pdo)
1359                 GOTO(out, result = 0);
1360
1361         LASSERT(!obj->oo_hl_head);
1362         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1363
1364         GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
1365
1366 out:
1367         if (result || !obj->oo_inode) {
1368                 if (!IS_ERR_OR_NULL(inode))
1369                         iput(inode);
1370
1371                 obj->oo_inode = NULL;
1372                 if (trusted)
1373                         fid_zero(&oic->oic_fid);
1374         }
1375
1376         LINVRNT(osd_invariant(obj));
1377         return result;
1378 }
1379
1380 /*
1381  * Concurrency: shouldn't matter.
1382  */
1383 static void osd_object_init0(struct osd_object *obj)
1384 {
1385         LASSERT(obj->oo_inode != NULL);
1386         obj->oo_dt.do_body_ops = &osd_body_ops;
1387         obj->oo_dt.do_lu.lo_header->loh_attr |=
1388                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1389 }
1390
1391 /*
1392  * Concurrency: no concurrent access is possible that early in object
1393  * life-cycle.
1394  */
1395 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1396                            const struct lu_object_conf *conf)
1397 {
1398         struct osd_object *obj = osd_obj(l);
1399         int result;
1400
1401         LINVRNT(osd_invariant(obj));
1402
1403         if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_LLOG_UMOUNT_RACE) &&
1404             cfs_fail_val == 2) {
1405                 struct osd_thread_info *info = osd_oti_get(env);
1406                 struct osd_idmap_cache *oic = &info->oti_cache;
1407                 /* invalidate thread cache */
1408                 memset(&oic->oic_fid, 0, sizeof(oic->oic_fid));
1409         }
1410         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1411                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1412                 l->lo_header->loh_attr |= LOHA_EXISTS;
1413                 return 0;
1414         }
1415
1416         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1417         obj->oo_dt.do_body_ops = &osd_body_ops_new;
1418         if (result == 0 && obj->oo_inode != NULL) {
1419                 struct osd_thread_info *oti = osd_oti_get(env);
1420                 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1421
1422                 osd_object_init0(obj);
1423                 if (unlikely(obj->oo_header))
1424                         return 0;
1425
1426                 result = osd_get_lma(oti, obj->oo_inode,
1427                                      &oti->oti_obj_dentry, loa);
1428                 if (!result) {
1429                         /*
1430                          * Convert LMAI flags to lustre LMA flags
1431                          * and cache them in oo_lma_flags
1432                          */
1433                         obj->oo_lma_flags =
1434                                 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1435                 } else if (result == -ENODATA) {
1436                         result = 0;
1437                 }
1438         }
1439         obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
1440
1441         LINVRNT(osd_invariant(obj));
1442         return result;
1443 }
1444
1445 /*
1446  * The first part of oxe_buf is the xattr name, and is '\0' terminated.
1447  * The remainder holds the value as binary data.
1448  */
1449 struct osd_xattr_entry {
1450         struct list_head        oxe_list;
1451         size_t                  oxe_len;
1452         size_t                  oxe_namelen;
1453         bool                    oxe_exist;
1454         struct rcu_head         oxe_rcu;
1455         char                    oxe_buf[0];
1456 };
1457
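/*
 * Layout and locking of the per-object xattr cache below: oxe_buf holds the
 * name, its terminating '\0', then the raw value, so oxe_len equals
 * sizeof(struct osd_xattr_entry) + oxe_namelen + 1 + value length (see
 * osd_oxc_add()).  Lookups in osd_oxc_get() walk oo_xattr_list under
 * rcu_read_lock(), while updates in osd_oxc_add()/osd_oxc_del() serialize
 * on oo_guard and free replaced entries through call_rcu().
 */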
1458 static int osd_oxc_get(struct osd_object *obj, const char *name,
1459                        struct lu_buf *buf)
1460 {
1461         struct osd_xattr_entry *tmp;
1462         struct osd_xattr_entry *oxe = NULL;
1463         size_t namelen = strlen(name);
1464         int rc;
1465
1466         rcu_read_lock();
1467         list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1468                 if (namelen == tmp->oxe_namelen &&
1469                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1470                         oxe = tmp;
1471                         break;
1472                 }
1473         }
1474
1475         if (oxe == NULL)
1476                 GOTO(out, rc = -ENOENT);
1477
1478         if (!oxe->oxe_exist)
1479                 GOTO(out, rc = -ENODATA);
1480
1481         /* vallen */
1482         rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1483         LASSERT(rc > 0);
1484
1485         if (buf->lb_buf == NULL)
1486                 GOTO(out, rc);
1487
1488         if (buf->lb_len < rc)
1489                 GOTO(out, rc = -ERANGE);
1490
1491         memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
1492 out:
1493         rcu_read_unlock();
1494
1495         return rc;
1496 }
1497
1498 static void osd_oxc_free(struct rcu_head *head)
1499 {
1500         struct osd_xattr_entry *oxe;
1501
1502         oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1503         OBD_FREE(oxe, oxe->oxe_len);
1504 }
1505
1506 static void osd_oxc_add(struct osd_object *obj, const char *name,
1507                         const char *buf, int buflen)
1508 {
1509         struct osd_xattr_entry *oxe;
1510         struct osd_xattr_entry *old = NULL;
1511         struct osd_xattr_entry *tmp;
1512         size_t namelen = strlen(name);
1513         size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1514
1515         OBD_ALLOC(oxe, len);
1516         if (oxe == NULL)
1517                 return;
1518
1519         INIT_LIST_HEAD(&oxe->oxe_list);
1520         oxe->oxe_len = len;
1521         oxe->oxe_namelen = namelen;
1522         memcpy(oxe->oxe_buf, name, namelen);
1523         if (buflen > 0) {
1524                 LASSERT(buf != NULL);
1525                 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1526                 oxe->oxe_exist = true;
1527         } else {
1528                 oxe->oxe_exist = false;
1529         }
1530
1531         /* this should be rarely called, just remove old and add new */
1532         spin_lock(&obj->oo_guard);
1533         list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1534                 if (namelen == tmp->oxe_namelen &&
1535                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1536                         old = tmp;
1537                         break;
1538                 }
1539         }
1540         if (old != NULL) {
1541                 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1542                 call_rcu(&old->oxe_rcu, osd_oxc_free);
1543         } else {
1544                 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1545         }
1546         spin_unlock(&obj->oo_guard);
1547 }
1548
1549 static void osd_oxc_del(struct osd_object *obj, const char *name)
1550 {
1551         struct osd_xattr_entry *oxe;
1552         size_t namelen = strlen(name);
1553
1554         spin_lock(&obj->oo_guard);
1555         list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1556                 if (namelen == oxe->oxe_namelen &&
1557                     strncmp(name, oxe->oxe_buf, namelen) == 0) {
1558                         list_del_rcu(&oxe->oxe_list);
1559                         call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1560                         break;
1561                 }
1562         }
1563         spin_unlock(&obj->oo_guard);
1564 }
1565
1566 static void osd_oxc_fini(struct osd_object *obj)
1567 {
1568         struct osd_xattr_entry *oxe, *next;
1569
1570         list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1571                 list_del(&oxe->oxe_list);
1572                 OBD_FREE(oxe, oxe->oxe_len);
1573         }
1574 }
1575
1576 /*
1577  * Concurrency: no concurrent access is possible that late in object
1578  * life-cycle.
1579  */
1580 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1581 {
1582         struct osd_object *obj = osd_obj(l);
1583         struct lu_object_header *h = obj->oo_header;
1584
1585         LINVRNT(osd_invariant(obj));
1586
1587         osd_oxc_fini(obj);
1588         dt_object_fini(&obj->oo_dt);
1589         if (obj->oo_hl_head != NULL)
1590                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1591         /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
1592         OBD_FREE_PTR(obj);
1593         if (unlikely(h)) {
1594                 lu_object_header_fini(h);
1595                 OBD_FREE_PRE(h, sizeof(*h), "kfreed");
1596                 kfree_rcu(h, loh_rcu);
1597         }
1598 }
1599
1600 /*
1601  * Concurrency: no concurrent access is possible that late in object
1602  * life-cycle.
1603  */
1604 static void osd_index_fini(struct osd_object *o)
1605 {
1606         struct iam_container *bag;
1607
1608         if (o->oo_dir != NULL) {
1609                 bag = &o->oo_dir->od_container;
1610                 if (o->oo_inode != NULL) {
1611                         if (bag->ic_object == o->oo_inode)
1612                                 iam_container_fini(bag);
1613                 }
1614                 OBD_FREE_PTR(o->oo_dir);
1615                 o->oo_dir = NULL;
1616         }
1617 }
1618
1619 /*
1620  * Concurrency: no concurrent access is possible that late in object
1621  * life-cycle (for all existing callers, that is. New callers have to provide
1622  * their own locking.)
1623  */
1624 static int osd_inode_unlinked(const struct inode *inode)
1625 {
1626         return inode->i_nlink == 0;
1627 }
1628
1629 enum {
1630         OSD_TXN_OI_DELETE_CREDITS    = 20,
1631         OSD_TXN_INODE_DELETE_CREDITS = 20
1632 };
1633
1634 /*
1635  * Journal
1636  */
1637
1638 #if OSD_THANDLE_STATS
1639 /**
1640  * Set time when the handle is allocated
1641  */
1642 static void osd_th_alloced(struct osd_thandle *oth)
1643 {
1644         oth->oth_alloced = ktime_get();
1645 }
1646
1647 /**
1648  * Set time when the handle started
1649  */
1650 static void osd_th_started(struct osd_thandle *oth)
1651 {
1652         oth->oth_started = ktime_get();
1653 }
1654
1655 /**
1656  * Check whether we have been dealing with this handle for too long.
1657  */
1658 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1659                                 ktime_t alloced, ktime_t started,
1660                                 ktime_t closed)
1661 {
1662         ktime_t now = ktime_get();
1663
1664         LASSERT(dev != NULL);
1665
1666         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1667                             ktime_us_delta(started, alloced));
1668         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1669                             ktime_us_delta(closed, started));
1670         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1671                             ktime_us_delta(now, closed));
1672
1673         if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1674                 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1675                                 oth, now, alloced, started, closed);
1676                 libcfs_debug_dumpstack(NULL);
1677         }
1678 }
1679
1680 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
1681 {                                                                       \
1682         ktime_t __closed = ktime_get();                                 \
1683         ktime_t __alloced = oth->oth_alloced;                           \
1684         ktime_t __started = oth->oth_started;                           \
1685                                                                         \
1686         expr;                                                           \
1687         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
1688 }
1689
1690 #else /* OSD_THANDLE_STATS */
1691
1692 #define osd_th_alloced(h)                  do {} while(0)
1693 #define osd_th_started(h)                  do {} while(0)
1694 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
1695
1696 #endif /* OSD_THANDLE_STATS */
1697
1698 /*
1699  * Concurrency: doesn't access mutable data.
1700  */
1701 static int osd_param_is_not_sane(const struct osd_device *dev,
1702                                  const struct thandle *th)
1703 {
1704         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1705
1706         return oh->ot_credits > osd_transaction_size(dev);
1707 }
1708
1709 /*
1710  * Concurrency: shouldn't matter.
1711  */
1712 static void osd_trans_commit_cb(struct super_block *sb,
1713                                 struct ldiskfs_journal_cb_entry *jcb, int error)
1714 {
1715         struct osd_thandle *oh = container_of(jcb, struct osd_thandle, ot_jcb);
1716         struct thandle *th = &oh->ot_super;
1717         struct lu_device *lud = &th->th_dev->dd_lu_dev;
1718         struct dt_txn_commit_cb *dcb, *tmp;
1719
1720         LASSERT(oh->ot_handle == NULL);
1721
1722         if (error)
1723                 CERROR("transaction @0x%p commit error: %d\n", th, error);
1724
1725         OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DELAY_TRANS, 40);
1726         /* call per-transaction callbacks if any */
1727         list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1728                                  dcb_linkage) {
1729                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1730                          "commit callback entry: magic=%x name='%s'\n",
1731                          dcb->dcb_magic, dcb->dcb_name);
1732                 list_del_init(&dcb->dcb_linkage);
1733                 dcb->dcb_func(NULL, th, dcb, error);
1734         }
1735
1736         lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1737         lu_device_put(lud);
1738         th->th_dev = NULL;
1739
1740         OBD_FREE_PTR(oh);
1741 }
1742
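/*
 * Informal transaction life-cycle summary of the code below:
 * osd_trans_create() allocates the osd_thandle and takes sb_start_write();
 * the osd_declare_*() methods accumulate ot_credits; osd_trans_start()
 * opens the ldiskfs journal handle; osd_trans_stop() registers
 * osd_trans_commit_cb() with the journal, runs the stop callbacks and
 * closes the handle.  Callbacks added via osd_trans_cb_add() run either at
 * stop time (DCB_TRANS_STOP) or from the journal commit callback.
 */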
1743 static struct thandle *osd_trans_create(const struct lu_env *env,
1744                                         struct dt_device *d)
1745 {
1746         struct osd_thread_info *oti = osd_oti_get(env);
1747         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1748         struct osd_thandle *oh;
1749         struct thandle *th;
1750
1751         ENTRY;
1752
1753         if (d->dd_rdonly) {
1754                 CERROR("%s: someone tried to start a transaction under "
1755                        "readonly mode, this should be disabled.\n",
1756                        osd_name(osd_dt_dev(d)));
1757                 dump_stack();
1758                 RETURN(ERR_PTR(-EROFS));
1759         }
1760
1761         /* no pending IO in this thread should be left from the previous request */
1762         LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1763
1764         sb_start_write(osd_sb(osd_dt_dev(d)));
1765
1766         OBD_ALLOC_GFP(oh, sizeof(*oh), GFP_NOFS);
1767         if (!oh) {
1768                 sb_end_write(osd_sb(osd_dt_dev(d)));
1769                 RETURN(ERR_PTR(-ENOMEM));
1770         }
1771
1772         oh->ot_quota_trans = &oti->oti_quota_trans;
1773         memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1774         th = &oh->ot_super;
1775         th->th_dev = d;
1776         th->th_result = 0;
1777         oh->ot_credits = 0;
1778         INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1779         INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1780         INIT_LIST_HEAD(&oh->ot_trunc_locks);
1781         osd_th_alloced(oh);
1782
1783         memset(oti->oti_declare_ops, 0,
1784                sizeof(oti->oti_declare_ops));
1785         memset(oti->oti_declare_ops_cred, 0,
1786                sizeof(oti->oti_declare_ops_cred));
1787         memset(oti->oti_declare_ops_used, 0,
1788                sizeof(oti->oti_declare_ops_used));
1789
1790         oti->oti_ins_cache_depth++;
1791
1792         RETURN(th);
1793 }
1794
1795 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1796 {
1797         struct osd_thread_info *oti = osd_oti_get(env);
1798         struct osd_thandle *oh;
1799
1800         oh = container_of(th, struct osd_thandle, ot_super);
1801         LASSERT(oh != NULL);
1802
1803         CWARN("  create: %u/%u/%u, destroy: %u/%u/%u\n",
1804               oti->oti_declare_ops[OSD_OT_CREATE],
1805               oti->oti_declare_ops_cred[OSD_OT_CREATE],
1806               oti->oti_declare_ops_used[OSD_OT_CREATE],
1807               oti->oti_declare_ops[OSD_OT_DESTROY],
1808               oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1809               oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1810         CWARN("  attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1811               oti->oti_declare_ops[OSD_OT_ATTR_SET],
1812               oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1813               oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1814               oti->oti_declare_ops[OSD_OT_XATTR_SET],
1815               oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1816               oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1817         CWARN("  write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1818               oti->oti_declare_ops[OSD_OT_WRITE],
1819               oti->oti_declare_ops_cred[OSD_OT_WRITE],
1820               oti->oti_declare_ops_used[OSD_OT_WRITE],
1821               oti->oti_declare_ops[OSD_OT_PUNCH],
1822               oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1823               oti->oti_declare_ops_used[OSD_OT_PUNCH],
1824               oti->oti_declare_ops[OSD_OT_QUOTA],
1825               oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1826               oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1827         CWARN("  insert: %u/%u/%u, delete: %u/%u/%u\n",
1828               oti->oti_declare_ops[OSD_OT_INSERT],
1829               oti->oti_declare_ops_cred[OSD_OT_INSERT],
1830               oti->oti_declare_ops_used[OSD_OT_INSERT],
1831               oti->oti_declare_ops[OSD_OT_DELETE],
1832               oti->oti_declare_ops_cred[OSD_OT_DELETE],
1833               oti->oti_declare_ops_used[OSD_OT_DELETE]);
1834         CWARN("  ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1835               oti->oti_declare_ops[OSD_OT_REF_ADD],
1836               oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1837               oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1838               oti->oti_declare_ops[OSD_OT_REF_DEL],
1839               oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1840               oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1841 }
1842
1843 /*
1844  * Concurrency: shouldn't matter.
1845  */
1846 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1847                            struct thandle *th)
1848 {
1849         struct osd_thread_info *oti = osd_oti_get(env);
1850         struct osd_device *dev = osd_dt_dev(d);
1851         handle_t *jh;
1852         struct osd_thandle *oh;
1853         int rc;
1854
1855         ENTRY;
1856
1857         LASSERT(current->journal_info == NULL);
1858
1859         oh = container_of(th, struct osd_thandle, ot_super);
1860         LASSERT(oh != NULL);
1861         LASSERT(oh->ot_handle == NULL);
1862
1863         rc = dt_txn_hook_start(env, d, th);
1864         if (rc != 0)
1865                 GOTO(out, rc);
1866
1867         if (unlikely(osd_param_is_not_sane(dev, th))) {
1868                 static unsigned long last_printed;
1869                 static int last_credits;
1870
1871                 /*
1872                  * don't make noise on tiny testing systems;
1873                  * actual credit misuse will be caught anyway
1874                  */
1875                 if (last_credits != oh->ot_credits &&
1876                     time_after(jiffies, last_printed +
1877                                cfs_time_seconds(60)) &&
1878                     osd_transaction_size(dev) > 512) {
1879                         CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1880                               oh->ot_credits, osd_transaction_size(dev));
1881                         osd_trans_dump_creds(env, th);
1882                         libcfs_debug_dumpstack(NULL);
1883                         last_credits = oh->ot_credits;
1884                         last_printed = jiffies;
1885                 }
1886                 /*
1887                  * XXX Limit the credits to 'max_transaction_buffers', and
1888                  *     let the underlying filesystem to catch the error if
1889                  *     we really need so many credits.
1890                  *
1891                  *     This should be removed when we can calculate the
1892                  *     credits precisely.
1893                  */
1894                 oh->ot_credits = osd_transaction_size(dev);
1895         } else if (ldiskfs_track_declares_assert != 0) {
1896                 /*
1897                  * reserve a few credits to prevent an assertion in JBD;
1898                  * our debugging mechanism will still be able to detect
1899                  * overuse. This can help to debug single-update
1900                  * transactions
1901                  */
1902                 oh->ot_credits += 10;
1903                 if (unlikely(osd_param_is_not_sane(dev, th)))
1904                         oh->ot_credits = osd_transaction_size(dev);
1905         }
1906
1907         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_TXN_START))
1908                 GOTO(out, rc = -EIO);
1909
1910         /*
1911          * XXX temporary stuff. Some abstraction layer should
1912          * be used.
1913          */
1914         jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1915         osd_th_started(oh);
1916         if (!IS_ERR(jh)) {
1917                 oh->ot_handle = jh;
1918                 LASSERT(oti->oti_txns == 0);
1919
1920                 lu_device_get(&d->dd_lu_dev);
1921                 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1922                               "osd-tx", th);
1923                 oti->oti_txns++;
1924                 rc = 0;
1925         } else {
1926                 rc = PTR_ERR(jh);
1927         }
1928 out:
1929         RETURN(rc);
1930 }
1931
1932 static int osd_seq_exists(const struct lu_env *env,
1933                           struct osd_device *osd, u64 seq)
1934 {
1935         struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1936         struct seq_server_site *ss = osd_seq_site(osd);
1937         int rc;
1938
1939         ENTRY;
1940
1941         LASSERT(ss != NULL);
1942         LASSERT(ss->ss_server_fld != NULL);
1943
1944         rc = osd_fld_lookup(env, osd, seq, range);
1945         if (rc != 0) {
1946                 if (rc != -ENOENT)
1947                         CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
1948                                osd_name(osd), seq, rc);
1949                 RETURN(0);
1950         }
1951
1952         RETURN(ss->ss_node_id == range->lsr_index);
1953 }
1954
1955 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1956 {
1957         struct dt_txn_commit_cb *dcb;
1958         struct dt_txn_commit_cb *tmp;
1959
1960         /* call per-transaction stop callbacks if any */
1961         list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1962                                  dcb_linkage) {
1963                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1964                          "commit callback entry: magic=%x name='%s'\n",
1965                          dcb->dcb_magic, dcb->dcb_name);
1966                 list_del_init(&dcb->dcb_linkage);
1967                 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1968         }
1969 }
1970
1971 /*
1972  * Concurrency: shouldn't matter.
1973  */
1974 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1975                           struct thandle *th)
1976 {
1977         struct osd_thread_info *oti = osd_oti_get(env);
1978         struct osd_thandle *oh;
1979         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1980         struct osd_device *osd = osd_dt_dev(th->th_dev);
1981         struct qsd_instance *qsd = osd_def_qsd(osd);
1982         struct lquota_trans *qtrans;
1983         LIST_HEAD(truncates);
1984         int rc = 0, remove_agents = 0;
1985
1986         ENTRY;
1987
1988         oh = container_of(th, struct osd_thandle, ot_super);
1989
1990         remove_agents = oh->ot_remove_agents;
1991
1992         qtrans = oh->ot_quota_trans;
1993         oh->ot_quota_trans = NULL;
1994
1995         /* move locks to local list, stop tx, execute truncates */
1996         list_splice(&oh->ot_trunc_locks, &truncates);
1997
1998         if (oh->ot_handle != NULL) {
1999                 int rc2;
2000
2001                 handle_t *hdl = oh->ot_handle;
2002
2003                 /*
2004                  * add commit callback
2005                  * notice we don't do this in osd_trans_start()
2006                  * as underlying transaction can change during truncate
2007                  */
2008                 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
2009                                              &oh->ot_jcb);
2010
2011                 LASSERT(oti->oti_txns == 1);
2012                 oti->oti_txns--;
2013
2014                 rc = dt_txn_hook_stop(env, th);
2015                 if (rc != 0)
2016                         CERROR("%s: failed in transaction hook: rc = %d\n",
2017                                osd_name(osd), rc);
2018
2019                 osd_trans_stop_cb(oh, rc);
2020                 /* hook functions might modify th_sync */
2021                 hdl->h_sync = th->th_sync;
2022
2023                 oh->ot_handle = NULL;
2024                 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
2025                 if (rc2 != 0)
2026                         CERROR("%s: failed to stop transaction: rc = %d\n",
2027                                osd_name(osd), rc2);
2028                 if (!rc)
2029                         rc = rc2;
2030
2031                 osd_process_truncates(&truncates);
2032         } else {
2033                 osd_trans_stop_cb(oh, th->th_result);
2034                 OBD_FREE_PTR(oh);
2035         }
2036
2037         osd_trunc_unlock_all(env, &truncates);
2038
2039         /* inform the quota slave device that the transaction is stopping */
2040         qsd_op_end(env, qsd, qtrans);
2041
2042         /*
2043          * as we want journal IO and data IO to be concurrent, we don't block
2044          * awaiting data IO completion in osd_do_bio(); instead we wait here
2045          * once the transaction is submitted to the journal. All regular requests
2046          * don't do direct IO (except read/write), thus this wait_event becomes
2047          * a no-op for them.
2048          *
2049          * IMPORTANT: we have to wait until any IO submitted by this thread has
2050          * completed, otherwise the iobuf may be corrupted by a different request
2051          */
2052         wait_event(iobuf->dr_wait,
2053                        atomic_read(&iobuf->dr_numreqs) == 0);
2054         osd_fini_iobuf(osd, iobuf);
2055         if (!rc)
2056                 rc = iobuf->dr_error;
2057
2058         if (unlikely(remove_agents != 0))
2059                 osd_process_scheduled_agent_removals(env, osd);
2060
2061         oti->oti_ins_cache_depth--;
2062         /* reset OI cache for safety */
2063         if (oti->oti_ins_cache_depth == 0)
2064                 oti->oti_ins_cache_used = 0;
2065
2066         sb_end_write(osd_sb(osd));
2067
2068         RETURN(rc);
2069 }
2070
2071 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
2072 {
2073         struct osd_thandle *oh = container_of(th, struct osd_thandle,
2074                                               ot_super);
2075
2076         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
2077         LASSERT(dcb->dcb_func != NULL);
2078         if (dcb->dcb_flags & DCB_TRANS_STOP)
2079                 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
2080         else
2081                 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
2082
2083         return 0;
2084 }
2085
2086 /*
2087  * Called just before object is freed. Releases all resources except for
2088  * object itself (that is released by osd_object_free()).
2089  *
2090  * Concurrency: no concurrent access is possible that late in object
2091  * life-cycle.
2092  */
2093 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
2094 {
2095         struct osd_object *obj = osd_obj(l);
2096         struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
2097         struct inode *inode = obj->oo_inode;
2098         __u64 projid;
2099         qid_t uid;
2100         qid_t gid;
2101
2102         LINVRNT(osd_invariant(obj));
2103
2104         /*
2105          * If object is unlinked remove fid->ino mapping from object index.
2106          */
2107
2108         osd_index_fini(obj);
2109
2110         if (!inode)
2111                 return;
2112
2113         if (osd_has_index(obj) && obj->oo_dt.do_index_ops == &osd_index_iam_ops)
2114                 ldiskfs_set_inode_flag(inode, LDISKFS_INODE_JOURNAL_DATA);
2115
2116         uid = i_uid_read(inode);
2117         gid = i_gid_read(inode);
2118         projid = i_projid_read(inode);
2119
2120         obj->oo_inode = NULL;
2121         iput(inode);
2122
2123         /* do not rebalance quota if the caller needs to release memory,
2124          * otherwise qsd_refresh_usage() may enter a new ldiskfs
2125          * transaction and risk a deadlock - LU-12178 */
2126         if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
2127                 return;
2128
2129         if (!obj->oo_header && qsd) {
2130                 struct osd_thread_info *info = osd_oti_get(env);
2131                 struct lquota_id_info *qi = &info->oti_qi;
2132
2133                 /* Release granted quota to master if necessary */
2134                 qi->lqi_id.qid_uid = uid;
2135                 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
2136
2137                 qi->lqi_id.qid_uid = gid;
2138                 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
2139
2140                 qi->lqi_id.qid_uid = projid;
2141                 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
2142         }
2143 }
2144
2145 /*
2146  * Concurrency: ->loo_object_release() is called under site spin-lock.
2147  */
2148 static void osd_object_release(const struct lu_env *env,
2149                                struct lu_object *l)
2150 {
2151         struct osd_object *o = osd_obj(l);
2152
2153         /*
2154          * nobody should be releasing a non-destroyed object with nlink=0;
2155          * the API allows this, but ldiskfs doesn't like it and then reports
2156          * this inode as deleted
2157          */
2158         LASSERT(!(o->oo_destroyed == 0 && o->oo_inode &&
2159                   o->oo_inode->i_nlink == 0));
2160 }
2161
2162 /*
2163  * Concurrency: shouldn't matter.
2164  */
2165 static int osd_object_print(const struct lu_env *env, void *cookie,
2166                             lu_printer_t p, const struct lu_object *l)
2167 {
2168         struct osd_object *o = osd_obj(l);
2169         struct iam_descr *d;
2170
2171         if (o->oo_dir != NULL)
2172                 d = o->oo_dir->od_container.ic_descr;
2173         else
2174                 d = NULL;
2175         return (*p)(env, cookie,
2176                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
2177                     o, o->oo_inode,
2178                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
2179                     o->oo_inode ? o->oo_inode->i_generation : 0,
2180                     d ? d->id_ops->id_name : "plain");
2181 }
2182
2183 /*
2184  * Concurrency: shouldn't matter.
2185  */
2186 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2187                 struct obd_statfs *sfs, struct obd_statfs_info *info)
2188 {
2189         struct osd_device *osd = osd_dt_dev(d);
2190         struct super_block *sb = osd_sb(osd);
2191         struct kstatfs *ksfs;
2192         __u64 reserved;
2193         int result = 0;
2194
2195         if (unlikely(osd->od_mnt == NULL))
2196                 return -EINPROGRESS;
2197
2198         /* osd_lproc.c calls this without env, allocate ksfs for that case */
2199         if (unlikely(env == NULL)) {
2200                 OBD_ALLOC_PTR(ksfs);
2201                 if (ksfs == NULL)
2202                         return -ENOMEM;
2203         } else {
2204                 ksfs = &osd_oti_get(env)->oti_ksfs;
2205         }
2206
2207         result = sb->s_op->statfs(sb->s_root, ksfs);
2208         if (result)
2209                 goto out;
2210
2211         statfs_pack(sfs, ksfs);
2212         if (unlikely(sb->s_flags & SB_RDONLY))
2213                 sfs->os_state |= OS_STATFS_READONLY;
2214
2215         sfs->os_state |= osd->od_nonrotational ? OS_STATFS_NONROT : 0;
2216
2217         if (ldiskfs_has_feature_extents(sb))
2218                 sfs->os_maxbytes = sb->s_maxbytes;
2219         else
2220                 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2221
2222         /*
2223          * Reserve some space so to avoid fragmenting the filesystem too much.
2224          * Fragmentation not only impacts performance, but can also increase
2225          * metadata overhead significantly, causing grant calculation to be
2226          * wrong.
2227          *
2228          * Reserve 0.78% of total space, at least 8MB for small filesystems.
2229          */
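        /*
         * Illustration, assuming OSD_STATFS_RESERVED is 8MB and
         * OSD_STATFS_RESERVED_SHIFT is 7 (1/128 ~= 0.78%, matching the
         * comment above): with 4KB blocks the floor is 8MB >> 12 = 2048
         * blocks, and once the filesystem exceeds 2048 << 7 blocks (1GB)
         * the reservation becomes os_blocks >> 7.
         */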
2230         BUILD_BUG_ON(OSD_STATFS_RESERVED <= LDISKFS_MAX_BLOCK_SIZE);
2231         reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2232         if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2233                 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2234
2235         sfs->os_blocks -= reserved;
2236         sfs->os_bfree  -= min(reserved, sfs->os_bfree);
2237         sfs->os_bavail -= min(reserved, sfs->os_bavail);
2238
2239 out:
2240         if (unlikely(env == NULL))
2241                 OBD_FREE_PTR(ksfs);
2242         return result;
2243 }
2244
2245 /**
2246  * Estimate space needed for file creations. We assume the largest filename
2247  * is the decimal representation of 2^64 - 1, hence a filename of 20 chars.
2248  * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
2249  */
2250 #ifdef __LDISKFS_DIR_REC_LEN
2251 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2252 #else
2253 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2254 #endif
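/*
 * Sanity check of the 28-byte figure above, assuming the usual ldiskfs
 * directory entry layout of an 8-byte header plus the name rounded up to a
 * multiple of 4: (8 + 20 + 3) & ~3 = 28 bytes per entry.
 */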
2255
2256 /*
2257  * Concurrency: doesn't access mutable data.
2258  */
2259 static void osd_conf_get(const struct lu_env *env,
2260                          const struct dt_device *dev,
2261                          struct dt_device_param *param)
2262 {
2263         struct osd_device *d = osd_dt_dev(dev);
2264         struct super_block *sb = osd_sb(d);
2265         struct blk_integrity *bi = bdev_get_integrity(sb->s_bdev);
2266         const char *name;
2267         int ea_overhead;
2268
2269         /*
2270          * XXX should be taken from not-yet-existing fs abstraction layer.
2271          */
2272         param->ddp_max_name_len = LDISKFS_NAME_LEN;
2273         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
2274         param->ddp_symlink_max  = sb->s_blocksize;
2275         param->ddp_mount_type   = LDD_MT_LDISKFS;
2276         if (ldiskfs_has_feature_extents(sb))
2277                 param->ddp_maxbytes = sb->s_maxbytes;
2278         else
2279                 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2280         /*
2281          * inode are statically allocated, so per-inode space consumption
2282          * inodes are statically allocated, so per-inode space consumption
2283          */
2284         param->ddp_inodespace     = PER_OBJ_USAGE;
2285         /*
2286          * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2287          * is 128MB) which is unlikely to be hit in real life. Report a smaller
2288          * maximum length to not under-count the actual number of extents
2289          * needed for writing a file if there are sub-optimal block allocations.
2290          */
2291         param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
2292         /* worst-case extent insertion metadata overhead */
2293         param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2294         param->ddp_mntopts = 0;
2295         if (test_opt(sb, XATTR_USER))
2296                 param->ddp_mntopts |= MNTOPT_USERXATTR;
2297         if (test_opt(sb, POSIX_ACL))
2298                 param->ddp_mntopts |= MNTOPT_ACL;
2299
2300         /*
2301          * LOD might calculate the max stripe count based on max_ea_size,
2302          * so we need to take the overhead into account as well:
2303          * xattr_header + magic + xattr_entry_head
2304          */
2305         ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2306                       LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2307
2308 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2309         if (ldiskfs_has_feature_ea_inode(sb))
2310                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2311                                                                 ea_overhead;
2312         else
2313 #endif
2314                 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2315
2316         if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE)
2317                 param->ddp_max_ea_size = OBD_MAX_EA_SIZE;
2318
2319         /*
2320          * Preferred RPC size for efficient disk IO.  4MB shows good
2321          * all-around performance for ldiskfs, but use bigalloc chunk size
2322          * by default if larger.
2323          */
2324 #if defined(LDISKFS_CLUSTER_SIZE)
2325         if (LDISKFS_CLUSTER_SIZE(sb) > DT_DEF_BRW_SIZE)
2326                 param->ddp_brw_size = LDISKFS_CLUSTER_SIZE(sb);
2327         else
2328 #endif
2329                 param->ddp_brw_size = DT_DEF_BRW_SIZE;
2330
2331         param->ddp_t10_cksum_type = 0;
2332         if (bi) {
2333                 unsigned short interval = blk_integrity_interval(bi);
2334                 name = blk_integrity_name(bi);
2335                 /*
2336                  * Expected values:
2337                  * T10-DIF-TYPE1-CRC
2338                  * T10-DIF-TYPE3-CRC
2339                  * T10-DIF-TYPE1-IP
2340                  * T10-DIF-TYPE3-IP
2341                  */
2342                 if (strncmp(name, "T10-DIF-TYPE",
2343                             sizeof("T10-DIF-TYPE") - 1) == 0) {
2344                         /* also skip "1/3-" at end */
2345                         const int type_off = sizeof("T10-DIF-TYPE.");
2346                         char type_number = name[type_off - 2];
2347
2348                         if (interval != 512 && interval != 4096) {
2349                                 CERROR("%s: unsupported T10PI sector size %u\n",
2350                                        d->od_svname, interval);
2351                         } else if (type_number != '1' && type_number != '3') {
2352                                 CERROR("%s: unsupported T10PI type %s\n",
2353                                        d->od_svname, name);
2354                         } else if (strcmp(name + type_off, "CRC") == 0) {
2355                                 d->od_t10_type = type_number == '1' ?
2356                                         OSD_T10_TYPE1_CRC : OSD_T10_TYPE3_CRC;
2357                                 param->ddp_t10_cksum_type = interval == 512 ?
2358                                         OBD_CKSUM_T10CRC512 :
2359                                         OBD_CKSUM_T10CRC4K;
2360                         } else if (strcmp(name + type_off, "IP") == 0) {
2361                                 d->od_t10_type = type_number == '1' ?
2362                                         OSD_T10_TYPE1_IP : OSD_T10_TYPE3_IP;
2363                                 param->ddp_t10_cksum_type = interval == 512 ?
2364                                         OBD_CKSUM_T10IP512 :
2365                                         OBD_CKSUM_T10IP4K;
2366                         } else {
2367                                 CERROR("%s: unsupported checksum type of "
2368                                        "T10PI type '%s'",
2369                                        d->od_svname, name);
2370                         }
2371
2372                 } else {
2373                         CERROR("%s: unsupported T10PI type '%s'",
2374                                d->od_svname, name);
2375                 }
2376         }
2377 }
2378
2379 static struct super_block *osd_mnt_sb_get(const struct dt_device *d)
2380 {
2381         return osd_sb(osd_dt_dev(d));
2382 }
2383
2384 /*
2385  * Concurrency: shouldn't matter.
2386  */
2387 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2388 {
2389         int rc;
2390         struct super_block *s = osd_sb(osd_dt_dev(d));
2391         ENTRY;
2392
2393         down_read(&s->s_umount);
2394         rc = s->s_op->sync_fs(s, 1);
2395         up_read(&s->s_umount);
2396
2397         CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
2398                rc);
2399
2400         return rc;
2401 }
2402
2403 /**
2404  * Start commit for OSD device.
2405  *
2406  * An implementation of dt_commit_async method for OSD device.
2407  * Asynchronously starts the underlying fs sync and thereby a transaction
2408  * commit.
2409  *
2410  * \param env environment
2411  * \param d dt device
2412  *
2413  * \see dt_device_operations
2414  */
2415 static int osd_commit_async(const struct lu_env *env,
2416                             struct dt_device *d)
2417 {
2418         struct super_block *s = osd_sb(osd_dt_dev(d));
2419         int rc;
2420
2421         ENTRY;
2422
2423         CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
2424         down_read(&s->s_umount);
2425         rc = s->s_op->sync_fs(s, 0);
2426         up_read(&s->s_umount);
2427
2428         RETURN(rc);
2429 }
2430
2431 static int (*priv_security_file_alloc)(struct file *file);
2432
2433 int osd_security_file_alloc(struct file *file)
2434 {
2435         if (priv_security_file_alloc)
2436                 return priv_security_file_alloc(file);
2437         return 0;
2438 }
2439
2440 /*
2441  * Concurrency: shouldn't matter.
2442  */
2443 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2444 {
2445         struct super_block *sb = osd_sb(osd_dt_dev(d));
2446         struct block_device *dev = sb->s_bdev;
2447         int rc = -EOPNOTSUPP;
2448
2449         ENTRY;
2450
2451         CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2452                osd_dt_dev(d)->od_svname, (long)dev, rc);
2453
2454         RETURN(rc);
2455 }
2456
2457 /**
2458  * Note: quota credits are not counted here.
2459  * If we mount with --data_journal we may need more.
2460  */
2461 const int osd_dto_credits_noquota[DTO_NR] = {
2462         /**
2463          * Insert.
2464          * INDEX_EXTRA_TRANS_BLOCKS(8) +
2465          * SINGLEDATA_TRANS_BLOCKS(8)
2466          * XXX Note: maybe iam needs more, since iam has more levels than
2467          *           the EXT3 htree.
2468          */
2469         [DTO_INDEX_INSERT]  = 16,
2470         /**
2471          * Delete
2472          * just modify a single entry, probably merge few within a block
2473          * just modifies a single entry, probably merging a few within a block
2474         [DTO_INDEX_DELETE]  = 1,
2475         /**
2476          * Used for OI scrub
2477          */
2478         [DTO_INDEX_UPDATE]  = 16,
2479         /**
2480          * 4(inode, inode bits, groups, GDT)
2481          *   notice: OI updates are counted separately with DTO_INDEX_INSERT
2482          */
2483         [DTO_OBJECT_CREATE] = 4,
2484         /**
2485          * 4(inode, inode bits, groups, GDT)
2486          *   notice: OI updates are counted separately with DTO_INDEX_DELETE
2487          */
2488         [DTO_OBJECT_DELETE] = 4,
2489         /**
2490          * Attr set credits (inode)
2491          */
2492         [DTO_ATTR_SET_BASE] = 1,
2493         /**
2494          * Xattr set. The same as xattr of EXT3.
2495          * DATA_TRANS_BLOCKS(14)
2496          * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2497          * is also counted in; it is not clear why.
2498          */
2499         [DTO_XATTR_SET]     = 14,
2500         /**
2501          * credits for inode change during write.
2502          */
2503         [DTO_WRITE_BASE]    = 3,
2504         /**
2505          * credits for single block write.
2506          */
2507         [DTO_WRITE_BLOCK]   = 14,
2508         /**
2509          * Attr set credits for chown.
2510          * This is extra credits for setattr, and it is null without quota
2511          * These are extra credits for setattr, and they are zero without quota
2512         [DTO_ATTR_SET_CHOWN] = 0
2513 };
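/*
 * Example (illustrative only, OI and quota updates not included): creating
 * an object and inserting its name into the parent directory declares
 * osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
 * osd_dto_credits_noquota[DTO_INDEX_INSERT] = 4 + 16 = 20 journal credits.
 */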
2514
2515 static const struct dt_device_operations osd_dt_ops = {
2516         .dt_root_get       = osd_root_get,
2517         .dt_statfs         = osd_statfs,
2518         .dt_trans_create   = osd_trans_create,
2519         .dt_trans_start    = osd_trans_start,
2520         .dt_trans_stop     = osd_trans_stop,
2521         .dt_trans_cb_add   = osd_trans_cb_add,
2522         .dt_conf_get       = osd_conf_get,
2523         .dt_mnt_sb_get     = osd_mnt_sb_get,
2524         .dt_sync           = osd_sync,
2525         .dt_ro             = osd_ro,
2526         .dt_commit_async   = osd_commit_async,
2527 };
2528
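/*
 * dt_object lock methods: all of them operate on oo_sem.  The "role"
 * argument selects the lockdep nesting level, oo_owner records the current
 * writer for osd_write_locked(), and the per-thread oti_r_locks/oti_w_locks
 * counters back the assertions in the unlock paths.
 */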
2529 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
2530                           unsigned int role)
2531 {
2532         struct osd_object *obj = osd_dt_obj(dt);
2533         struct osd_thread_info *oti = osd_oti_get(env);
2534
2535         LINVRNT(osd_invariant(obj));
2536
2537         LASSERT(obj->oo_owner != env);
2538         down_read_nested(&obj->oo_sem, role);
2539
2540         LASSERT(obj->oo_owner == NULL);
2541         oti->oti_r_locks++;
2542 }
2543
2544 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
2545                            unsigned int role)
2546 {
2547         struct osd_object *obj = osd_dt_obj(dt);
2548         struct osd_thread_info *oti = osd_oti_get(env);
2549
2550         LINVRNT(osd_invariant(obj));
2551
2552         LASSERT(obj->oo_owner != env);
2553         down_write_nested(&obj->oo_sem, role);
2554
2555         LASSERT(obj->oo_owner == NULL);
2556         obj->oo_owner = env;
2557         oti->oti_w_locks++;
2558 }
2559
2560 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
2561 {
2562         struct osd_object *obj = osd_dt_obj(dt);
2563         struct osd_thread_info *oti = osd_oti_get(env);
2564
2565         LINVRNT(osd_invariant(obj));
2566
2567         LASSERT(oti->oti_r_locks > 0);
2568         oti->oti_r_locks--;
2569         up_read(&obj->oo_sem);
2570 }
2571
2572 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
2573 {
2574         struct osd_object *obj = osd_dt_obj(dt);
2575         struct osd_thread_info *oti = osd_oti_get(env);
2576
2577         LINVRNT(osd_invariant(obj));
2578
2579         LASSERT(obj->oo_owner == env);
2580         LASSERT(oti->oti_w_locks > 0);
2581         oti->oti_w_locks--;
2582         obj->oo_owner = NULL;
2583         up_write(&obj->oo_sem);
2584 }
2585
2586 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
2587 {
2588         struct osd_object *obj = osd_dt_obj(dt);
2589
2590         LINVRNT(osd_invariant(obj));
2591
2592         return obj->oo_owner == env;
2593 }
2594
2595 static void osd_inode_getattr(const struct lu_env *env,
2596                               struct inode *inode, struct lu_attr *attr)
2597 {
2598         attr->la_valid  |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2599                            LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2600                            LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2601                            LA_BLKSIZE | LA_TYPE | LA_BTIME;
2602
2603         attr->la_atime = inode->i_atime.tv_sec;
2604         attr->la_mtime = inode->i_mtime.tv_sec;
2605         attr->la_ctime = inode->i_ctime.tv_sec;
2606         attr->la_btime = LDISKFS_I(inode)->i_crtime.tv_sec;
2607         attr->la_mode    = inode->i_mode;
2608         attr->la_size    = i_size_read(inode);
2609         attr->la_blocks  = inode->i_blocks;
2610         attr->la_uid     = i_uid_read(inode);
2611         attr->la_gid     = i_gid_read(inode);
2612         attr->la_projid  = i_projid_read(inode);
2613         attr->la_flags   = ll_inode_to_ext_flags(inode->i_flags);
2614         attr->la_nlink   = inode->i_nlink;
2615         attr->la_rdev    = inode->i_rdev;
2616         attr->la_blksize = 1 << inode->i_blkbits;
2617         attr->la_blkbits = inode->i_blkbits;
2618         /*
2619          * Ext4 does not transfer the inherit flag from the raw inode
2620          * to the in-memory inode flags, and ext4 internally tests the raw
2621          * inode @i_flags directly. Instead of patching ext4, we do it here.
2622          */
2623         if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
2624                 attr->la_flags |= LUSTRE_PROJINHERIT_FL;
2625 }
2626
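/*
 * Count the entries of a directory by walking its index iterator; the
 * result is cached in oo_dirent_count so that later calls (see
 * osd_attr_get()) can return it without rescanning the directory.
 */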
2627 static int osd_dirent_count(const struct lu_env *env, struct dt_object *dt,
2628                             u64 *count)
2629 {
2630         struct osd_object *obj = osd_dt_obj(dt);
2631         const struct dt_it_ops *iops;
2632         struct dt_it *it;
2633         int rc;
2634
2635         ENTRY;
2636
2637         LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2638         LASSERT(fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)));
2639
2640         if (obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
2641                 *count = obj->oo_dirent_count;
2642                 RETURN(0);
2643         }
2644
2645         /* directory not initialized yet */
2646         if (!dt->do_index_ops) {
2647                 *count = 0;
2648                 RETURN(0);
2649         }
2650
2651         iops = &dt->do_index_ops->dio_it;
2652         it = iops->init(env, dt, LUDA_64BITHASH);
2653         if (IS_ERR(it))
2654                 RETURN(PTR_ERR(it));
2655
2656         rc = iops->load(env, it, 0);
2657         if (rc < 0) {
2658                 if (rc == -ENODATA) {
2659                         rc = 0;
2660                         *count = 0;
2661                 }
2662                 GOTO(out, rc);
2663         }
2664         if (rc > 0)
2665                 rc = iops->next(env, it);
2666
2667         for (*count = 0; rc == 0 || rc == -ESTALE; rc = iops->next(env, it)) {
2668                 if (rc == -ESTALE)
2669                         continue;
2670
2671                 if (iops->key_size(env, it) == 0)
2672                         continue;
2673
2674                 (*count)++;
2675         }
2676         if (rc == 1) {
2677                 obj->oo_dirent_count = *count;
2678                 rc = 0;
2679         }
2680 out:
2681         iops->put(env, it);
2682         iops->fini(env, it);
2683
2684         RETURN(rc);
2685 }
2686
2687 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
2688                         struct lu_attr *attr)
2689 {
2690         struct osd_object *obj = osd_dt_obj(dt);
2691         int rc = 0;
2692
2693         if (unlikely(!dt_object_exists(dt)))
2694                 return -ENOENT;
2695         if (unlikely(obj->oo_destroyed))
2696                 return -ENOENT;
2697
2698         LASSERT(!dt_object_remote(dt));
2699         LINVRNT(osd_invariant(obj));
2700
2701         spin_lock(&obj->oo_guard);
2702         osd_inode_getattr(env, obj->oo_inode, attr);
2703         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
2704                 attr->la_valid |= LA_FLAGS;
2705                 attr->la_flags |= LUSTRE_ORPHAN_FL;
2706         }
2707         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
2708                 attr->la_valid |= LA_FLAGS;
2709                 attr->la_flags |= LUSTRE_ENCRYPT_FL;
2710         }
2711         spin_unlock(&obj->oo_guard);
2712
2713         if (S_ISDIR(obj->oo_inode->i_mode) &&
2714             fid_is_namespace_visible(lu_object_fid(&dt->do_lu)))
2715                 rc = osd_dirent_count(env, dt, &attr->la_dirent_count);
2716
2717         return rc;
2718 }
2719
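/*
 * Declare the quota changes needed when an id (uid/gid/projid) changes:
 * as coded below this reserves four adjustments, +1/-1 inode for the
 * new/old id and +bspace/-bspace blocks for the new/old id, optionally
 * ignoring -EDQUOT/-EINPROGRESS when enforcement is not requested.
 */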
2720 static int osd_declare_attr_qid(const struct lu_env *env,
2721                                 struct osd_object *obj,
2722                                 struct osd_thandle *oh, long long bspace,
2723                                 qid_t old_id, qid_t new_id, bool enforce,
2724                                 unsigned int type, bool ignore_edquot)
2725 {
2726         int rc;
2727         struct osd_thread_info *info = osd_oti_get(env);
2728         struct lquota_id_info  *qi = &info->oti_qi;
2729
2730         qi->lqi_type = type;
2731         /* inode accounting */
2732         qi->lqi_is_blk = false;
2733
2734         /* one more inode for the new id ... */
2735         qi->lqi_id.qid_uid = new_id;
2736         qi->lqi_space      = 1;
2737         /* Reserve credits for the new id */
2738         rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2739         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2740                 rc = 0;
2741         if (rc)
2742                 RETURN(rc);
2743
2744         /* and one less inode for the current id */
2745         qi->lqi_id.qid_uid = old_id;
2746         qi->lqi_space = -1;
2747         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2748         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2749                 rc = 0;
2750         if (rc)
2751                 RETURN(rc);
2752
2753         /* block accounting */
2754         qi->lqi_is_blk = true;
2755
2756         /* more blocks for the new id ... */
2757         qi->lqi_id.qid_uid = new_id;
2758         qi->lqi_space      = bspace;
2759         /*
2760          * Credits for the new uid have been reserved; re-use "obj"
2761          * to save credit reservation.
2762          */
2763         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2764         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2765                 rc = 0;
2766         if (rc)
2767                 RETURN(rc);
2768
2769         /* and finally less blocks for the current uid */
2770         qi->lqi_id.qid_uid = old_id;
2771         qi->lqi_space      = -bspace;
2772         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2773         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2774                 rc = 0;
2775
2776         RETURN(rc);
2777 }
2778
2779 static int osd_declare_attr_set(const struct lu_env *env,
2780                                 struct dt_object *dt,
2781                                 const struct lu_attr *attr,
2782                                 struct thandle *handle)
2783 {
2784         struct osd_thandle *oh;
2785         struct osd_object *obj;
2786         qid_t uid;
2787         qid_t gid;
2788         long long bspace;
2789         int rc = 0;
2790         bool enforce;
2791
2792         ENTRY;
2793
2794         LASSERT(dt != NULL);
2795         LASSERT(handle != NULL);
2796
2797         obj = osd_dt_obj(dt);
2798         LASSERT(osd_invariant(obj));
2799
2800         oh = container_of(handle, struct osd_thandle, ot_super);
2801         LASSERT(oh->ot_handle == NULL);
2802
2803         osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2804                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2805
2806         osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2807                              osd_dto_credits_noquota[DTO_XATTR_SET]);
2808
2809         if (attr == NULL || obj->oo_inode == NULL)
2810                 RETURN(rc);
2811
2812         bspace   = obj->oo_inode->i_blocks << 9;
2813         bspace   = toqb(bspace);
2814
2815         /*
2816          * Changing ownership is always performed by the super user; it should not
2817          * fail with EDQUOT unless required explicitly.
2818          *
2819          * We still need to call the osd_declare_qid() to calculate the journal
2820          * credits for updating quota accounting files and to trigger quota
2821          * space adjustment once the operation is completed.
2822          */
2823         if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2824                 bool ignore_edquot = !(attr->la_flags & LUSTRE_SET_SYNC_FL);
2825
2826                 if (!ignore_edquot)
2827                         CDEBUG(D_QUOTA, "%s: enforce quota on UID %u, GID %u"
2828                                " (the quota space is %lld)\n",
2829                                obj->oo_inode->i_sb->s_id, attr->la_uid,
2830                                attr->la_gid, bspace);
2831
2832                 /* USERQUOTA */
2833                 uid = i_uid_read(obj->oo_inode);
2834                 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2835                 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2836                                           attr->la_uid, enforce, USRQUOTA,
2837                                           true);
2838                 if (rc)
2839                         RETURN(rc);
2840
2841                 gid = i_gid_read(obj->oo_inode);
2842                 CDEBUG(D_QUOTA, "declare uid %d -> %d gid %d -> %d\n", uid,
2843                        attr->la_uid, gid, attr->la_gid);
2844                 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2845                 rc = osd_declare_attr_qid(env, obj, oh, bspace, gid,
2846                                           attr->la_gid, enforce, GRPQUOTA,
2847                                           ignore_edquot);
2848                 if (rc)
2849                         RETURN(rc);
2850
2851         }
2852 #ifdef HAVE_PROJECT_QUOTA
2853         if (attr->la_valid & LA_PROJID) {
2854                 __u32 projid = i_projid_read(obj->oo_inode);
2855
2856                 enforce = (attr->la_valid & LA_PROJID) &&
2857                                         (attr->la_projid != projid);
2858                 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2859                                           (qid_t)projid, (qid_t)attr->la_projid,
2860                                           enforce, PRJQUOTA, true);
2861                 if (rc)
2862                         RETURN(rc);
2863         }
2864 #endif
2865         RETURN(rc);
2866 }
2867
2868 static int osd_inode_setattr(const struct lu_env *env,
2869                              struct inode *inode, const struct lu_attr *attr)
2870 {
2871         __u64 bits = attr->la_valid;
2872
2873         /* Only allow set size for regular file */
2874         if (!S_ISREG(inode->i_mode))
2875                 bits &= ~(LA_SIZE | LA_BLOCKS);
2876
2877         if (bits == 0)
2878                 return 0;
2879
2880         if (bits & LA_ATIME)
2881                 inode->i_atime = osd_inode_time(inode, attr->la_atime);
2882         if (bits & LA_CTIME)
2883                 inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
2884         if (bits & LA_MTIME)
2885                 inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
2886         if (bits & LA_SIZE) {
2887                 spin_lock(&inode->i_lock);
2888                 LDISKFS_I(inode)->i_disksize = attr->la_size;
2889                 i_size_write(inode, attr->la_size);
2890                 spin_unlock(&inode->i_lock);
2891         }
2892
2893         /*
2894          * The OSD must not change "i_blocks", which is used by quota;
2895          * only ldiskfs itself should update it.
2896          */
2897         if (bits & LA_MODE)
2898                 inode->i_mode = (inode->i_mode & S_IFMT) |
2899                                 (attr->la_mode & ~S_IFMT);
2900         if (bits & LA_UID)
2901                 i_uid_write(inode, attr->la_uid);
2902         if (bits & LA_GID)
2903                 i_gid_write(inode, attr->la_gid);
2904         if (bits & LA_PROJID)
2905                 i_projid_write(inode, attr->la_projid);
2906         if (bits & LA_NLINK)
2907                 set_nlink(inode, attr->la_nlink);
2908         if (bits & LA_RDEV)
2909                 inode->i_rdev = attr->la_rdev;
2910
2911         if (bits & LA_FLAGS) {
2912                 /* always keep S_NOCMTIME */
2913                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2914                                  S_NOCMTIME;
2915                 /*
2916                  * Ext4 does not transfer the inherit flags from
2917                  * @inode->i_flags to the raw inode's i_flags when writing
2918                  * flags, so we do it explicitly here.
2919                  */
2920                 if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
2921                         LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
2922                 else
2923                         LDISKFS_I(inode)->i_flags &= ~LUSTRE_PROJINHERIT_FL;
2924         }
2925         return 0;
2926 }
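
/*
 * Only the fields whose LA_* bits are set in la_valid are applied to the
 * inode.  As a sketch, a caller updating just the mode and mtime would pass
 * something like the following (values are illustrative):
 *
 *	struct lu_attr attr = {
 *		.la_valid = LA_MODE | LA_MTIME,
 *		.la_mode  = S_IFREG | 0644,
 *		.la_mtime = ktime_get_real_seconds(),
 *	};
 *
 * Size and block updates are additionally restricted to regular files by
 * the S_ISREG() check above.
 */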
2927
2928 #ifdef HAVE_PROJECT_QUOTA
2929 static int osd_transfer_project(struct inode *inode, __u32 projid)
2930 {
2931         struct super_block *sb = inode->i_sb;
2932         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2933         int err;
2934         kprojid_t kprojid;
2935         struct ldiskfs_iloc iloc;
2936         struct ldiskfs_inode *raw_inode;
2937         struct dquot *transfer_to[LDISKFS_MAXQUOTAS] = { };
2938
2939         if (!ldiskfs_has_feature_project(sb)) {
2940                 LASSERT(__kprojid_val(LDISKFS_I(inode)->i_projid)
2941                         == LDISKFS_DEF_PROJID);
2942                 if (projid != LDISKFS_DEF_PROJID)
2943                         return -EOPNOTSUPP;
2944                 else
2945                         return 0;
2946         }
2947
2948         if (LDISKFS_INODE_SIZE(sb) <= LDISKFS_GOOD_OLD_INODE_SIZE)
2949                 return -EOPNOTSUPP;
2950
2951         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2952         if (projid_eq(kprojid, LDISKFS_I(inode)->i_projid))
2953                 return 0;
2954
2955         err = ldiskfs_get_inode_loc(inode, &iloc);
2956         if (err)
2957                 return err;
2958
2959         raw_inode = ldiskfs_raw_inode(&iloc);
2960         if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
2961                 err = -EOVERFLOW;
2962                 brelse(iloc.bh);
2963                 return err;
2964         }
2965         brelse(iloc.bh);
2966
2967         dquot_initialize(inode);
2968         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2969         if (!IS_ERR(transfer_to[PRJQUOTA])) {
2970                 err = __dquot_transfer(inode, transfer_to);
2971                 dqput(transfer_to[PRJQUOTA]);
2972                 if (err)
2973                         return err;
2974         }
2975
2976         return err;
2977 }
2978 #endif
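
/*
 * Project ID changes cannot go through dquot_transfer(), which only
 * understands the UID/GID carried in a struct iattr, so the helper above
 * charges the space and inode usage to the new project dquot directly via
 * __dquot_transfer().  It also rejects the change when the filesystem
 * lacks the project feature or its inodes are too small to store i_projid.
 */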
2979
2980 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
2981 {
2982         int rc;
2983
2984         if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
2985             (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
2986                 struct iattr iattr;
2987
2988                 CDEBUG(D_QUOTA,
2989                        "executing dquot_transfer inode %ld uid %d -> %d gid %d -> %d\n",
2990                        inode->i_ino, i_uid_read(inode), attr->la_uid,
2991                        i_gid_read(inode), attr->la_gid);
2992
2993                 dquot_initialize(inode);
2994                 iattr.ia_valid = 0;
2995                 if (attr->la_valid & LA_UID)
2996                         iattr.ia_valid |= ATTR_UID;
2997                 if (attr->la_valid & LA_GID)
2998                         iattr.ia_valid |= ATTR_GID;
2999                 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
3000                 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
3001
3002                 rc = dquot_transfer(inode, &iattr);
3003                 if (rc) {
3004                         CERROR("%s: quota transfer failed: rc = %d. Is quota "
3005                                "enforcement enabled on the ldiskfs "
3006                                "filesystem?\n", inode->i_sb->s_id, rc);
3007                         return rc;
3008                 }
3009         }
3010
3011         /* Project ID transfer is handled separately from UID/GID */
3012         if (attr->la_valid & LA_PROJID &&
3013             attr->la_projid != i_projid_read(inode)) {
3014 #ifdef HAVE_PROJECT_QUOTA
3015                 rc = osd_transfer_project(inode, attr->la_projid);
3016 #else
3017                 rc = -ENOTSUPP;
3018 #endif
3019                 if (rc) {
3020                         CERROR("%s: quota transfer failed: rc = %d. Is project "
3021                                "enforcement enabled on the ldiskfs "
3022                                "filesystem?\n", inode->i_sb->s_id, rc);
3023                         return rc;
3024                 }
3025         }
3026         return 0;
3027 }
3028
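/*
 * Apply attribute changes to an existing object within an already started
 * transaction: quota is transferred first (outside oo_guard, since it may
 * block), the in-core inode fields are then updated under oo_guard, and
 * any flags from LUSTRE_LMA_FL_MASKS are stored in the LMA xattr rather
 * than in the on-disk inode flags.
 */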
3029 static int osd_attr_set(const struct lu_env *env,
3030                         struct dt_object *dt,
3031                         const struct lu_attr *attr,
3032                         struct thandle *handle)
3033 {
3034         struct osd_object *obj = osd_dt_obj(dt);
3035         struct inode *inode;
3036         int rc;
3037
3038         if (!dt_object_exists(dt))
3039                 return -ENOENT;
3040
3041         LASSERT(handle != NULL);
3042         LASSERT(!dt_object_remote(dt));
3043         LASSERT(osd_invariant(obj));
3044
3045         osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
3046
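        /*
         * Fault-injection hook: when OBD_FAIL_OSD_FID_MAPPING is set on an
         * MDT device, deliberately overwrite the OI mapping for this FID
         * with garbage (the memset below), so that OI scrub tests can verify
         * the bad mapping is later detected and rebuilt.
         */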
3047         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) &&
3048             !osd_obj2dev(obj)->od_is_ost) {
3049                 struct osd_thread_info *oti = osd_oti_get(env);
3050                 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
3051                 struct lu_fid *fid1 = &oti->oti_fid;
3052                 struct osd_inode_id *id = &oti->oti_id;
3053                 struct iam_path_descr *ipd;
3054                 struct iam_container *bag;
3055                 struct osd_thandle *oh;
3056                 int rc;
3057
3058                 fid_cpu_to_be(fid1, fid0);
3059                 memset(id, 1, sizeof(*id));
3060                 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
3061                                   fid0)->oi_dir.od_container;
3062                 ipd = osd_idx_ipd_get(env, bag);
3063                 if (unlikely(ipd == NULL))
3064                         RETURN(-ENOMEM);
3065
3066                 oh = container_of(handle, struct osd_thandle, ot_super);
3067                 rc = iam_update(oh->ot_handle, bag,
3068                                 (const struct iam_key *)fid1,
3069                                 (const struct iam_rec *)id, ipd);
3070                 osd_ipd_put(env, bag, ipd);
3071                 return rc > 0 ? 0 : rc;
3072         }
3073
3074         inode = obj->oo_inode;
3075
3076         rc = osd_quota_transfer(inode, attr);
3077         if (rc)
3078                 return rc;
3079
3080         spin_lock(&obj->oo_guard);
3081         rc = osd_inode_setattr(env, inode, attr);
3082         spin_unlock(&obj->oo_guard);
3083         if (rc != 0)
3084                 GOTO(out, rc);
3085
3086         osd_dirty_inode(inode, I_DIRTY_DATASYNC);
3087
3088         osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
3089
3090         if (!(attr->la_valid & LA_FLAGS))
3091                 GOTO(out, rc);
3092
3093         /* Check whether extra flags need to be stored in the LMA xattr */
3094         if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
3095                 struct osd_thread_info *info = osd_oti_get(env);
3096                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
3097
3098                 LASSERT(!obj->oo_pfid_in_lma);
3099
3100                 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
3101                                  &info->oti_ost_attrs);
3102                 if (rc)
3103                         GOTO(out, rc);
3104
3105                 lma->lma_incompat |=
3106                         lustre_to_lma_flags(attr->la_flags);
3107                 lustre_lma_swab(lma);
3108
3109                 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3110
3111                 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
3112                                      lma, sizeof(*lma), XATTR_REPLACE);
3113                 if (rc != 0) {
3114                         struct osd_device *osd = osd_obj2dev(obj);
3115
3116                         CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
3117                               osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
3118                               lma->lma_incompat, rc);
3119                 } else {
3120                         obj->oo_lma_flags =
3121                                 attr->la_flags & LUSTRE_LMA_FL_MASKS;
3122                 }
3123                 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
3124         }
3125 out:
3126
3127         return rc;
3128 }
3129
3130 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
3131                                            struct osd_object *obj,
3132                                            const char *name, const int namelen)
3133 {
3134         return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
3135 }
3136
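/*
 * Allocate the backing ldiskfs inode for a new object.  The owner is taken
 * from @attr when LA_UID/LA_GID are supplied, the parent comes from the
 * allocation hint when it refers to a local object (falling back to the
 * filesystem root otherwise), and for directories a htree lock head is
 * pre-allocated when parallel directory operations are enabled.
 */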
3137 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
3138                       umode_t mode, struct dt_allocation_hint *hint,
3139                       struct thandle *th, struct lu_attr *attr)
3140 {
3141         int result;
3142         struct osd_device *osd = osd_obj2dev(obj);
3143         struct osd_thandle *oth;
3144         struct dt_object *parent = NULL;
3145         struct inode *inode;
3146         uid_t owner[2] = {0, 0};
3147
3148         if (attr->la_valid & LA_UID)
3149                 owner[0] = attr->la_uid;
3150         if (attr->la_valid & LA_GID)
3151                 owner[1] = attr->la_gid;
3152
3153         LINVRNT(osd_invariant(obj));
3154         LASSERT(obj->oo_inode == NULL);
3155         LASSERT(obj->oo_hl_head == NULL);
3156
3157         if (S_ISDIR(mode) && ldiskfs_pdo) {
3158                 obj->oo_hl_head =
3159                         ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
3160                 if (obj->oo_hl_head == NULL)
3161                         return -ENOMEM;
3162         }
3163
3164         oth = container_of(th, struct osd_thandle, ot_super);
3165         LASSERT(oth->ot_handle->h_transaction != NULL);
3166
3167         if (hint != NULL && hint->dah_parent != NULL &&
3168             !dt_object_remote(hint->dah_parent))
3169                 parent = hint->dah_parent;
3170
3171         inode = ldiskfs_create_inode(oth->ot_handle,
3172                                      parent ? osd_dt_obj(parent)->oo_inode :
3173                                               osd_sb(osd)->s_root->d_inode,
3174                                      mode, owner);
3175         if (!IS_ERR(inode)) {
3176                 /* Do not update file c/mtime in ldiskfs. */
3177                 inode->i_flags |= S_NOCMTIME;
3178
3179                 /*
3180                  * A newly created object is known to be consistent,
3181                  * so there is no need to scrub it.
3182                  */
3183                 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
3184
3185                 obj->oo_inode = inode;
3186                 result = 0;
3187         } else {
3188                 if (obj->oo_hl_head != NULL) {