1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd/osd_handler.c
33  *
34  * Top-level entry points into osd module
35  *
36  * Author: Nikita Danilov <nikita@clusterfs.com>
37  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
38  */
39
40 #define DEBUG_SUBSYSTEM S_OSD
41
42 #include <linux/kallsyms.h>
43 #include <linux/module.h>
44 #include <linux/user_namespace.h>
45 #ifdef HAVE_UIDGID_HEADER
46 # include <linux/uidgid.h>
47 #endif
48
49 /* prerequisite for linux/xattr.h */
50 #include <linux/types.h>
51 /* prerequisite for linux/xattr.h */
52 #include <linux/fs.h>
53 /* XATTR_{REPLACE,CREATE} */
54 #include <linux/xattr.h>
55
56 #include <ldiskfs/ldiskfs.h>
57 #include <ldiskfs/xattr.h>
58 #include <ldiskfs/ldiskfs_extents.h>
59 #undef ENTRY
60 /*
61  * struct OBD_{ALLOC,FREE}*()
62  * OBD_FAIL_CHECK
63  */
64 #include <obd_support.h>
65 /* struct ptlrpc_thread */
66 #include <lustre_net.h>
67 #include <lustre_fid.h>
68 /* process_config */
69 #include <uapi/linux/lustre/lustre_param.h>
70
71 #include "osd_internal.h"
72 #include "osd_dynlocks.h"
73
74 /* llo_* api support */
75 #include <md_object.h>
76 #include <lustre_quota.h>
77
78 #include <lustre_linkea.h>
79
80 /* Maximum EA size is limited by LNET_MTU for remote objects */
81 #define OSD_MAX_EA_SIZE 1048364
82
83 int ldiskfs_pdo = 1;
84 module_param(ldiskfs_pdo, int, 0644);
85 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
86
87 int ldiskfs_track_declares_assert;
88 module_param(ldiskfs_track_declares_assert, int, 0644);
89 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
90
91 /* Slab to allocate dynlocks */
92 struct kmem_cache *dynlock_cachep;
93
94 /* Slab to allocate osd_it_ea */
95 struct kmem_cache *osd_itea_cachep;
96
97 static struct lu_kmem_descr ldiskfs_caches[] = {
98         {
99                 .ckd_cache = &dynlock_cachep,
100                 .ckd_name  = "dynlock_cache",
101                 .ckd_size  = sizeof(struct dynlock_handle)
102         },
103         {
104                 .ckd_cache = &osd_itea_cachep,
105                 .ckd_name  = "osd_itea_cache",
106                 .ckd_size  = sizeof(struct osd_it_ea)
107         },
108         {
109                 .ckd_cache = NULL
110         }
111 };
112
113 static const char dot[] = ".";
114 static const char dotdot[] = "..";
115
116 static const struct lu_object_operations      osd_lu_obj_ops;
117 static const struct dt_object_operations      osd_obj_ops;
118 static const struct dt_object_operations      osd_obj_otable_it_ops;
119 static const struct dt_index_operations       osd_index_iam_ops;
120 static const struct dt_index_operations       osd_index_ea_ops;
121
122 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
123                           const struct lu_fid *fid);
124 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
125                                                 struct osd_device *osd);
126
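/*
 * Editorial note: the table below maps each declared operation type to the
 * operation used to roll it back (e.g. OSD_OT_CREATE <-> OSD_OT_DESTROY,
 * OSD_OT_INSERT <-> OSD_OT_DELETE); OSD_OT_MAX marks operations that have
 * no rollback counterpart.
 */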
127 int osd_trans_declare_op2rb[] = {
128         [OSD_OT_ATTR_SET]       = OSD_OT_ATTR_SET,
129         [OSD_OT_PUNCH]          = OSD_OT_MAX,
130         [OSD_OT_XATTR_SET]      = OSD_OT_XATTR_SET,
131         [OSD_OT_CREATE]         = OSD_OT_DESTROY,
132         [OSD_OT_DESTROY]        = OSD_OT_CREATE,
133         [OSD_OT_REF_ADD]        = OSD_OT_REF_DEL,
134         [OSD_OT_REF_DEL]        = OSD_OT_REF_ADD,
135         [OSD_OT_WRITE]          = OSD_OT_WRITE,
136         [OSD_OT_INSERT]         = OSD_OT_DELETE,
137         [OSD_OT_DELETE]         = OSD_OT_INSERT,
138         [OSD_OT_QUOTA]          = OSD_OT_MAX,
139 };
140
141 static int osd_has_index(const struct osd_object *obj)
142 {
143         return obj->oo_dt.do_index_ops != NULL;
144 }
145
146 static int osd_object_invariant(const struct lu_object *l)
147 {
148         return osd_invariant(osd_obj(l));
149 }
150
151 /*
152  * Concurrency: doesn't matter
153  */
154 static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
155 {
156         struct osd_thread_info *oti = osd_oti_get(env);
157
158         return oti->oti_w_locks > 0 && o->oo_owner == env;
159 }
160
161 /*
162  * Concurrency: doesn't access mutable data
163  */
164 static int osd_root_get(const struct lu_env *env,
165                         struct dt_device *dev, struct lu_fid *f)
166 {
167         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
168         return 0;
169 }
170
171 /*
172  * The following set of functions is used to maintain a per-thread
173  * cache of FID->ino mappings. This mechanism is needed to resolve a
174  * FID to an inode at dt_insert(), which in turn stores the ino in the
175  * directory entries to keep ldiskfs compatible with ext[34].
176  * Due to locking-originated restrictions we can't look up the ino
177  * using the LU cache (deadlock is possible), and lookup using OI is
178  * quite expensive. So instead we maintain this cache, and methods like
179  * dt_create() fill it, so that in the majority of cases dt_insert()
180  * can find the needed mapping in a lockless manner.
181  */
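/*
 * Editorial sketch (not part of the upstream code): a consumer such as
 * dt_insert() is expected to resolve FID->ino through this cache roughly
 * as follows, assuming dt_create() filled the entry earlier in the same
 * thread context:
 *
 *	struct osd_idmap_cache *idc;
 *
 *	idc = osd_idc_find_or_init(env, osd, fid);
 *	if (IS_ERR(idc))
 *		return PTR_ERR(idc);
 *	if (idc->oic_remote)
 *		... the object resides on another target, no local ino ...
 *	else
 *		... use idc->oic_lid.oii_ino to fill the directory entry ...
 */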
182 static struct osd_idmap_cache *
183 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
184              const struct lu_fid *fid)
185 {
186         struct osd_thread_info *oti = osd_oti_get(env);
187         struct osd_idmap_cache *idc = oti->oti_ins_cache;
188         int i;
189
190         for (i = 0; i < oti->oti_ins_cache_used; i++) {
191                 if (!lu_fid_eq(&idc[i].oic_fid, fid))
192                         continue;
193                 if (idc[i].oic_dev != osd)
194                         continue;
195
196                 return idc + i;
197         }
198
199         return NULL;
200 }
201
202 static struct osd_idmap_cache *
203 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
204             const struct lu_fid *fid)
205 {
206         struct osd_thread_info *oti   = osd_oti_get(env);
207         struct osd_idmap_cache *idc;
208         int i;
209
210         if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
211                 i = oti->oti_ins_cache_size * 2;
212                 if (i == 0)
213                         i = OSD_INS_CACHE_SIZE;
214                 OBD_ALLOC(idc, sizeof(*idc) * i);
215                 if (idc == NULL)
216                         return ERR_PTR(-ENOMEM);
217                 if (oti->oti_ins_cache != NULL) {
218                         memcpy(idc, oti->oti_ins_cache,
219                                oti->oti_ins_cache_used * sizeof(*idc));
220                         OBD_FREE(oti->oti_ins_cache,
221                                  oti->oti_ins_cache_used * sizeof(*idc));
222                 }
223                 oti->oti_ins_cache = idc;
224                 oti->oti_ins_cache_size = i;
225         }
226
227         idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
228         idc->oic_fid = *fid;
229         idc->oic_dev = osd;
230         idc->oic_lid.oii_ino = 0;
231         idc->oic_lid.oii_gen = 0;
232         idc->oic_remote = 0;
233
234         return idc;
235 }
236
237 /*
238  * Look up the mapping for the given fid in the cache; initialize a
239  * new one if not found. The initialization checks whether the
240  * object is local or remote. For local objects, OI is used to
241  * learn the ino/generation. The function is used when the caller
242  * has no information about the object, e.g. at dt_insert().
243  */
244 static struct osd_idmap_cache *
245 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
246                      const struct lu_fid *fid)
247 {
248         struct osd_idmap_cache *idc;
249         int rc;
250
251         idc = osd_idc_find(env, osd, fid);
252         LASSERT(!IS_ERR(idc));
253         if (idc != NULL)
254                 return idc;
255
256         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
257                osd->od_svname, PFID(fid));
258
259         /* new mapping is needed */
260         idc = osd_idc_add(env, osd, fid);
261         if (IS_ERR(idc)) {
262                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
263                        osd->od_svname, PFID(fid), PTR_ERR(idc));
264                 return idc;
265         }
266
267         /* initialize it */
268         rc = osd_remote_fid(env, osd, fid);
269         if (unlikely(rc < 0))
270                 return ERR_PTR(rc);
271
272         if (rc == 0) {
273                 /* the object is local, lookup in OI */
274                 /* XXX: probably cheaper to lookup in LU first? */
275                 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
276                                    &idc->oic_lid, 0);
277                 if (unlikely(rc < 0)) {
278                         CERROR("can't lookup: rc = %d\n", rc);
279                         return ERR_PTR(rc);
280                 }
281         } else {
282                 /* the object is remote */
283                 idc->oic_remote = 1;
284         }
285
286         return idc;
287 }
288
289 /*
290  * Look up the mapping for the given FID and fill it from the given object.
291  * The object is local by definition.
292  */
293 static int osd_idc_find_and_init(const struct lu_env *env,
294                                  struct osd_device *osd,
295                                  struct osd_object *obj)
296 {
297         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
298         struct osd_idmap_cache *idc;
299
300         idc = osd_idc_find(env, osd, fid);
301         LASSERT(!IS_ERR(idc));
302         if (idc != NULL) {
303                 if (obj->oo_inode == NULL)
304                         return 0;
305                 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
306                         LASSERT(idc->oic_lid.oii_ino == 0);
307                         idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
308                         idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
309                 }
310                 return 0;
311         }
312
313         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
314                osd->od_svname, PFID(fid));
315
316         /* new mapping is needed */
317         idc = osd_idc_add(env, osd, fid);
318         if (IS_ERR(idc)) {
319                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
320                        osd->od_svname, PFID(fid), PTR_ERR(idc));
321                 return PTR_ERR(idc);
322         }
323
324         if (obj->oo_inode != NULL) {
325                 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
326                 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
327         }
328         return 0;
329 }
330
331 /*
332  * OSD object methods.
333  */
334
335 /*
336  * Concurrency: no concurrent access is possible that early in object
337  * life-cycle.
338  */
339 static struct lu_object *osd_object_alloc(const struct lu_env *env,
340                                           const struct lu_object_header *hdr,
341                                           struct lu_device *d)
342 {
343         struct osd_object *mo;
344
345         OBD_ALLOC_PTR(mo);
346         if (mo != NULL) {
347                 struct lu_object *l;
348                 struct lu_object_header *h;
349                 struct osd_device *o = osd_dev(d);
350
351                 l = &mo->oo_dt.do_lu;
352                 if (unlikely(o->od_in_init)) {
353                         OBD_ALLOC_PTR(h);
354                         if (!h) {
355                                 OBD_FREE_PTR(mo);
356                                 return NULL;
357                         }
358
359                         lu_object_header_init(h);
360                         lu_object_init(l, h, d);
361                         lu_object_add_top(h, l);
362                         mo->oo_header = h;
363                 } else {
364                         dt_object_init(&mo->oo_dt, NULL, d);
365                         mo->oo_header = NULL;
366                 }
367
368                 mo->oo_dt.do_ops = &osd_obj_ops;
369                 l->lo_ops = &osd_lu_obj_ops;
370                 init_rwsem(&mo->oo_sem);
371                 init_rwsem(&mo->oo_ext_idx_sem);
372                 spin_lock_init(&mo->oo_guard);
373                 INIT_LIST_HEAD(&mo->oo_xattr_list);
374                 return l;
375         }
376         return NULL;
377 }
378
379 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
380                 struct dentry *dentry, struct lustre_ost_attrs *loa)
381 {
382         int rc;
383
384         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
385                              (void *)loa, sizeof(*loa));
386         if (rc > 0) {
387                 struct lustre_mdt_attrs *lma = &loa->loa_lma;
388
389                 if (rc < sizeof(*lma))
390                         return -EINVAL;
391
392                 rc = 0;
393                 lustre_loa_swab(loa, true);
394                 /* Check LMA compatibility */
395                 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
396                         CWARN("%s: unsupported incompat LMA feature(s) %#x "
397                               "for fid = "DFID", ino = %lu\n",
398                               osd_ino2name(inode),
399                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
400                               PFID(&lma->lma_self_fid), inode->i_ino);
401                         rc = -EOPNOTSUPP;
402                 }
403         } else if (rc == 0) {
404                 rc = -ENODATA;
405         }
406
407         return rc;
408 }
409
410 /*
411  * Retrieve object from backend ext fs.
412  */
413 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
414                        struct osd_inode_id *id)
415 {
416         int rc;
417         struct inode *inode = NULL;
418
419         /*
420          * if we look for an inode within a running
421          * transaction, then we risk a deadlock;
422          * osd_dirent_check_repair() breaks this
423          */
424          /* LASSERT(current->journal_info == NULL); */
425
426         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
427         if (IS_ERR(inode)) {
428                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
429                        id->oii_ino, PTR_ERR(inode));
430         } else if (id->oii_gen != OSD_OII_NOGEN &&
431                    inode->i_generation != id->oii_gen) {
432                 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
433                        "i_generation = %u\n",
434                        id->oii_ino, id->oii_gen, inode->i_generation);
435                 iput(inode);
436                 inode = ERR_PTR(-ESTALE);
437         } else if (inode->i_nlink == 0) {
438                 /*
439                  * due to parallel readdir and unlink,
440                  * we can have a dead inode here.
441                  */
442                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
443                 iput(inode);
444                 inode = ERR_PTR(-ESTALE);
445         } else if (is_bad_inode(inode)) {
446                 CWARN("%s: bad inode: ino = %u\n",
447                       osd_dev2name(dev), id->oii_ino);
448                 iput(inode);
449                 inode = ERR_PTR(-ENOENT);
450         } else if ((rc = osd_attach_jinode(inode))) {
451                 iput(inode);
452                 inode = ERR_PTR(rc);
453         } else {
454                 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
455                 if (id->oii_gen == OSD_OII_NOGEN)
456                         osd_id_gen(id, inode->i_ino, inode->i_generation);
457
458                 /*
459                  * Do not update file c/mtime in ldiskfs.
460                  * NB: we don't have any lock to protect this because we don't
461                  * have reference on osd_object now, but contention with
462                  * another lookup + attr_set can't happen in the tiny window
463                  * between if (...) and set S_NOCMTIME.
464                  */
465                 if (!(inode->i_flags & S_NOCMTIME))
466                         inode->i_flags |= S_NOCMTIME;
467         }
468         return inode;
469 }
470
471 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
472                           handle_t *handle, struct dentry *child,
473                           struct inode *inode, struct htree_lock *hlock)
474 {
475         int rc, rc2;
476
477         rc = __ldiskfs_add_entry(handle, child, inode, hlock);
478         if (rc == -ENOBUFS || rc == -ENOSPC) {
479                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
480                 struct inode *parent = child->d_parent->d_inode;
481                 struct lu_fid *fid = NULL;
482
483                 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
484                 if (!rc2) {
485                         fid = &loa->loa_lma.lma_self_fid;
486                 } else if (rc2 == -ENODATA) {
487                         if (unlikely(parent == inode->i_sb->s_root->d_inode)) {
488                                 fid = &info->oti_fid3;
489                                 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
490                         } else if (!osd->od_is_ost && osd->od_index == 0) {
491                                 fid = &info->oti_fid3;
492                                 lu_igif_build(fid, parent->i_ino,
493                                               parent->i_generation);
494                         }
495                 }
496
497                 if (fid != NULL)
498                         CWARN("%s: directory (inode: %lu, FID: "DFID") %s "
499                               "maximum entry limit\n",
500                               osd_name(osd), parent->i_ino, PFID(fid),
501                               rc == -ENOSPC ? "has reached" : "is approaching");
502                 else
503                         CWARN("%s: directory (inode: %lu, FID: unknown) %s "
504                               "maximum entry limit\n",
505                               osd_name(osd), parent->i_ino,
506                               rc == -ENOSPC ? "has reached" : "is approaching");
507
508                 /* ignore such error now */
509                 if (rc == -ENOBUFS)
510                         rc = 0;
511         }
512
513         return rc;
514 }
515
516
517 struct inode *
518 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
519              struct osd_inode_id *id, struct lu_fid *fid)
520 {
521         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
522         struct inode *inode;
523         int rc;
524
525         inode = osd_iget(info, dev, id);
526         if (IS_ERR(inode))
527                 return inode;
528
529         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
530         if (!rc) {
531                 *fid = loa->loa_lma.lma_self_fid;
532         } else if (rc == -ENODATA) {
533                 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
534                         lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
535                 else
536                         lu_igif_build(fid, inode->i_ino, inode->i_generation);
537         } else {
538                 iput(inode);
539                 inode = ERR_PTR(rc);
540         }
541         return inode;
542 }
543
544 static struct inode *osd_iget_check(struct osd_thread_info *info,
545                                     struct osd_device *dev,
546                                     const struct lu_fid *fid,
547                                     struct osd_inode_id *id,
548                                     bool trusted)
549 {
550         struct inode *inode;
551         int rc = 0;
552
553         ENTRY;
554
555         /*
556          * The cached OI mapping is trustworthy. If we cannot locate the inode
557          * via the cached OI mapping, then return the failure to the caller
558          * directly without further OI checking.
559          */
560
561 again:
562         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
563         if (IS_ERR(inode)) {
564                 rc = PTR_ERR(inode);
565                 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
566                         goto check_oi;
567
568                 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
569                        PFID(fid), id->oii_ino, rc);
570                 GOTO(put, rc);
571         }
572
573         if (is_bad_inode(inode)) {
574                 rc = -ENOENT;
575                 if (!trusted)
576                         goto check_oi;
577
578                 CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
579                        PFID(fid), id->oii_ino);
580                 GOTO(put, rc);
581         }
582
583         if (id->oii_gen != OSD_OII_NOGEN &&
584             inode->i_generation != id->oii_gen) {
585                 rc = -ESTALE;
586                 if (!trusted)
587                         goto check_oi;
588
589                 CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
590                        "oii_gen = %u, i_generation = %u\n", PFID(fid),
591                        id->oii_ino, id->oii_gen, inode->i_generation);
592                 GOTO(put, rc);
593         }
594
595         if (inode->i_nlink == 0) {
596                 rc = -ENOENT;
597                 if (!trusted)
598                         goto check_oi;
599
600                 CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
601                        PFID(fid), id->oii_ino);
602                 GOTO(put, rc);
603         }
604
605         ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
606
607 check_oi:
608         if (rc != 0) {
609                 __u32 saved_ino = id->oii_ino;
610                 __u32 saved_gen = id->oii_gen;
611
612                 LASSERT(!trusted);
613                 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
614
615                 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
616                 /*
617                  * XXX: There are four possible cases:
618                  *      1. rc = 0.
619                  *         Backup/restore made the OI mapping invalid.
620                  *      2. rc = 0.
621                  *         Someone unlinked the object but did NOT remove
622                  *         the OI mapping, e.g. by mounting the target device
623                  *         as ldiskfs and modifying something directly.
624                  *      3. rc = -ENOENT.
625                  *         Someone just removed the object between the
626                  *         former oi_lookup and the iget. It is normal.
627                  *      4. Other failure cases.
628                  *
629                  *      Generally, when the device is mounted, it will
630                  *      automatically check whether the system was restored
631                  *      from a file-level backup or not. We trust such
632                  *      detection to distinguish the 1st case from the 2nd:
633                  *      if the OI files are consistent (but may contain
634                  *      stale OI mappings because of case 2) and iget()
635                  *      returns -ENOENT or -ESTALE, then it should be
636                  *      case 2.
637                  */
638                 if (rc != 0)
639                         /*
640                          * If the OI mapping was in the OI file before
641                          * osd_iget_check() but has now disappeared, it
642                          * must have been removed by a race. That is a
643                          * normal race case.
644                          */
645                         GOTO(put, rc);
646
647                 /*
648                  * The OI scrub updated the OI mapping by race.
649                  * The new OI mapping must be valid.
650                  */
651                 if (saved_ino != id->oii_ino ||
652                     (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
653                         if (!IS_ERR(inode))
654                                 iput(inode);
655
656                         trusted = true;
657                         goto again;
658                 }
659
660                 if (IS_ERR(inode)) {
661                         if (dev->od_scrub.os_scrub.os_file.sf_flags &
662                             SF_INCONSISTENT)
663                                 /*
664                                  * It still can be the case 2, but we cannot
665                                  * distinguish it from the case 1. So return
666                                  * -EREMCHG to block current operation until
667                                  *  OI scrub rebuilt the OI mappings.
668                                  */
669                                 rc = -EREMCHG;
670                         else
671                                 rc = -ENOENT;
672
673                         GOTO(put, rc);
674                 }
675
676                 if (inode->i_generation == id->oii_gen)
677                         rc = -ENOENT;
678                 else
679                         rc = -EREMCHG;
680         } else {
681                 if (id->oii_gen == OSD_OII_NOGEN)
682                         osd_id_gen(id, inode->i_ino, inode->i_generation);
683
684                 /*
685                  * Do not update file c/mtime in ldiskfs.
686                  * NB: we don't have any lock to protect this because we don't
687                  * have reference on osd_object now, but contention with
688                  * another lookup + attr_set can't happen in the tiny window
689                  * between if (...) and set S_NOCMTIME.
690                  */
691                 if (!(inode->i_flags & S_NOCMTIME))
692                         inode->i_flags |= S_NOCMTIME;
693         }
694
695         GOTO(put, rc);
696
697 put:
698         if (rc != 0) {
699                 if (!IS_ERR(inode))
700                         iput(inode);
701
702                 inode = ERR_PTR(rc);
703         }
704
705         return inode;
706 }
707
708 /**
709  * \retval +v: new filter_fid does not contain self-fid
710  * \retval 0:  filter_fid_18_23, contains self-fid
711  * \retval -v: other failure cases
712  */
713 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
714                  struct dentry *dentry, struct lu_fid *fid)
715 {
716         struct filter_fid *ff = &info->oti_ff;
717         struct ost_id *ostid = &info->oti_ostid;
718         int rc;
719
720         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
721         if (rc == sizeof(struct filter_fid_18_23)) {
722                 struct filter_fid_18_23 *ff_old = (void *)ff;
723
724                 ostid_set_seq(ostid, le64_to_cpu(ff_old->ff_seq));
725                 rc = ostid_set_id(ostid, le64_to_cpu(ff_old->ff_objid));
726                 /*
727                  * XXX: use 0 as the index for compatibility, the caller will
728                  * handle index related issues when necessary.
729                  */
730                 if (!rc)
731                         ostid_to_fid(fid, ostid, 0);
732         } else if (rc >= (int)sizeof(struct filter_fid_24_29)) {
733                 rc = 1;
734         } else if (rc >= 0) {
735                 rc = -EINVAL;
736         }
737
738         return rc;
739 }
740
741 static int osd_lma_self_repair(struct osd_thread_info *info,
742                                struct osd_device *osd, struct inode *inode,
743                                const struct lu_fid *fid, __u32 compat)
744 {
745         handle_t *jh;
746         int rc;
747
748         LASSERT(current->journal_info == NULL);
749
750         jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
751                                   osd_dto_credits_noquota[DTO_XATTR_SET]);
752         if (IS_ERR(jh)) {
753                 rc = PTR_ERR(jh);
754                 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
755                       osd_name(osd), rc);
756                 return rc;
757         }
758
759         rc = osd_ea_fid_set(info, inode, fid, compat, 0);
760         if (rc != 0)
761                 CWARN("%s: cannot self repair the LMA: rc = %d\n",
762                       osd_name(osd), rc);
763         ldiskfs_journal_stop(jh);
764         return rc;
765 }
766
767 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
768 {
769         struct osd_thread_info *info = osd_oti_get(env);
770         struct osd_device *osd = osd_obj2dev(obj);
771         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
772         struct lustre_mdt_attrs *lma = &loa->loa_lma;
773         struct inode *inode = obj->oo_inode;
774         struct dentry *dentry = &info->oti_obj_dentry;
775         struct lu_fid *fid = NULL;
776         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
777         int rc;
778
779         ENTRY;
780
781         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
782                              (void *)loa, sizeof(*loa));
783         if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
784                 fid = &lma->lma_self_fid;
785                 rc = osd_get_idif(info, inode, dentry, fid);
786                 if (rc > 0 || (rc == -ENODATA && osd->od_index_in_idif)) {
787                         /*
788                          * For the given OST-object, if it has neither LMA nor
789                          * FID in XATTR_NAME_FID, then the given FID (which is
790                          * contained in the @obj, from client RPC for locating
791                          * the OST-object) is trusted. We use it to generate
792                          * the LMA.
793                          */
794                         osd_lma_self_repair(info, osd, inode, rfid,
795                                             LMAC_FID_ON_OST);
796                         RETURN(0);
797                 }
798         }
799
800         if (rc < 0)
801                 RETURN(rc);
802
803         if (rc > 0) {
804                 rc = 0;
805                 lustre_lma_swab(lma);
806                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
807                              (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
808                               S_ISREG(inode->i_mode)))) {
809                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
810                               "fid = "DFID", ino = %lu\n", osd_name(osd),
811                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
812                               PFID(rfid), inode->i_ino);
813                         rc = -EOPNOTSUPP;
814                 } else {
815                         fid = &lma->lma_self_fid;
816                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
817                             osd->od_is_ost)
818                                 obj->oo_pfid_in_lma = 1;
819                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
820                             !osd->od_is_ost)
821                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
822                 }
823         }
824
825         if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
826                 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
827                         struct ost_id   *oi   = &info->oti_ostid;
828                         struct lu_fid   *fid1 = &info->oti_fid3;
829                         __u32            idx  = fid_idif_ost_idx(rfid);
830
831                         /*
832                          * For old IDIF, the OST index is not part of the
833                          * IDIF, meaning that different OSTs may have the
834                          * same IDIFs. In such a case, we need an extra
835                          * compatibility check to trigger OI scrub properly.
836                          */
837                         if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
838                                 /* Given @rfid is new, LMA is old. */
839                                 fid_to_ostid(fid, oi);
840                                 ostid_to_fid(fid1, oi, idx);
841                                 if (lu_fid_eq(fid1, rfid)) {
842                                         if (osd->od_index_in_idif)
843                                                 osd_lma_self_repair(info, osd,
844                                                         inode, rfid,
845                                                         LMAC_FID_ON_OST);
846                                         RETURN(0);
847                                 }
848                         }
849                 }
850
851                 rc = -EREMCHG;
852         }
853
854         RETURN(rc);
855 }
856
857 struct osd_check_lmv_buf {
858 #ifdef HAVE_DIR_CONTEXT
859         /* please keep it as first member */
860         struct dir_context ctx;
861 #endif
862         struct osd_thread_info *oclb_info;
863         struct osd_device *oclb_dev;
864         struct osd_idmap_cache *oclb_oic;
865         int oclb_items;
866         bool oclb_found;
867 };
868
869 /**
870  * It is called internally by ->readdir() to find the
871  * local slave object's FID of the striped directory.
872  *
873  * \retval      1 found the local slave's FID
874  * \retval      0 continue to check next item
875  * \retval      -ve for failure
876  */
877 #ifdef HAVE_FILLDIR_USE_CTX
878 static int osd_stripe_dir_filldir(struct dir_context *buf,
879 #else
880 static int osd_stripe_dir_filldir(void *buf,
881 #endif
882                                   const char *name, int namelen,
883                                   loff_t offset, __u64 ino, unsigned int d_type)
884 {
885         struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
886         struct osd_thread_info *oti = oclb->oclb_info;
887         struct lu_fid *fid = &oti->oti_fid3;
888         struct osd_inode_id *id = &oti->oti_id3;
889         struct osd_device *dev = oclb->oclb_dev;
890         struct osd_idmap_cache *oic = oclb->oclb_oic;
891         struct inode *inode;
892
893         oclb->oclb_items++;
894
895         if (name[0] == '.')
896                 return 0;
897
898         fid_zero(fid);
899         sscanf(name + 1, SFID, RFID(fid));
900         if (!fid_is_sane(fid))
901                 return 0;
902
903         if (osd_remote_fid(oti->oti_env, dev, fid))
904                 return 0;
905
906         osd_id_gen(id, ino, OSD_OII_NOGEN);
907         inode = osd_iget(oti, dev, id);
908         if (IS_ERR(inode))
909                 return PTR_ERR(inode);
910
911         iput(inode);
912         osd_add_oi_cache(oti, dev, id, fid);
913         oic->oic_fid = *fid;
914         oic->oic_lid = *id;
915         oic->oic_dev = dev;
916         osd_oii_insert(dev, oic, true);
917         oclb->oclb_found = true;
918
919         return 1;
920 }
921
922 /*
923  * When looking up an item under a striped directory, we need to locate the
924  * master MDT-object of the striped directory first, then the client will
925  * send a lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's
926  * FID and the item's name. If the system is restored from an MDT file-level
927  * backup, then before the OI scrub has completely rebuilt the OI files, the
928  * OI mappings of the master MDT-object and slave MDT-object may be invalid.
929  * Usually, that is not a problem for the master MDT-object, because when
930  * locating the master MDT-object we do a name-based lookup (for the striped
931  * directory itself) first, and during that process we can set up the correct
932  * OI mapping for the master MDT-object. But it is trouble for the slave
933  * MDT-object, because the client does not trigger a name-based lookup on the
934  * MDT to locate the slave MDT-object before looking up items under the
935  * striped directory. Then osd_fid_lookup() finds that the OI mapping for the
936  * slave MDT-object is invalid and does not know what the right OI mapping is,
937  * so the MDT has to return -EINPROGRESS to notify the client that the OI
938  * scrub is rebuilding the OI file and the related OI mapping is unknown yet:
939  * please try again later. The client then retries the RPC again and again
940  * until the related OI mapping has been updated. That is quite inefficient.
941  *
942  * To resolve the above trouble, we handle it as the following two cases:
943  *
944  * 1) The slave MDT-object and the master MDT-object are on different MDTs.
945  *    That is relatively easy. Being a remote MDT-object, the slave MDT-object
946  *    is linked under /REMOTE_PARENT_DIR with its FID string as the name.
947  *    We can locate the slave MDT-object via a lookup in /REMOTE_PARENT_DIR
948  *    directly. Please check osd_fid_lookup().
949  *
950  * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
951  *    In that case, while looking up the master MDT-object, we also look up
952  *    the slave MDT-object via readdir against the master MDT-object, because
953  *    the slave MDT-objects' information is stored as sub-directories with the
954  *    name "${FID}:${index}". When the local slave MDT-object is found, its OI
955  *    mapping is recorded, so subsequent osd_fid_lookup() calls will know
956  *    the correct OI mapping for the slave MDT-object.
957  */
958 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
959                          struct inode *inode, struct osd_idmap_cache *oic)
960 {
961         struct lu_buf *buf = &oti->oti_big_buf;
962         struct dentry *dentry = &oti->oti_obj_dentry;
963         struct file *filp = &oti->oti_file;
964         const struct file_operations *fops;
965         struct lmv_mds_md_v1 *lmv1;
966         struct osd_check_lmv_buf oclb = {
967 #ifdef HAVE_DIR_CONTEXT
968                 .ctx.actor = osd_stripe_dir_filldir,
969 #endif
970                 .oclb_info = oti,
971                 .oclb_dev = dev,
972                 .oclb_oic = oic,
973                 .oclb_found = false,
974         };
975         int rc = 0;
976
977         ENTRY;
978
979 again:
980         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
981                              buf->lb_len);
982         if (rc == -ERANGE) {
983                 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
984                 if (rc > 0) {
985                         lu_buf_realloc(buf, rc);
986                         if (buf->lb_buf == NULL)
987                                 GOTO(out, rc = -ENOMEM);
988
989                         goto again;
990                 }
991         }
992
993         if (unlikely(rc == 0 || rc == -ENODATA))
994                 GOTO(out, rc = 0);
995
996         if (rc < 0)
997                 GOTO(out, rc);
998
999         if (unlikely(buf->lb_buf == NULL)) {
1000                 lu_buf_realloc(buf, rc);
1001                 if (buf->lb_buf == NULL)
1002                         GOTO(out, rc = -ENOMEM);
1003
1004                 goto again;
1005         }
1006
1007         lmv1 = buf->lb_buf;
1008         if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
1009                 GOTO(out, rc = 0);
1010
1011         fops = inode->i_fop;
1012         dentry->d_inode = inode;
1013         dentry->d_sb = inode->i_sb;
1014         filp->f_pos = 0;
1015         filp->f_path.dentry = dentry;
1016         filp->f_mode = FMODE_64BITHASH;
1017         filp->f_mapping = inode->i_mapping;
1018         filp->f_op = fops;
1019         filp->private_data = NULL;
1020         set_file_inode(filp, inode);
1021
1022         do {
1023                 oclb.oclb_items = 0;
1024 #ifdef HAVE_DIR_CONTEXT
1025                 oclb.ctx.pos = filp->f_pos;
1026 #ifdef HAVE_ITERATE_SHARED
1027                 rc = fops->iterate_shared(filp, &oclb.ctx);
1028 #else
1029                 rc = fops->iterate(filp, &oclb.ctx);
1030 #endif
1031                 filp->f_pos = oclb.ctx.pos;
1032 #else
1033                 rc = fops->readdir(filp, &oclb, osd_stripe_dir_filldir);
1034 #endif
1035         } while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
1036                  filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
1037         fops->release(inode, filp);
1038
1039 out:
1040         if (rc < 0)
1041                 CDEBUG(D_LFSCK, "%s: fail to check LMV EA, inode = %lu/%u,"
1042                        DFID": rc = %d\n", osd_ino2name(inode),
1043                        inode->i_ino, inode->i_generation,
1044                        PFID(&oic->oic_fid), rc);
1045         else
1046                 rc = 0;
1047
1048         RETURN(rc);
1049 }
1050
1051 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
1052                           const struct lu_fid *fid,
1053                           const struct lu_object_conf *conf)
1054 {
1055         struct osd_thread_info *info;
1056         struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1057         struct osd_device *dev;
1058         struct osd_idmap_cache *oic;
1059         struct osd_inode_id *id;
1060         struct inode *inode = NULL;
1061         struct lustre_scrub *scrub;
1062         struct scrub_file *sf;
1063         __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
1064         __u32 saved_ino;
1065         __u32 saved_gen;
1066         int result = 0;
1067         int rc1 = 0;
1068         bool remote = false;
1069         bool trusted = true;
1070         bool updated = false;
1071         bool checked = false;
1072
1073         ENTRY;
1074
1075         LINVRNT(osd_invariant(obj));
1076         LASSERT(obj->oo_inode == NULL);
1077         LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
1078
1079         dev = osd_dev(ldev);
1080         scrub = &dev->od_scrub.os_scrub;
1081         sf = &scrub->os_file;
1082         info = osd_oti_get(env);
1083         LASSERT(info);
1084         oic = &info->oti_cache;
1085
1086         if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
1087                 RETURN(-ENOENT);
1088
1089         /*
1090          * For an object created as a locking anchor, or for an object to
1091          * be created on disk, there is no need to call osd_oi_lookup() here
1092          * because a FID should never be re-used. If it really is a duplicate
1093          * FID for some unexpected reason, we should be able to detect it
1094          * later by calling do_create->osd_oi_insert().
1095          */
1096         if (conf && conf->loc_flags & LOC_F_NEW)
1097                 GOTO(out, result = 0);
1098
1099         /* Search order: 1. per-thread cache. */
1100         if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
1101                 id = &oic->oic_lid;
1102                 goto iget;
1103         }
1104
1105         id = &info->oti_id;
1106         if (!list_empty(&scrub->os_inconsistent_items)) {
1107                 /* Search order: 2. OI scrub pending list. */
1108                 result = osd_oii_lookup(dev, fid, id);
1109                 if (!result)
1110                         goto iget;
1111         }
1112
1113         /*
1114          * The OI mapping in the OI file can be updated by the OI scrub
1115          * when we locate the inode via FID, so it may not be trustworthy.
1116          */
1117         trusted = false;
1118
1119         /* Search order: 3. OI files. */
1120         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1121         if (result == -ENOENT) {
1122                 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1123                     fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1124                     !ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
1125                                       sf->sf_oi_bitmap))
1126                         GOTO(out, result = 0);
1127
1128                 goto trigger;
1129         }
1130
1131         /* -ESTALE is returned if inode of OST object doesn't exist */
1132         if (result == -ESTALE &&
1133             fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1134                 GOTO(out, result = 0);
1135         }
1136
1137         if (result)
1138                 GOTO(out, result);
1139
1140 iget:
1141         obj->oo_inode = NULL;
1142         /* for later passes through checks, not true on first pass */
1143         if (!IS_ERR_OR_NULL(inode))
1144                 iput(inode);
1145
1146         inode = osd_iget_check(info, dev, fid, id, trusted);
1147         if (!IS_ERR(inode)) {
1148                 obj->oo_inode = inode;
1149                 result = 0;
1150                 if (remote)
1151                         goto trigger;
1152
1153                 goto check_lma;
1154         }
1155
1156         result = PTR_ERR(inode);
1157         if (result == -ENOENT || result == -ESTALE)
1158                 GOTO(out, result = 0);
1159
1160         if (result != -EREMCHG)
1161                 GOTO(out, result);
1162
1163 trigger:
1164         /*
1165          * We still have a chance to get a valid inode: for an
1166          * object which is referenced by a remote name entry, the
1167          * object on the local MDT will be linked under the dir
1168          * "/REMOTE_PARENT_DIR" with its FID string as the name.
1169          *
1170          * We do not know whether the object for the given FID
1171          * is referenced by some remote name entry or not, and
1172          * especially for DNE II, a multiply-linked object may
1173          * have many name entries residing on many MDTs.
1174          *
1175          * To simplify the operation, OSD will not distinguish
1176          * further, just lookup "/REMOTE_PARENT_DIR". Usually, this
1177          * only happens for RPCs from other MDTs during the
1178          * OI scrub, or for client-side RPCs with FID only,
1179          * such as FID to path, or from an old connected client.
1180          */
1181         if (!remote) {
1182                 rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
1183                 if (!rc1) {
1184                         remote = true;
1185                         trusted = true;
1186                         flags |= SS_AUTO_PARTIAL;
1187                         flags &= ~SS_AUTO_FULL;
1188                         goto iget;
1189                 }
1190         }
1191
1192         if (thread_is_running(&scrub->os_thread)) {
1193                 if (scrub->os_partial_scan && !scrub->os_in_join)
1194                         goto join;
1195
1196                 if (IS_ERR_OR_NULL(inode) || result)
1197                         GOTO(out, result = -EINPROGRESS);
1198
1199                 LASSERT(remote);
1200                 LASSERT(obj->oo_inode == inode);
1201
1202                 osd_add_oi_cache(info, dev, id, fid);
1203                 osd_oii_insert(dev, oic, true);
1204                 goto found;
1205         }
1206
1207         if (dev->od_auto_scrub_interval == AS_NEVER) {
1208                 if (!remote)
1209                         GOTO(out, result = -EREMCHG);
1210
1211                 LASSERT(!result);
1212                 LASSERT(obj->oo_inode == inode);
1213
1214                 osd_add_oi_cache(info, dev, id, fid);
1215                 goto found;
1216         }
1217
1218 join:
1219         rc1 = osd_scrub_start(env, dev, flags);
1220         LCONSOLE_WARN("%s: trigger OI scrub by RPC for the " DFID" with flags "
1221                       "0x%x, rc = %d\n", osd_name(dev), PFID(fid), flags, rc1);
1222         if (rc1 && rc1 != -EALREADY)
1223                 GOTO(out, result = -EREMCHG);
1224
1225         if (IS_ERR_OR_NULL(inode) || result)
1226                 GOTO(out, result = -EINPROGRESS);
1227
1228         LASSERT(remote);
1229         LASSERT(obj->oo_inode == inode);
1230
1231         osd_add_oi_cache(info, dev, id, fid);
1232         osd_oii_insert(dev, oic, true);
1233         goto found;
1234
1235 check_lma:
1236         checked = true;
1237         if (unlikely(obj->oo_header))
1238                 goto found;
1239
1240         result = osd_check_lma(env, obj);
1241         if (!result)
1242                 goto found;
1243
1244         LASSERTF(id->oii_ino == inode->i_ino &&
1245                  id->oii_gen == inode->i_generation,
1246                  "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1247                  PFID(fid), id->oii_ino, id->oii_gen,
1248                  inode->i_ino, inode->i_generation);
1249
1250         saved_ino = inode->i_ino;
1251         saved_gen = inode->i_generation;
1252
1253         if (unlikely(result == -ENODATA)) {
1254                 /*
1255                  * If the OI scrub updated the OI mapping by race, it
1256                  * must be valid. Trust the inode that has no LMA EA.
1257                  */
1258                 if (updated)
1259                         goto found;
1260
1261                 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1262                 if (!result) {
1263                         /*
1264                          * The OI mapping is still there, the inode is still
1265                          * valid. It is just because the inode has no LMA EA.
1266                          */
1267                         if (saved_ino == id->oii_ino &&
1268                             saved_gen == id->oii_gen)
1269                                 goto found;
1270
1271                         /*
1272                          * The OI scrub updated the OI mapping by race.
1273                          * The new OI mapping must be valid.
1274                          */
1275                         trusted = true;
1276                         updated = true;
1277                         goto iget;
1278                 }
1279
1280                 /*
1281                  * "result == -ENOENT" means that the OI mapping has been
1282                  * removed by race, so the inode belongs to another object.
1283                  *
1284                  * Other errors can be returned directly.
1285                  */
1286                 if (result == -ENOENT) {
1287                         LASSERT(trusted);
1288
1289                         obj->oo_inode = NULL;
1290                         result = 0;
1291                 }
1292         }
1293
1294         if (result != -EREMCHG)
1295                 GOTO(out, result);
1296
1297         LASSERT(!updated);
1298
1299         /*
1300          * If two OST objects map to the same inode, and the inode mode is
1301          * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), which means it is
1302          * reserved by precreate and not written yet, then don't set the
1303          * inode for the object whose FID mismatches, so that it can create
1304          * an inode and not block precreate.
1305          */
1306         if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
1307             inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
1308                 obj->oo_inode = NULL;
1309                 GOTO(out, result = 0);
1310         }
1311
1312         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1313         /*
1314          * "result == -ENOENT" means the cached OI mapping has been removed
1315          * from the OI file by race; the above inode belongs to another object.
1316          */
1317         if (result == -ENOENT) {
1318                 LASSERT(trusted);
1319
1320                 obj->oo_inode = NULL;
1321                 GOTO(out, result = 0);
1322         }
1323
1324         if (result)
1325                 GOTO(out, result);
1326
1327         if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
1328                 result = -EREMCHG;
1329                 goto trigger;
1330         }
1331
1332         /*
1333          * The OI scrub updated the OI mapping by race.
1334          * The new OI mapping must be valid.
1335          */
1336         trusted = true;
1337         updated = true;
1338         goto iget;
1339
1340 found:
1341         if (!checked) {
1342                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
1343                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
1344
1345                 result = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
1346                 if (!result) {
1347                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
1348                             dev->od_is_ost)
1349                                 obj->oo_pfid_in_lma = 1;
1350                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
1351                             !dev->od_is_ost)
1352                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
1353                 } else if (result != -ENODATA) {
1354                         GOTO(out, result);
1355                 }
1356         }
1357
1358         obj->oo_compat_dot_created = 1;
1359         obj->oo_compat_dotdot_created = 1;
1360
1361         if (S_ISDIR(inode->i_mode) &&
1362             (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1363                 osd_check_lmv(info, dev, inode, oic);
1364
1365         result = osd_attach_jinode(inode);
1366         if (result)
1367                 GOTO(out, result);
1368
1369         if (!ldiskfs_pdo)
1370                 GOTO(out, result = 0);
1371
1372         LASSERT(!obj->oo_hl_head);
1373         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1374
1375         GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
1376
1377 out:
1378         if (result || !obj->oo_inode) {
1379                 if (!IS_ERR_OR_NULL(inode))
1380                         iput(inode);
1381
1382                 obj->oo_inode = NULL;
1383                 if (trusted)
1384                         fid_zero(&oic->oic_fid);
1385         }
1386
1387         LINVRNT(osd_invariant(obj));
1388         return result;
1389 }
1390
1391 /*
1392  * Concurrency: shouldn't matter.
1393  */
1394 static void osd_object_init0(struct osd_object *obj)
1395 {
1396         LASSERT(obj->oo_inode != NULL);
1397         obj->oo_dt.do_body_ops = &osd_body_ops;
1398         obj->oo_dt.do_lu.lo_header->loh_attr |=
1399                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1400 }
1401
1402 /*
1403  * Concurrency: no concurrent access is possible that early in object
1404  * life-cycle.
1405  */
1406 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1407                            const struct lu_object_conf *conf)
1408 {
1409         struct osd_object *obj = osd_obj(l);
1410         int result;
1411
1412         LINVRNT(osd_invariant(obj));
1413
1414         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1415                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1416                 l->lo_header->loh_attr |= LOHA_EXISTS;
1417                 return 0;
1418         }
1419
1420         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1421         obj->oo_dt.do_body_ops = &osd_body_ops_new;
1422         if (result == 0 && obj->oo_inode != NULL) {
1423                 struct osd_thread_info *oti = osd_oti_get(env);
1424                 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1425
1426                 osd_object_init0(obj);
1427                 if (unlikely(obj->oo_header))
1428                         return 0;
1429
1430                 result = osd_get_lma(oti, obj->oo_inode,
1431                                      &oti->oti_obj_dentry, loa);
1432                 if (!result) {
1433                         /*
1434                          * Convert LMAI flags to lustre LMA flags
1435                          * and cache it to oo_lma_flags
1436                          */
1437                         obj->oo_lma_flags =
1438                                 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1439                 } else if (result == -ENODATA) {
1440                         result = 0;
1441                 }
1442         }
1443
1444         LINVRNT(osd_invariant(obj));
1445         return result;
1446 }
1447
1448 /*
1449  * The first part of oxe_buf is the xattr name and is '\0' terminated.
1450  * The remaining part holds the value in binary form.
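 * Layout: [name bytes]['\0'][value bytes]; oxe_len is the total allocation
 * size including the structure header.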
1451  */
1452 struct osd_xattr_entry {
1453         struct list_head        oxe_list;
1454         size_t                  oxe_len;
1455         size_t                  oxe_namelen;
1456         bool                    oxe_exist;
1457         struct rcu_head         oxe_rcu;
1458         char                    oxe_buf[0];
1459 };
1460
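/*
 * Look up a cached xattr by name under RCU protection.
 *
 * Returns the value length (copying the value into @buf when a buffer is
 * provided), -ENOENT if the xattr is not cached, -ENODATA if it is cached
 * as non-existent, or -ERANGE if the supplied buffer is too small.
 */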
1461 static int osd_oxc_get(struct osd_object *obj, const char *name,
1462                        struct lu_buf *buf)
1463 {
1464         struct osd_xattr_entry *tmp;
1465         struct osd_xattr_entry *oxe = NULL;
1466         size_t namelen = strlen(name);
1467         int rc;
1468
1469         ENTRY;
1470
1471         rcu_read_lock();
1472         list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1473                 if (namelen == tmp->oxe_namelen &&
1474                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1475                         oxe = tmp;
1476                         break;
1477                 }
1478         }
1479
1480         if (oxe == NULL)
1481                 GOTO(out, rc = -ENOENT);
1482
1483         if (!oxe->oxe_exist)
1484                 GOTO(out, rc = -ENODATA);
1485
1486         /* value length */
1487         rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1488         LASSERT(rc > 0);
1489
1490         if (buf->lb_buf == NULL)
1491                 GOTO(out, rc);
1492
1493         if (buf->lb_len < rc)
1494                 GOTO(out, rc = -ERANGE);
1495
1496         memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
1497         EXIT;
1498 out:
1499         rcu_read_unlock();
1500
1501         return rc;
1502 }
1503
1504 static void osd_oxc_free(struct rcu_head *head)
1505 {
1506         struct osd_xattr_entry *oxe;
1507
1508         oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1509         OBD_FREE(oxe, oxe->oxe_len);
1510 }
1511
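/*
 * Add an xattr entry to the cache, replacing any existing entry with the
 * same name. A zero @buflen caches the fact that the xattr does not exist.
 * The replaced entry, if any, is freed after an RCU grace period.
 */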
1512 static void osd_oxc_add(struct osd_object *obj, const char *name,
1513                         const char *buf, int buflen)
1514 {
1515         struct osd_xattr_entry *oxe;
1516         struct osd_xattr_entry *old = NULL;
1517         struct osd_xattr_entry *tmp;
1518         size_t namelen = strlen(name);
1519         size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1520
1521         OBD_ALLOC(oxe, len);
1522         if (oxe == NULL)
1523                 return;
1524
1525         INIT_LIST_HEAD(&oxe->oxe_list);
1526         oxe->oxe_len = len;
1527         oxe->oxe_namelen = namelen;
1528         memcpy(oxe->oxe_buf, name, namelen);
1529         if (buflen > 0) {
1530                 LASSERT(buf != NULL);
1531                 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1532                 oxe->oxe_exist = true;
1533         } else {
1534                 oxe->oxe_exist = false;
1535         }
1536
1537         /* this should rarely be called; just remove the old and add the new */
1538         spin_lock(&obj->oo_guard);
1539         list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1540                 if (namelen == tmp->oxe_namelen &&
1541                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1542                         old = tmp;
1543                         break;
1544                 }
1545         }
1546         if (old != NULL) {
1547                 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1548                 call_rcu(&old->oxe_rcu, osd_oxc_free);
1549         } else {
1550                 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1551         }
1552         spin_unlock(&obj->oo_guard);
1553 }
1554
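/*
 * Remove the cached xattr entry with the given name; the entry is freed
 * after an RCU grace period.
 */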
1555 static void osd_oxc_del(struct osd_object *obj, const char *name)
1556 {
1557         struct osd_xattr_entry *oxe;
1558         size_t namelen = strlen(name);
1559
1560         spin_lock(&obj->oo_guard);
1561         list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1562                 if (namelen == oxe->oxe_namelen &&
1563                     strncmp(name, oxe->oxe_buf, namelen) == 0) {
1564                         list_del_rcu(&oxe->oxe_list);
1565                         call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1566                         break;
1567                 }
1568         }
1569         spin_unlock(&obj->oo_guard);
1570 }
1571
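/*
 * Release all cached xattr entries. This runs while the object is being
 * freed, so the entries can be released directly without waiting for RCU.
 */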
1572 static void osd_oxc_fini(struct osd_object *obj)
1573 {
1574         struct osd_xattr_entry *oxe, *next;
1575
1576         list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1577                 list_del(&oxe->oxe_list);
1578                 OBD_FREE(oxe, oxe->oxe_len);
1579         }
1580 }
1581
1582 /*
1583  * Concurrency: no concurrent access is possible that late in object
1584  * life-cycle.
1585  */
1586 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1587 {
1588         struct osd_object *obj = osd_obj(l);
1589         struct lu_object_header *h = obj->oo_header;
1590
1591         LINVRNT(osd_invariant(obj));
1592
1593         osd_oxc_fini(obj);
1594         dt_object_fini(&obj->oo_dt);
1595         if (obj->oo_hl_head != NULL)
1596                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1597         OBD_FREE_PTR(obj);
1598         if (unlikely(h)) {
1599                 lu_object_header_fini(h);
1600                 OBD_FREE_PTR(h);
1601         }
1602 }
1603
1604 /*
1605  * Concurrency: no concurrent access is possible that late in object
1606  * life-cycle.
1607  */
1608 static void osd_index_fini(struct osd_object *o)
1609 {
1610         struct iam_container *bag;
1611
1612         if (o->oo_dir != NULL) {
1613                 bag = &o->oo_dir->od_container;
1614                 if (o->oo_inode != NULL) {
1615                         if (bag->ic_object == o->oo_inode)
1616                                 iam_container_fini(bag);
1617                 }
1618                 OBD_FREE_PTR(o->oo_dir);
1619                 o->oo_dir = NULL;
1620         }
1621 }
1622
1623 /*
1624  * Concurrency: no concurrent access is possible that late in object
1625  * life-cycle (for all existing callers, that is. New callers have to provide
1626  * their own locking.)
1627  */
1628 static int osd_inode_unlinked(const struct inode *inode)
1629 {
1630         return inode->i_nlink == 0;
1631 }
1632
1633 enum {
1634         OSD_TXN_OI_DELETE_CREDITS    = 20,
1635         OSD_TXN_INODE_DELETE_CREDITS = 20
1636 };
1637
1638 /*
1639  * Journal
1640  */
1641
1642 #if OSD_THANDLE_STATS
1643 /**
1644  * Set time when the handle is allocated
1645  */
1646 static void osd_th_alloced(struct osd_thandle *oth)
1647 {
1648         oth->oth_alloced = ktime_get();
1649 }
1650
1651 /**
1652  * Set time when the handle started
1653  */
1654 static void osd_th_started(struct osd_thandle *oth)
1655 {
1656         oth->oth_started = ktime_get();
1657 }
1658
1659 /**
1660  * Check whether we have been dealing with this handle for too long.
1661  */
1662 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1663                                 ktime_t alloced, ktime_t started,
1664                                 ktime_t closed)
1665 {
1666         ktime_t now = ktime_get();
1667
1668         LASSERT(dev != NULL);
1669
1670         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1671                             ktime_us_delta(started, alloced));
1672         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1673                             ktime_us_delta(closed, started));
1674         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1675                             ktime_us_delta(now, closed));
1676
1677         if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1678                 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1679                                 oth, now, alloced, started, closed);
1680                 libcfs_debug_dumpstack(NULL);
1681         }
1682 }
1683
1684 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
1685 {                                                                       \
1686         ktime_t __closed = ktime_get();                                 \
1687         ktime_t __alloced = oth->oth_alloced;                           \
1688         ktime_t __started = oth->oth_started;                           \
1689                                                                         \
1690         expr;                                                           \
1691         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
1692 }
1693
1694 #else /* OSD_THANDLE_STATS */
1695
1696 #define osd_th_alloced(h)                  do {} while(0)
1697 #define osd_th_started(h)                  do {} while(0)
1698 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
1699
1700 #endif /* OSD_THANDLE_STATS */
1701
1702 /*
1703  * Concurrency: doesn't access mutable data.
1704  */
1705 static int osd_param_is_not_sane(const struct osd_device *dev,
1706                                  const struct thandle *th)
1707 {
1708         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1709
1710         return oh->ot_credits > osd_transaction_size(dev);
1711 }
1712
1713 /*
1714  * Concurrency: shouldn't matter.
1715  */
1716 static void osd_trans_commit_cb(struct super_block *sb,
1717                                 struct ldiskfs_journal_cb_entry *jcb, int error)
1718 {
1719         struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
1720         struct thandle *th = &oh->ot_super;
1721         struct lu_device *lud = &th->th_dev->dd_lu_dev;
1722         struct dt_txn_commit_cb *dcb, *tmp;
1723
1724         LASSERT(oh->ot_handle == NULL);
1725
1726         if (error)
1727                 CERROR("transaction @0x%p commit error: %d\n", th, error);
1728
1729         OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DELAY_TRANS, 40);
1730         /* call per-transaction callbacks if any */
1731         list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1732                                  dcb_linkage) {
1733                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1734                          "commit callback entry: magic=%x name='%s'\n",
1735                          dcb->dcb_magic, dcb->dcb_name);
1736                 list_del_init(&dcb->dcb_linkage);
1737                 dcb->dcb_func(NULL, th, dcb, error);
1738         }
1739
1740         lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1741         lu_device_put(lud);
1742         th->th_dev = NULL;
1743
1744         OBD_FREE_PTR(oh);
1745 }
1746
1747 #ifndef HAVE_SB_START_WRITE
1748 # define sb_start_write(sb) do {} while (0)
1749 # define sb_end_write(sb) do {} while (0)
1750 #endif
1751
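/*
 * Allocate and initialize an osd_thandle. Note that the journal handle is
 * not started here but in osd_trans_start(), once all credits have been
 * declared.
 */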
1752 static struct thandle *osd_trans_create(const struct lu_env *env,
1753                                         struct dt_device *d)
1754 {
1755         struct osd_thread_info *oti = osd_oti_get(env);
1756         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1757         struct osd_thandle *oh;
1758         struct thandle *th;
1759
1760         ENTRY;
1761
1762         if (d->dd_rdonly) {
1763                 CERROR("%s: someone tries to start a transaction under "
1764                        "read-only mode, this should be disabled.\n",
1765                        osd_name(osd_dt_dev(d)));
1766                 dump_stack();
1767                 RETURN(ERR_PTR(-EROFS));
1768         }
1769
1770         /* no pending IO from a previous request should be left in this thread */
1771         LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1772
1773         sb_start_write(osd_sb(osd_dt_dev(d)));
1774
1775         OBD_ALLOC_GFP(oh, sizeof(*oh), GFP_NOFS);
1776         if (!oh) {
1777                 sb_end_write(osd_sb(osd_dt_dev(d)));
1778                 RETURN(ERR_PTR(-ENOMEM));
1779         }
1780
1781         oh->ot_quota_trans = &oti->oti_quota_trans;
1782         memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1783         th = &oh->ot_super;
1784         th->th_dev = d;
1785         th->th_result = 0;
1786         oh->ot_credits = 0;
1787         INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1788         INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1789         INIT_LIST_HEAD(&oh->ot_trunc_locks);
1790         osd_th_alloced(oh);
1791
1792         memset(oti->oti_declare_ops, 0,
1793                sizeof(oti->oti_declare_ops));
1794         memset(oti->oti_declare_ops_cred, 0,
1795                sizeof(oti->oti_declare_ops_cred));
1796         memset(oti->oti_declare_ops_used, 0,
1797                sizeof(oti->oti_declare_ops_used));
1798
1799         oti->oti_ins_cache_depth++;
1800
1801         RETURN(th);
1802 }
1803
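/*
 * Dump the per-operation declaration counters of the current transaction:
 * each triple is declared operations / reserved credits / used credits.
 */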
1804 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1805 {
1806         struct osd_thread_info *oti = osd_oti_get(env);
1807         struct osd_thandle *oh;
1808
1809         oh = container_of0(th, struct osd_thandle, ot_super);
1810         LASSERT(oh != NULL);
1811
1812         CWARN("  create: %u/%u/%u, destroy: %u/%u/%u\n",
1813               oti->oti_declare_ops[OSD_OT_CREATE],
1814               oti->oti_declare_ops_cred[OSD_OT_CREATE],
1815               oti->oti_declare_ops_used[OSD_OT_CREATE],
1816               oti->oti_declare_ops[OSD_OT_DESTROY],
1817               oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1818               oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1819         CWARN("  attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1820               oti->oti_declare_ops[OSD_OT_ATTR_SET],
1821               oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1822               oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1823               oti->oti_declare_ops[OSD_OT_XATTR_SET],
1824               oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1825               oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1826         CWARN("  write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1827               oti->oti_declare_ops[OSD_OT_WRITE],
1828               oti->oti_declare_ops_cred[OSD_OT_WRITE],
1829               oti->oti_declare_ops_used[OSD_OT_WRITE],
1830               oti->oti_declare_ops[OSD_OT_PUNCH],
1831               oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1832               oti->oti_declare_ops_used[OSD_OT_PUNCH],
1833               oti->oti_declare_ops[OSD_OT_QUOTA],
1834               oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1835               oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1836         CWARN("  insert: %u/%u/%u, delete: %u/%u/%u\n",
1837               oti->oti_declare_ops[OSD_OT_INSERT],
1838               oti->oti_declare_ops_cred[OSD_OT_INSERT],
1839               oti->oti_declare_ops_used[OSD_OT_INSERT],
1840               oti->oti_declare_ops[OSD_OT_DELETE],
1841               oti->oti_declare_ops_cred[OSD_OT_DELETE],
1842               oti->oti_declare_ops_used[OSD_OT_DELETE]);
1843         CWARN("  ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1844               oti->oti_declare_ops[OSD_OT_REF_ADD],
1845               oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1846               oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1847               oti->oti_declare_ops[OSD_OT_REF_DEL],
1848               oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1849               oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1850 }
1851
1852 /*
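 * Start the journal handle for a transaction created by osd_trans_create().
 * If the declared credits exceed what the journal can accommodate, they are
 * clamped to the transaction size limit, optionally dumping the declarations
 * for debugging.
 *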
1853  * Concurrency: shouldn't matter.
1854  */
1855 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1856                            struct thandle *th)
1857 {
1858         struct osd_thread_info *oti = osd_oti_get(env);
1859         struct osd_device *dev = osd_dt_dev(d);
1860         handle_t *jh;
1861         struct osd_thandle *oh;
1862         int rc;
1863
1864         ENTRY;
1865
1866         LASSERT(current->journal_info == NULL);
1867
1868         oh = container_of0(th, struct osd_thandle, ot_super);
1869         LASSERT(oh != NULL);
1870         LASSERT(oh->ot_handle == NULL);
1871
1872         rc = dt_txn_hook_start(env, d, th);
1873         if (rc != 0)
1874                 GOTO(out, rc);
1875
1876         if (unlikely(osd_param_is_not_sane(dev, th))) {
1877                 static unsigned long last_printed;
1878                 static int last_credits;
1879
1880                 /*
1881                  * don't make noise on tiny testing systems;
1882                  * actual credit misuse will be caught anyway
1883                  */
1884                 if (last_credits != oh->ot_credits &&
1885                     time_after(jiffies, last_printed +
1886                                cfs_time_seconds(60)) &&
1887                     osd_transaction_size(dev) > 512) {
1888                         CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1889                               oh->ot_credits, osd_transaction_size(dev));
1890                         osd_trans_dump_creds(env, th);
1891                         libcfs_debug_dumpstack(NULL);
1892                         last_credits = oh->ot_credits;
1893                         last_printed = jiffies;
1894                 }
1895                 /*
1896                  * XXX Limit the credits to 'max_transaction_buffers', and
1897                  *     let the underlying filesystem catch the error if
1898                  *     we really need so many credits.
1899                  *
1900                  *     This should be removed when we can calculate the
1901                  *     credits precisely.
1902                  */
1903                 oh->ot_credits = osd_transaction_size(dev);
1904         } else if (ldiskfs_track_declares_assert != 0) {
1905                 /*
1906                  * reserve a few extra credits to prevent an assertion in
1907                  * JBD; our debugging mechanism will still be able to
1908                  * detect overuse. this can help to debug single-update
1909                  * transactions
1910                  */
1911                 oh->ot_credits += 10;
1912                 if (unlikely(osd_param_is_not_sane(dev, th)))
1913                         oh->ot_credits = osd_transaction_size(dev);
1914         }
1915
1916         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_TXN_START))
1917                 GOTO(out, rc = -EIO);
1918
1919         /*
1920          * XXX temporary stuff. Some abstraction layer should
1921          * be used.
1922          */
1923         jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1924         osd_th_started(oh);
1925         if (!IS_ERR(jh)) {
1926                 oh->ot_handle = jh;
1927                 LASSERT(oti->oti_txns == 0);
1928
1929                 lu_device_get(&d->dd_lu_dev);
1930                 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1931                               "osd-tx", th);
1932                 oti->oti_txns++;
1933                 rc = 0;
1934         } else {
1935                 rc = PTR_ERR(jh);
1936         }
1937 out:
1938         RETURN(rc);
1939 }
1940
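/*
 * Check whether a FID sequence is stored locally by looking it up in the
 * FLD and comparing the returned index with this server's node id.
 */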
1941 static int osd_seq_exists(const struct lu_env *env,
1942                           struct osd_device *osd, u64 seq)
1943 {
1944         struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1945         struct seq_server_site *ss = osd_seq_site(osd);
1946         int rc;
1947
1948         ENTRY;
1949
1950         LASSERT(ss != NULL);
1951         LASSERT(ss->ss_server_fld != NULL);
1952
1953         rc = osd_fld_lookup(env, osd, seq, range);
1954         if (rc != 0) {
1955                 if (rc != -ENOENT)
1956                         CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
1957                                osd_name(osd), seq, rc);
1958                 RETURN(0);
1959         }
1960
1961         RETURN(ss->ss_node_id == range->lsr_index);
1962 }
1963
1964 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1965 {
1966         struct dt_txn_commit_cb *dcb;
1967         struct dt_txn_commit_cb *tmp;
1968
1969         /* call per-transaction stop callbacks if any */
1970         list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1971                                  dcb_linkage) {
1972                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1973                          "commit callback entry: magic=%x name='%s'\n",
1974                          dcb->dcb_magic, dcb->dcb_name);
1975                 list_del_init(&dcb->dcb_linkage);
1976                 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1977         }
1978 }
1979
1980 /*
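 * Stop the transaction: run stop callbacks, stop the journal handle,
 * execute delayed truncates, notify the quota slave and wait for any
 * data IO submitted by this thread to complete.
 *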
1981  * Concurrency: shouldn't matter.
1982  */
1983 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
1984                           struct thandle *th)
1985 {
1986         struct osd_thread_info *oti = osd_oti_get(env);
1987         struct osd_thandle *oh;
1988         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1989         struct osd_device *osd = osd_dt_dev(th->th_dev);
1990         struct qsd_instance *qsd = osd_def_qsd(osd);
1991         struct lquota_trans *qtrans;
1992         struct list_head truncates = LIST_HEAD_INIT(truncates);
1993         int rc = 0, remove_agents = 0;
1994
1995         ENTRY;
1996
1997         oh = container_of0(th, struct osd_thandle, ot_super);
1998
1999         remove_agents = oh->ot_remove_agents;
2000
2001         qtrans = oh->ot_quota_trans;
2002         oh->ot_quota_trans = NULL;
2003
2004         /* move locks to local list, stop tx, execute truncates */
2005         list_splice(&oh->ot_trunc_locks, &truncates);
2006
2007         if (oh->ot_handle != NULL) {
2008                 int rc2;
2009
2010                 handle_t *hdl = oh->ot_handle;
2011
2012                 /*
2013                  * add commit callback;
2014                  * notice we don't do this in osd_trans_start() as the
2015                  * underlying transaction can change during truncate
2016                  */
2017                 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
2018                                              &oh->ot_jcb);
2019
2020                 LASSERT(oti->oti_txns == 1);
2021                 oti->oti_txns--;
2022
2023                 rc = dt_txn_hook_stop(env, th);
2024                 if (rc != 0)
2025                         CERROR("%s: failed in transaction hook: rc = %d\n",
2026                                osd_name(osd), rc);
2027
2028                 osd_trans_stop_cb(oh, rc);
2029                 /* hook functions might modify th_sync */
2030                 hdl->h_sync = th->th_sync;
2031
2032                 oh->ot_handle = NULL;
2033                 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
2034                 if (rc2 != 0)
2035                         CERROR("%s: failed to stop transaction: rc = %d\n",
2036                                osd_name(osd), rc2);
2037                 if (!rc)
2038                         rc = rc2;
2039
2040                 osd_process_truncates(&truncates);
2041         } else {
2042                 osd_trans_stop_cb(oh, th->th_result);
2043                 OBD_FREE_PTR(oh);
2044         }
2045
2046         osd_trunc_unlock_all(&truncates);
2047
2048         /* inform the quota slave device that the transaction is stopping */
2049         qsd_op_end(env, qsd, qtrans);
2050
2051         /*
2052          * as we want journal IO and data IO to be concurrent, we don't block
2053          * awaiting data IO completion in osd_do_bio(); instead we wait here
2054          * once the transaction is submitted to the journal. all regular requests
2055          * don't do direct IO (except read/write), thus this wait_event becomes
2056          * a no-op for them.
2057          *
2058          * IMPORTANT: we have to wait until any IO submitted by this thread is
2059          * completed, otherwise the iobuf may be corrupted by a different request
2060          */
2061         wait_event(iobuf->dr_wait,
2062                        atomic_read(&iobuf->dr_numreqs) == 0);
2063         osd_fini_iobuf(osd, iobuf);
2064         if (!rc)
2065                 rc = iobuf->dr_error;
2066
2067         if (unlikely(remove_agents != 0))
2068                 osd_process_scheduled_agent_removals(env, osd);
2069
2070         oti->oti_ins_cache_depth--;
2071         /* reset OI cache for safety */
2072         if (oti->oti_ins_cache_depth == 0)
2073                 oti->oti_ins_cache_used = 0;
2074
2075         sb_end_write(osd_sb(osd));
2076
2077         RETURN(rc);
2078 }
2079
2080 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
2081 {
2082         struct osd_thandle *oh = container_of0(th, struct osd_thandle,
2083                                                ot_super);
2084
2085         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
2086         LASSERT(dcb->dcb_func != NULL);
2087         if (dcb->dcb_flags & DCB_TRANS_STOP)
2088                 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
2089         else
2090                 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
2091
2092         return 0;
2093 }
2094
2095 /*
2096  * Called just before object is freed. Releases all resources except for
2097  * object itself (that is released by osd_object_free()).
2098  *
2099  * Concurrency: no concurrent access is possible that late in object
2100  * life-cycle.
2101  */
2102 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
2103 {
2104         struct osd_object *obj = osd_obj(l);
2105         struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
2106         struct inode *inode = obj->oo_inode;
2107         __u64 projid;
2108         qid_t uid;
2109         qid_t gid;
2110
2111         LINVRNT(osd_invariant(obj));
2112
2113         /*
2114          * If the object is unlinked, remove the fid->ino mapping from the object index.
2115          */
2116
2117         osd_index_fini(obj);
2118
2119         if (!inode)
2120                 return;
2121
2122         uid = i_uid_read(inode);
2123         gid = i_gid_read(inode);
2124         projid = i_projid_read(inode);
2125
2126         obj->oo_inode = NULL;
2127         iput(inode);
2128
2129         /* do not rebalance quota if the caller needs to release memory,
2130          * otherwise qsd_refresh_usage() may enter a new ldiskfs
2131          * transaction and risk a deadlock - LU-12178 */
2132         if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
2133                 return;
2134
2135         if (!obj->oo_header && qsd) {
2136                 struct osd_thread_info *info = osd_oti_get(env);
2137                 struct lquota_id_info *qi = &info->oti_qi;
2138
2139                 /* Release granted quota to master if necessary */
2140                 qi->lqi_id.qid_uid = uid;
2141                 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
2142
2143                 qi->lqi_id.qid_uid = gid;
2144                 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
2145
2146                 qi->lqi_id.qid_uid = projid;
2147                 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
2148         }
2149 }
2150
2151 /*
2152  * Concurrency: ->loo_object_release() is called under site spin-lock.
2153  */
2154 static void osd_object_release(const struct lu_env *env,
2155                                struct lu_object *l)
2156 {
2157         struct osd_object *o = osd_obj(l);
2158
2159         /*
2160          * nobody should be releasing a non-destroyed object with nlink=0;
2161          * the API allows this, but ldiskfs doesn't like it and then reports
2162          * this inode as deleted
2163          */
2164         LASSERT(!(o->oo_destroyed == 0 && o->oo_inode &&
2165                   o->oo_inode->i_nlink == 0));
2166 }
2167
2168 /*
2169  * Concurrency: shouldn't matter.
2170  */
2171 static int osd_object_print(const struct lu_env *env, void *cookie,
2172                             lu_printer_t p, const struct lu_object *l)
2173 {
2174         struct osd_object *o = osd_obj(l);
2175         struct iam_descr *d;
2176
2177         if (o->oo_dir != NULL)
2178                 d = o->oo_dir->od_container.ic_descr;
2179         else
2180                 d = NULL;
2181         return (*p)(env, cookie,
2182                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
2183                     o, o->oo_inode,
2184                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
2185                     o->oo_inode ? o->oo_inode->i_generation : 0,
2186                     d ? d->id_ops->id_name : "plain");
2187 }
2188
2189 /*
2190  * Concurrency: shouldn't matter.
2191  */
2192 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2193                 struct obd_statfs *sfs, struct obd_statfs_info *info)
2194 {
2195         struct osd_device *osd = osd_dt_dev(d);
2196         struct super_block *sb = osd_sb(osd);
2197         struct kstatfs *ksfs;
2198         __u64 reserved;
2199         int result = 0;
2200
2201         if (unlikely(osd->od_mnt == NULL))
2202                 return -EINPROGRESS;
2203
2204         /* osd_lproc.c calls this without env, allocate ksfs for that case */
2205         if (unlikely(env == NULL)) {
2206                 OBD_ALLOC_PTR(ksfs);
2207                 if (ksfs == NULL)
2208                         return -ENOMEM;
2209         } else {
2210                 ksfs = &osd_oti_get(env)->oti_ksfs;
2211         }
2212
2213         result = sb->s_op->statfs(sb->s_root, ksfs);
2214         if (result)
2215                 goto out;
2216
2217         statfs_pack(sfs, ksfs);
2218         if (unlikely(sb->s_flags & SB_RDONLY))
2219                 sfs->os_state |= OS_STATE_READONLY;
2220
2221         sfs->os_state |= osd->od_nonrotational ? OS_STATE_NONROT : 0;
2222
2223         if (ldiskfs_has_feature_extents(sb))
2224                 sfs->os_maxbytes = sb->s_maxbytes;
2225         else
2226                 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2227
2228         /*
2229          * Reserve some space to avoid fragmenting the filesystem too much.
2230          * Fragmentation not only impacts performance, but can also increase
2231          * metadata overhead significantly, causing grant calculation to be
2232          * wrong.
2233          *
2234          * Reserve 0.78% of total space, at least 8MB for small filesystems.
2235          */
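        /*
         * 0.78% corresponds to 1/128 of os_blocks, i.e. a right shift by
         * OSD_STATFS_RESERVED_SHIFT (presumably 7), while OSD_STATFS_RESERVED
         * (presumably 8MB) provides the floor for small filesystems.
         */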
2236         CLASSERT(OSD_STATFS_RESERVED > LDISKFS_MAX_BLOCK_SIZE);
2237         reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2238         if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2239                 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2240
2241         sfs->os_blocks -= reserved;
2242         sfs->os_bfree  -= min(reserved, sfs->os_bfree);
2243         sfs->os_bavail -= min(reserved, sfs->os_bavail);
2244
2245 out:
2246         if (unlikely(env == NULL))
2247                 OBD_FREE_PTR(ksfs);
2248         return result;
2249 }
2250
2251 /**
2252  * Estimate space needed for file creations. We assume the largest filename
2253  * is 2^64 - 1, hence a filename of 20 characters.
2254  * This is 28 bytes per object, i.e. 28MB for 1M objects ... not so bad.
2255  */
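/*
 * For reference, assuming the standard ext4 record-length formula, a
 * directory entry is the 8-byte header plus the name rounded up to a
 * 4-byte boundary, so for a 20-character name: (20 + 8 + 3) & ~3 = 28.
 */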
2256 #ifdef __LDISKFS_DIR_REC_LEN
2257 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2258 #else
2259 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2260 #endif
2261
2262 /*
2263  * Concurrency: doesn't access mutable data.
2264  */
2265 static void osd_conf_get(const struct lu_env *env,
2266                          const struct dt_device *dev,
2267                          struct dt_device_param *param)
2268 {
2269         struct osd_device *d = osd_dt_dev(dev);
2270         struct super_block *sb = osd_sb(d);
2271         struct blk_integrity *bi = bdev_get_integrity(sb->s_bdev);
2272         const char *name;
2273         int ea_overhead;
2274
2275         /*
2276          * XXX should be taken from not-yet-existing fs abstraction layer.
2277          */
2278         param->ddp_max_name_len = LDISKFS_NAME_LEN;
2279         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
2280         param->ddp_symlink_max  = sb->s_blocksize;
2281         param->ddp_mount_type   = LDD_MT_LDISKFS;
2282         if (ldiskfs_has_feature_extents(sb))
2283                 param->ddp_maxbytes = sb->s_maxbytes;
2284         else
2285                 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2286         /*
2287          * inodes are statically allocated, so per-inode space consumption
2288          * is the space consumed by the directory entry
2289          */
2290         param->ddp_inodespace     = PER_OBJ_USAGE;
2291         /*
2292          * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2293          * is 128MB) which is unlikely to be hit in real life. Report a smaller
2294          * maximum length to not under-count the actual number of extents
2295          * needed for writing a file if there are sub-optimal block allocations.
2296          */
2297         param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
2298         /* worst-case extent insertion metadata overhead */
2299         param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2300         param->ddp_mntopts = 0;
2301         if (test_opt(sb, XATTR_USER))
2302                 param->ddp_mntopts |= MNTOPT_USERXATTR;
2303         if (test_opt(sb, POSIX_ACL))
2304                 param->ddp_mntopts |= MNTOPT_ACL;
2305
2306         /*
2307          * LOD might calculate the max stripe count based on max_ea_size,
2308          * so we need to take the overhead into account as well:
2309          * xattr_header + magic + xattr_entry_head
2310          */
2311         ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2312                       LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2313
2314 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2315         if (ldiskfs_has_feature_ea_inode(sb))
2316                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2317                                                                 ea_overhead;
2318         else
2319 #endif
2320                 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2321
2322         if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE)
2323                 param->ddp_max_ea_size = OBD_MAX_EA_SIZE;
2324
2325         /*
2326          * Preferred RPC size for efficient disk IO.  4MB shows good
2327          * all-around performance for ldiskfs, but use bigalloc chunk size
2328          * by default if larger.
2329          */
2330 #if defined(LDISKFS_CLUSTER_SIZE)
2331         if (LDISKFS_CLUSTER_SIZE(sb) > DT_DEF_BRW_SIZE)
2332                 param->ddp_brw_size = LDISKFS_CLUSTER_SIZE(sb);
2333         else
2334 #endif
2335                 param->ddp_brw_size = DT_DEF_BRW_SIZE;
2336
2337         param->ddp_t10_cksum_type = 0;
2338         if (bi) {
2339                 unsigned short interval = blk_integrity_interval(bi);
2340                 name = blk_integrity_name(bi);
2341                 /*
2342                  * Expected values:
2343                  * T10-DIF-TYPE1-CRC
2344                  * T10-DIF-TYPE3-CRC
2345                  * T10-DIF-TYPE1-IP
2346                  * T10-DIF-TYPE3-IP
2347                  */
2348                 if (strncmp(name, "T10-DIF-TYPE",
2349                             sizeof("T10-DIF-TYPE") - 1) == 0) {
2350                         /* also skip "1/3-" at end */
2351                         const int type_off = sizeof("T10-DIF-TYPE.");
2352                         char type_number = name[type_off - 2];
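                        /*
                         * e.g. for "T10-DIF-TYPE1-CRC": type_number is '1'
                         * and name + type_off points at "CRC"
                         */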
2353
2354                         if (interval != 512 && interval != 4096) {
2355                                 CERROR("%s: unsupported T10PI sector size %u\n",
2356                                        d->od_svname, interval);
2357                         } else if (type_number != '1' && type_number != '3') {
2358                                 CERROR("%s: unsupported T10PI type %s\n",
2359                                        d->od_svname, name);
2360                         } else if (strcmp(name + type_off, "CRC") == 0) {
2361                                 d->od_t10_type = type_number == '1' ?
2362                                         OSD_T10_TYPE1_CRC : OSD_T10_TYPE3_CRC;
2363                                 param->ddp_t10_cksum_type = interval == 512 ?
2364                                         OBD_CKSUM_T10CRC512 :
2365                                         OBD_CKSUM_T10CRC4K;
2366                         } else if (strcmp(name + type_off, "IP") == 0) {
2367                                 d->od_t10_type = type_number == '1' ?
2368                                         OSD_T10_TYPE1_IP : OSD_T10_TYPE3_IP;
2369                                 param->ddp_t10_cksum_type = interval == 512 ?
2370                                         OBD_CKSUM_T10IP512 :
2371                                         OBD_CKSUM_T10IP4K;
2372                         } else {
2373                                 CERROR("%s: unsupported checksum type of "
2374                                        "T10PI type '%s'",
2375                                        "T10PI type '%s'\n",
2376                         }
2377
2378                 } else {
2379                         CERROR("%s: unsupported T10PI type '%s'\n",
2380                                d->od_svname, name);
2381                 }
2382         }
2383 }
2384
2385 static struct super_block *osd_mnt_sb_get(const struct dt_device *d)
2386 {
2387         return osd_sb(osd_dt_dev(d));
2388 }
2389
2390 /*
2391  * Concurrency: shouldn't matter.
2392  */
2393 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2394 {
2395         int rc;
2396         struct super_block *s = osd_sb(osd_dt_dev(d));
2397         ENTRY;
2398
2399         down_read(&s->s_umount);
2400         rc = s->s_op->sync_fs(s, 1);
2401         up_read(&s->s_umount);
2402
2403         CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
2404                rc);
2405
2406         return rc;
2407 }
2408
2409 /**
2410  * Start commit for OSD device.
2411  *
2412  * An implementation of dt_commit_async method for OSD device.
2413  * Asynchronously starts the underlying fs sync and thereby a transaction
2414  * commit.
2415  *
2416  * \param env environment
2417  * \param d dt device
2418  *
2419  * \see dt_device_operations
2420  */
2421 static int osd_commit_async(const struct lu_env *env,
2422                             struct dt_device *d)
2423 {
2424         struct super_block *s = osd_sb(osd_dt_dev(d));
2425         int rc;
2426
2427         ENTRY;
2428
2429         CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
2430         down_read(&s->s_umount);
2431         rc = s->s_op->sync_fs(s, 0);
2432         up_read(&s->s_umount);
2433
2434         RETURN(rc);
2435 }
2436
2437 /* Our own copy of the set readonly functions if present, or NULL if not. */
2438 static int (*priv_dev_set_rdonly)(struct block_device *bdev);
2439 static int (*priv_dev_check_rdonly)(struct block_device *bdev);
2440 /* static int (*priv_dev_clear_rdonly)(struct block_device *bdev); */
2441
2442 /*
2443  * Concurrency: shouldn't matter.
2444  */
2445 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2446 {
2447         struct super_block *sb = osd_sb(osd_dt_dev(d));
2448         struct block_device *dev = sb->s_bdev;
2449         int rc = -EOPNOTSUPP;
2450
2451         ENTRY;
2452
2453         if (priv_dev_set_rdonly) {
2454                 struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
2455
2456                 rc = 0;
2457                 CERROR("*** setting %s read-only ***\n",
2458                        osd_dt_dev(d)->od_svname);
2459
2460                 if (sb->s_op->freeze_fs) {
2461                         rc = sb->s_op->freeze_fs(sb);
2462                         if (rc)
2463                                 goto out;
2464                 }
2465
2466                 if (jdev && (jdev != dev)) {
2467                         CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
2468                                (long)jdev);
2469                         priv_dev_set_rdonly(jdev);
2470                 }
2471                 CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
2472                 priv_dev_set_rdonly(dev);
2473
2474                 if (sb->s_op->unfreeze_fs)
2475                         sb->s_op->unfreeze_fs(sb);
2476         }
2477
2478 out:
2479         if (rc)
2480                 CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2481                        osd_dt_dev(d)->od_svname, (long)dev, rc);
2482
2483         RETURN(rc);
2484 }
2485
2486 /**
2487  * Note: quota credits are not counted here.
2488  * If we mount with --data_journal we may need more.
2489  */
2490 const int osd_dto_credits_noquota[DTO_NR] = {
2491         /**
2492          * Insert.
2493          * INDEX_EXTRA_TRANS_BLOCKS(8) +
2494          * SINGLEDATA_TRANS_BLOCKS(8)
2495          * XXX Note: maybe iam needs more, since iam has more levels than
2496          *           EXT3 htree.
2497          */
2498         [DTO_INDEX_INSERT]  = 16,
2499         /**
2500          * Delete
2501          * just modify a single entry, probably merge a few within a block
2502          */
2503         [DTO_INDEX_DELETE]  = 1,
2504         /**
2505          * Used for OI scrub
2506          */
2507         [DTO_INDEX_UPDATE]  = 16,
2508         /**
2509          * 4(inode, inode bits, groups, GDT)
2510          *   notice: OI updates are counted separately with DTO_INDEX_INSERT
2511          */
2512         [DTO_OBJECT_CREATE] = 4,
2513         /**
2514          * 4(inode, inode bits, groups, GDT)
2515          *   notice: OI updates are counted separately with DTO_INDEX_DELETE
2516          */
2517         [DTO_OBJECT_DELETE] = 4,
2518         /**
2519          * Attr set credits (inode)
2520          */
2521         [DTO_ATTR_SET_BASE] = 1,
2522         /**
2523          * Xattr set. The same as xattr of EXT3.
2524          * DATA_TRANS_BLOCKS(14)
2525          * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2526          * are also counted in. It is not clear why.
2527          */
2528         [DTO_XATTR_SET]     = 14,
2529         /**
2530          * credits for inode change during write.
2531          */
2532         [DTO_WRITE_BASE]    = 3,
2533         /**
2534          * credits for single block write.
2535          */
2536         [DTO_WRITE_BLOCK]   = 14,
2537         /**
2538          * Attr set credits for chown.
2539          * These are extra credits for setattr; they are zero without quota
2540          */
2541         [DTO_ATTR_SET_CHOWN] = 0
2542 };
2543
2544 static const struct dt_device_operations osd_dt_ops = {
2545         .dt_root_get       = osd_root_get,
2546         .dt_statfs         = osd_statfs,
2547         .dt_trans_create   = osd_trans_create,
2548         .dt_trans_start    = osd_trans_start,
2549         .dt_trans_stop     = osd_trans_stop,
2550         .dt_trans_cb_add   = osd_trans_cb_add,
2551         .dt_conf_get       = osd_conf_get,
2552         .dt_mnt_sb_get     = osd_mnt_sb_get,
2553         .dt_sync           = osd_sync,
2554         .dt_ro             = osd_ro,
2555         .dt_commit_async   = osd_commit_async,
2556 };
2557
2558 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
2559                           unsigned int role)
2560 {
2561         struct osd_object *obj = osd_dt_obj(dt);
2562         struct osd_thread_info *oti = osd_oti_get(env);
2563
2564         LINVRNT(osd_invariant(obj));
2565
2566         LASSERT(obj->oo_owner != env);
2567         down_read_nested(&obj->oo_sem, role);
2568
2569         LASSERT(obj->oo_owner == NULL);
2570         oti->oti_r_locks++;
2571 }
2572
2573 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
2574                            unsigned int role)
2575 {
2576         struct osd_object *obj = osd_dt_obj(dt);
2577         struct osd_thread_info *oti = osd_oti_get(env);
2578
2579         LINVRNT(osd_invariant(obj));
2580
2581         LASSERT(obj->oo_owner != env);
2582         down_write_nested(&obj->oo_sem, role);
2583
2584         LASSERT(obj->oo_owner == NULL);
2585         obj->oo_owner = env;
2586         oti->oti_w_locks++;
2587 }
2588
2589 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
2590 {
2591         struct osd_object *obj = osd_dt_obj(dt);
2592         struct osd_thread_info *oti = osd_oti_get(env);
2593
2594         LINVRNT(osd_invariant(obj));
2595
2596         LASSERT(oti->oti_r_locks > 0);
2597         oti->oti_r_locks--;
2598         up_read(&obj->oo_sem);
2599 }
2600
2601 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
2602 {
2603         struct osd_object *obj = osd_dt_obj(dt);
2604         struct osd_thread_info *oti = osd_oti_get(env);
2605
2606         LINVRNT(osd_invariant(obj));
2607
2608         LASSERT(obj->oo_owner == env);
2609         LASSERT(oti->oti_w_locks > 0);
2610         oti->oti_w_locks--;
2611         obj->oo_owner = NULL;
2612         up_write(&obj->oo_sem);
2613 }
2614
2615 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
2616 {
2617         struct osd_object *obj = osd_dt_obj(dt);
2618
2619         LINVRNT(osd_invariant(obj));
2620
2621         return obj->oo_owner == env;
2622 }
2623
2624 static void osd_inode_getattr(const struct lu_env *env,
2625                               struct inode *inode, struct lu_attr *attr)
2626 {
2627         attr->la_valid  |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2628                            LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2629                            LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2630                            LA_BLKSIZE | LA_TYPE;
2631
2632         attr->la_atime = inode->i_atime.tv_sec;
2633         attr->la_mtime = inode->i_mtime.tv_sec;
2634         attr->la_ctime = inode->i_ctime.tv_sec;
2635         attr->la_mode    = inode->i_mode;
2636         attr->la_size    = i_size_read(inode);
2637         attr->la_blocks  = inode->i_blocks;
2638         attr->la_uid     = i_uid_read(inode);
2639         attr->la_gid     = i_gid_read(inode);
2640         attr->la_projid  = i_projid_read(inode);
2641         attr->la_flags   = ll_inode_to_ext_flags(inode->i_flags);
2642         attr->la_nlink   = inode->i_nlink;
2643         attr->la_rdev    = inode->i_rdev;
2644         attr->la_blksize = 1 << inode->i_blkbits;
2645         attr->la_blkbits = inode->i_blkbits;
2646         /*
2647          * Ext4 does not transfer the inherit flags from the raw inode
2648          * to the inode flags, and ext4 internally tests the raw inode
2649          * @i_flags directly. Instead of patching ext4, we do it here.
2650          */
2651         if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
2652                 attr->la_flags |= LUSTRE_PROJINHERIT_FL;
2653 }
2654
2655 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
2656                         struct lu_attr *attr)
2657 {
2658         struct osd_object *obj = osd_dt_obj(dt);
2659
2660         if (unlikely(!dt_object_exists(dt)))
2661                 return -ENOENT;
2662         if (unlikely(obj->oo_destroyed))
2663                 return -ENOENT;
2664
2665         LASSERT(!dt_object_remote(dt));
2666         LINVRNT(osd_invariant(obj));
2667
2668         spin_lock(&obj->oo_guard);
2669         osd_inode_getattr(env, obj->oo_inode, attr);
2670         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
2671                 attr->la_valid |= LA_FLAGS;
2672                 attr->la_flags |= LUSTRE_ORPHAN_FL;
2673         }
2674         spin_unlock(&obj->oo_guard);
2675
2676         return 0;
2677 }
2678
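/*
 * Reserve quota credits for moving one inode and @bspace blocks of usage
 * from @old_id to @new_id for the given quota @type.
 */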
2679 static int osd_declare_attr_qid(const struct lu_env *env,
2680                                 struct osd_object *obj,
2681                                 struct osd_thandle *oh, long long bspace,
2682                                 qid_t old_id, qid_t new_id, bool enforce,
2683                                 unsigned int type, bool ignore_edquot)
2684 {
2685         int rc;
2686         struct osd_thread_info *info = osd_oti_get(env);
2687         struct lquota_id_info  *qi = &info->oti_qi;
2688
2689         qi->lqi_type = type;
2690         /* inode accounting */
2691         qi->lqi_is_blk = false;
2692
2693         /* one more inode for the new id ... */
2694         qi->lqi_id.qid_uid = new_id;
2695         qi->lqi_space      = 1;
2696         /* Reserve credits for the new id */
2697         rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2698         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2699                 rc = 0;
2700         if (rc)
2701                 RETURN(rc);
2702
2703         /* and one less inode for the current id */
2704         qi->lqi_id.qid_uid = old_id;
2705         qi->lqi_space = -1;
2706         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2707         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2708                 rc = 0;
2709         if (rc)
2710                 RETURN(rc);
2711
2712         /* block accounting */
2713         qi->lqi_is_blk = true;
2714
2715         /* more blocks for the new id ... */
2716         qi->lqi_id.qid_uid = new_id;
2717         qi->lqi_space      = bspace;
2718         /*
2719          * Credits for the new uid have been reserved, re-use "obj"
2720          * to save credit reservation.
2721          */
2722         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2723         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2724                 rc = 0;
2725         if (rc)
2726                 RETURN(rc);
2727
2728         /* and finally less blocks for the current uid */
2729         qi->lqi_id.qid_uid = old_id;
2730         qi->lqi_space      = -bspace;
2731         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2732         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2733                 rc = 0;
2734
2735         RETURN(rc);
2736 }
2737
2738 static int osd_declare_attr_set(const struct lu_env *env,
2739                                 struct dt_object *dt,
2740                                 const struct lu_attr *attr,
2741                                 struct thandle *handle)
2742 {
2743         struct osd_thandle *oh;
2744         struct osd_object *obj;
2745         qid_t uid;
2746         qid_t gid;
2747         long long bspace;
2748         int rc = 0;
2749         bool enforce;
2750
2751         ENTRY;
2752
2753         LASSERT(dt != NULL);
2754         LASSERT(handle != NULL);
2755
2756         obj = osd_dt_obj(dt);
2757         LASSERT(osd_invariant(obj));
2758
2759         oh = container_of0(handle, struct osd_thandle, ot_super);
2760         LASSERT(oh->ot_handle == NULL);
2761
2762         osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2763                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2764
2765         osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2766                              osd_dto_credits_noquota[DTO_XATTR_SET]);
2767
2768         if (attr == NULL || obj->oo_inode == NULL)
2769                 RETURN(rc);
2770
2771         bspace   = obj->oo_inode->i_blocks << 9;
2772         bspace   = toqb(bspace);
2773
2774         /*
2775          * Changing ownership is always performed by the super user; it should
2776          * not fail with EDQUOT unless explicitly required.
2777          *
2778          * We still need to call the osd_declare_qid() to calculate the journal
2779          * credits for updating quota accounting files and to trigger quota
2780          * space adjustment once the operation is completed.
2781          */
2782         if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2783                 bool ignore_edquot = !(attr->la_flags & LUSTRE_SET_SYNC_FL);
2784
2785                 if (!ignore_edquot)
2786                         CDEBUG(D_QUOTA, "%s: enforce quota on UID %u, GID %u"
2787                                " (the quota space is %lld)\n",
2788                                obj->oo_inode->i_sb->s_id, attr->la_uid,
2789                                attr->la_gid, bspace);
2790
2791                 /* USERQUOTA */
2792                 uid = i_uid_read(obj->oo_inode);
2793                 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2794                 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2795                                           attr->la_uid, enforce, USRQUOTA,
2796                                           true);
2797                 if (rc)
2798                         RETURN(rc);
2799
2800                 gid = i_gid_read(obj->oo_inode);
2801                 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2802                 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2803                                           i_gid_read(obj->oo_inode),
2804                                           attr->la_gid, enforce, GRPQUOTA,
2805                                           ignore_edquot);
2806                 if (rc)
2807                         RETURN(rc);
2808
2809         }
2810 #ifdef HAVE_PROJECT_QUOTA
2811         if (attr->la_valid & LA_PROJID) {
2812                 __u32 projid = i_projid_read(obj->oo_inode);
2813
2814                 enforce = (attr->la_valid & LA_PROJID) &&
2815                                         (attr->la_projid != projid);
2816                 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2817                                           (qid_t)projid, (qid_t)attr->la_projid,
2818                                           enforce, PRJQUOTA, true);
2819                 if (rc)
2820                         RETURN(rc);
2821         }
2822 #endif
2823         RETURN(rc);
2824 }
2825
2826 static int osd_inode_setattr(const struct lu_env *env,
2827                              struct inode *inode, const struct lu_attr *attr)
2828 {
2829         __u64 bits = attr->la_valid;
2830
2831         /* Only allow setting the size for regular files */
2832         if (!S_ISREG(inode->i_mode))
2833                 bits &= ~(LA_SIZE | LA_BLOCKS);
2834
2835         if (bits == 0)
2836                 return 0;
2837
2838         if (bits & LA_ATIME)
2839                 inode->i_atime = osd_inode_time(inode, attr->la_atime);
2840         if (bits & LA_CTIME)
2841                 inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
2842         if (bits & LA_MTIME)
2843                 inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
2844         if (bits & LA_SIZE) {
2845                 spin_lock(&inode->i_lock);
2846                 LDISKFS_I(inode)->i_disksize = attr->la_size;
2847                 i_size_write(inode, attr->la_size);
2848                 spin_unlock(&inode->i_lock);
2849         }
2850
2851         /*
2852          * OSD should not change "i_blocks" which is used by quota.
2853          * "i_blocks" should be changed by ldiskfs only.
2854          */
2855         if (bits & LA_MODE)
2856                 inode->i_mode = (inode->i_mode & S_IFMT) |
2857                                 (attr->la_mode & ~S_IFMT);
2858         if (bits & LA_UID)
2859                 i_uid_write(inode, attr->la_uid);
2860         if (bits & LA_GID)
2861                 i_gid_write(inode, attr->la_gid);
2862         if (bits & LA_PROJID)
2863                 i_projid_write(inode, attr->la_projid);
2864         if (bits & LA_NLINK)
2865                 set_nlink(inode, attr->la_nlink);
2866         if (bits & LA_RDEV)
2867                 inode->i_rdev = attr->la_rdev;
2868
2869         if (bits & LA_FLAGS) {
2870                 /* always keep S_NOCMTIME */
2871                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2872                                  S_NOCMTIME;
2873                 /*
2874                  * Ext4 does not transfer the inherit flags from
2875                  * @inode->i_flags to the raw inode i_flags when writing
2876                  * flags, so we do it explicitly here.
2877                  */
2878                 if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
2879                         LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
2880                 else
2881                         LDISKFS_I(inode)->i_flags &= ~LUSTRE_PROJINHERIT_FL;
2882         }
2883         return 0;
2884 }
2885
2886 #ifdef HAVE_PROJECT_QUOTA
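/*
 * Charge @inode to the project quota identified by @projid.
 *
 * Returns -EOPNOTSUPP if the filesystem lacks the project feature (unless
 * @projid is the default project) or still uses small inodes, -EOVERFLOW if
 * i_projid does not fit into the on-disk inode; otherwise the inode's usage
 * is transferred to the new project's dquot via __dquot_transfer().  The
 * project ID itself is written separately by osd_inode_setattr().
 */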
2887 static int osd_transfer_project(struct inode *inode, __u32 projid)
2888 {
2889         struct super_block *sb = inode->i_sb;
2890         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2891         int err;
2892         kprojid_t kprojid;
2893         struct ldiskfs_iloc iloc;
2894         struct ldiskfs_inode *raw_inode;
2895         struct dquot *transfer_to[LDISKFS_MAXQUOTAS] = { };
2896
2897         if (!ldiskfs_has_feature_project(sb)) {
2898                 LASSERT(__kprojid_val(LDISKFS_I(inode)->i_projid)
2899                         == LDISKFS_DEF_PROJID);
2900                 if (projid != LDISKFS_DEF_PROJID)
2901                         return -EOPNOTSUPP;
2902                 else
2903                         return 0;
2904         }
2905
2906         if (LDISKFS_INODE_SIZE(sb) <= LDISKFS_GOOD_OLD_INODE_SIZE)
2907                 return -EOPNOTSUPP;
2908
2909         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2910         if (projid_eq(kprojid, LDISKFS_I(inode)->i_projid))
2911                 return 0;
2912
2913         err = ldiskfs_get_inode_loc(inode, &iloc);
2914         if (err)
2915                 return err;
2916
2917         raw_inode = ldiskfs_raw_inode(&iloc);
2918         if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
2919                 err = -EOVERFLOW;
2920                 brelse(iloc.bh);
2921                 return err;
2922         }
2923         brelse(iloc.bh);
2924
2925         dquot_initialize(inode);
2926         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2927         if (transfer_to[PRJQUOTA]) {
2928                 err = __dquot_transfer(inode, transfer_to);
2929                 dqput(transfer_to[PRJQUOTA]);
2930                 if (err)
2931                         return err;
2932         }
2933
2934         return err;
2935 }
2936 #endif
2937
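/*
 * Transfer quota accounting when the owner of @inode is about to change.
 *
 * UID/GID changes go through ll_vfs_dq_transfer(), project ID changes
 * through osd_transfer_project().  Only the quota charges are moved here;
 * the new IDs are written into the inode later by osd_inode_setattr().
 */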
2938 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
2939 {
2940         int rc;
2941
2942         if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
2943             (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
2944                 struct iattr iattr;
2945
2946                 ll_vfs_dq_init(inode);
2947                 iattr.ia_valid = 0;
2948                 if (attr->la_valid & LA_UID)
2949                         iattr.ia_valid |= ATTR_UID;
2950                 if (attr->la_valid & LA_GID)
2951                         iattr.ia_valid |= ATTR_GID;
2952                 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
2953                 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
2954
2955                 rc = ll_vfs_dq_transfer(inode, &iattr);
2956                 if (rc) {
2957                         CERROR("%s: quota transfer failed: rc = %d. Is quota "
2958                                "enforcement enabled on the ldiskfs "
2959                                "filesystem?\n", inode->i_sb->s_id, rc);
2960                         return rc;
2961                 }
2962         }
2963
2964         /* The project ID quota transfer is handled separately */
2965         if (attr->la_valid & LA_PROJID &&
2966             attr->la_projid != i_projid_read(inode)) {
2967 #ifdef HAVE_PROJECT_QUOTA
2968                 rc = osd_transfer_project(inode, attr->la_projid);
2969 #else
2970                 rc = -ENOTSUPP;
2971 #endif
2972                 if (rc) {
2973                         CERROR("%s: quota transfer failed: rc = %d. Is project "
2974                                "enforcement enabled on the ldiskfs "
2975                                "filesystem?\n", inode->i_sb->s_id, rc);
2976                         return rc;
2977                 }
2978         }
2979         return 0;
2980 }
2981
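/*
 * Apply @attr to an existing object within the already started
 * transaction @handle.
 *
 * The quota transfer (if any owner change is requested) is done first,
 * then the in-core inode is updated under oo_guard and marked dirty.
 * Lustre-specific flags in LUSTRE_LMA_FL_MASKS are additionally folded
 * into the object's LMA xattr.
 */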
2982 static int osd_attr_set(const struct lu_env *env,
2983                         struct dt_object *dt,
2984                         const struct lu_attr *attr,
2985                         struct thandle *handle)
2986 {
2987         struct osd_object *obj = osd_dt_obj(dt);
2988         struct inode *inode;
2989         int rc;
2990
2991         if (!dt_object_exists(dt))
2992                 return -ENOENT;
2993
2994         LASSERT(handle != NULL);
2995         LASSERT(!dt_object_remote(dt));
2996         LASSERT(osd_invariant(obj));
2997
2998         osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
2999
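        /*
         * Fault injection path: when OBD_FAIL_OSD_FID_MAPPING is set on a
         * non-OST device, overwrite this object's OI mapping with a bogus
         * value (the osd_inode_id is memset to 1) instead of applying the
         * attributes, presumably so that OI scrub repair can be exercised.
         */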
3000         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) &&
3001             !osd_obj2dev(obj)->od_is_ost) {
3002                 struct osd_thread_info *oti = osd_oti_get(env);
3003                 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
3004                 struct lu_fid *fid1 = &oti->oti_fid;
3005                 struct osd_inode_id *id = &oti->oti_id;
3006                 struct iam_path_descr *ipd;
3007                 struct iam_container *bag;
3008                 struct osd_thandle *oh;
3009                 int rc;
3010
3011                 fid_cpu_to_be(fid1, fid0);
3012                 memset(id, 1, sizeof(*id));
3013                 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
3014                                   fid0)->oi_dir.od_container;
3015                 ipd = osd_idx_ipd_get(env, bag);
3016                 if (unlikely(ipd == NULL))
3017                         RETURN(-ENOMEM);
3018
3019                 oh = container_of0(handle, struct osd_thandle, ot_super);
3020                 rc = iam_update(oh->ot_handle, bag,
3021                                 (const struct iam_key *)fid1,
3022                                 (const struct iam_rec *)id, ipd);
3023                 osd_ipd_put(env, bag, ipd);
3024                 return rc > 0 ? 0 : rc;
3025         }
3026
3027         inode = obj->oo_inode;
3028
3029         rc = osd_quota_transfer(inode, attr);
3030         if (rc)
3031                 return rc;
3032
3033         spin_lock(&obj->oo_guard);
3034         rc = osd_inode_setattr(env, inode, attr);
3035         spin_unlock(&obj->oo_guard);
3036         if (rc != 0)
3037                 GOTO(out, rc);
3038
3039         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
3040
3041         if (!(attr->la_valid & LA_FLAGS))
3042                 GOTO(out, rc);
3043
3044         /* Check whether extra flags need to be set in the LMA */
3045         if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
3046                 struct osd_thread_info *info = osd_oti_get(env);
3047                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
3048
3049                 LASSERT(!obj->oo_pfid_in_lma);
3050
3051                 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
3052                                  &info->oti_ost_attrs);
3053                 if (rc)
3054                         GOTO(out, rc);
3055
3056                 lma->lma_incompat |=
3057                         lustre_to_lma_flags(attr->la_flags);
3058                 lustre_lma_swab(lma);
3059                 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
3060                                      lma, sizeof(*lma), XATTR_REPLACE);
3061                 if (rc != 0) {
3062                         struct osd_device *osd = osd_obj2dev(obj);
3063
3064                         CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
3065                               osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
3066                               lma->lma_incompat, rc);
3067                 } else {
3068                         obj->oo_lma_flags =
3069                                 attr->la_flags & LUSTRE_LMA_FL_MASKS;
3070                 }
3071                 osd_trans_exec_check(env, handle, OSD_OT_XATTR_SET);
3072         }
3073 out:
3074         osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
3075
3076         return rc;
3077 }
3078
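/* Build a dentry for child @name under @obj's inode (thin wrapper). */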
3079 static struct dentry *osd_child_dentry_get(const struct lu_env *env,
3080                                            struct osd_object *obj,
3081                                            const char *name, const int namelen)
3082 {
3083         return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
3084 }
3085
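/*
 * Allocate a new ldiskfs inode of type @mode for @obj within transaction
 * @th.
 *
 * Ownership is taken from @attr (LA_UID/LA_GID) when present, and the new
 * inode is placed relative to @hint->dah_parent if a local parent is given,
 * otherwise relative to the filesystem root.  Directories additionally get
 * an htree lock head when parallel directory operations (ldiskfs_pdo) are
 * enabled.  The new inode is flagged S_NOCMTIME and marked as not needing
 * OI scrub.
 */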
3086 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
3087                       umode_t mode, struct dt_allocation_hint *hint,
3088                       struct thandle *th, struct lu_attr *attr)
3089 {
3090         int result;
3091         struct osd_device *osd = osd_obj2dev(obj);
3092         struct osd_thandle *oth;
3093         struct dt_object *parent = NULL;
3094         struct inode *inode;
3095         uid_t owner[2] = {0, 0};
3096
3097         if (attr->la_valid & LA_UID)
3098                 owner[0] = attr->la_uid;
3099         if (attr->la_valid & LA_GID)
3100                 owner[1] = attr->la_gid;
3101
3102         LINVRNT(osd_invariant(obj));
3103         LASSERT(obj->oo_inode == NULL);
3104         LASSERT(obj->oo_hl_head == NULL);
3105
3106         if (S_ISDIR(mode) && ldiskfs_pdo) {
3107                 obj->oo_hl_head =
3108                         ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
3109                 if (obj->oo_hl_head == NULL)
3110                         return -ENOMEM;
3111         }
3112
3113         oth = container_of(th, struct osd_thandle, ot_super);
3114         LASSERT(oth->ot_handle->h_transaction != NULL);
3115
3116         if (hint != NULL && hint->dah_parent != NULL &&
3117             !dt_object_remote(hint->dah_parent))
3118                 parent = hint->dah_parent;
3119
3120         inode = ldiskfs_create_inode(oth->ot_handle,
3121                                      parent ? osd_dt_obj(parent)->oo_inode :
3122                                               osd_sb(osd)->s_root->d_inode,
3123                                      mode, owner);
3124         if (!IS_ERR(inode)) {
3125                 /* Do not update file c/mtime in ldiskfs. */
3126                 inode->i_flags |= S_NOCMTIME;
3127
3128                 /*
3129                  * A newly created object is consistent by definition,
3130                  * so there is no need to scrub it.
3131                  */
3132                 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
3133
3134                 obj->oo_inode = inode;
3135                 result = 0;
3136         } else {
3137                 if (obj->oo_hl_head != NULL) {
3138                         ldiskfs_htree_lock_head_free(obj->oo_hl_head);
3139                         obj->oo_hl_head = NULL;
3140                 }
3141                 result = PTR_ERR(inode);
3142         }
3143         LINVRNT(osd_invariant(obj));
3144         return result;
3145 }
3146
3147 enum {
3148         OSD_NAME_LEN = 255
3149 };
3150
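/*
 * Create the inode backing a directory object: keep only the mode bits
 * valid for a directory and let osd_mkfile() do the actual allocation.
 */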
3151 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
3152                      struct lu_attr *attr,
3153                      struct dt_allocation_hint *hint,
3154                      struct dt_object_format *dof,
3155                      struct thandle *th)
3156 {
3157         int result;
3158         struct osd_thandle *oth;
3159         __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX | S_ISGID));
3160
3161         LASSERT(S_ISDIR(attr->la_mode));
3162
3163         oth = container_of(th, struct osd_thandle, ot_super);
3164         LASSERT(oth->ot_handle->h_transaction != NULL);
3165         result = osd_mkfile(info, obj, mode, hint, th, attr);
3166
3167         return result;
3168 }
3169
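/*
 * Create the inode backing an index object.  The index lives in a regular
 * file, so only regular-file mode bits are kept before calling osd_mkfile();
 * @dof->u.dof_idx.di_feat describes the requested index features.
 */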
3170 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
3171                         struct lu_attr *attr,
3172                         struct dt_allocation_hint *hint,
3173                         struct dt_object_format *dof,
3174                         struct thandle *th)
3175 {
3176         int result;
3177         struct osd_thandle *oth;
3178         const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
3179
3180         __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
3181
3182         LASSERT(S_ISREG(attr->la_mode));
3183
3184         oth = container_of(th, struct osd_thandle, ot_super);
3185         LASSERT(oth->ot_handle->h_transaction != NULL);
3186
3187         result = osd_mkfile(info, obj, mode, hint, th, attr);
3188         if (result == 0) {
3189        &