Whamcloud - gitweb
LU-15168 osd: use large allocation for idc cache
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_handler.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osd/osd_handler.c
32  *
33  * Top-level entry points into osd module
34  *
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <linux/fs_struct.h>
42 #include <linux/kallsyms.h>
43 #include <linux/module.h>
44 #include <linux/user_namespace.h>
45 #include <linux/uidgid.h>
46
47 /* prerequisite for linux/xattr.h */
48 #include <linux/types.h>
49 /* prerequisite for linux/xattr.h */
50 #include <linux/fs.h>
51 /* XATTR_{REPLACE,CREATE} */
52 #include <linux/xattr.h>
53
54 #include <ldiskfs/ldiskfs.h>
55 #include <ldiskfs/xattr.h>
56 #include <ldiskfs/ldiskfs_extents.h>
57 #undef ENTRY
58 /*
59  * struct OBD_{ALLOC,FREE}*()
60  * OBD_FAIL_CHECK
61  */
62 #include <obd_support.h>
63 /* struct ptlrpc_thread */
64 #include <lustre_net.h>
65 #include <lustre_fid.h>
66 /* process_config */
67 #include <uapi/linux/lustre/lustre_param.h>
68
69 #include "osd_internal.h"
70 #include "osd_dynlocks.h"
71
72 /* llo_* api support */
73 #include <md_object.h>
74 #include <lustre_quota.h>
75
76 #include <lustre_linkea.h>
77
78 /* encoding routines */
79 #include <lustre_crypto.h>
80
81 /* Maximum EA size is limited by LNET_MTU for remote objects */
82 #define OSD_MAX_EA_SIZE 1048364
83
84 int ldiskfs_pdo = 1;
85 module_param(ldiskfs_pdo, int, 0644);
86 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
87
88 int ldiskfs_track_declares_assert;
89 module_param(ldiskfs_track_declares_assert, int, 0644);
90 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
91
92 /* Slab to allocate dynlocks */
93 struct kmem_cache *dynlock_cachep;
94
95 /* Slab to allocate osd_it_ea */
96 struct kmem_cache *osd_itea_cachep;
97
98 static struct lu_kmem_descr ldiskfs_caches[] = {
99         {
100                 .ckd_cache = &dynlock_cachep,
101                 .ckd_name  = "dynlock_cache",
102                 .ckd_size  = sizeof(struct dynlock_handle)
103         },
104         {
105                 .ckd_cache = &osd_itea_cachep,
106                 .ckd_name  = "osd_itea_cache",
107                 .ckd_size  = sizeof(struct osd_it_ea)
108         },
109         {
110                 .ckd_cache = NULL
111         }
112 };
113
114 static const char dot[] = ".";
115 static const char dotdot[] = "..";
116
117 static const struct lu_object_operations      osd_lu_obj_ops;
118 static const struct dt_object_operations      osd_obj_ops;
119 static const struct dt_object_operations      osd_obj_otable_it_ops;
120 static const struct dt_index_operations       osd_index_iam_ops;
121 static const struct dt_index_operations       osd_index_ea_ops;
122
123 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
124                           const struct lu_fid *fid);
125 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
126                                                 struct osd_device *osd);
127
128 int osd_trans_declare_op2rb[] = {
129         [OSD_OT_ATTR_SET]       = OSD_OT_ATTR_SET,
130         [OSD_OT_PUNCH]          = OSD_OT_MAX,
131         [OSD_OT_XATTR_SET]      = OSD_OT_XATTR_SET,
132         [OSD_OT_CREATE]         = OSD_OT_DESTROY,
133         [OSD_OT_DESTROY]        = OSD_OT_CREATE,
134         [OSD_OT_REF_ADD]        = OSD_OT_REF_DEL,
135         [OSD_OT_REF_DEL]        = OSD_OT_REF_ADD,
136         [OSD_OT_WRITE]          = OSD_OT_WRITE,
137         [OSD_OT_INSERT]         = OSD_OT_DELETE,
138         [OSD_OT_DELETE]         = OSD_OT_INSERT,
139         [OSD_OT_QUOTA]          = OSD_OT_MAX,
140 };
141
142 static int osd_has_index(const struct osd_object *obj)
143 {
144         return obj->oo_dt.do_index_ops != NULL;
145 }
146
147 static int osd_object_invariant(const struct lu_object *l)
148 {
149         return osd_invariant(osd_obj(l));
150 }
151
152 /*
153  * Concurrency: doesn't matter
154  */
155 static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
156 {
157         struct osd_thread_info *oti = osd_oti_get(env);
158
159         return oti->oti_w_locks > 0 && o->oo_owner == env;
160 }
161
162 /*
163  * Concurrency: doesn't access mutable data
164  */
165 static int osd_root_get(const struct lu_env *env,
166                         struct dt_device *dev, struct lu_fid *f)
167 {
168         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
169         return 0;
170 }
171
172 /*
173  * the following set of functions are used to maintain per-thread
174  * cache of FID->ino mapping. this mechanism is needed to resolve
175  * FID to inode at dt_insert() which in turn stores ino in the
176  * directory entries to keep ldiskfs compatible with ext[34].
177  * due to locking-originated restrictions we can't lookup ino
178  * using LU cache (deadlock is possible). lookup using OI is quite
179  * expensive. so instead we maintain this cache and methods like
180  * dt_create() fill it. so in the majority of cases dt_insert() is
181  * able to find needed mapping in lockless manner.
182  */
183 static struct osd_idmap_cache *
184 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
185              const struct lu_fid *fid)
186 {
187         struct osd_thread_info *oti = osd_oti_get(env);
188         struct osd_idmap_cache *idc = oti->oti_ins_cache;
189         int i;
190
191         for (i = 0; i < oti->oti_ins_cache_used; i++) {
192                 if (!lu_fid_eq(&idc[i].oic_fid, fid))
193                         continue;
194                 if (idc[i].oic_dev != osd)
195                         continue;
196
197                 return idc + i;
198         }
199
200         return NULL;
201 }
202
203 static struct osd_idmap_cache *
204 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
205             const struct lu_fid *fid)
206 {
207         struct osd_thread_info *oti   = osd_oti_get(env);
208         struct osd_idmap_cache *idc;
209         int i;
210
211         if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
212                 i = oti->oti_ins_cache_size * 2;
213                 if (i == 0)
214                         i = OSD_INS_CACHE_SIZE;
215                 OBD_ALLOC_PTR_ARRAY_LARGE(idc, i);
216                 if (idc == NULL)
217                         return ERR_PTR(-ENOMEM);
218                 if (oti->oti_ins_cache != NULL) {
219                         memcpy(idc, oti->oti_ins_cache,
220                                oti->oti_ins_cache_used * sizeof(*idc));
221                         OBD_FREE_PTR_ARRAY_LARGE(oti->oti_ins_cache,
222                                            oti->oti_ins_cache_used);
223                 }
224                 oti->oti_ins_cache = idc;
225                 oti->oti_ins_cache_size = i;
226         }
227
228         idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
229         idc->oic_fid = *fid;
230         idc->oic_dev = osd;
231         idc->oic_lid.oii_ino = 0;
232         idc->oic_lid.oii_gen = 0;
233         idc->oic_remote = 0;
234
235         return idc;
236 }
237
238 /*
239  * lookup mapping for the given fid in the cache, initialize a
240  * new one if not found. the initialization checks whether the
241  * object is local or remote. for local objects, OI is used to
242  * learn ino/generation. the function is used when the caller
243  * has no information about the object, e.g. at dt_insert().
244  */
245 static struct osd_idmap_cache *
246 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
247                      const struct lu_fid *fid)
248 {
249         struct osd_idmap_cache *idc;
250         int rc;
251
252         idc = osd_idc_find(env, osd, fid);
253         LASSERT(!IS_ERR(idc));
254         if (idc != NULL)
255                 return idc;
256
257         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
258                osd->od_svname, PFID(fid));
259
260         /* new mapping is needed */
261         idc = osd_idc_add(env, osd, fid);
262         if (IS_ERR(idc)) {
263                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
264                        osd->od_svname, PFID(fid), PTR_ERR(idc));
265                 return idc;
266         }
267
268         /* initialize it */
269         rc = osd_remote_fid(env, osd, fid);
270         if (unlikely(rc < 0))
271                 return ERR_PTR(rc);
272
273         if (rc == 0) {
274                 /* the object is local, lookup in OI */
275                 /* XXX: probably cheaper to lookup in LU first? */
276                 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
277                                    &idc->oic_lid, 0);
278                 if (unlikely(rc < 0)) {
279                         CERROR("can't lookup: rc = %d\n", rc);
280                         return ERR_PTR(rc);
281                 }
282         } else {
283                 /* the object is remote */
284                 idc->oic_remote = 1;
285         }
286
287         return idc;
288 }
289
290 static void osd_idc_dump_lma(const struct lu_env *env,
291                                 struct osd_device *osd,
292                                 unsigned long ino,
293                                 bool check_in_oi)
294 {
295         struct osd_thread_info *info = osd_oti_get(env);
296         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
297         const struct lu_fid *fid;
298         struct osd_inode_id lid;
299         struct inode *inode;
300         int rc;
301
302         inode = osd_ldiskfs_iget(osd_sb(osd), ino);
303         if (IS_ERR(inode)) {
304                 CERROR("%s: can't get inode %lu: rc = %d\n",
305                        osd->od_svname, ino, (int)PTR_ERR(inode));
306                 return;
307         }
308         if (is_bad_inode(inode)) {
309                 CERROR("%s: bad inode %lu\n", osd->od_svname, ino);
310                 goto put;
311         }
312         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
313         if (rc) {
314                 CERROR("%s: can't get LMA for %lu: rc = %d\n",
315                        osd->od_svname, ino, rc);
316                 goto put;
317         }
318         fid = &loa->loa_lma.lma_self_fid;
319         LCONSOLE(D_INFO, "%s: "DFID" in inode %lu/%u\n", osd->od_svname,
320                       PFID(fid), ino, (unsigned)inode->i_generation);
321         if (!check_in_oi)
322                 goto put;
323         rc = osd_oi_lookup(osd_oti_get(env), osd, fid, &lid, 0);
324         if (rc) {
325                 CERROR("%s: can't lookup "DFID": rc = %d\n",
326                        osd->od_svname, PFID(fid), rc);
327                 goto put;
328         }
329         LCONSOLE(D_INFO, "%s: "DFID" maps to %u/%u\n", osd->od_svname,
330                       PFID(fid), lid.oii_ino, lid.oii_gen);
331 put:
332         iput(inode);
333 }
334
335 static void osd_idc_dump_debug(const struct lu_env *env,
336                                 struct osd_device *osd,
337                                 const struct lu_fid *fid,
338                                 unsigned long ino1,
339                                 unsigned long ino2)
340 {
341         struct osd_inode_id lid;
342
343         int rc;
344
345         rc = osd_oi_lookup(osd_oti_get(env), osd, fid, &lid, 0);
346         if (!rc) {
347                 LCONSOLE(D_INFO, "%s: "DFID" maps to %u/%u\n",
348                         osd->od_svname, PFID(fid), lid.oii_ino, lid.oii_gen);
349                 osd_idc_dump_lma(env, osd, lid.oii_ino, false);
350         } else {
351                 CERROR("%s: can't lookup "DFID": rc = %d\n",
352                        osd->od_svname, PFID(fid), rc);
353         }
354         if (ino1)
355                 osd_idc_dump_lma(env, osd, ino1, true);
356         if (ino2)
357                 osd_idc_dump_lma(env, osd, ino2, true);
358 }
359
360 /*
361  * lookup mapping for given FID and fill it from the given object.
362  * the object is lolcal by definition.
363  */
364 static int osd_idc_find_and_init(const struct lu_env *env,
365                                  struct osd_device *osd,
366                                  struct osd_object *obj)
367 {
368         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
369         struct osd_idmap_cache *idc;
370
371         idc = osd_idc_find(env, osd, fid);
372         LASSERT(!IS_ERR(idc));
373         if (idc != NULL) {
374                 if (obj->oo_inode == NULL)
375                         return 0;
376                 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
377                         if (idc->oic_lid.oii_ino) {
378                                 osd_idc_dump_debug(env, osd, fid,
379                                                    idc->oic_lid.oii_ino,
380                                                    obj->oo_inode->i_ino);
381                                 return -EINVAL;
382                         }
383                         idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
384                         idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
385                 }
386                 return 0;
387         }
388
389         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
390                osd->od_svname, PFID(fid));
391
392         /* new mapping is needed */
393         idc = osd_idc_add(env, osd, fid);
394         if (IS_ERR(idc)) {
395                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
396                        osd->od_svname, PFID(fid), PTR_ERR(idc));
397                 return PTR_ERR(idc);
398         }
399
400         if (obj->oo_inode != NULL) {
401                 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
402                 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
403         }
404         return 0;
405 }
406
407 /*
408  * OSD object methods.
409  */
410
411 /*
412  * Concurrency: no concurrent access is possible that early in object
413  * life-cycle.
414  */
415 static struct lu_object *osd_object_alloc(const struct lu_env *env,
416                                           const struct lu_object_header *hdr,
417                                           struct lu_device *d)
418 {
419         struct osd_object *mo;
420
421         OBD_ALLOC_PTR(mo);
422         if (mo != NULL) {
423                 struct lu_object *l;
424                 struct lu_object_header *h;
425                 struct osd_device *o = osd_dev(d);
426
427                 l = &mo->oo_dt.do_lu;
428                 if (unlikely(o->od_in_init)) {
429                         OBD_ALLOC_PTR(h);
430                         if (!h) {
431                                 OBD_FREE_PTR(mo);
432                                 return NULL;
433                         }
434
435                         lu_object_header_init(h);
436                         lu_object_init(l, h, d);
437                         lu_object_add_top(h, l);
438                         mo->oo_header = h;
439                 } else {
440                         dt_object_init(&mo->oo_dt, NULL, d);
441                         mo->oo_header = NULL;
442                 }
443
444                 mo->oo_dt.do_ops = &osd_obj_ops;
445                 l->lo_ops = &osd_lu_obj_ops;
446                 init_rwsem(&mo->oo_sem);
447                 init_rwsem(&mo->oo_ext_idx_sem);
448                 spin_lock_init(&mo->oo_guard);
449                 INIT_LIST_HEAD(&mo->oo_xattr_list);
450                 return l;
451         }
452         return NULL;
453 }
454
455 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
456                 struct dentry *dentry, struct lustre_ost_attrs *loa)
457 {
458         int rc;
459
460         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
461                              (void *)loa, sizeof(*loa));
462         if (rc > 0) {
463                 struct lustre_mdt_attrs *lma = &loa->loa_lma;
464
465                 if (rc < sizeof(*lma))
466                         return -EINVAL;
467
468                 rc = 0;
469                 lustre_loa_swab(loa, true);
470                 /* Check LMA compatibility */
471                 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
472                         rc = -EOPNOTSUPP;
473                         CWARN("%s: unsupported incompat LMA feature(s) %#x for fid = "DFID", ino = %lu: rc = %d\n",
474                               osd_ino2name(inode),
475                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
476                               PFID(&lma->lma_self_fid), inode->i_ino, rc);
477                 }
478         } else if (rc == 0) {
479                 rc = -ENODATA;
480         }
481
482         return rc;
483 }
484
485 /*
486  * retrieve object from backend ext fs.
487  **/
488 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
489                        struct osd_inode_id *id)
490 {
491         int rc;
492         struct inode *inode = NULL;
493
494         /*
495          * if we look for an inode withing a running
496          * transaction, then we risk to deadlock
497          * osd_dirent_check_repair() breaks this
498          */
499          /* LASSERT(current->journal_info == NULL); */
500
501         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
502         if (IS_ERR(inode)) {
503                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
504                        id->oii_ino, PTR_ERR(inode));
505         } else if (id->oii_gen != OSD_OII_NOGEN &&
506                    inode->i_generation != id->oii_gen) {
507                 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
508                        "i_generation = %u\n",
509                        id->oii_ino, id->oii_gen, inode->i_generation);
510                 iput(inode);
511                 inode = ERR_PTR(-ESTALE);
512         } else if (inode->i_nlink == 0) {
513                 /*
514                  * due to parallel readdir and unlink,
515                  * we can have dead inode here.
516                  */
517                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
518                 iput(inode);
519                 inode = ERR_PTR(-ESTALE);
520         } else if (is_bad_inode(inode)) {
521                 rc = -ENOENT;
522                 CWARN("%s: bad inode: ino = %u: rc = %d\n",
523                       osd_dev2name(dev), id->oii_ino, rc);
524                 iput(inode);
525                 inode = ERR_PTR(rc);
526         } else if ((rc = osd_attach_jinode(inode))) {
527                 iput(inode);
528                 inode = ERR_PTR(rc);
529         } else {
530                 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
531                 if (id->oii_gen == OSD_OII_NOGEN)
532                         osd_id_gen(id, inode->i_ino, inode->i_generation);
533
534                 /*
535                  * Do not update file c/mtime in ldiskfs.
536                  * NB: we don't have any lock to protect this because we don't
537                  * have reference on osd_object now, but contention with
538                  * another lookup + attr_set can't happen in the tiny window
539                  * between if (...) and set S_NOCMTIME.
540                  */
541                 if (!(inode->i_flags & S_NOCMTIME))
542                         inode->i_flags |= S_NOCMTIME;
543         }
544         return inode;
545 }
546
547 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
548                           handle_t *handle, struct dentry *child,
549                           struct inode *inode, struct htree_lock *hlock)
550 {
551         int rc, rc2;
552
553         rc = __ldiskfs_add_entry(handle, child, inode, hlock);
554         if (rc == -ENOBUFS || rc == -ENOSPC) {
555                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
556                 struct inode *parent = child->d_parent->d_inode;
557                 struct lu_fid *fid = NULL;
558                 char fidstr[FID_LEN + 1] = "unknown";
559
560                 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
561                 if (!rc2) {
562                         fid = &loa->loa_lma.lma_self_fid;
563                 } else if (rc2 == -ENODATA) {
564                         if (unlikely(is_root_inode(parent))) {
565                                 fid = &info->oti_fid3;
566                                 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
567                         } else if (!osd->od_is_ost && osd->od_index == 0) {
568                                 fid = &info->oti_fid3;
569                                 lu_igif_build(fid, parent->i_ino,
570                                               parent->i_generation);
571                         }
572                 }
573
574                 if (fid != NULL)
575                         snprintf(fidstr, sizeof(fidstr), DFID, PFID(fid));
576
577                 /* below message is checked in sanity.sh test_129 */
578                 if (rc == -ENOSPC) {
579                         CWARN("%s: directory (inode: %lu, FID: %s) has reached max size limit\n",
580                               osd_name(osd), parent->i_ino, fidstr);
581                 } else {
582                         rc = 0; /* ignore such error now */
583                         CWARN("%s: directory (inode: %lu, FID: %s) is approaching max size limit\n",
584                               osd_name(osd), parent->i_ino, fidstr);
585                 }
586
587         }
588
589         return rc;
590 }
591
592
593 struct inode *
594 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
595              struct osd_inode_id *id, struct lu_fid *fid)
596 {
597         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
598         struct inode *inode;
599         int rc;
600
601         inode = osd_iget(info, dev, id);
602         if (IS_ERR(inode))
603                 return inode;
604
605         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
606         if (!rc) {
607                 *fid = loa->loa_lma.lma_self_fid;
608         } else if (rc == -ENODATA) {
609                 if (unlikely(is_root_inode(inode)))
610                         lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
611                 else
612                         lu_igif_build(fid, inode->i_ino, inode->i_generation);
613         } else {
614                 iput(inode);
615                 inode = ERR_PTR(rc);
616         }
617         return inode;
618 }
619
620 static struct inode *osd_iget_check(struct osd_thread_info *info,
621                                     struct osd_device *dev,
622                                     const struct lu_fid *fid,
623                                     struct osd_inode_id *id,
624                                     bool trusted)
625 {
626         struct inode *inode;
627         int rc = 0;
628
629         ENTRY;
630
631         /*
632          * The cached OI mapping is trustable. If we cannot locate the inode
633          * via the cached OI mapping, then return the failure to the caller
634          * directly without further OI checking.
635          */
636
637 again:
638         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
639         if (IS_ERR(inode)) {
640                 rc = PTR_ERR(inode);
641                 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
642                         goto check_oi;
643
644                 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
645                        PFID(fid), id->oii_ino, rc);
646                 GOTO(put, rc);
647         }
648
649         if (is_bad_inode(inode)) {
650                 rc = -ENOENT;
651                 if (!trusted)
652                         goto check_oi;
653
654                 CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
655                        PFID(fid), id->oii_ino);
656                 GOTO(put, rc);
657         }
658
659         if (id->oii_gen != OSD_OII_NOGEN &&
660             inode->i_generation != id->oii_gen) {
661                 rc = -ESTALE;
662                 if (!trusted)
663                         goto check_oi;
664
665                 CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
666                        "oii_gen = %u, i_generation = %u\n", PFID(fid),
667                        id->oii_ino, id->oii_gen, inode->i_generation);
668                 GOTO(put, rc);
669         }
670
671         if (inode->i_nlink == 0) {
672                 rc = -ENOENT;
673                 if (!trusted)
674                         goto check_oi;
675
676                 CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
677                        PFID(fid), id->oii_ino);
678                 GOTO(put, rc);
679         }
680
681         ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
682
683 check_oi:
684         if (rc != 0) {
685                 __u32 saved_ino = id->oii_ino;
686                 __u32 saved_gen = id->oii_gen;
687
688                 LASSERT(!trusted);
689                 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
690
691                 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
692                 /*
693                  * XXX: There are four possible cases:
694                  *      1. rc = 0.
695                  *         Backup/restore caused the OI invalid.
696                  *      2. rc = 0.
697                  *         Someone unlinked the object but NOT removed
698                  *         the OI mapping, such as mount target device
699                  *         as ldiskfs, and modify something directly.
700                  *      3. rc = -ENOENT.
701                  *         Someone just removed the object between the
702                  *         former oi_lookup and the iget. It is normal.
703                  *      4. Other failure cases.
704                  *
705                  *      Generally, when the device is mounted, it will
706                  *      auto check whether the system is restored from
707                  *      file-level backup or not. We trust such detect
708                  *      to distinguish the 1st case from the 2nd case:
709                  *      if the OI files are consistent but may contain
710                  *      stale OI mappings because of case 2, if iget()
711                  *      returns -ENOENT or -ESTALE, then it should be
712                  *      the case 2.
713                  */
714                 if (rc != 0)
715                         /*
716                          * If the OI mapping was in OI file before the
717                          * osd_iget_check(), but now, it is disappear,
718                          * then it must be removed by race. That is a
719                          * normal race case.
720                          */
721                         GOTO(put, rc);
722
723                 /*
724                  * It is the OI scrub updated the OI mapping by race.
725                  * The new OI mapping must be valid.
726                  */
727                 if (saved_ino != id->oii_ino ||
728                     (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
729                         if (!IS_ERR(inode))
730                                 iput(inode);
731
732                         trusted = true;
733                         goto again;
734                 }
735
736                 if (IS_ERR(inode)) {
737                         if (dev->od_scrub.os_scrub.os_file.sf_flags &
738                             SF_INCONSISTENT)
739                                 /*
740                                  * It still can be the case 2, but we cannot
741                                  * distinguish it from the case 1. So return
742                                  * -EREMCHG to block current operation until
743                                  *  OI scrub rebuilt the OI mappings.
744                                  */
745                                 rc = -EREMCHG;
746                         else
747                                 rc = -ENOENT;
748
749                         GOTO(put, rc);
750                 }
751
752                 if (inode->i_generation == id->oii_gen)
753                         rc = -ENOENT;
754                 else
755                         rc = -EREMCHG;
756         } else {
757                 if (id->oii_gen == OSD_OII_NOGEN)
758                         osd_id_gen(id, inode->i_ino, inode->i_generation);
759
760                 /*
761                  * Do not update file c/mtime in ldiskfs.
762                  * NB: we don't have any lock to protect this because we don't
763                  * have reference on osd_object now, but contention with
764                  * another lookup + attr_set can't happen in the tiny window
765                  * between if (...) and set S_NOCMTIME.
766                  */
767                 if (!(inode->i_flags & S_NOCMTIME))
768                         inode->i_flags |= S_NOCMTIME;
769         }
770
771         GOTO(put, rc);
772
773 put:
774         if (rc != 0) {
775                 if (!IS_ERR(inode))
776                         iput(inode);
777
778                 inode = ERR_PTR(rc);
779         }
780
781         return inode;
782 }
783
784 /**
785  * \retval +v: new filter_fid does not contain self-fid
786  * \retval 0:  filter_fid_18_23, contains self-fid
787  * \retval -v: other failure cases
788  */
789 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
790                  struct dentry *dentry, struct lu_fid *fid)
791 {
792         struct filter_fid *ff = &info->oti_ff;
793         struct ost_id *ostid = &info->oti_ostid;
794         int rc;
795
796         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
797         if (rc == sizeof(struct filter_fid_18_23)) {
798                 struct filter_fid_18_23 *ff_old = (void *)ff;
799
800                 ostid_set_seq(ostid, le64_to_cpu(ff_old->ff_seq));
801                 rc = ostid_set_id(ostid, le64_to_cpu(ff_old->ff_objid));
802                 /*
803                  * XXX: use 0 as the index for compatibility, the caller will
804                  * handle index related issues when necessary.
805                  */
806                 if (!rc)
807                         ostid_to_fid(fid, ostid, 0);
808         } else if (rc >= (int)sizeof(struct filter_fid_24_29)) {
809                 rc = 1;
810         } else if (rc >= 0) {
811                 rc = -EINVAL;
812         }
813
814         return rc;
815 }
816
817 static int osd_lma_self_repair(struct osd_thread_info *info,
818                                struct osd_device *osd, struct inode *inode,
819                                const struct lu_fid *fid, __u32 compat)
820 {
821         handle_t *jh;
822         int rc;
823
824         LASSERT(current->journal_info == NULL);
825
826         jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
827                                   osd_dto_credits_noquota[DTO_XATTR_SET]);
828         if (IS_ERR(jh)) {
829                 rc = PTR_ERR(jh);
830                 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
831                       osd_name(osd), rc);
832                 return rc;
833         }
834
835         rc = osd_ea_fid_set(info, inode, fid, compat, 0);
836         if (rc != 0)
837                 CWARN("%s: cannot self repair the LMA: rc = %d\n",
838                       osd_name(osd), rc);
839         ldiskfs_journal_stop(jh);
840         return rc;
841 }
842
843 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
844 {
845         struct osd_thread_info *info = osd_oti_get(env);
846         struct osd_device *osd = osd_obj2dev(obj);
847         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
848         struct lustre_mdt_attrs *lma = &loa->loa_lma;
849         struct inode *inode = obj->oo_inode;
850         struct dentry *dentry = &info->oti_obj_dentry;
851         struct lu_fid *fid = NULL;
852         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
853         int rc;
854
855         ENTRY;
856
857         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
858                              (void *)loa, sizeof(*loa));
859         if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
860                 fid = &lma->lma_self_fid;
861                 rc = osd_get_idif(info, inode, dentry, fid);
862                 if (rc > 0 || (rc == -ENODATA && osd->od_index_in_idif)) {
863                         /*
864                          * For the given OST-object, if it has neither LMA nor
865                          * FID in XATTR_NAME_FID, then the given FID (which is
866                          * contained in the @obj, from client RPC for locating
867                          * the OST-object) is trusted. We use it to generate
868                          * the LMA.
869                          */
870                         osd_lma_self_repair(info, osd, inode, rfid,
871                                             LMAC_FID_ON_OST);
872                         RETURN(0);
873                 }
874         }
875
876         if (rc < 0)
877                 RETURN(rc);
878
879         if (rc > 0) {
880                 rc = 0;
881                 lustre_lma_swab(lma);
882                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
883                              (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
884                               S_ISREG(inode->i_mode)))) {
885                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
886                               "fid = "DFID", ino = %lu\n", osd_name(osd),
887                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
888                               PFID(rfid), inode->i_ino);
889                         rc = -EOPNOTSUPP;
890                 } else {
891                         fid = &lma->lma_self_fid;
892                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
893                             osd->od_is_ost)
894                                 obj->oo_pfid_in_lma = 1;
895                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
896                             !osd->od_is_ost)
897                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
898                 }
899         }
900
901         if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
902                 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
903                         struct ost_id   *oi   = &info->oti_ostid;
904                         struct lu_fid   *fid1 = &info->oti_fid3;
905                         __u32            idx  = fid_idif_ost_idx(rfid);
906
907                         /*
908                          * For old IDIF, the OST index is not part of the IDIF,
909                          * Means that different OSTs may have the same IDIFs.
910                          * Under such case, we need to make some compatible
911                          * check to make sure to trigger OI scrub properly.
912                          */
913                         if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
914                                 /* Given @rfid is new, LMA is old. */
915                                 fid_to_ostid(fid, oi);
916                                 ostid_to_fid(fid1, oi, idx);
917                                 if (lu_fid_eq(fid1, rfid)) {
918                                         if (osd->od_index_in_idif)
919                                                 osd_lma_self_repair(info, osd,
920                                                         inode, rfid,
921                                                         LMAC_FID_ON_OST);
922                                         RETURN(0);
923                                 }
924                         }
925                 }
926
927                 rc = -EREMCHG;
928         }
929
930         RETURN(rc);
931 }
932
933 struct osd_check_lmv_buf {
934         /* please keep it as first member */
935         struct dir_context ctx;
936         struct osd_thread_info *oclb_info;
937         struct osd_device *oclb_dev;
938         int oclb_items;
939         bool oclb_found;
940 };
941
942 /**
943  * It is called internally by ->iterate*() to filter out the
944  * local slave object's FID of the striped directory.
945  *
946  * \retval      1 found the local slave's FID
947  * \retval      0 continue to check next item
948  * \retval      -ve for failure
949  */
950 #ifdef HAVE_FILLDIR_USE_CTX
951 static int osd_stripe_dir_filldir(struct dir_context *buf,
952 #else
953 static int osd_stripe_dir_filldir(void *buf,
954 #endif
955                                   const char *name, int namelen,
956                                   loff_t offset, __u64 ino, unsigned int d_type)
957 {
958         struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
959         struct osd_thread_info *oti = oclb->oclb_info;
960         struct lu_fid *fid = &oti->oti_fid3;
961         struct osd_inode_id *id = &oti->oti_id3;
962         struct osd_device *dev = oclb->oclb_dev;
963         struct inode *inode;
964
965         oclb->oclb_items++;
966
967         if (name[0] == '.')
968                 return 0;
969
970         fid_zero(fid);
971         sscanf(name + 1, SFID, RFID(fid));
972         if (!fid_is_sane(fid))
973                 return 0;
974
975         if (osd_remote_fid(oti->oti_env, dev, fid))
976                 return 0;
977
978         osd_id_gen(id, ino, OSD_OII_NOGEN);
979         inode = osd_iget(oti, dev, id);
980         if (IS_ERR(inode))
981                 return PTR_ERR(inode);
982
983         iput(inode);
984         osd_add_oi_cache(oti, dev, id, fid);
985         osd_oii_insert(dev, fid, id, true);
986         oclb->oclb_found = true;
987
988         return 1;
989 }
990
991 /*
992  * When lookup item under striped directory, we need to locate the master
993  * MDT-object of the striped directory firstly, then the client will send
994  * lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
995  * and the item's name. If the system is restored from MDT file level backup,
996  * then before the OI scrub completely built the OI files, the OI mappings of
997  * the master MDT-object and slave MDT-object may be invalid. Usually, it is
998  * not a problem for the master MDT-object. Because when locate the master
999  * MDT-object, we will do name based lookup (for the striped directory itself)
1000  * firstly, during such process we can setup the correct OI mapping for the
1001  * master MDT-object. But it will be trouble for the slave MDT-object. Because
1002  * the client will not trigger name based lookup on the MDT to locate the slave
1003  * MDT-object before locating item under the striped directory, then when
1004  * osd_fid_lookup(), it will find that the OI mapping for the slave MDT-object
1005  * is invalid and does not know what the right OI mapping is, then the MDT has
1006  * to return -EINPROGRESS to the client to notify that the OI scrub is rebuiding
1007  * the OI file, related OI mapping is unknown yet, please try again later. And
1008  * then client will re-try the RPC again and again until related OI mapping has
1009  * been updated. That is quite inefficient.
1010  *
1011  * To resolve above trouble, we will handle it as the following two cases:
1012  *
1013  * 1) The slave MDT-object and the master MDT-object are on different MDTs.
1014  *    It is relative easy. Be as one of remote MDT-objects, the slave MDT-object
1015  *    is linked under /REMOTE_PARENT_DIR with the name of its FID string.
1016  *    We can locate the slave MDT-object via lookup the /REMOTE_PARENT_DIR
1017  *    directly. Please check osd_fid_lookup().
1018  *
1019  * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
1020  *    Under such case, during lookup the master MDT-object, we will lookup the
1021  *    slave MDT-object via readdir against the master MDT-object, because the
1022  *    slave MDT-objects information are stored as sub-directories with the name
1023  *    "${FID}:${index}". Then when find the local slave MDT-object, its OI
1024  *    mapping will be recorded. Then subsequent osd_fid_lookup() will know
1025  *    the correct OI mapping for the slave MDT-object.
1026  */
1027 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
1028                          struct inode *inode)
1029 {
1030         struct lu_buf *buf = &oti->oti_big_buf;
1031         struct dentry *dentry = &oti->oti_obj_dentry;
1032         struct file *filp;
1033         struct lmv_mds_md_v1 *lmv1;
1034         struct osd_check_lmv_buf oclb = {
1035                 .ctx.actor = osd_stripe_dir_filldir,
1036                 .oclb_info = oti,
1037                 .oclb_dev = dev,
1038                 .oclb_found = false,
1039         };
1040         int rc = 0;
1041
1042         ENTRY;
1043
1044 again:
1045         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
1046                              buf->lb_len);
1047         if (rc == -ERANGE) {
1048                 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
1049                 if (rc > 0) {
1050                         lu_buf_realloc(buf, rc);
1051                         if (buf->lb_buf == NULL)
1052                                 GOTO(out, rc = -ENOMEM);
1053
1054                         goto again;
1055                 }
1056         }
1057
1058         if (unlikely(rc == 0 || rc == -ENODATA))
1059                 GOTO(out, rc = 0);
1060
1061         if (rc < 0)
1062                 GOTO(out, rc);
1063
1064         if (unlikely(buf->lb_buf == NULL)) {
1065                 lu_buf_realloc(buf, rc);
1066                 if (buf->lb_buf == NULL)
1067                         GOTO(out, rc = -ENOMEM);
1068
1069                 goto again;
1070         }
1071
1072         lmv1 = buf->lb_buf;
1073         if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
1074                 GOTO(out, rc = 0);
1075
1076         filp = osd_quasi_file(oti->oti_env, inode);
1077         rc = osd_security_file_alloc(filp);
1078         if (rc)
1079                 goto out;
1080
1081         do {
1082                 oclb.oclb_items = 0;
1083                 rc = iterate_dir(filp, &oclb.ctx);
1084         } while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
1085                  filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
1086         inode->i_fop->release(inode, filp);
1087
1088 out:
1089         if (rc < 0)
1090                 CDEBUG(D_LFSCK,
1091                        "%s: cannot check LMV, ino = %lu/%u: rc = %d\n",
1092                        osd_ino2name(inode), inode->i_ino, inode->i_generation,
1093                        rc);
1094         else
1095                 rc = 0;
1096
1097         RETURN(rc);
1098 }
1099
1100 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
1101                           const struct lu_fid *fid,
1102                           const struct lu_object_conf *conf)
1103 {
1104         struct osd_thread_info *info;
1105         struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1106         struct osd_device *dev;
1107         struct osd_idmap_cache *oic;
1108         struct osd_inode_id *id;
1109         struct inode *inode = NULL;
1110         struct lustre_scrub *scrub;
1111         struct scrub_file *sf;
1112         __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
1113         __u32 saved_ino;
1114         __u32 saved_gen;
1115         int result = 0;
1116         int rc1 = 0;
1117         bool remote = false;
1118         bool trusted = true;
1119         bool updated = false;
1120         bool checked = false;
1121
1122         ENTRY;
1123
1124         LINVRNT(osd_invariant(obj));
1125         LASSERT(obj->oo_inode == NULL);
1126
1127         if (fid_is_sane(fid) == 0) {
1128                 CERROR("%s: invalid FID "DFID"\n", ldev->ld_obd->obd_name,
1129                        PFID(fid));
1130                 dump_stack();
1131                 RETURN(-EINVAL);
1132         }
1133
1134         dev = osd_dev(ldev);
1135         scrub = &dev->od_scrub.os_scrub;
1136         sf = &scrub->os_file;
1137         info = osd_oti_get(env);
1138         LASSERT(info);
1139         oic = &info->oti_cache;
1140
1141         if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
1142                 RETURN(-ENOENT);
1143
1144         /*
1145          * For the object is created as locking anchor, or for the object to
1146          * be created on disk. No need to osd_oi_lookup() at here because FID
1147          * shouldn't never be re-used, if it's really a duplicate FID from
1148          * unexpected reason, we should be able to detect it later by calling
1149          * do_create->osd_oi_insert().
1150          */
1151         if (conf && conf->loc_flags & LOC_F_NEW)
1152                 GOTO(out, result = 0);
1153
1154         /* Search order: 1. per-thread cache. */
1155         if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
1156                 id = &oic->oic_lid;
1157                 goto iget;
1158         }
1159
1160         id = &info->oti_id;
1161         memset(id, 0, sizeof(struct osd_inode_id));
1162         if (!list_empty(&scrub->os_inconsistent_items)) {
1163                 /* Search order: 2. OI scrub pending list. */
1164                 result = osd_oii_lookup(dev, fid, id);
1165                 if (!result)
1166                         goto iget;
1167         }
1168
1169         /*
1170          * The OI mapping in the OI file can be updated by the OI scrub
1171          * when we locate the inode via FID. So it may be not trustable.
1172          */
1173         trusted = false;
1174
1175         /* Search order: 3. OI files. */
1176         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1177         if (result == -ENOENT) {
1178                 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1179                     fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1180                     !ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
1181                                       sf->sf_oi_bitmap))
1182                         GOTO(out, result = 0);
1183
1184                 goto trigger;
1185         }
1186
1187         /* -ESTALE is returned if inode of OST object doesn't exist */
1188         if (result == -ESTALE &&
1189             fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1190                 GOTO(out, result = 0);
1191         }
1192
1193         if (result)
1194                 GOTO(out, result);
1195
1196 iget:
1197         obj->oo_inode = NULL;
1198         /* for later passes through checks, not true on first pass */
1199         if (!IS_ERR_OR_NULL(inode))
1200                 iput(inode);
1201
1202         inode = osd_iget_check(info, dev, fid, id, trusted);
1203         if (!IS_ERR(inode)) {
1204                 obj->oo_inode = inode;
1205                 result = 0;
1206                 if (remote)
1207                         goto trigger;
1208
1209                 goto check_lma;
1210         }
1211
1212         result = PTR_ERR(inode);
1213         if (result == -ENOENT || result == -ESTALE)
1214                 GOTO(out, result = 0);
1215
1216         if (result != -EREMCHG)
1217                 GOTO(out, result);
1218
1219 trigger:
1220         /*
1221          * We still have chance to get the valid inode: for the
1222          * object which is referenced by remote name entry, the
1223          * object on the local MDT will be linked under the dir
1224          * of "/REMOTE_PARENT_DIR" with its FID string as name.
1225          *
1226          * We do not know whether the object for the given FID
1227          * is referenced by some remote name entry or not, and
1228          * especially for DNE II, a multiple-linked object may
1229          * have many name entries reside on many MDTs.
1230          *
1231          * To simplify the operation, OSD will not distinguish
1232          * more, just lookup "/REMOTE_PARENT_DIR". Usually, it
1233          * only happened for the RPC from other MDT during the
1234          * OI scrub, or for the client side RPC with FID only,
1235          * such as FID to path, or from old connected client.
1236          */
1237         if (!remote) {
1238                 rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
1239                 if (!rc1) {
1240                         remote = true;
1241                         trusted = true;
1242                         flags |= SS_AUTO_PARTIAL;
1243                         flags &= ~SS_AUTO_FULL;
1244                         goto iget;
1245                 }
1246         }
1247
1248         if (scrub->os_running) {
1249                 if (scrub->os_partial_scan && !scrub->os_in_join)
1250                         goto join;
1251
1252                 if (IS_ERR_OR_NULL(inode) || result) {
1253                         osd_oii_insert(dev, fid, id, result == -ENOENT);
1254                         GOTO(out, result = -EINPROGRESS);
1255                 }
1256
1257                 LASSERT(remote);
1258                 LASSERT(obj->oo_inode == inode);
1259
1260                 osd_oii_insert(dev, fid, id, true);
1261                 goto found;
1262         }
1263
1264         if (dev->od_scrub.os_scrub.os_auto_scrub_interval == AS_NEVER) {
1265                 if (!remote)
1266                         GOTO(out, result = -EREMCHG);
1267
1268                 LASSERT(!result);
1269                 LASSERT(obj->oo_inode == inode);
1270
1271                 osd_add_oi_cache(info, dev, id, fid);
1272                 goto found;
1273         }
1274
1275 join:
1276         rc1 = osd_scrub_start(env, dev, flags);
1277         CDEBUG_LIMIT(D_LFSCK | D_CONSOLE | D_WARNING,
1278                      "%s: trigger OI scrub by RPC for "DFID"/%u with flags %#x: rc = %d\n",
1279                      osd_name(dev), PFID(fid), id->oii_ino, flags, rc1);
1280         if (rc1 && rc1 != -EALREADY)
1281                 GOTO(out, result = -EREMCHG);
1282
1283         if (IS_ERR_OR_NULL(inode) || result) {
1284                 osd_oii_insert(dev, fid, id, result == -ENOENT);
1285                 GOTO(out, result = -EINPROGRESS);
1286         }
1287
1288         LASSERT(remote);
1289         LASSERT(obj->oo_inode == inode);
1290
1291         osd_oii_insert(dev, fid, id, true);
1292         goto found;
1293
1294 check_lma:
1295         checked = true;
1296         if (unlikely(obj->oo_header))
1297                 goto found;
1298
1299         result = osd_check_lma(env, obj);
1300         if (!result)
1301                 goto found;
1302
1303         LASSERTF(id->oii_ino == inode->i_ino &&
1304                  id->oii_gen == inode->i_generation,
1305                  "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1306                  PFID(fid), id->oii_ino, id->oii_gen,
1307                  inode->i_ino, inode->i_generation);
1308
1309         saved_ino = inode->i_ino;
1310         saved_gen = inode->i_generation;
1311
1312         if (unlikely(result == -ENODATA)) {
1313                 /*
1314                  * If the OI scrub updated the OI mapping by race, it
1315                  * must be valid. Trust the inode that has no LMA EA.
1316                  */
1317                 if (updated)
1318                         goto found;
1319
1320                 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1321                 if (!result) {
1322                         /*
1323                          * The OI mapping is still there, the inode is still
1324                          * valid. It is just becaues the inode has no LMA EA.
1325                          */
1326                         if (saved_ino == id->oii_ino &&
1327                             saved_gen == id->oii_gen)
1328                                 goto found;
1329
1330                         /*
1331                          * It is the OI scrub updated the OI mapping by race.
1332                          * The new OI mapping must be valid.
1333                          */
1334                         trusted = true;
1335                         updated = true;
1336                         goto iget;
1337                 }
1338
1339                 /*
1340                  * "result == -ENOENT" means that the OI mappinghas been
1341                  * removed by race, so the inode belongs to other object.
1342                  *
1343                  * Others error can be returned  directly.
1344                  */
1345                 if (result == -ENOENT) {
1346                         LASSERT(trusted);
1347
1348                         obj->oo_inode = NULL;
1349                         result = 0;
1350                 }
1351         }
1352
1353         if (result != -EREMCHG)
1354                 GOTO(out, result);
1355
1356         LASSERT(!updated);
1357
1358         /*
1359          * if two OST objects map to the same inode, and inode mode is
1360          * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), which means it's
1361          * reserved by precreate, and not written yet, in this case, don't
1362          * set inode for the object whose FID mismatch, so that it can create
1363          * inode and not block precreate.
1364          */
1365         if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
1366             inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
1367                 obj->oo_inode = NULL;
1368                 GOTO(out, result = 0);
1369         }
1370
1371         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1372         /*
1373          * "result == -ENOENT" means the cached OI mapping has been removed
1374          * from the OI file by race, above inode belongs to other object.
1375          */
1376         if (result == -ENOENT) {
1377                 LASSERT(trusted);
1378
1379                 obj->oo_inode = NULL;
1380                 GOTO(out, result = 0);
1381         }
1382
1383         if (result)
1384                 GOTO(out, result);
1385
1386         if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
1387                 result = -EREMCHG;
1388                 osd_scrub_refresh_mapping(info, dev, fid, id, DTO_INDEX_DELETE,
1389                                           true, 0, NULL);
1390                 goto trigger;
1391         }
1392
1393         /*
1394          * It is the OI scrub updated the OI mapping by race.
1395          * The new OI mapping must be valid.
1396          */
1397         trusted = true;
1398         updated = true;
1399         goto iget;
1400
1401 found:
1402         if (!checked) {
1403                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
1404                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
1405
1406                 result = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
1407                 if (!result) {
1408                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
1409                             dev->od_is_ost)
1410                                 obj->oo_pfid_in_lma = 1;
1411                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
1412                             !dev->od_is_ost)
1413                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
1414                 } else if (result != -ENODATA) {
1415                         GOTO(out, result);
1416                 }
1417         }
1418
1419         obj->oo_compat_dot_created = 1;
1420         obj->oo_compat_dotdot_created = 1;
1421
1422         if (S_ISDIR(inode->i_mode) &&
1423             (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1424                 osd_check_lmv(info, dev, inode);
1425
1426         result = osd_attach_jinode(inode);
1427         if (result)
1428                 GOTO(out, result);
1429
1430         if (!ldiskfs_pdo)
1431                 GOTO(out, result = 0);
1432
1433         LASSERT(!obj->oo_hl_head);
1434         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1435
1436         GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
1437
1438 out:
1439         if (result || !obj->oo_inode) {
1440                 if (!IS_ERR_OR_NULL(inode))
1441                         iput(inode);
1442
1443                 obj->oo_inode = NULL;
1444                 if (trusted)
1445                         fid_zero(&oic->oic_fid);
1446         }
1447
1448         LINVRNT(osd_invariant(obj));
1449         return result;
1450 }
1451
1452 /*
1453  * Concurrency: shouldn't matter.
1454  */
1455 static void osd_object_init0(struct osd_object *obj)
1456 {
1457         LASSERT(obj->oo_inode != NULL);
1458         obj->oo_dt.do_body_ops = &osd_body_ops;
1459         obj->oo_dt.do_lu.lo_header->loh_attr |=
1460                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1461 }
1462
1463 /*
1464  * Concurrency: no concurrent access is possible that early in object
1465  * life-cycle.
1466  */
1467 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1468                            const struct lu_object_conf *conf)
1469 {
1470         struct osd_object *obj = osd_obj(l);
1471         int result;
1472
1473         LINVRNT(osd_invariant(obj));
1474
1475         if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_LLOG_UMOUNT_RACE) &&
1476             cfs_fail_val == 2) {
1477                 struct osd_thread_info *info = osd_oti_get(env);
1478                 struct osd_idmap_cache *oic = &info->oti_cache;
1479                 /* invalidate thread cache */
1480                 memset(&oic->oic_fid, 0, sizeof(oic->oic_fid));
1481         }
1482         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1483                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1484                 l->lo_header->loh_attr |= LOHA_EXISTS;
1485                 return 0;
1486         }
1487
1488         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1489         obj->oo_dt.do_body_ops = &osd_body_ops_new;
1490         if (result == 0 && obj->oo_inode != NULL) {
1491                 struct osd_thread_info *oti = osd_oti_get(env);
1492                 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1493
1494                 osd_object_init0(obj);
1495                 if (unlikely(obj->oo_header))
1496                         return 0;
1497
1498                 result = osd_get_lma(oti, obj->oo_inode,
1499                                      &oti->oti_obj_dentry, loa);
1500                 if (!result) {
1501                         /*
1502                          * Convert LMAI flags to lustre LMA flags
1503                          * and cache it to oo_lma_flags
1504                          */
1505                         obj->oo_lma_flags =
1506                                 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1507                 } else if (result == -ENODATA) {
1508                         result = 0;
1509                 }
1510         }
1511         obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
1512
1513         LINVRNT(osd_invariant(obj));
1514         return result;
1515 }
1516
1517 /*
1518  * The first part of oxe_buf is xattr name, and is '\0' terminated.
1519  * The left part is for value, binary mode.
1520  */
1521 struct osd_xattr_entry {
1522         struct list_head        oxe_list;
1523         size_t                  oxe_len;
1524         size_t                  oxe_namelen;
1525         bool                    oxe_exist;
1526         struct rcu_head         oxe_rcu;
1527         char                    oxe_buf[0];
1528 };
1529
1530 static int osd_oxc_get(struct osd_object *obj, const char *name,
1531                        struct lu_buf *buf)
1532 {
1533         struct osd_xattr_entry *tmp;
1534         struct osd_xattr_entry *oxe = NULL;
1535         size_t namelen = strlen(name);
1536         int rc;
1537
1538         rcu_read_lock();
1539         list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1540                 if (namelen == tmp->oxe_namelen &&
1541                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1542                         oxe = tmp;
1543                         break;
1544                 }
1545         }
1546
1547         if (oxe == NULL)
1548                 GOTO(out, rc = -ENOENT);
1549
1550         if (!oxe->oxe_exist)
1551                 GOTO(out, rc = -ENODATA);
1552
1553         /* vallen */
1554         rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1555         LASSERT(rc > 0);
1556
1557         if (buf->lb_buf == NULL)
1558                 GOTO(out, rc);
1559
1560         if (buf->lb_len < rc)
1561                 GOTO(out, rc = -ERANGE);
1562
1563         memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
1564 out:
1565         rcu_read_unlock();
1566
1567         return rc;
1568 }
1569
1570 static void osd_oxc_free(struct rcu_head *head)
1571 {
1572         struct osd_xattr_entry *oxe;
1573
1574         oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1575         OBD_FREE(oxe, oxe->oxe_len);
1576 }
1577
1578 static void osd_oxc_add(struct osd_object *obj, const char *name,
1579                         const char *buf, int buflen)
1580 {
1581         struct osd_xattr_entry *oxe;
1582         struct osd_xattr_entry *old = NULL;
1583         struct osd_xattr_entry *tmp;
1584         size_t namelen = strlen(name);
1585         size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1586
1587         OBD_ALLOC(oxe, len);
1588         if (oxe == NULL)
1589                 return;
1590
1591         INIT_LIST_HEAD(&oxe->oxe_list);
1592         oxe->oxe_len = len;
1593         oxe->oxe_namelen = namelen;
1594         memcpy(oxe->oxe_buf, name, namelen);
1595         if (buflen > 0) {
1596                 LASSERT(buf != NULL);
1597                 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1598                 oxe->oxe_exist = true;
1599         } else {
1600                 oxe->oxe_exist = false;
1601         }
1602
1603         /* this should be rarely called, just remove old and add new */
1604         spin_lock(&obj->oo_guard);
1605         list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1606                 if (namelen == tmp->oxe_namelen &&
1607                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1608                         old = tmp;
1609                         break;
1610                 }
1611         }
1612         if (old != NULL) {
1613                 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1614                 call_rcu(&old->oxe_rcu, osd_oxc_free);
1615         } else {
1616                 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1617         }
1618         spin_unlock(&obj->oo_guard);
1619 }
1620
1621 static void osd_oxc_del(struct osd_object *obj, const char *name)
1622 {
1623         struct osd_xattr_entry *oxe;
1624         size_t namelen = strlen(name);
1625
1626         spin_lock(&obj->oo_guard);
1627         list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1628                 if (namelen == oxe->oxe_namelen &&
1629                     strncmp(name, oxe->oxe_buf, namelen) == 0) {
1630                         list_del_rcu(&oxe->oxe_list);
1631                         call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1632                         break;
1633                 }
1634         }
1635         spin_unlock(&obj->oo_guard);
1636 }
1637
1638 static void osd_oxc_fini(struct osd_object *obj)
1639 {
1640         struct osd_xattr_entry *oxe, *next;
1641
1642         list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1643                 list_del(&oxe->oxe_list);
1644                 OBD_FREE(oxe, oxe->oxe_len);
1645         }
1646 }
1647
1648 /*
1649  * Concurrency: no concurrent access is possible that late in object
1650  * life-cycle.
1651  */
1652 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1653 {
1654         struct osd_object *obj = osd_obj(l);
1655         struct lu_object_header *h = obj->oo_header;
1656
1657         LINVRNT(osd_invariant(obj));
1658
1659         osd_oxc_fini(obj);
1660         dt_object_fini(&obj->oo_dt);
1661         if (obj->oo_hl_head != NULL)
1662                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1663         /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
1664         OBD_FREE_PTR(obj);
1665         if (unlikely(h))
1666                 lu_object_header_free(h);
1667 }
1668
1669 /*
1670  * Concurrency: no concurrent access is possible that late in object
1671  * life-cycle.
1672  */
1673 static void osd_index_fini(struct osd_object *o)
1674 {
1675         struct iam_container *bag;
1676
1677         if (o->oo_dir != NULL) {
1678                 bag = &o->oo_dir->od_container;
1679                 if (o->oo_inode != NULL) {
1680                         if (bag->ic_object == o->oo_inode)
1681                                 iam_container_fini(bag);
1682                 }
1683                 OBD_FREE_PTR(o->oo_dir);
1684                 o->oo_dir = NULL;
1685         }
1686 }
1687
1688 enum {
1689         OSD_TXN_OI_DELETE_CREDITS    = 20,
1690         OSD_TXN_INODE_DELETE_CREDITS = 20
1691 };
1692
1693 /*
1694  * Journal
1695  */
1696
1697 #if OSD_THANDLE_STATS
1698 /**
1699  * Set time when the handle is allocated
1700  */
1701 static void osd_th_alloced(struct osd_thandle *oth)
1702 {
1703         oth->oth_alloced = ktime_get();
1704 }
1705
1706 /**
1707  * Set time when the handle started
1708  */
1709 static void osd_th_started(struct osd_thandle *oth)
1710 {
1711         oth->oth_started = ktime_get();
1712 }
1713
1714 /**
1715  * Check whether the we deal with this handle for too long.
1716  */
1717 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1718                                 ktime_t alloced, ktime_t started,
1719                                 ktime_t closed)
1720 {
1721         ktime_t now = ktime_get();
1722
1723         LASSERT(dev != NULL);
1724
1725         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1726                             ktime_us_delta(started, alloced));
1727         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1728                             ktime_us_delta(closed, started));
1729         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1730                             ktime_us_delta(now, closed));
1731
1732         if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1733                 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1734                                 oth, now, alloced, started, closed);
1735                 libcfs_debug_dumpstack(NULL);
1736         }
1737 }
1738
1739 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
1740 {                                                                       \
1741         ktime_t __closed = ktime_get();                                 \
1742         ktime_t __alloced = oth->oth_alloced;                           \
1743         ktime_t __started = oth->oth_started;                           \
1744                                                                         \
1745         expr;                                                           \
1746         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
1747 }
1748
1749 #else /* OSD_THANDLE_STATS */
1750
1751 #define osd_th_alloced(h)                  do {} while(0)
1752 #define osd_th_started(h)                  do {} while(0)
1753 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
1754
1755 #endif /* OSD_THANDLE_STATS */
1756
1757 /*
1758  * Concurrency: doesn't access mutable data.
1759  */
1760 static int osd_param_is_not_sane(const struct osd_device *dev,
1761                                  const struct thandle *th)
1762 {
1763         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1764
1765         return oh->ot_credits > osd_transaction_size(dev);
1766 }
1767
1768 /*
1769  * Concurrency: shouldn't matter.
1770  */
1771 static void osd_trans_commit_cb(struct super_block *sb,
1772                                 struct ldiskfs_journal_cb_entry *jcb, int error)
1773 {
1774         struct osd_thandle *oh = container_of(jcb, struct osd_thandle, ot_jcb);
1775         struct thandle *th = &oh->ot_super;
1776         struct lu_device *lud = &th->th_dev->dd_lu_dev;
1777         struct osd_device *osd = osd_dev(lud);
1778         struct dt_txn_commit_cb *dcb, *tmp;
1779
1780         LASSERT(oh->ot_handle == NULL);
1781
1782         if (error)
1783                 CERROR("transaction @0x%p commit error: %d\n", th, error);
1784
1785         OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DELAY_TRANS, 40);
1786         /* call per-transaction callbacks if any */
1787         list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1788                                  dcb_linkage) {
1789                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1790                          "commit callback entry: magic=%x name='%s'\n",
1791                          dcb->dcb_magic, dcb->dcb_name);
1792                 list_del_init(&dcb->dcb_linkage);
1793                 dcb->dcb_func(NULL, th, dcb, error);
1794         }
1795
1796         lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1797         if (atomic_dec_and_test(&osd->od_commit_cb_in_flight))
1798                 wake_up(&osd->od_commit_cb_done);
1799         th->th_dev = NULL;
1800
1801         OBD_FREE_PTR(oh);
1802 }
1803
1804 static struct thandle *osd_trans_create(const struct lu_env *env,
1805                                         struct dt_device *d)
1806 {
1807         struct osd_thread_info *oti = osd_oti_get(env);
1808         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1809         struct osd_thandle *oh;
1810         struct thandle *th;
1811
1812         ENTRY;
1813
1814         if (d->dd_rdonly) {
1815                 CERROR("%s: someone try to start transaction under "
1816                        "readonly mode, should be disabled.\n",
1817                        osd_name(osd_dt_dev(d)));
1818                 dump_stack();
1819                 RETURN(ERR_PTR(-EROFS));
1820         }
1821
1822         /* on pending IO in this thread should left from prev. request */
1823         LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1824
1825         sb_start_write(osd_sb(osd_dt_dev(d)));
1826
1827         OBD_ALLOC_GFP(oh, sizeof(*oh), GFP_NOFS);
1828         if (!oh) {
1829                 sb_end_write(osd_sb(osd_dt_dev(d)));
1830                 RETURN(ERR_PTR(-ENOMEM));
1831         }
1832
1833         oh->ot_quota_trans = &oti->oti_quota_trans;
1834         memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1835         th = &oh->ot_super;
1836         th->th_dev = d;
1837         th->th_result = 0;
1838         oh->ot_credits = 0;
1839         oh->oh_declared_ext = 0;
1840         INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1841         INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1842         INIT_LIST_HEAD(&oh->ot_trunc_locks);
1843         osd_th_alloced(oh);
1844
1845         memset(oti->oti_declare_ops, 0,
1846                sizeof(oti->oti_declare_ops));
1847         memset(oti->oti_declare_ops_cred, 0,
1848                sizeof(oti->oti_declare_ops_cred));
1849         memset(oti->oti_declare_ops_used, 0,
1850                sizeof(oti->oti_declare_ops_used));
1851
1852         oti->oti_ins_cache_depth++;
1853
1854         RETURN(th);
1855 }
1856
1857 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1858 {
1859         struct osd_thread_info *oti = osd_oti_get(env);
1860         struct osd_thandle *oh;
1861
1862         oh = container_of(th, struct osd_thandle, ot_super);
1863         LASSERT(oh != NULL);
1864
1865         CWARN("  create: %u/%u/%u, destroy: %u/%u/%u\n",
1866               oti->oti_declare_ops[OSD_OT_CREATE],
1867               oti->oti_declare_ops_cred[OSD_OT_CREATE],
1868               oti->oti_declare_ops_used[OSD_OT_CREATE],
1869               oti->oti_declare_ops[OSD_OT_DESTROY],
1870               oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1871               oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1872         CWARN("  attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1873               oti->oti_declare_ops[OSD_OT_ATTR_SET],
1874               oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1875               oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1876               oti->oti_declare_ops[OSD_OT_XATTR_SET],
1877               oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1878               oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1879         CWARN("  write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1880               oti->oti_declare_ops[OSD_OT_WRITE],
1881               oti->oti_declare_ops_cred[OSD_OT_WRITE],
1882               oti->oti_declare_ops_used[OSD_OT_WRITE],
1883               oti->oti_declare_ops[OSD_OT_PUNCH],
1884               oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1885               oti->oti_declare_ops_used[OSD_OT_PUNCH],
1886               oti->oti_declare_ops[OSD_OT_QUOTA],
1887               oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1888               oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1889         CWARN("  insert: %u/%u/%u, delete: %u/%u/%u\n",
1890               oti->oti_declare_ops[OSD_OT_INSERT],
1891               oti->oti_declare_ops_cred[OSD_OT_INSERT],
1892               oti->oti_declare_ops_used[OSD_OT_INSERT],
1893               oti->oti_declare_ops[OSD_OT_DELETE],
1894               oti->oti_declare_ops_cred[OSD_OT_DELETE],
1895               oti->oti_declare_ops_used[OSD_OT_DELETE]);
1896         CWARN("  ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1897               oti->oti_declare_ops[OSD_OT_REF_ADD],
1898               oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1899               oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1900               oti->oti_declare_ops[OSD_OT_REF_DEL],
1901               oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1902               oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1903 }
1904
1905 /*
1906  * Concurrency: shouldn't matter.
1907  */
1908 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1909                            struct thandle *th)
1910 {
1911         struct osd_thread_info *oti = osd_oti_get(env);
1912         struct osd_device *dev = osd_dt_dev(d);
1913         handle_t *jh;
1914         struct osd_thandle *oh;
1915         int rc;
1916
1917         ENTRY;
1918
1919         LASSERT(current->journal_info == NULL);
1920
1921         oh = container_of(th, struct osd_thandle, ot_super);
1922         LASSERT(oh != NULL);
1923         LASSERT(oh->ot_handle == NULL);
1924
1925         rc = dt_txn_hook_start(env, d, th);
1926         if (rc != 0)
1927                 GOTO(out, rc);
1928
1929         if (unlikely(osd_param_is_not_sane(dev, th))) {
1930                 static unsigned long last_printed;
1931                 static int last_credits;
1932
1933                 /*
1934                  * don't make noise on a tiny testing systems
1935                  * actual credits misuse will be caught anyway
1936                  */
1937                 if (last_credits != oh->ot_credits &&
1938                     time_after(jiffies, last_printed +
1939                                cfs_time_seconds(60)) &&
1940                     osd_transaction_size(dev) > 512) {
1941                         CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1942                               oh->ot_credits, osd_transaction_size(dev));
1943                         osd_trans_dump_creds(env, th);
1944                         libcfs_debug_dumpstack(NULL);
1945                         last_credits = oh->ot_credits;
1946                         last_printed = jiffies;
1947                 }
1948                 /*
1949                  * XXX Limit the credits to 'max_transaction_buffers', and
1950                  *     let the underlying filesystem to catch the error if
1951                  *     we really need so many credits.
1952                  *
1953                  *     This should be removed when we can calculate the
1954                  *     credits precisely.
1955                  */
1956                 oh->ot_credits = osd_transaction_size(dev);
1957         } else if (ldiskfs_track_declares_assert != 0) {
1958                 /*
1959                  * reserve few credits to prevent an assertion in JBD
1960                  * our debugging mechanism will be able to detected
1961                  * overuse. this can help to debug single-update
1962                  * transactions
1963                  */
1964                 oh->ot_credits += 10;
1965                 if (unlikely(osd_param_is_not_sane(dev, th)))
1966                         oh->ot_credits = osd_transaction_size(dev);
1967         }
1968
1969         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_TXN_START))
1970                 GOTO(out, rc = -EIO);
1971
1972         /*
1973          * XXX temporary stuff. Some abstraction layer should
1974          * be used.
1975          */
1976         jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1977         osd_th_started(oh);
1978         if (!IS_ERR(jh)) {
1979                 oh->ot_handle = jh;
1980                 LASSERT(oti->oti_txns == 0);
1981
1982                 atomic_inc(&dev->od_commit_cb_in_flight);
1983                 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1984                               "osd-tx", th);
1985                 oti->oti_txns++;
1986                 rc = 0;
1987         } else {
1988                 rc = PTR_ERR(jh);
1989         }
1990 out:
1991         RETURN(rc);
1992 }
1993
1994 static int osd_seq_exists(const struct lu_env *env,
1995                           struct osd_device *osd, u64 seq)
1996 {
1997         struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1998         struct seq_server_site *ss = osd_seq_site(osd);
1999         int rc;
2000
2001         ENTRY;
2002
2003         LASSERT(ss != NULL);
2004         LASSERT(ss->ss_server_fld != NULL);
2005
2006         rc = osd_fld_lookup(env, osd, seq, range);
2007         if (rc != 0) {
2008                 if (rc != -ENOENT)
2009                         CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
2010                                osd_name(osd), seq, rc);
2011                 RETURN(0);
2012         }
2013
2014         RETURN(ss->ss_node_id == range->lsr_index);
2015 }
2016
2017 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
2018 {
2019         struct dt_txn_commit_cb *dcb;
2020         struct dt_txn_commit_cb *tmp;
2021
2022         /* call per-transaction stop callbacks if any */
2023         list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
2024                                  dcb_linkage) {
2025                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
2026                          "commit callback entry: magic=%x name='%s'\n",
2027                          dcb->dcb_magic, dcb->dcb_name);
2028                 list_del_init(&dcb->dcb_linkage);
2029                 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
2030         }
2031 }
2032
2033 /*
2034  * Concurrency: shouldn't matter.
2035  */
2036 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
2037                           struct thandle *th)
2038 {
2039         struct osd_thread_info *oti = osd_oti_get(env);
2040         struct osd_thandle *oh;
2041         struct osd_iobuf *iobuf = &oti->oti_iobuf;
2042         struct osd_device *osd = osd_dt_dev(th->th_dev);
2043         struct qsd_instance *qsd = osd_def_qsd(osd);
2044         struct lquota_trans *qtrans;
2045         LIST_HEAD(truncates);
2046         int rc = 0, remove_agents = 0;
2047
2048         ENTRY;
2049
2050         oh = container_of(th, struct osd_thandle, ot_super);
2051
2052         remove_agents = oh->ot_remove_agents;
2053
2054         qtrans = oh->ot_quota_trans;
2055         oh->ot_quota_trans = NULL;
2056
2057         /* move locks to local list, stop tx, execute truncates */
2058         list_splice(&oh->ot_trunc_locks, &truncates);
2059
2060         if (oh->ot_handle != NULL) {
2061                 int rc2;
2062
2063                 handle_t *hdl = oh->ot_handle;
2064
2065                 /*
2066                  * add commit callback
2067                  * notice we don't do this in osd_trans_start()
2068                  * as underlying transaction can change during truncate
2069                  */
2070                 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
2071                                              &oh->ot_jcb);
2072
2073                 LASSERT(oti->oti_txns == 1);
2074                 oti->oti_txns--;
2075
2076                 rc = dt_txn_hook_stop(env, th);
2077                 if (rc != 0)
2078                         CERROR("%s: failed in transaction hook: rc = %d\n",
2079                                osd_name(osd), rc);
2080
2081                 osd_trans_stop_cb(oh, rc);
2082                 /* hook functions might modify th_sync */
2083                 hdl->h_sync = th->th_sync;
2084
2085                 oh->ot_handle = NULL;
2086                 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
2087                 if (rc2 != 0)
2088                         CERROR("%s: failed to stop transaction: rc = %d\n",
2089                                osd_name(osd), rc2);
2090                 if (!rc)
2091                         rc = rc2;
2092
2093                 osd_process_truncates(env, &truncates);
2094         } else {
2095                 osd_trans_stop_cb(oh, th->th_result);
2096                 OBD_FREE_PTR(oh);
2097         }
2098
2099         osd_trunc_unlock_all(env, &truncates);
2100
2101         /* inform the quota slave device that the transaction is stopping */
2102         qsd_op_end(env, qsd, qtrans);
2103
2104         /*
2105          * as we want IO to journal and data IO be concurrent, we don't block
2106          * awaiting data IO completion in osd_do_bio(), instead we wait here
2107          * once transaction is submitted to the journal. all reqular requests
2108          * don't do direct IO (except read/write), thus this wait_event becomes
2109          * no-op for them.
2110          *
2111          * IMPORTANT: we have to wait till any IO submited by the thread is
2112          * completed otherwise iobuf may be corrupted by different request
2113          */
2114         wait_event(iobuf->dr_wait,
2115                        atomic_read(&iobuf->dr_numreqs) == 0);
2116         osd_fini_iobuf(osd, iobuf);
2117         if (!rc)
2118                 rc = iobuf->dr_error;
2119
2120         if (unlikely(remove_agents != 0))
2121                 osd_process_scheduled_agent_removals(env, osd);
2122
2123         LASSERT(oti->oti_ins_cache_depth > 0);
2124         oti->oti_ins_cache_depth--;
2125         /* reset OI cache for safety */
2126         if (oti->oti_ins_cache_depth == 0)
2127                 oti->oti_ins_cache_used = 0;
2128
2129         sb_end_write(osd_sb(osd));
2130
2131         RETURN(rc);
2132 }
2133
2134 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
2135 {
2136         struct osd_thandle *oh = container_of(th, struct osd_thandle,
2137                                               ot_super);
2138
2139         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
2140         LASSERT(&dcb->dcb_func != NULL);
2141         if (dcb->dcb_flags & DCB_TRANS_STOP)
2142                 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
2143         else
2144                 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
2145
2146         return 0;
2147 }
2148
2149 /*
2150  * Called just before object is freed. Releases all resources except for
2151  * object itself (that is released by osd_object_free()).
2152  *
2153  * Concurrency: no concurrent access is possible that late in object
2154  * life-cycle.
2155  */
2156 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
2157 {
2158         struct osd_object *obj = osd_obj(l);
2159         struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
2160         struct inode *inode = obj->oo_inode;
2161         __u64 projid;
2162         qid_t uid;
2163         qid_t gid;
2164
2165         LINVRNT(osd_invariant(obj));
2166
2167         /*
2168          * If object is unlinked remove fid->ino mapping from object index.
2169          */
2170
2171         osd_index_fini(obj);
2172
2173         if (!inode)
2174                 return;
2175
2176         if (osd_has_index(obj) &&  obj->oo_dt.do_index_ops == &osd_index_iam_ops)
2177                 ldiskfs_set_inode_flag(inode, LDISKFS_INODE_JOURNAL_DATA);
2178
2179         uid = i_uid_read(inode);
2180         gid = i_gid_read(inode);
2181         projid = i_projid_read(inode);
2182
2183         obj->oo_inode = NULL;
2184         iput(inode);
2185
2186         /* do not rebalance quota if the caller needs to release memory
2187          * otherwise qsd_refresh_usage() may went into a new ldiskfs
2188          * transaction and risk to deadlock - LU-12178 */
2189         if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
2190                 return;
2191
2192         if (!obj->oo_header && qsd) {
2193                 struct osd_thread_info *info = osd_oti_get(env);
2194                 struct lquota_id_info *qi = &info->oti_qi;
2195
2196                 /* Release granted quota to master if necessary */
2197                 qi->lqi_id.qid_uid = uid;
2198                 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
2199
2200                 qi->lqi_id.qid_uid = gid;
2201                 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
2202
2203                 qi->lqi_id.qid_uid = projid;
2204                 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
2205         }
2206 }
2207
2208 /*
2209  * Concurrency: ->loo_object_release() is called under site spin-lock.
2210  */
2211 static void osd_object_release(const struct lu_env *env,
2212                                struct lu_object *l)
2213 {
2214         struct osd_object *o = osd_obj(l);
2215
2216         /*
2217          * nobody should be releasing a non-destroyed object with nlink=0
2218          * the API allows this, but ldiskfs doesn't like and then report
2219          * this inode as deleted
2220          */
2221         LASSERT(!(o->oo_destroyed == 0 && o->oo_inode &&
2222                   o->oo_inode->i_nlink == 0));
2223 }
2224
2225 /*
2226  * Concurrency: shouldn't matter.
2227  */
2228 static int osd_object_print(const struct lu_env *env, void *cookie,
2229                             lu_printer_t p, const struct lu_object *l)
2230 {
2231         struct osd_object *o = osd_obj(l);
2232         struct iam_descr *d;
2233
2234         if (o->oo_dir != NULL)
2235                 d = o->oo_dir->od_container.ic_descr;
2236         else
2237                 d = NULL;
2238         return (*p)(env, cookie,
2239                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
2240                     o, o->oo_inode,
2241                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
2242                     o->oo_inode ? o->oo_inode->i_generation : 0,
2243                     d ? d->id_ops->id_name : "plain");
2244 }
2245
2246 /*
2247  * Concurrency: shouldn't matter.
2248  */
2249 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2250                 struct obd_statfs *sfs, struct obd_statfs_info *info)
2251 {
2252         struct osd_device *osd = osd_dt_dev(d);
2253         struct super_block *sb = osd_sb(osd);
2254         struct kstatfs *ksfs;
2255         __u64 reserved;
2256         int result = 0;
2257
2258         if (unlikely(osd->od_mnt == NULL))
2259                 return -EINPROGRESS;
2260
2261         /* osd_lproc.c call this without env, allocate ksfs for that case */
2262         if (unlikely(env == NULL)) {
2263                 OBD_ALLOC_PTR(ksfs);
2264                 if (ksfs == NULL)
2265                         return -ENOMEM;
2266         } else {
2267                 ksfs = &osd_oti_get(env)->oti_ksfs;
2268         }
2269
2270         result = sb->s_op->statfs(sb->s_root, ksfs);
2271         if (result)
2272                 goto out;
2273
2274         statfs_pack(sfs, ksfs);
2275         if (unlikely(sb->s_flags & SB_RDONLY))
2276                 sfs->os_state |= OS_STATFS_READONLY;
2277
2278         sfs->os_state |= osd->od_nonrotational ? OS_STATFS_NONROT : 0;
2279
2280         if (ldiskfs_has_feature_extents(sb))
2281                 sfs->os_maxbytes = sb->s_maxbytes;
2282         else
2283                 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2284
2285         /*
2286          * Reserve some space so to avoid fragmenting the filesystem too much.
2287          * Fragmentation not only impacts performance, but can also increase
2288          * metadata overhead significantly, causing grant calculation to be
2289          * wrong.
2290          *
2291          * Reserve 0.78% of total space, at least 8MB for small filesystems.
2292          */
2293         BUILD_BUG_ON(OSD_STATFS_RESERVED <= LDISKFS_MAX_BLOCK_SIZE);
2294         reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2295         if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2296                 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2297
2298         sfs->os_blocks -= reserved;
2299         sfs->os_bfree  -= min(reserved, sfs->os_bfree);
2300         sfs->os_bavail -= min(reserved, sfs->os_bavail);
2301
2302 out:
2303         if (unlikely(env == NULL))
2304                 OBD_FREE_PTR(ksfs);
2305         return result;
2306 }
2307
2308 /**
2309  * Estimate space needed for file creations. We assume the largest filename
2310  * which is 2^64 - 1, hence a filename of 20 chars.
2311  * This is 28 bytes per object which is 28MB for 1M objects ... no so bad.
2312  */
2313 #ifdef __LDISKFS_DIR_REC_LEN
2314 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2315 #else
2316 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2317 #endif
2318
2319 /*
2320  * Concurrency: doesn't access mutable data.
2321  */
2322 static void osd_conf_get(const struct lu_env *env,
2323                          const struct dt_device *dev,
2324                          struct dt_device_param *param)
2325 {
2326         struct osd_device *d = osd_dt_dev(dev);
2327         struct super_block *sb = osd_sb(d);
2328         struct blk_integrity *bi = bdev_get_integrity(sb->s_bdev);
2329         const char *name;
2330         int ea_overhead;
2331
2332         /*
2333          * XXX should be taken from not-yet-existing fs abstraction layer.
2334          */
2335         param->ddp_max_name_len = LDISKFS_NAME_LEN;
2336         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
2337         param->ddp_symlink_max  = sb->s_blocksize;
2338         param->ddp_mount_type   = LDD_MT_LDISKFS;
2339         if (ldiskfs_has_feature_extents(sb))
2340                 param->ddp_maxbytes = sb->s_maxbytes;
2341         else
2342                 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2343         /*
2344          * inode are statically allocated, so per-inode space consumption
2345          * is the space consumed by the directory entry
2346          */
2347         param->ddp_inodespace     = PER_OBJ_USAGE;
2348         /*
2349          * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2350          * is 128MB) which is unlikely to be hit in real life. Report a smaller
2351          * maximum length to not under-count the actual number of extents
2352          * needed for writing a file if there are sub-optimal block allocations.
2353          */
2354         param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
2355         /* worst-case extent insertion metadata overhead */
2356         param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2357         param->ddp_mntopts = 0;
2358         if (test_opt(sb, XATTR_USER))
2359                 param->ddp_mntopts |= MNTOPT_USERXATTR;
2360         if (test_opt(sb, POSIX_ACL))
2361                 param->ddp_mntopts |= MNTOPT_ACL;
2362
2363         /*
2364          * LOD might calculate the max stripe count based on max_ea_size,
2365          * so we need take account in the overhead as well,
2366          * xattr_header + magic + xattr_entry_head
2367          */
2368         ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2369                       LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2370
2371 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2372         if (ldiskfs_has_feature_ea_inode(sb))
2373                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2374                                                                 ea_overhead;
2375         else
2376 #endif
2377                 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2378
2379         if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE)
2380                 param->ddp_max_ea_size = OBD_MAX_EA_SIZE;
2381
2382         /*
2383          * Preferred RPC size for efficient disk IO.  4MB shows good
2384          * all-around performance for ldiskfs, but use bigalloc chunk size
2385          * by default if larger.
2386          */
2387 #if defined(LDISKFS_CLUSTER_SIZE)
2388         if (LDISKFS_CLUSTER_SIZE(sb) > DT_DEF_BRW_SIZE)
2389                 param->ddp_brw_size = LDISKFS_CLUSTER_SIZE(sb);
2390         else
2391 #endif
2392                 param->ddp_brw_size = DT_DEF_BRW_SIZE;
2393
2394         param->ddp_t10_cksum_type = 0;
2395         if (bi) {
2396                 unsigned short interval = blk_integrity_interval(bi);
2397                 name = blk_integrity_name(bi);
2398                 /*
2399                  * Expected values:
2400                  * T10-DIF-TYPE1-CRC
2401                  * T10-DIF-TYPE3-CRC
2402                  * T10-DIF-TYPE1-IP
2403                  * T10-DIF-TYPE3-IP
2404                  */
2405                 if (strncmp(name, "T10-DIF-TYPE",
2406                             sizeof("T10-DIF-TYPE") - 1) == 0) {
2407                         /* also skip "1/3-" at end */
2408                         const int type_off = sizeof("T10-DIF-TYPE.");
2409                         char type_number = name[type_off - 2];
2410
2411                         if (interval != 512 && interval != 4096) {
2412                                 CERROR("%s: unsupported T10PI sector size %u\n",
2413                                        d->od_svname, interval);
2414                         } else if (type_number != '1' && type_number != '3') {
2415                                 CERROR("%s: unsupported T10PI type %s\n",
2416                                        d->od_svname, name);
2417                         } else if (strcmp(name + type_off, "CRC") == 0) {
2418                                 d->od_t10_type = type_number == '1' ?
2419                                         OSD_T10_TYPE1_CRC : OSD_T10_TYPE3_CRC;
2420                                 param->ddp_t10_cksum_type = interval == 512 ?
2421                                         OBD_CKSUM_T10CRC512 :
2422                                         OBD_CKSUM_T10CRC4K;
2423                         } else if (strcmp(name + type_off, "IP") == 0) {
2424                                 d->od_t10_type = type_number == '1' ?
2425                                         OSD_T10_TYPE1_IP : OSD_T10_TYPE3_IP;
2426                                 param->ddp_t10_cksum_type = interval == 512 ?
2427                                         OBD_CKSUM_T10IP512 :
2428                                         OBD_CKSUM_T10IP4K;
2429                         } else {
2430                                 CERROR("%s: unsupported checksum type of T10PI type '%s'\n",
2431                                        d->od_svname, name);
2432                         }
2433
2434                 } else {
2435                         CERROR("%s: unsupported T10PI type '%s'\n",
2436                                d->od_svname, name);
2437                 }
2438         }
2439
2440         param->ddp_has_lseek_data_hole = true;
2441 }
2442
2443 static struct super_block *osd_mnt_sb_get(const struct dt_device *d)
2444 {
2445         return osd_sb(osd_dt_dev(d));
2446 }
2447
2448 /*
2449  * Concurrency: shouldn't matter.
2450  */
2451 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2452 {
2453         int rc;
2454         struct super_block *s = osd_sb(osd_dt_dev(d));
2455         ENTRY;
2456
2457         down_read(&s->s_umount);
2458         rc = s->s_op->sync_fs(s, 1);
2459         up_read(&s->s_umount);
2460
2461         CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
2462                rc);
2463
2464         return rc;
2465 }
2466
2467 /**
2468  * Start commit for OSD device.
2469  *
2470  * An implementation of dt_commit_async method for OSD device.
2471  * Asychronously starts underlayng fs sync and thereby a transaction
2472  * commit.
2473  *
2474  * \param env environment
2475  * \param d dt device
2476  *
2477  * \see dt_device_operations
2478  */
2479 static int osd_commit_async(const struct lu_env *env,
2480                             struct dt_device *d)
2481 {
2482         struct super_block *s = osd_sb(osd_dt_dev(d));
2483         int rc;
2484
2485         ENTRY;
2486
2487         CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
2488         down_read(&s->s_umount);
2489         rc = s->s_op->sync_fs(s, 0);
2490         up_read(&s->s_umount);
2491
2492         RETURN(rc);
2493 }
2494
2495 static int (*priv_security_file_alloc)(struct file *file);
2496
2497 int osd_security_file_alloc(struct file *file)
2498 {
2499         if (priv_security_file_alloc)
2500                 return priv_security_file_alloc(file);
2501         return 0;
2502 }
2503
2504 /*
2505  * Concurrency: shouldn't matter.
2506  */
2507 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2508 {
2509         struct super_block *sb = osd_sb(osd_dt_dev(d));
2510         struct block_device *dev = sb->s_bdev;
2511         int rc = -EOPNOTSUPP;
2512
2513         ENTRY;
2514
2515         CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2516                osd_dt_dev(d)->od_svname, (long)dev, rc);
2517
2518         RETURN(rc);
2519 }
2520
2521 /**
2522  * Note: we do not count into QUOTA here.
2523  * If we mount with --data_journal we may need more.
2524  */
2525 const int osd_dto_credits_noquota[DTO_NR] = {
2526         /**
2527          * Insert.
2528          * INDEX_EXTRA_TRANS_BLOCKS(8) +
2529          * SINGLEDATA_TRANS_BLOCKS(8)
2530          * XXX Note: maybe iam need more, since iam have more level than
2531          *           EXT3 htree.
2532          */
2533         [DTO_INDEX_INSERT]  = 16,
2534         /**
2535          * Delete
2536          * just modify a single entry, probably merge few within a block
2537          */
2538         [DTO_INDEX_DELETE]  = 1,
2539         /**
2540          * Used for OI scrub
2541          */
2542         [DTO_INDEX_UPDATE]  = 16,
2543         /**
2544          * 4(inode, inode bits, groups, GDT)
2545          *   notice: OI updates are counted separately with DTO_INDEX_INSERT
2546          */
2547         [DTO_OBJECT_CREATE] = 4,
2548         /**
2549          * 4(inode, inode bits, groups, GDT)
2550          *   notice: OI updates are counted separately with DTO_INDEX_DELETE
2551          */
2552         [DTO_OBJECT_DELETE] = 4,
2553         /**
2554          * Attr set credits (inode)
2555          */
2556         [DTO_ATTR_SET_BASE] = 1,
2557         /**
2558          * Xattr set. The same as xattr of EXT3.
2559          * DATA_TRANS_BLOCKS(14)
2560          * XXX Note: in original MDS implmentation INDEX_EXTRA_TRANS_BLOCKS
2561          * are also counted in. Do not know why?
2562          */
2563         [DTO_XATTR_SET]     = 14,
2564         /**
2565          * credits for inode change during write.
2566          */
2567         [DTO_WRITE_BASE]    = 3,
2568         /**
2569          * credits for single block write.
2570          */
2571         [DTO_WRITE_BLOCK]   = 14,
2572         /**
2573          * Attr set credits for chown.
2574          * This is extra credits for setattr, and it is null without quota
2575          */
2576         [DTO_ATTR_SET_CHOWN] = 0
2577 };
2578
2579 /* reserve or free quota for some operation */
2580 static int osd_reserve_or_free_quota(const struct lu_env *env,
2581                                      struct dt_device *dev,
2582                                      enum quota_type type, __u64 uid,
2583                                      __u64 gid, __s64 count, bool is_md)
2584 {
2585         int rc;
2586         struct osd_device       *osd = osd_dt_dev(dev);
2587         struct osd_thread_info  *info = osd_oti_get(env);
2588         struct lquota_id_info   *qi = &info->oti_qi;
2589         struct qsd_instance     *qsd = NULL;
2590
2591         ENTRY;
2592
2593         if (is_md)
2594                 qsd = osd->od_quota_slave_md;
2595         else
2596                 qsd = osd->od_quota_slave_dt;
2597
2598         rc = quota_reserve_or_free(env, qsd, qi, type, uid, gid, count, is_md);
2599         RETURN(rc);
2600 }
2601
2602 static const struct dt_device_operations osd_dt_ops = {
2603         .dt_root_get              = osd_root_get,
2604         .dt_statfs                = osd_statfs,
2605         .dt_trans_create          = osd_trans_create,
2606         .dt_trans_start           = osd_trans_start,
2607         .dt_trans_stop            = osd_trans_stop,
2608         .dt_trans_cb_add          = osd_trans_cb_add,
2609         .dt_conf_get              = osd_conf_get,
2610         .dt_mnt_sb_get            = osd_mnt_sb_get,
2611         .dt_sync                  = osd_sync,
2612         .dt_ro                    = osd_ro,
2613         .dt_commit_async          = osd_commit_async,
2614         .dt_reserve_or_free_quota = osd_reserve_or_free_quota,
2615 };
2616
2617 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
2618                           unsigned int role)
2619 {
2620         struct osd_object *obj = osd_dt_obj(dt);
2621         struct osd_thread_info *oti = osd_oti_get(env);
2622
2623         LINVRNT(osd_invariant(obj));
2624
2625         LASSERT(obj->oo_owner != env);
2626         down_read_nested(&obj->oo_sem, role);
2627
2628         LASSERT(obj->oo_owner == NULL);
2629         oti->oti_r_locks++;
2630 }
2631
2632 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
2633                            unsigned int role)
2634 {
2635         struct osd_object *obj = osd_dt_obj(dt);
2636         struct osd_thread_info *oti = osd_oti_get(env);
2637
2638         LINVRNT(osd_invariant(obj));
2639
2640         LASSERT(obj->oo_owner != env);
2641         down_write_nested(&obj->oo_sem, role);
2642
2643         LASSERT(obj->oo_owner == NULL);
2644         obj->oo_owner = env;
2645         oti->oti_w_locks++;
2646 }
2647
2648 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
2649 {
2650         struct osd_object *obj = osd_dt_obj(dt);
2651         struct osd_thread_info *oti = osd_oti_get(env);
2652
2653         LINVRNT(osd_invariant(obj));
2654
2655         LASSERT(oti->oti_r_locks > 0);
2656         oti->oti_r_locks--;
2657         up_read(&obj->oo_sem);
2658 }
2659
2660 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
2661 {
2662         struct osd_object *obj = osd_dt_obj(dt);
2663         struct osd_thread_info *oti = osd_oti_get(env);
2664
2665         LINVRNT(osd_invariant(obj));
2666
2667         LASSERT(obj->oo_owner == env);
2668         LASSERT(oti->oti_w_locks > 0);
2669         oti->oti_w_locks--;
2670         obj->oo_owner = NULL;
2671         up_write(&obj->oo_sem);
2672 }
2673
2674 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
2675 {
2676         struct osd_object *obj = osd_dt_obj(dt);
2677
2678         LINVRNT(osd_invariant(obj));
2679
2680         return obj->oo_owner == env;
2681 }
2682
2683 static void osd_inode_getattr(const struct lu_env *env,
2684                               struct inode *inode, struct lu_attr *attr)
2685 {
2686         attr->la_valid  |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2687                            LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2688                            LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2689                            LA_BLKSIZE | LA_TYPE | LA_BTIME;
2690
2691         attr->la_atime = inode->i_atime.tv_sec;
2692         attr->la_mtime = inode->i_mtime.tv_sec;
2693         attr->la_ctime = inode->i_ctime.tv_sec;
2694         attr->la_btime = LDISKFS_I(inode)->i_crtime.tv_sec;
2695         attr->la_mode    = inode->i_mode;
2696         attr->la_size    = i_size_read(inode);
2697         attr->la_blocks  = inode->i_blocks;
2698         attr->la_uid     = i_uid_read(inode);
2699         attr->la_gid     = i_gid_read(inode);
2700         attr->la_projid  = i_projid_read(inode);
2701         attr->la_flags   = ll_inode_to_ext_flags(inode->i_flags);
2702         attr->la_nlink   = inode->i_nlink;
2703         attr->la_rdev    = inode->i_rdev;
2704         attr->la_blksize = 1 << inode->i_blkbits;
2705         attr->la_blkbits = inode->i_blkbits;
2706         /*
2707          * Ext4 did not transfer inherit flags from raw inode
2708          * to inode flags, and ext4 internally test raw inode
2709          * @i_flags directly. Instead of patching ext4, we do it here.
2710          */
2711         if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
2712                 attr->la_flags |= LUSTRE_PROJINHERIT_FL;
2713 }
2714
2715 static int osd_dirent_count(const struct lu_env *env, struct dt_object *dt,
2716                             u64 *count)
2717 {
2718         struct osd_object *obj = osd_dt_obj(dt);
2719         const struct dt_it_ops *iops;
2720         struct dt_it *it;
2721         int rc;
2722
2723         ENTRY;
2724
2725         LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2726         LASSERT(fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)));
2727
2728         if (obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
2729                 *count = obj->oo_dirent_count;
2730                 RETURN(0);
2731         }
2732
2733         /* directory not initialized yet */
2734         if (!dt->do_index_ops) {
2735                 *count = 0;
2736                 RETURN(0);
2737         }
2738
2739         iops = &dt->do_index_ops->dio_it;
2740         it = iops->init(env, dt, LUDA_64BITHASH);
2741         if (IS_ERR(it))
2742                 RETURN(PTR_ERR(it));
2743
2744         rc = iops->load(env, it, 0);
2745         if (rc < 0) {
2746                 if (rc == -ENODATA) {
2747                         rc = 0;
2748                         *count = 0;
2749                 }
2750                 GOTO(out, rc);
2751         }
2752         if (rc > 0)
2753                 rc = iops->next(env, it);
2754
2755         for (*count = 0; rc == 0 || rc == -ESTALE; rc = iops->next(env, it)) {
2756                 if (rc == -ESTALE)
2757                         continue;
2758
2759                 if (iops->key_size(env, it) == 0)
2760                         continue;
2761
2762                 (*count)++;
2763         }
2764         if (rc == 1) {
2765                 obj->oo_dirent_count = *count;
2766                 rc = 0;
2767         }
2768 out:
2769         iops->put(env, it);
2770         iops->fini(env, it);
2771
2772         RETURN(rc);
2773 }
2774
2775 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
2776                         struct lu_attr *attr)
2777 {
2778         struct osd_object *obj = osd_dt_obj(dt);
2779         int rc = 0;
2780
2781         if (unlikely(!dt_object_exists(dt)))
2782                 return -ENOENT;
2783         if (unlikely(obj->oo_destroyed))
2784                 return -ENOENT;
2785
2786         LASSERT(!dt_object_remote(dt));
2787         LINVRNT(osd_invariant(obj));
2788
2789         spin_lock(&obj->oo_guard);
2790         osd_inode_getattr(env, obj->oo_inode, attr);
2791         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
2792                 attr->la_valid |= LA_FLAGS;
2793                 attr->la_flags |= LUSTRE_ORPHAN_FL;
2794         }
2795         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
2796                 attr->la_valid |= LA_FLAGS;
2797                 attr->la_flags |= LUSTRE_ENCRYPT_FL;
2798         }
2799         spin_unlock(&obj->oo_guard);
2800
2801         if (S_ISDIR(obj->oo_inode->i_mode) &&
2802             fid_is_namespace_visible(lu_object_fid(&dt->do_lu)))
2803                 rc = osd_dirent_count(env, dt, &attr->la_dirent_count);
2804
2805         return rc;
2806 }
2807
2808 static int osd_declare_attr_qid(const struct lu_env *env,
2809                                 struct osd_object *obj,
2810                                 struct osd_thandle *oh, long long bspace,
2811                                 qid_t old_id, qid_t new_id, bool enforce,
2812                                 unsigned int type)
2813 {
2814         int rc;
2815         struct osd_thread_info *info = osd_oti_get(env);
2816         struct lquota_id_info  *qi = &info->oti_qi;
2817
2818         qi->lqi_type = type;
2819         /* inode accounting */
2820         qi->lqi_is_blk = false;
2821
2822         /* one more inode for the new id ... */
2823         qi->lqi_id.qid_uid = new_id;
2824         qi->lqi_space      = 1;
2825         /* Reserve credits for the new id */
2826         rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2827         if (rc == -EDQUOT || rc == -EINPROGRESS)
2828                 rc = 0;
2829         if (rc)
2830                 RETURN(rc);
2831
2832         /* and one less inode for the current id */
2833         qi->lqi_id.qid_uid = old_id;
2834         qi->lqi_space = -1;
2835         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2836         if (rc == -EDQUOT || rc == -EINPROGRESS)
2837                 rc = 0;
2838         if (rc)
2839                 RETURN(rc);
2840
2841         /* block accounting */
2842         qi->lqi_is_blk = true;
2843
2844         /* more blocks for the new id ... */
2845         qi->lqi_id.qid_uid = new_id;
2846         qi->lqi_space      = bspace;
2847         /*
2848          * Credits for the new uid has been reserved, re-use "obj"
2849          * to save credit reservation.
2850          */
2851         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2852         if (rc == -EDQUOT || rc == -EINPROGRESS)
2853                 rc = 0;
2854         if (rc)
2855                 RETURN(rc);
2856
2857         /* and finally less blocks for the current uid */
2858         qi->lqi_id.qid_uid = old_id;
2859         qi->lqi_space      = -bspace;
2860         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2861         if (rc == -EDQUOT || rc == -EINPROGRESS)
2862                 rc = 0;
2863
2864         RETURN(rc);
2865 }
2866
2867 static int osd_declare_attr_set(const struct lu_env *env,
2868                                 struct dt_object *dt,
2869                                 const struct lu_attr *attr,
2870                                 struct thandle *handle)
2871 {
2872         struct osd_thandle *oh;
2873         struct osd_object *obj;
2874         qid_t uid;
2875         qid_t gid;
2876         long long bspace;
2877         int rc = 0;
2878         bool enforce;
2879
2880         ENTRY;
2881
2882         LASSERT(dt != NULL);
2883         LASSERT(handle != NULL);
2884
2885         obj = osd_dt_obj(dt);
2886         LASSERT(osd_invariant(obj));
2887
2888         oh = container_of(handle, struct osd_thandle, ot_super);
2889         LASSERT(oh->ot_handle == NULL);
2890
2891         osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2892                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2893
2894         osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2895                              osd_dto_credits_noquota[DTO_XATTR_SET]);
2896
2897         if (attr == NULL || obj->oo_inode == NULL)
2898                 RETURN(rc);
2899
2900         bspace   = obj->oo_inode->i_blocks << 9;
2901         bspace   = toqb(bspace);
2902
2903         /*
2904          * Changing ownership is always preformed by super user, it should not
2905          * fail with EDQUOT unless required explicitly.
2906          *
2907          * We still need to call the osd_declare_qid() to calculate the journal
2908          * credits for updating quota accounting files and to trigger quota
2909          * space adjustment once the operation is completed.
2910          */
2911         if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2912                 /* USERQUOTA */
2913                 uid = i_uid_read(obj->oo_inode);
2914                 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2915                 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2916                                           attr->la_uid, enforce, USRQUOTA);
2917                 if (rc)
2918                         RETURN(rc);
2919
2920                 gid = i_gid_read(obj->oo_inode);
2921                 CDEBUG(D_QUOTA, "declare uid %d -> %d gid %d -> %d\n", uid,
2922                        attr->la_uid, gid, attr->la_gid);
2923                 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2924                 rc = osd_declare_attr_qid(env, obj, oh, bspace, gid,
2925                                           attr->la_gid, enforce, GRPQUOTA);
2926                 if (rc)
2927                         RETURN(rc);
2928
2929         }
2930 #ifdef HAVE_PROJECT_QUOTA
2931         if (attr->la_valid & LA_PROJID) {
2932                 __u32 projid = i_projid_read(obj->oo_inode);
2933
2934                 enforce = (attr->la_valid & LA_PROJID) &&
2935                                         (attr->la_projid != projid);
2936                 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2937                                           (qid_t)projid, (qid_t)attr->la_projid,
2938                                           enforce, PRJQUOTA);
2939                 if (rc)
2940                         RETURN(rc);
2941         }
2942 #endif
2943         RETURN(rc);
2944 }
2945
2946 static int osd_inode_setattr(const struct lu_env *env,
2947                              struct inode *inode, const struct lu_attr *attr)
2948 {
2949         __u64 bits = attr->la_valid;
2950
2951         /* Only allow set size for regular file */
2952         if (!S_ISREG(inode->i_mode))
2953                 bits &= ~(LA_SIZE | LA_BLOCKS);
2954
2955         if (bits == 0)
2956                 return 0;
2957
2958         if (bits & LA_ATIME)
2959                 inode->i_atime = osd_inode_time(inode, attr->la_atime);
2960         if (bits & LA_CTIME)
2961                 inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
2962         if (bits & LA_MTIME)
2963                 inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
2964         if (bits & LA_SIZE) {
2965                 spin_lock(&inode->i_lock);
2966                 LDISKFS_I(inode)->i_disksize = attr->la_size;
2967                 i_size_write(inode, attr->la_size);
2968                 spin_unlock(&inode->i_lock);
2969         }
2970
2971         /*
2972          * OSD should not change "i_blocks" which is used by quota.
2973          * "i_blocks" should be changed by ldiskfs only.
2974          */
2975         if (bits & LA_MODE)
2976                 inode->i_mode = (inode->i_mode & S_IFMT) |
2977                                 (attr->la_mode & ~S_IFMT);
2978         if (bits & LA_UID)
2979                 i_uid_write(inode, attr->la_uid);
2980         if (bits & LA_GID)
2981                 i_gid_write(inode, attr->la_gid);
2982         if (bits & LA_PROJID)
2983                 i_projid_write(inode, attr->la_projid);
2984         if (bits & LA_NLINK)
2985                 set_nlink(inode, attr->la_nlink);
2986         if (bits & LA_RDEV)
2987                 inode->i_rdev = attr->la_rdev;
2988
2989         if (bits & LA_FLAGS) {
2990                 /* always keep S_NOCMTIME */
2991                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2992                                  S_NOCMTIME;
2993 #if defined(S_ENCRYPTED)
2994                 /* Always remove S_ENCRYPTED, because ldiskfs must not be
2995                  * aware of encryption status. It is just stored into LMA
2996                  * so that it can be forwared to client side.
2997                  */
2998                 inode->i_flags &= ~S_ENCRYPTED;
2999 #endif
3000                 /*
3001                  * Ext4 did not transfer inherit flags from
3002                  * @inode->i_flags to raw inode i_flags when writing
3003                  * flags, we do it explictly here.
3004                  */
3005                 if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
3006                         LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
3007                 else
3008                         LDISKFS_I(inode)->i_flags &= ~LUSTRE_PROJINHERIT_FL;
3009         }
3010         return 0;
3011 }
3012
3013 #ifdef HAVE_PROJECT_QUOTA
3014 static int osd_transfer_project(struct inode *inode, __u32 projid,
3015                                 struct thandle *handle)
3016 {
3017         struct super_block *sb = inode->i_sb;
3018         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
3019         int err;
3020         kprojid_t kprojid;
3021         struct ldiskfs_iloc iloc;
3022         struct ldiskfs_inode *raw_inode;
3023         struct dquot *transfer_to[LDISKFS_MAXQUOTAS] = { };
3024
3025         if (!ldiskfs_has_feature_project(sb)) {
3026                 LASSERT(__kprojid_val(LDISKFS_I(inode)->i_projid)
3027                         == LDISKFS_DEF_PROJID);
3028                 if (projid != LDISKFS_DEF_PROJID)
3029                         return -EOPNOTSUPP;
3030                 else
3031                         return 0;
3032         }
3033
3034         if (LDISKFS_INODE_SIZE(sb) <= LDISKFS_GOOD_OLD_INODE_SIZE)
3035                 return -EOPNOTSUPP;
3036
3037         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3038         if (projid_eq(kprojid, LDISKFS_I(inode)->i_projid))
3039                 return 0;
3040
3041         err = ldiskfs_get_inode_loc(inode, &iloc);
3042         if (err)
3043                 return err;
3044
3045         raw_inode = ldiskfs_raw_inode(&iloc);
3046         if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
3047                 struct osd_thandle *oh = container_of(handle,
3048                                                       struct osd_thandle,
3049                                                       ot_super);
3050                 /**
3051                  * try to expand inode size automatically.
3052                  */
3053                 ldiskfs_mark_inode_dirty(oh->ot_handle, inode);
3054                 if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
3055                         err = -EOVERFLOW;
3056                         brelse(iloc.bh);
3057                         return err;
3058                 }
3059         }
3060         brelse(iloc.bh);
3061
3062         dquot_initialize(inode);
3063         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3064         if (transfer_to[PRJQUOTA]) {
3065                 lock_dquot_transfer(inode);
3066                 err = __dquot_transfer(inode, transfer_to);
3067                 unlock_dquot_transfer(inode);
3068                 dqput(transfer_to[PRJQUOTA]);
3069                 if (err)
3070                         return err;
3071         }
3072
3073         return err;
3074 }
3075 #endif
3076
3077 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr,
3078                               struct thandle *handle)
3079 {
3080         int rc;
3081
3082         if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
3083             (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
3084                 struct iattr iattr;
3085
3086                 CDEBUG(D_QUOTA,
3087                        "executing dquot_transfer inode %ld uid %d -> %d gid %d -> %d\n",
3088                        inode->i_ino, i_uid_read(inode), attr->la_uid,
3089                        i_gid_read(inode), attr->la_gid);
3090
3091                 dquot_initialize(inode);
3092                 iattr.ia_valid = 0;
3093                 if (attr->la_valid & LA_UID)
3094                         iattr.ia_valid |= ATTR_UID;
3095                 if (attr->la_valid & LA_GID)
3096                         iattr.ia_valid |= ATTR_GID;
3097                 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
3098                 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
3099
3100                 lock_dquot_transfer(inode);
3101                 rc = dquot_transfer(inode, &iattr);
3102                 unlock_dquot_transfer(inode);
3103                 if (rc) {
3104                         CERROR("%s: quota transfer failed. Is quota enforcement enabled on the ldiskfs filesystem? rc = %d\n",
3105                                osd_ino2name(inode), rc);
3106                         return rc;
3107                 }
3108         }
3109
3110         /* Handle project id transfer here properly */
3111         if (attr->la_valid & LA_PROJID &&
3112             attr->la_projid != i_projid_read(inode)) {
3113                 if (!projid_valid(make_kprojid(&init_user_ns, attr->la_projid)))
3114                         return -EINVAL;
3115 #ifdef HAVE_PROJECT_QUOTA
3116                 rc = osd_transfer_project(inode, attr->la_projid, handle);
3117 #else
3118                 rc = -ENOTSUPP;
3119 #endif
3120                 if (rc) {
3121                         CERROR("%s: quota transfer failed. Is project enforcement enabled on the ldiskfs filesystem? rc = %d\n",
3122                                osd_ino2name(inode), rc);
3123                         return rc;
3124                 }
3125         }
3126         return 0;
3127 }
3128
3129 static int osd_attr_set(const struct lu_env *env,
3130                         struct dt_object *dt,
3131                         const struct lu_attr *attr,
3132                         struct thandle *handle)
3133 {
3134         struct osd_object *obj = osd_dt_obj(dt);
3135         struct inode *inode;
3136         int rc;
3137
3138         if (!dt_object_exists(dt))
3139                 return -ENOENT;
3140
3141         LASSERT(handle != NULL);
3142         LASSERT(!dt_object_remote(dt));
3143         LASSERT(osd_invariant(obj));
3144
3145         osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
3146
3147         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) &&
3148             !osd_obj2dev(obj)->od_is_ost) {
3149                 struct osd_thread_info *oti = osd_oti_get(env);
3150                 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
3151                 struct lu_fid *fid1 = &oti->oti_fid;
3152                 struct osd_inode_id *id = &oti->oti_id;
3153                 struct iam_path_descr *ipd;
3154                 struct iam_container *bag;
3155                 struct osd_thandle *oh;
3156                 int rc;
3157
3158                 fid_cpu_to_be(fid1, fid0);
3159                 memset(id, 1, sizeof(*id));
3160                 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
3161                                   fid0)->oi_dir.od_container;
3162                 ipd = osd_idx_ipd_get(env, bag);
3163                 if (unlikely(ipd == NULL))
3164                         RETURN(-ENOMEM);
3165
3166                 oh = container_of(handle, struct osd_thandle, ot_super);
3167                 rc = iam_update(oh->ot_handle, bag,
3168                                 (const struct iam_key *)fid1,
3169                                 (const struct iam_rec *)id, ipd);
3170                 osd_ipd_put(env, bag, ipd);
3171                 return(rc > 0 ? 0 : rc);
3172         }
3173
3174         inode = obj->oo_inode;
3175
3176         rc = osd_quota_transfer(inode, attr, handle);
3177         if (rc)
3178                 return rc;
3179
3180         spin_lock(&obj->oo_guard);
3181         rc = osd_inode_setattr(env, inode, attr);
3182         spin_unlock(&obj->oo_guard);
3183         if (rc != 0)