lustre/osd-ldiskfs/osd_handler.c (fs/lustre-release.git, commit e384806c655a3fc14ef7fd74313d5c28c30fc8f4)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd/osd_handler.c
33  *
34  * Top-level entry points into osd module
35  *
36  * Author: Nikita Danilov <nikita@clusterfs.com>
37  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
38  */
39
40 #define DEBUG_SUBSYSTEM S_OSD
41
42 #include <linux/fs_struct.h>
43 #include <linux/kallsyms.h>
44 #include <linux/module.h>
45 #include <linux/user_namespace.h>
46 #include <linux/uidgid.h>
47
48 /* prerequisite for linux/xattr.h */
49 #include <linux/types.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/fs.h>
52 /* XATTR_{REPLACE,CREATE} */
53 #include <linux/xattr.h>
54
55 #include <ldiskfs/ldiskfs.h>
56 #include <ldiskfs/xattr.h>
57 #include <ldiskfs/ldiskfs_extents.h>
58 #undef ENTRY
59 /*
60  * struct OBD_{ALLOC,FREE}*()
61  * OBD_FAIL_CHECK
62  */
63 #include <obd_support.h>
64 /* struct ptlrpc_thread */
65 #include <lustre_net.h>
66 #include <lustre_fid.h>
67 /* process_config */
68 #include <uapi/linux/lustre/lustre_param.h>
69
70 #include "osd_internal.h"
71 #include "osd_dynlocks.h"
72
73 /* llo_* api support */
74 #include <md_object.h>
75 #include <lustre_quota.h>
76
77 #include <lustre_linkea.h>
78
79 /* Maximum EA size is limited by LNET_MTU for remote objects */
80 #define OSD_MAX_EA_SIZE 1048364
81
82 int ldiskfs_pdo = 1;
83 module_param(ldiskfs_pdo, int, 0644);
84 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
85
86 int ldiskfs_track_declares_assert;
87 module_param(ldiskfs_track_declares_assert, int, 0644);
88 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
89
90 /* Slab to allocate dynlocks */
91 struct kmem_cache *dynlock_cachep;
92
93 /* Slab to allocate osd_it_ea */
94 struct kmem_cache *osd_itea_cachep;
95
96 static struct lu_kmem_descr ldiskfs_caches[] = {
97         {
98                 .ckd_cache = &dynlock_cachep,
99                 .ckd_name  = "dynlock_cache",
100                 .ckd_size  = sizeof(struct dynlock_handle)
101         },
102         {
103                 .ckd_cache = &osd_itea_cachep,
104                 .ckd_name  = "osd_itea_cache",
105                 .ckd_size  = sizeof(struct osd_it_ea)
106         },
107         {
108                 .ckd_cache = NULL
109         }
110 };
111
112 static const char dot[] = ".";
113 static const char dotdot[] = "..";
114
115 static const struct lu_object_operations      osd_lu_obj_ops;
116 static const struct dt_object_operations      osd_obj_ops;
117 static const struct dt_object_operations      osd_obj_otable_it_ops;
118 static const struct dt_index_operations       osd_index_iam_ops;
119 static const struct dt_index_operations       osd_index_ea_ops;
120
121 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
122                           const struct lu_fid *fid);
123 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
124                                                 struct osd_device *osd);
125
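/*
 * Map each OSD operation type to the operation that undoes it on
 * rollback (e.g. a create is rolled back by a destroy); OSD_OT_MAX
 * marks operations that have no rollback counterpart here.
 */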
126 int osd_trans_declare_op2rb[] = {
127         [OSD_OT_ATTR_SET]       = OSD_OT_ATTR_SET,
128         [OSD_OT_PUNCH]          = OSD_OT_MAX,
129         [OSD_OT_XATTR_SET]      = OSD_OT_XATTR_SET,
130         [OSD_OT_CREATE]         = OSD_OT_DESTROY,
131         [OSD_OT_DESTROY]        = OSD_OT_CREATE,
132         [OSD_OT_REF_ADD]        = OSD_OT_REF_DEL,
133         [OSD_OT_REF_DEL]        = OSD_OT_REF_ADD,
134         [OSD_OT_WRITE]          = OSD_OT_WRITE,
135         [OSD_OT_INSERT]         = OSD_OT_DELETE,
136         [OSD_OT_DELETE]         = OSD_OT_INSERT,
137         [OSD_OT_QUOTA]          = OSD_OT_MAX,
138 };
139
140 static int osd_has_index(const struct osd_object *obj)
141 {
142         return obj->oo_dt.do_index_ops != NULL;
143 }
144
145 static int osd_object_invariant(const struct lu_object *l)
146 {
147         return osd_invariant(osd_obj(l));
148 }
149
150 /*
151  * Concurrency: doesn't matter
152  */
153 static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
154 {
155         struct osd_thread_info *oti = osd_oti_get(env);
156
157         return oti->oti_w_locks > 0 && o->oo_owner == env;
158 }
159
160 /*
161  * Concurrency: doesn't access mutable data
162  */
163 static int osd_root_get(const struct lu_env *env,
164                         struct dt_device *dev, struct lu_fid *f)
165 {
166         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
167         return 0;
168 }
169
170 /*
171  * The following set of functions maintains a per-thread cache of
172  * FID->ino mappings. This mechanism is needed to resolve a FID to
173  * an inode at dt_insert(), which in turn stores the ino in the
174  * directory entries to keep ldiskfs compatible with ext[34].
175  * Due to locking-originated restrictions we cannot look up the ino
176  * via the LU cache (a deadlock is possible), and lookup via OI is
177  * quite expensive. So instead we maintain this cache, which methods
178  * like dt_create() fill, so that in the majority of cases dt_insert()
179  * is able to find the needed mapping in a lockless manner.
180  */
181 static struct osd_idmap_cache *
182 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
183              const struct lu_fid *fid)
184 {
185         struct osd_thread_info *oti = osd_oti_get(env);
186         struct osd_idmap_cache *idc = oti->oti_ins_cache;
187         int i;
188
189         for (i = 0; i < oti->oti_ins_cache_used; i++) {
190                 if (!lu_fid_eq(&idc[i].oic_fid, fid))
191                         continue;
192                 if (idc[i].oic_dev != osd)
193                         continue;
194
195                 return idc + i;
196         }
197
198         return NULL;
199 }
200
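/*
 * Add a new slot to the per-thread FID->ino cache, doubling the cache
 * array (or allocating OSD_INS_CACHE_SIZE entries initially) when it is
 * full. The returned entry is initialized with the FID and a zeroed
 * ino/generation, to be filled in by the caller.
 */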
201 static struct osd_idmap_cache *
202 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
203             const struct lu_fid *fid)
204 {
205         struct osd_thread_info *oti   = osd_oti_get(env);
206         struct osd_idmap_cache *idc;
207         int i;
208
209         if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
210                 i = oti->oti_ins_cache_size * 2;
211                 if (i == 0)
212                         i = OSD_INS_CACHE_SIZE;
213                 OBD_ALLOC_PTR_ARRAY(idc, i);
214                 if (idc == NULL)
215                         return ERR_PTR(-ENOMEM);
216                 if (oti->oti_ins_cache != NULL) {
217                         memcpy(idc, oti->oti_ins_cache,
218                                oti->oti_ins_cache_used * sizeof(*idc));
219                         OBD_FREE_PTR_ARRAY(oti->oti_ins_cache,
220                                            oti->oti_ins_cache_used);
221                 }
222                 oti->oti_ins_cache = idc;
223                 oti->oti_ins_cache_size = i;
224         }
225
226         idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
227         idc->oic_fid = *fid;
228         idc->oic_dev = osd;
229         idc->oic_lid.oii_ino = 0;
230         idc->oic_lid.oii_gen = 0;
231         idc->oic_remote = 0;
232
233         return idc;
234 }
235
236 /*
237  * Look up the mapping for the given FID in the cache and initialize
238  * a new one if not found. The initialization checks whether the
239  * object is local or remote; for local objects, OI is used to
240  * learn the ino/generation. The function is used when the caller
241  * has no information about the object, e.g. at dt_insert().
242  */
243 static struct osd_idmap_cache *
244 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
245                      const struct lu_fid *fid)
246 {
247         struct osd_idmap_cache *idc;
248         int rc;
249
250         idc = osd_idc_find(env, osd, fid);
251         LASSERT(!IS_ERR(idc));
252         if (idc != NULL)
253                 return idc;
254
255         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
256                osd->od_svname, PFID(fid));
257
258         /* new mapping is needed */
259         idc = osd_idc_add(env, osd, fid);
260         if (IS_ERR(idc)) {
261                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
262                        osd->od_svname, PFID(fid), PTR_ERR(idc));
263                 return idc;
264         }
265
266         /* initialize it */
267         rc = osd_remote_fid(env, osd, fid);
268         if (unlikely(rc < 0))
269                 return ERR_PTR(rc);
270
271         if (rc == 0) {
272                 /* the object is local, lookup in OI */
273                 /* XXX: probably cheaper to lookup in LU first? */
274                 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
275                                    &idc->oic_lid, 0);
276                 if (unlikely(rc < 0)) {
277                         CERROR("can't lookup: rc = %d\n", rc);
278                         return ERR_PTR(rc);
279                 }
280         } else {
281                 /* the object is remote */
282                 idc->oic_remote = 1;
283         }
284
285         return idc;
286 }
287
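/*
 * Debugging helper: read the LMA of inode @ino, print the self-FID
 * stored there, and optionally cross-check that FID against the OI
 * file.
 */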
288 static void osd_idc_dump_lma(const struct lu_env *env,
289                                 struct osd_device *osd,
290                                 unsigned long ino,
291                                 bool check_in_oi)
292 {
293         struct osd_thread_info *info = osd_oti_get(env);
294         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
295         const struct lu_fid *fid;
296         struct osd_inode_id lid;
297         struct inode *inode;
298         int rc;
299
300         inode = osd_ldiskfs_iget(osd_sb(osd), ino);
301         if (IS_ERR(inode)) {
302                 CERROR("%s: can't get inode %lu: rc = %d\n",
303                        osd->od_svname, ino, (int)PTR_ERR(inode));
304                 return;
305         }
306         if (is_bad_inode(inode)) {
307                 CERROR("%s: bad inode %lu\n", osd->od_svname, ino);
308                 goto put;
309         }
310         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
311         if (rc) {
312                 CERROR("%s: can't get LMA for %lu: rc = %d\n",
313                        osd->od_svname, ino, rc);
314                 goto put;
315         }
316         fid = &loa->loa_lma.lma_self_fid;
317         LCONSOLE(D_INFO, "%s: "DFID" in inode %lu/%u\n", osd->od_svname,
318                       PFID(fid), ino, (unsigned)inode->i_generation);
319         if (!check_in_oi)
320                 goto put;
321         rc = osd_oi_lookup(osd_oti_get(env), osd, fid, &lid, 0);
322         if (rc) {
323                 CERROR("%s: can't lookup "DFID": rc = %d\n",
324                        osd->od_svname, PFID(fid), rc);
325                 goto put;
326         }
327         LCONSOLE(D_INFO, "%s: "DFID" maps to %u/%u\n", osd->od_svname,
328                       PFID(fid), lid.oii_ino, lid.oii_gen);
329 put:
330         iput(inode);
331 }
332
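/*
 * Debugging helper: when an inconsistent FID->ino mapping is detected,
 * dump the OI mapping for @fid along with the LMA contents of the two
 * inodes involved.
 */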
333 static void osd_idc_dump_debug(const struct lu_env *env,
334                                 struct osd_device *osd,
335                                 const struct lu_fid *fid,
336                                 unsigned long ino1,
337                                 unsigned long ino2)
338 {
339         struct osd_inode_id lid;
340
341         int rc;
342
343         rc = osd_oi_lookup(osd_oti_get(env), osd, fid, &lid, 0);
344         if (!rc) {
345                 LCONSOLE(D_INFO, "%s: "DFID" maps to %u/%u\n",
346                         osd->od_svname, PFID(fid), lid.oii_ino, lid.oii_gen);
347                 osd_idc_dump_lma(env, osd, lid.oii_ino, false);
348         } else {
349                 CERROR("%s: can't lookup "DFID": rc = %d\n",
350                        osd->od_svname, PFID(fid), rc);
351         }
352         if (ino1)
353                 osd_idc_dump_lma(env, osd, ino1, true);
354         if (ino2)
355                 osd_idc_dump_lma(env, osd, ino2, true);
356 }
357
358 /*
359  * Look up the mapping for the given FID and fill it from the given
360  * object. The object is local by definition.
361  */
362 static int osd_idc_find_and_init(const struct lu_env *env,
363                                  struct osd_device *osd,
364                                  struct osd_object *obj)
365 {
366         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
367         struct osd_idmap_cache *idc;
368
369         idc = osd_idc_find(env, osd, fid);
370         LASSERT(!IS_ERR(idc));
371         if (idc != NULL) {
372                 if (obj->oo_inode == NULL)
373                         return 0;
374                 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
375                         if (idc->oic_lid.oii_ino) {
376                                 osd_idc_dump_debug(env, osd, fid,
377                                                    idc->oic_lid.oii_ino,
378                                                    obj->oo_inode->i_ino);
379                                 return -EINVAL;
380                         }
381                         idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
382                         idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
383                 }
384                 return 0;
385         }
386
387         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
388                osd->od_svname, PFID(fid));
389
390         /* new mapping is needed */
391         idc = osd_idc_add(env, osd, fid);
392         if (IS_ERR(idc)) {
393                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
394                        osd->od_svname, PFID(fid), PTR_ERR(idc));
395                 return PTR_ERR(idc);
396         }
397
398         if (obj->oo_inode != NULL) {
399                 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
400                 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
401         }
402         return 0;
403 }
404
405 /*
406  * OSD object methods.
407  */
408
409 /*
410  * Concurrency: no concurrent access is possible that early in object
411  * life-cycle.
412  */
413 static struct lu_object *osd_object_alloc(const struct lu_env *env,
414                                           const struct lu_object_header *hdr,
415                                           struct lu_device *d)
416 {
417         struct osd_object *mo;
418
419         OBD_ALLOC_PTR(mo);
420         if (mo != NULL) {
421                 struct lu_object *l;
422                 struct lu_object_header *h;
423                 struct osd_device *o = osd_dev(d);
424
425                 l = &mo->oo_dt.do_lu;
426                 if (unlikely(o->od_in_init)) {
427                         OBD_ALLOC_PTR(h);
428                         if (!h) {
429                                 OBD_FREE_PTR(mo);
430                                 return NULL;
431                         }
432
433                         lu_object_header_init(h);
434                         lu_object_init(l, h, d);
435                         lu_object_add_top(h, l);
436                         mo->oo_header = h;
437                 } else {
438                         dt_object_init(&mo->oo_dt, NULL, d);
439                         mo->oo_header = NULL;
440                 }
441
442                 mo->oo_dt.do_ops = &osd_obj_ops;
443                 l->lo_ops = &osd_lu_obj_ops;
444                 init_rwsem(&mo->oo_sem);
445                 init_rwsem(&mo->oo_ext_idx_sem);
446                 spin_lock_init(&mo->oo_guard);
447                 INIT_LIST_HEAD(&mo->oo_xattr_list);
448                 return l;
449         }
450         return NULL;
451 }
452
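/*
 * Read the LMA (plus trailing OST attributes, if present) from the
 * XATTR_NAME_LMA xattr of @inode into @loa, swab it to CPU order and
 * reject unsupported incompat flags with -EOPNOTSUPP. Returns 0 on
 * success, -ENODATA if the xattr is absent, or another negative errno
 * on failure.
 */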
453 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
454                 struct dentry *dentry, struct lustre_ost_attrs *loa)
455 {
456         int rc;
457
458         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
459                              (void *)loa, sizeof(*loa));
460         if (rc > 0) {
461                 struct lustre_mdt_attrs *lma = &loa->loa_lma;
462
463                 if (rc < sizeof(*lma))
464                         return -EINVAL;
465
466                 rc = 0;
467                 lustre_loa_swab(loa, true);
468                 /* Check LMA compatibility */
469                 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
470                         rc = -EOPNOTSUPP;
471                         CWARN("%s: unsupported incompat LMA feature(s) %#x for fid = "DFID", ino = %lu: rc = %d\n",
472                               osd_ino2name(inode),
473                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
474                               PFID(&lma->lma_self_fid), inode->i_ino, rc);
475                 }
476         } else if (rc == 0) {
477                 rc = -ENODATA;
478         }
479
480         return rc;
481 }
482
483 /*
484  * Retrieve an object from the backend ext filesystem.
485  */
486 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
487                        struct osd_inode_id *id)
488 {
489         int rc;
490         struct inode *inode = NULL;
491
492         /*
493          * If we look for an inode within a running
494          * transaction, then we risk a deadlock;
495          * osd_dirent_check_repair() breaks this.
496          */
497          /* LASSERT(current->journal_info == NULL); */
498
499         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
500         if (IS_ERR(inode)) {
501                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
502                        id->oii_ino, PTR_ERR(inode));
503         } else if (id->oii_gen != OSD_OII_NOGEN &&
504                    inode->i_generation != id->oii_gen) {
505                 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
506                        "i_generation = %u\n",
507                        id->oii_ino, id->oii_gen, inode->i_generation);
508                 iput(inode);
509                 inode = ERR_PTR(-ESTALE);
510         } else if (inode->i_nlink == 0) {
511                 /*
512                  * due to parallel readdir and unlink,
513                  * we can have dead inode here.
514                  */
515                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
516                 iput(inode);
517                 inode = ERR_PTR(-ESTALE);
518         } else if (is_bad_inode(inode)) {
519                 rc = -ENOENT;
520                 CWARN("%s: bad inode: ino = %u: rc = %d\n",
521                       osd_dev2name(dev), id->oii_ino, rc);
522                 iput(inode);
523                 inode = ERR_PTR(rc);
524         } else if ((rc = osd_attach_jinode(inode))) {
525                 iput(inode);
526                 inode = ERR_PTR(rc);
527         } else {
528                 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
529                 if (id->oii_gen == OSD_OII_NOGEN)
530                         osd_id_gen(id, inode->i_ino, inode->i_generation);
531
532                 /*
533                  * Do not update file c/mtime in ldiskfs.
534                  * NB: we don't have any lock to protect this because we don't
535                  * have reference on osd_object now, but contention with
536                  * another lookup + attr_set can't happen in the tiny window
537                  * between if (...) and set S_NOCMTIME.
538                  */
539                 if (!(inode->i_flags & S_NOCMTIME))
540                         inode->i_flags |= S_NOCMTIME;
541         }
542         return inode;
543 }
544
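/*
 * Wrapper around __ldiskfs_add_entry(): on -ENOBUFS or -ENOSPC it looks
 * up the parent directory's FID (from its LMA, or synthesized for the
 * root / IGIF cases) and warns that the directory is approaching or has
 * reached its maximum size; -ENOBUFS is then ignored.
 */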
545 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
546                           handle_t *handle, struct dentry *child,
547                           struct inode *inode, struct htree_lock *hlock)
548 {
549         int rc, rc2;
550
551         rc = __ldiskfs_add_entry(handle, child, inode, hlock);
552         if (rc == -ENOBUFS || rc == -ENOSPC) {
553                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
554                 struct inode *parent = child->d_parent->d_inode;
555                 struct lu_fid *fid = NULL;
556                 char fidstr[FID_LEN + 1] = "unknown";
557
558                 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
559                 if (!rc2) {
560                         fid = &loa->loa_lma.lma_self_fid;
561                 } else if (rc2 == -ENODATA) {
562                         if (unlikely(parent == inode->i_sb->s_root->d_inode)) {
563                                 fid = &info->oti_fid3;
564                                 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
565                         } else if (!osd->od_is_ost && osd->od_index == 0) {
566                                 fid = &info->oti_fid3;
567                                 lu_igif_build(fid, parent->i_ino,
568                                               parent->i_generation);
569                         }
570                 }
571
572                 if (fid != NULL)
573                         snprintf(fidstr, sizeof(fidstr), DFID, PFID(fid));
574
575                 /* below message is checked in sanity.sh test_129 */
576                 if (rc == -ENOSPC) {
577                         CWARN("%s: directory (inode: %lu, FID: %s) has reached max size limit\n",
578                               osd_name(osd), parent->i_ino, fidstr);
579                 } else {
580                         rc = 0; /* ignore such error now */
581                         CWARN("%s: directory (inode: %lu, FID: %s) is approaching max size limit\n",
582                               osd_name(osd), parent->i_ino, fidstr);
583                 }
584
585         }
586
587         return rc;
588 }
589
590
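/*
 * Like osd_iget(), but also return the object's FID: taken from the LMA
 * if present, otherwise the root FID for the root directory or an IGIF
 * built from the inode's ino/generation.
 */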
591 struct inode *
592 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
593              struct osd_inode_id *id, struct lu_fid *fid)
594 {
595         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
596         struct inode *inode;
597         int rc;
598
599         inode = osd_iget(info, dev, id);
600         if (IS_ERR(inode))
601                 return inode;
602
603         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
604         if (!rc) {
605                 *fid = loa->loa_lma.lma_self_fid;
606         } else if (rc == -ENODATA) {
607                 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
608                         lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
609                 else
610                         lu_igif_build(fid, inode->i_ino, inode->i_generation);
611         } else {
612                 iput(inode);
613                 inode = ERR_PTR(rc);
614         }
615         return inode;
616 }
617
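/*
 * Load the inode for @fid using the OI mapping in @id. If @trusted is
 * false and the inode cannot be used (-ENOENT/-ESTALE), re-check the OI
 * file: a changed mapping causes a retry with the new one, otherwise
 * -ENOENT or -EREMCHG is returned so the caller can decide whether to
 * trigger an OI scrub.
 */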
618 static struct inode *osd_iget_check(struct osd_thread_info *info,
619                                     struct osd_device *dev,
620                                     const struct lu_fid *fid,
621                                     struct osd_inode_id *id,
622                                     bool trusted)
623 {
624         struct inode *inode;
625         int rc = 0;
626
627         ENTRY;
628
629         /*
630          * The cached OI mapping is trustable. If we cannot locate the inode
631          * via the cached OI mapping, then return the failure to the caller
632          * directly without further OI checking.
633          */
634
635 again:
636         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
637         if (IS_ERR(inode)) {
638                 rc = PTR_ERR(inode);
639                 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
640                         goto check_oi;
641
642                 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
643                        PFID(fid), id->oii_ino, rc);
644                 GOTO(put, rc);
645         }
646
647         if (is_bad_inode(inode)) {
648                 rc = -ENOENT;
649                 if (!trusted)
650                         goto check_oi;
651
652                 CDEBUG(D_INODE, "bad inode for FID: "DFID", ino = %u\n",
653                        PFID(fid), id->oii_ino);
654                 GOTO(put, rc);
655         }
656
657         if (id->oii_gen != OSD_OII_NOGEN &&
658             inode->i_generation != id->oii_gen) {
659                 rc = -ESTALE;
660                 if (!trusted)
661                         goto check_oi;
662
663                 CDEBUG(D_INODE, "unmatched inode for FID: "DFID", ino = %u, "
664                        "oii_gen = %u, i_generation = %u\n", PFID(fid),
665                        id->oii_ino, id->oii_gen, inode->i_generation);
666                 GOTO(put, rc);
667         }
668
669         if (inode->i_nlink == 0) {
670                 rc = -ENOENT;
671                 if (!trusted)
672                         goto check_oi;
673
674                 CDEBUG(D_INODE, "stale inode for FID: "DFID", ino = %u\n",
675                        PFID(fid), id->oii_ino);
676                 GOTO(put, rc);
677         }
678
679         ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
680
681 check_oi:
682         if (rc != 0) {
683                 __u32 saved_ino = id->oii_ino;
684                 __u32 saved_gen = id->oii_gen;
685
686                 LASSERT(!trusted);
687                 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
688
689                 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
690                 /*
691                  * XXX: There are four possible cases:
692                  *      1. rc = 0.
693                  *         Backup/restore made the OI mapping invalid.
694                  *      2. rc = 0.
695                  *         Someone unlinked the object but did NOT remove
696                  *         the OI mapping, e.g. by mounting the target
697                  *         device as ldiskfs and modifying it directly.
698                  *      3. rc = -ENOENT.
699                  *         Someone just removed the object between the
700                  *         former oi_lookup and the iget. It is normal.
701                  *      4. Other failure cases.
702                  *
703                  *      Generally, when the device is mounted, it will
704                  *      automatically check whether the system was
705                  *      restored from a file-level backup. We trust that
706                  *      detection to distinguish the 1st case from the
707                  *      2nd: if the OI files are consistent but may
708                  *      contain stale OI mappings because of case 2, and
709                  *      iget() returns -ENOENT or -ESTALE, then it should
710                  *      be case 2.
711                  */
712                 if (rc != 0)
713                         /*
714                          * If the OI mapping was in the OI file before
715                          * osd_iget_check(), but now it has disappeared,
716                          * then it must have been removed by a race. That
717                          * is a normal race case.
718                          */
719                         GOTO(put, rc);
720
721                 /*
722                  * It was the OI scrub that updated the OI mapping by race.
723                  * The new OI mapping must be valid.
724                  */
725                 if (saved_ino != id->oii_ino ||
726                     (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
727                         if (!IS_ERR(inode))
728                                 iput(inode);
729
730                         trusted = true;
731                         goto again;
732                 }
733
734                 if (IS_ERR(inode)) {
735                         if (dev->od_scrub.os_scrub.os_file.sf_flags &
736                             SF_INCONSISTENT)
737                                 /*
738                                  * It can still be case 2, but we cannot
739                                  * distinguish it from case 1. So return
740                                  * -EREMCHG to block the current operation
741                                  * until the OI scrub has rebuilt the OI mappings.
742                                  */
743                                 rc = -EREMCHG;
744                         else
745                                 rc = -ENOENT;
746
747                         GOTO(put, rc);
748                 }
749
750                 if (inode->i_generation == id->oii_gen)
751                         rc = -ENOENT;
752                 else
753                         rc = -EREMCHG;
754         } else {
755                 if (id->oii_gen == OSD_OII_NOGEN)
756                         osd_id_gen(id, inode->i_ino, inode->i_generation);
757
758                 /*
759                  * Do not update file c/mtime in ldiskfs.
760                  * NB: we don't have any lock to protect this because we don't
761                  * have reference on osd_object now, but contention with
762                  * another lookup + attr_set can't happen in the tiny window
763                  * between if (...) and set S_NOCMTIME.
764                  */
765                 if (!(inode->i_flags & S_NOCMTIME))
766                         inode->i_flags |= S_NOCMTIME;
767         }
768
769         GOTO(put, rc);
770
771 put:
772         if (rc != 0) {
773                 if (!IS_ERR(inode))
774                         iput(inode);
775
776                 inode = ERR_PTR(rc);
777         }
778
779         return inode;
780 }
781
782 /**
783  * \retval +v: new filter_fid does not contain self-fid
784  * \retval 0:  filter_fid_18_23, contains self-fid
785  * \retval -v: other failure cases
786  */
787 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
788                  struct dentry *dentry, struct lu_fid *fid)
789 {
790         struct filter_fid *ff = &info->oti_ff;
791         struct ost_id *ostid = &info->oti_ostid;
792         int rc;
793
794         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
795         if (rc == sizeof(struct filter_fid_18_23)) {
796                 struct filter_fid_18_23 *ff_old = (void *)ff;
797
798                 ostid_set_seq(ostid, le64_to_cpu(ff_old->ff_seq));
799                 rc = ostid_set_id(ostid, le64_to_cpu(ff_old->ff_objid));
800                 /*
801                  * XXX: use 0 as the index for compatibility, the caller will
802                  * handle index related issues when necessary.
803                  */
804                 if (!rc)
805                         ostid_to_fid(fid, ostid, 0);
806         } else if (rc >= (int)sizeof(struct filter_fid_24_29)) {
807                 rc = 1;
808         } else if (rc >= 0) {
809                 rc = -EINVAL;
810         }
811
812         return rc;
813 }
814
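/*
 * Write a fresh LMA (self-FID @fid plus @compat flags) into @inode in a
 * standalone journal transaction; used by osd_check_lma() to repair OST
 * objects whose LMA is missing or based on an old-style IDIF.
 */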
815 static int osd_lma_self_repair(struct osd_thread_info *info,
816                                struct osd_device *osd, struct inode *inode,
817                                const struct lu_fid *fid, __u32 compat)
818 {
819         handle_t *jh;
820         int rc;
821
822         LASSERT(current->journal_info == NULL);
823
824         jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
825                                   osd_dto_credits_noquota[DTO_XATTR_SET]);
826         if (IS_ERR(jh)) {
827                 rc = PTR_ERR(jh);
828                 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
829                       osd_name(osd), rc);
830                 return rc;
831         }
832
833         rc = osd_ea_fid_set(info, inode, fid, compat, 0);
834         if (rc != 0)
835                 CWARN("%s: cannot self repair the LMA: rc = %d\n",
836                       osd_name(osd), rc);
837         ldiskfs_journal_stop(jh);
838         return rc;
839 }
840
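/*
 * Verify that the FID stored in the object's LMA (or, for OST objects
 * without an LMA, in XATTR_NAME_FID) matches the FID the object was
 * looked up with; self-repair the LMA where possible and return
 * -EREMCHG on a real mismatch so that an OI scrub can be triggered.
 */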
841 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
842 {
843         struct osd_thread_info *info = osd_oti_get(env);
844         struct osd_device *osd = osd_obj2dev(obj);
845         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
846         struct lustre_mdt_attrs *lma = &loa->loa_lma;
847         struct inode *inode = obj->oo_inode;
848         struct dentry *dentry = &info->oti_obj_dentry;
849         struct lu_fid *fid = NULL;
850         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
851         int rc;
852
853         ENTRY;
854
855         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
856                              (void *)loa, sizeof(*loa));
857         if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
858                 fid = &lma->lma_self_fid;
859                 rc = osd_get_idif(info, inode, dentry, fid);
860                 if (rc > 0 || (rc == -ENODATA && osd->od_index_in_idif)) {
861                         /*
862                          * For the given OST-object, if it has neither LMA nor
863                          * FID in XATTR_NAME_FID, then the given FID (which is
864                          * contained in the @obj, from client RPC for locating
865                          * the OST-object) is trusted. We use it to generate
866                          * the LMA.
867                          */
868                         osd_lma_self_repair(info, osd, inode, rfid,
869                                             LMAC_FID_ON_OST);
870                         RETURN(0);
871                 }
872         }
873
874         if (rc < 0)
875                 RETURN(rc);
876
877         if (rc > 0) {
878                 rc = 0;
879                 lustre_lma_swab(lma);
880                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
881                              (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
882                               S_ISREG(inode->i_mode)))) {
883                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
884                               "fid = "DFID", ino = %lu\n", osd_name(osd),
885                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
886                               PFID(rfid), inode->i_ino);
887                         rc = -EOPNOTSUPP;
888                 } else {
889                         fid = &lma->lma_self_fid;
890                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
891                             osd->od_is_ost)
892                                 obj->oo_pfid_in_lma = 1;
893                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
894                             !osd->od_is_ost)
895                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
896                 }
897         }
898
899         if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
900                 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
901                         struct ost_id   *oi   = &info->oti_ostid;
902                         struct lu_fid   *fid1 = &info->oti_fid3;
903                         __u32            idx  = fid_idif_ost_idx(rfid);
904
905                         /*
906                          * For old IDIF, the OST index is not part of the IDIF,
907                          * which means different OSTs may have the same IDIFs.
908                          * In such a case, we need a compatibility check to
909                          * make sure the OI scrub is triggered properly.
910                          */
911                         if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
912                                 /* Given @rfid is new, LMA is old. */
913                                 fid_to_ostid(fid, oi);
914                                 ostid_to_fid(fid1, oi, idx);
915                                 if (lu_fid_eq(fid1, rfid)) {
916                                         if (osd->od_index_in_idif)
917                                                 osd_lma_self_repair(info, osd,
918                                                         inode, rfid,
919                                                         LMAC_FID_ON_OST);
920                                         RETURN(0);
921                                 }
922                         }
923                 }
924
925                 rc = -EREMCHG;
926         }
927
928         RETURN(rc);
929 }
930
931 struct osd_check_lmv_buf {
932         /* please keep it as first member */
933         struct dir_context ctx;
934         struct osd_thread_info *oclb_info;
935         struct osd_device *oclb_dev;
936         struct osd_idmap_cache *oclb_oic;
937         int oclb_items;
938         bool oclb_found;
939 };
940
941 /**
942  * It is called internally by ->iterate*() to filter out the
943  * local slave object's FID of the striped directory.
944  *
945  * \retval      1 found the local slave's FID
946  * \retval      0 continue to check next item
947  * \retval      -ve for failure
948  */
949 #ifdef HAVE_FILLDIR_USE_CTX
950 static int osd_stripe_dir_filldir(struct dir_context *buf,
951 #else
952 static int osd_stripe_dir_filldir(void *buf,
953 #endif
954                                   const char *name, int namelen,
955                                   loff_t offset, __u64 ino, unsigned int d_type)
956 {
957         struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
958         struct osd_thread_info *oti = oclb->oclb_info;
959         struct lu_fid *fid = &oti->oti_fid3;
960         struct osd_inode_id *id = &oti->oti_id3;
961         struct osd_device *dev = oclb->oclb_dev;
962         struct osd_idmap_cache *oic = oclb->oclb_oic;
963         struct inode *inode;
964
965         oclb->oclb_items++;
966
967         if (name[0] == '.')
968                 return 0;
969
970         fid_zero(fid);
971         sscanf(name + 1, SFID, RFID(fid));
972         if (!fid_is_sane(fid))
973                 return 0;
974
975         if (osd_remote_fid(oti->oti_env, dev, fid))
976                 return 0;
977
978         osd_id_gen(id, ino, OSD_OII_NOGEN);
979         inode = osd_iget(oti, dev, id);
980         if (IS_ERR(inode))
981                 return PTR_ERR(inode);
982
983         iput(inode);
984         osd_add_oi_cache(oti, dev, id, fid);
985         oic->oic_fid = *fid;
986         oic->oic_lid = *id;
987         oic->oic_dev = dev;
988         osd_oii_insert(dev, oic, true);
989         oclb->oclb_found = true;
990
991         return 1;
992 }
993
994 /*
995  * When looking up an item under a striped directory, we first need to locate
996  * the master MDT-object of the striped directory; then the client sends a
997  * lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
998  * and the item's name. If the system is restored from an MDT file-level
999  * backup, then before the OI scrub has completely rebuilt the OI files, the
1000  * OI mappings of the master MDT-object and slave MDT-object may be invalid.
1001  * Usually that is not a problem for the master MDT-object, because locating
1002  * the master MDT-object starts with a name-based lookup (for the striped
1003  * directory itself), during which we can set up the correct OI mapping for
1004  * the master MDT-object. But it is trouble for the slave MDT-object, because
1005  * the client does not trigger a name-based lookup on the MDT to locate the
1006  * slave MDT-object before looking up items under the striped directory; then
1007  * osd_fid_lookup() finds that the OI mapping for the slave MDT-object is
1008  * invalid and does not know what the right OI mapping is, so the MDT has to
1009  * return -EINPROGRESS to the client to notify it that the OI scrub is
1010  * rebuilding the OI file, the related OI mapping is not known yet, and it
1011  * should try again later. The client then retries the RPC again and again
1012  * until the related OI mapping has been updated. That is quite inefficient.
1013  *
1014  * To resolve the above trouble, we handle it as the following two cases:
1015  *
1016  * 1) The slave MDT-object and the master MDT-object are on different MDTs.
1017  *    This is relatively easy. Being a remote MDT-object, the slave MDT-object
1018  *    is linked under /REMOTE_PARENT_DIR with its FID string as the name, so
1019  *    we can locate the slave MDT-object via a lookup in /REMOTE_PARENT_DIR
1020  *    directly. Please check osd_fid_lookup().
1021  *
1022  * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
1023  *    In that case, while looking up the master MDT-object, we also look up
1024  *    the slave MDT-object via readdir against the master MDT-object, because
1025  *    the slave MDT-objects' information is stored as sub-directories with the
1026  *    name "${FID}:${index}". When the local slave MDT-object is found, its OI
1027  *    mapping is recorded, so subsequent osd_fid_lookup() calls will know
1028  *    the correct OI mapping for the slave MDT-object.
1029  */
1030 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
1031                          struct inode *inode, struct osd_idmap_cache *oic)
1032 {
1033         struct lu_buf *buf = &oti->oti_big_buf;
1034         struct dentry *dentry = &oti->oti_obj_dentry;
1035         struct file *filp;
1036         struct lmv_mds_md_v1 *lmv1;
1037         struct osd_check_lmv_buf oclb = {
1038                 .ctx.actor = osd_stripe_dir_filldir,
1039                 .oclb_info = oti,
1040                 .oclb_dev = dev,
1041                 .oclb_oic = oic,
1042                 .oclb_found = false,
1043         };
1044         int rc = 0;
1045
1046         ENTRY;
1047
1048 again:
1049         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, buf->lb_buf,
1050                              buf->lb_len);
1051         if (rc == -ERANGE) {
1052                 rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMV, NULL, 0);
1053                 if (rc > 0) {
1054                         lu_buf_realloc(buf, rc);
1055                         if (buf->lb_buf == NULL)
1056                                 GOTO(out, rc = -ENOMEM);
1057
1058                         goto again;
1059                 }
1060         }
1061
1062         if (unlikely(rc == 0 || rc == -ENODATA))
1063                 GOTO(out, rc = 0);
1064
1065         if (rc < 0)
1066                 GOTO(out, rc);
1067
1068         if (unlikely(buf->lb_buf == NULL)) {
1069                 lu_buf_realloc(buf, rc);
1070                 if (buf->lb_buf == NULL)
1071                         GOTO(out, rc = -ENOMEM);
1072
1073                 goto again;
1074         }
1075
1076         lmv1 = buf->lb_buf;
1077         if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
1078                 GOTO(out, rc = 0);
1079
1080         filp = osd_quasi_file(oti->oti_env, inode);
1081         rc = osd_security_file_alloc(filp);
1082         if (rc)
1083                 goto out;
1084
1085         do {
1086                 oclb.oclb_items = 0;
1087                 rc = iterate_dir(filp, &oclb.ctx);
1088         } while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
1089                  filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
1090         inode->i_fop->release(inode, filp);
1091
1092 out:
1093         if (rc < 0)
1094                 CDEBUG(D_LFSCK,
1095                        "%s: cannot check LMV, ino = %lu/%u "DFID": rc = %d\n",
1096                        osd_ino2name(inode), inode->i_ino, inode->i_generation,
1097                        PFID(&oic->oic_fid), rc);
1098         else
1099                 rc = 0;
1100
1101         RETURN(rc);
1102 }
1103
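/*
 * Resolve @fid to an inode and attach it to @obj. The search order is:
 * 1) the per-thread FID->ino cache, 2) the OI scrub pending list, and
 * 3) the OI files. Inconsistencies (e.g. after a file-level backup and
 * restore) are handled by looking under /REMOTE_PARENT_DIR and/or by
 * triggering the OI scrub, returning -EINPROGRESS or -EREMCHG while the
 * OI mappings are being rebuilt.
 */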
1104 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
1105                           const struct lu_fid *fid,
1106                           const struct lu_object_conf *conf)
1107 {
1108         struct osd_thread_info *info;
1109         struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1110         struct osd_device *dev;
1111         struct osd_idmap_cache *oic;
1112         struct osd_inode_id *id;
1113         struct inode *inode = NULL;
1114         struct lustre_scrub *scrub;
1115         struct scrub_file *sf;
1116         __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
1117         __u32 saved_ino;
1118         __u32 saved_gen;
1119         int result = 0;
1120         int rc1 = 0;
1121         bool remote = false;
1122         bool trusted = true;
1123         bool updated = false;
1124         bool checked = false;
1125
1126         ENTRY;
1127
1128         LINVRNT(osd_invariant(obj));
1129         LASSERT(obj->oo_inode == NULL);
1130         LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID"\n", PFID(fid));
1131
1132         dev = osd_dev(ldev);
1133         scrub = &dev->od_scrub.os_scrub;
1134         sf = &scrub->os_file;
1135         info = osd_oti_get(env);
1136         LASSERT(info);
1137         oic = &info->oti_cache;
1138
1139         if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
1140                 RETURN(-ENOENT);
1141
1142         /*
1143          * For an object created as a locking anchor, or for an object to
1144          * be created on disk, no osd_oi_lookup() is needed here because a
1145          * FID should never be re-used; if it is really a duplicate FID for
1146          * some unexpected reason, we should be able to detect it later by
1147          * calling do_create->osd_oi_insert().
1148          */
1149         if (conf && conf->loc_flags & LOC_F_NEW)
1150                 GOTO(out, result = 0);
1151
1152         /* Search order: 1. per-thread cache. */
1153         if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
1154                 id = &oic->oic_lid;
1155                 goto iget;
1156         }
1157
1158         id = &info->oti_id;
1159         if (!list_empty(&scrub->os_inconsistent_items)) {
1160                 /* Search order: 2. OI scrub pending list. */
1161                 result = osd_oii_lookup(dev, fid, id);
1162                 if (!result)
1163                         goto iget;
1164         }
1165
1166         /*
1167          * The OI mapping in the OI file can be updated by the OI scrub
1168          * when we locate the inode via FID. So it may not be trustworthy.
1169          */
1170         trusted = false;
1171
1172         /* Search order: 3. OI files. */
1173         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1174         if (result == -ENOENT) {
1175                 if (!(fid_is_norm(fid) || fid_is_igif(fid)) ||
1176                     fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1177                     !ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
1178                                       sf->sf_oi_bitmap))
1179                         GOTO(out, result = 0);
1180
1181                 goto trigger;
1182         }
1183
1184         /* -ESTALE is returned if inode of OST object doesn't exist */
1185         if (result == -ESTALE &&
1186             fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1187                 GOTO(out, result = 0);
1188         }
1189
1190         if (result)
1191                 GOTO(out, result);
1192
1193 iget:
1194         obj->oo_inode = NULL;
1195         /* for later passes through checks, not true on first pass */
1196         if (!IS_ERR_OR_NULL(inode))
1197                 iput(inode);
1198
1199         inode = osd_iget_check(info, dev, fid, id, trusted);
1200         if (!IS_ERR(inode)) {
1201                 obj->oo_inode = inode;
1202                 result = 0;
1203                 if (remote)
1204                         goto trigger;
1205
1206                 goto check_lma;
1207         }
1208
1209         result = PTR_ERR(inode);
1210         if (result == -ENOENT || result == -ESTALE)
1211                 GOTO(out, result = 0);
1212
1213         if (result != -EREMCHG)
1214                 GOTO(out, result);
1215
1216 trigger:
1217         /*
1218          * We still have a chance to get a valid inode: for an
1219          * object which is referenced by a remote name entry, the
1220          * object on the local MDT will be linked under the dir
1221          * "/REMOTE_PARENT_DIR" with its FID string as the name.
1222          *
1223          * We do not know whether the object for the given FID
1224          * is referenced by some remote name entry or not, and
1225          * especially for DNE II, a multiply-linked object may
1226          * have many name entries residing on many MDTs.
1227          *
1228          * To simplify the operation, OSD does not distinguish
1229          * further; it just looks up "/REMOTE_PARENT_DIR". Usually
1230          * this only happens for an RPC from another MDT during
1231          * the OI scrub, or for a client-side RPC carrying only a
1232          * FID, such as FID-to-path, or from an old connected client.
1233          */
1234         if (!remote) {
1235                 rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
1236                 if (!rc1) {
1237                         remote = true;
1238                         trusted = true;
1239                         flags |= SS_AUTO_PARTIAL;
1240                         flags &= ~SS_AUTO_FULL;
1241                         goto iget;
1242                 }
1243         }
1244
1245         if (scrub->os_running) {
1246                 if (scrub->os_partial_scan && !scrub->os_in_join)
1247                         goto join;
1248
1249                 osd_add_oi_cache(info, dev, id, fid);
1250                 if (IS_ERR_OR_NULL(inode) || result) {
1251                         osd_oii_insert(dev, oic, result == -ENOENT);
1252                         GOTO(out, result = -EINPROGRESS);
1253                 }
1254
1255                 LASSERT(remote);
1256                 LASSERT(obj->oo_inode == inode);
1257
1258                 osd_oii_insert(dev, oic, true);
1259                 goto found;
1260         }
1261
1262         if (dev->od_auto_scrub_interval == AS_NEVER) {
1263                 if (!remote)
1264                         GOTO(out, result = -EREMCHG);
1265
1266                 LASSERT(!result);
1267                 LASSERT(obj->oo_inode == inode);
1268
1269                 osd_add_oi_cache(info, dev, id, fid);
1270                 goto found;
1271         }
1272
1273 join:
1274         rc1 = osd_scrub_start(env, dev, flags);
1275         LCONSOLE_WARN("%s: trigger OI scrub by RPC for the " DFID" with flags "
1276                       "0x%x, rc = %d\n", osd_name(dev), PFID(fid), flags, rc1);
1277         if (rc1 && rc1 != -EALREADY)
1278                 GOTO(out, result = -EREMCHG);
1279
1280         osd_add_oi_cache(info, dev, id, fid);
1281         if (IS_ERR_OR_NULL(inode) || result) {
1282                 osd_oii_insert(dev, oic, result == -ENOENT);
1283                 GOTO(out, result = -EINPROGRESS);
1284         }
1285
1286         LASSERT(remote);
1287         LASSERT(obj->oo_inode == inode);
1288
1289         osd_oii_insert(dev, oic, true);
1290         goto found;
1291
1292 check_lma:
1293         checked = true;
1294         if (unlikely(obj->oo_header))
1295                 goto found;
1296
1297         result = osd_check_lma(env, obj);
1298         if (!result)
1299                 goto found;
1300
1301         LASSERTF(id->oii_ino == inode->i_ino &&
1302                  id->oii_gen == inode->i_generation,
1303                  "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1304                  PFID(fid), id->oii_ino, id->oii_gen,
1305                  inode->i_ino, inode->i_generation);
1306
1307         saved_ino = inode->i_ino;
1308         saved_gen = inode->i_generation;
1309
1310         if (unlikely(result == -ENODATA)) {
1311                 /*
1312                  * If the OI scrub updated the OI mapping by race, it
1313                  * must be valid. Trust the inode that has no LMA EA.
1314                  */
1315                 if (updated)
1316                         goto found;
1317
1318                 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1319                 if (!result) {
1320                         /*
1321                          * The OI mapping is still there, the inode is still
1322                          * The OI mapping is still there and the inode is
1323                          * still valid; it is just that the inode has no LMA EA.
1324                         if (saved_ino == id->oii_ino &&
1325                             saved_gen == id->oii_gen)
1326                                 goto found;
1327
1328                         /*
1329                          * It was the OI scrub that updated the OI mapping by race.
1330                          * The new OI mapping must be valid.
1331                          */
1332                         trusted = true;
1333                         updated = true;
1334                         goto iget;
1335                 }
1336
1337                 /*
1338                  * "result == -ENOENT" means that the OI mapping has been
1339                  * removed by race, so the inode belongs to another object.
1340                  *
1341                  * Other errors can be returned directly.
1342                  */
1343                 if (result == -ENOENT) {
1344                         LASSERT(trusted);
1345
1346                         obj->oo_inode = NULL;
1347                         result = 0;
1348                 }
1349         }
1350
1351         if (result != -EREMCHG)
1352                 GOTO(out, result);
1353
1354         LASSERT(!updated);
1355
1356         /*
1357          * If two OST objects map to the same inode, and the inode mode is
1358          * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), which means it is
1359          * reserved by precreate and not written yet, then do not set the
1360          * inode for the object whose FID mismatches, so that it can create
1361          * an inode and not block precreate.
1362          */
1363         if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
1364             inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
1365                 obj->oo_inode = NULL;
1366                 GOTO(out, result = 0);
1367         }
1368
1369         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1370         /*
1371          * "result == -ENOENT" means the cached OI mapping has been removed
1372          * from the OI file by race; the above inode belongs to another object.
1373          */
1374         if (result == -ENOENT) {
1375                 LASSERT(trusted);
1376
1377                 obj->oo_inode = NULL;
1378                 GOTO(out, result = 0);
1379         }
1380
1381         if (result)
1382                 GOTO(out, result);
1383
1384         if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
1385                 result = -EREMCHG;
1386                 goto trigger;
1387         }
1388
1389         /*
1390          * It was the OI scrub that updated the OI mapping by race.
1391          * The new OI mapping must be valid.
1392          */
1393         trusted = true;
1394         updated = true;
1395         goto iget;
1396
1397 found:
1398         if (!checked) {
1399                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
1400                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
1401
1402                 result = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
1403                 if (!result) {
1404                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
1405                             dev->od_is_ost)
1406                                 obj->oo_pfid_in_lma = 1;
1407                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
1408                             !dev->od_is_ost)
1409                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
1410                 } else if (result != -ENODATA) {
1411                         GOTO(out, result);
1412                 }
1413         }
1414
1415         obj->oo_compat_dot_created = 1;
1416         obj->oo_compat_dotdot_created = 1;
1417
1418         if (S_ISDIR(inode->i_mode) &&
1419             (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1420                 osd_check_lmv(info, dev, inode, oic);
1421
1422         result = osd_attach_jinode(inode);
1423         if (result)
1424                 GOTO(out, result);
1425
1426         if (!ldiskfs_pdo)
1427                 GOTO(out, result = 0);
1428
1429         LASSERT(!obj->oo_hl_head);
1430         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1431
1432         GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
1433
1434 out:
1435         if (result || !obj->oo_inode) {
1436                 if (!IS_ERR_OR_NULL(inode))
1437                         iput(inode);
1438
1439                 obj->oo_inode = NULL;
1440                 if (trusted)
1441                         fid_zero(&oic->oic_fid);
1442         }
1443
1444         LINVRNT(osd_invariant(obj));
1445         return result;
1446 }
1447
1448 /*
1449  * Concurrency: shouldn't matter.
1450  */
1451 static void osd_object_init0(struct osd_object *obj)
1452 {
1453         LASSERT(obj->oo_inode != NULL);
1454         obj->oo_dt.do_body_ops = &osd_body_ops;
1455         obj->oo_dt.do_lu.lo_header->loh_attr |=
1456                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1457 }
1458
1459 /*
1460  * Concurrency: no concurrent access is possible that early in object
1461  * life-cycle.
1462  */
1463 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1464                            const struct lu_object_conf *conf)
1465 {
1466         struct osd_object *obj = osd_obj(l);
1467         int result;
1468
1469         LINVRNT(osd_invariant(obj));
1470
1471         if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_LLOG_UMOUNT_RACE) &&
1472             cfs_fail_val == 2) {
1473                 struct osd_thread_info *info = osd_oti_get(env);
1474                 struct osd_idmap_cache *oic = &info->oti_cache;
1475                 /* invalidate thread cache */
1476                 memset(&oic->oic_fid, 0, sizeof(oic->oic_fid));
1477         }
1478         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1479                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1480                 l->lo_header->loh_attr |= LOHA_EXISTS;
1481                 return 0;
1482         }
1483
1484         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1485         obj->oo_dt.do_body_ops = &osd_body_ops_new;
1486         if (result == 0 && obj->oo_inode != NULL) {
1487                 struct osd_thread_info *oti = osd_oti_get(env);
1488                 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1489
1490                 osd_object_init0(obj);
1491                 if (unlikely(obj->oo_header))
1492                         return 0;
1493
1494                 result = osd_get_lma(oti, obj->oo_inode,
1495                                      &oti->oti_obj_dentry, loa);
1496                 if (!result) {
1497                         /*
1498                          * Convert LMAI flags to lustre LMA flags
1499                          * and cache them in oo_lma_flags
1500                          */
1501                         obj->oo_lma_flags =
1502                                 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1503                 } else if (result == -ENODATA) {
1504                         result = 0;
1505                 }
1506         }
1507         obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
1508
1509         LINVRNT(osd_invariant(obj));
1510         return result;
1511 }
1512
1513 /*
1514  * The first part of oxe_buf is the xattr name, and is '\0' terminated.
1515  * The remaining part is the value, in binary mode.
1516  */
1517 struct osd_xattr_entry {
1518         struct list_head        oxe_list;
1519         size_t                  oxe_len;
1520         size_t                  oxe_namelen;
1521         bool                    oxe_exist;
1522         struct rcu_head         oxe_rcu;
1523         char                    oxe_buf[0];
1524 };
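
/*
 * For illustration: a hypothetical cached xattr "user.job" (8-char name)
 * with a 16-byte binary value would be stored with oxe_namelen = 8,
 * oxe_buf = "user.job\0" followed by the 16 value bytes, and
 * oxe_len = sizeof(struct osd_xattr_entry) + 8 + 1 + 16, matching the
 * allocation done in osd_oxc_add() below.
 */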
1525
1526 static int osd_oxc_get(struct osd_object *obj, const char *name,
1527                        struct lu_buf *buf)
1528 {
1529         struct osd_xattr_entry *tmp;
1530         struct osd_xattr_entry *oxe = NULL;
1531         size_t namelen = strlen(name);
1532         int rc;
1533
1534         rcu_read_lock();
1535         list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1536                 if (namelen == tmp->oxe_namelen &&
1537                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1538                         oxe = tmp;
1539                         break;
1540                 }
1541         }
1542
1543         if (oxe == NULL)
1544                 GOTO(out, rc = -ENOENT);
1545
1546         if (!oxe->oxe_exist)
1547                 GOTO(out, rc = -ENODATA);
1548
1549         /* value length */
1550         rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1551         LASSERT(rc > 0);
1552
1553         if (buf->lb_buf == NULL)
1554                 GOTO(out, rc);
1555
1556         if (buf->lb_len < rc)
1557                 GOTO(out, rc = -ERANGE);
1558
1559         memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
1560 out:
1561         rcu_read_unlock();
1562
1563         return rc;
1564 }
1565
1566 static void osd_oxc_free(struct rcu_head *head)
1567 {
1568         struct osd_xattr_entry *oxe;
1569
1570         oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1571         OBD_FREE(oxe, oxe->oxe_len);
1572 }
1573
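/*
 * Add or replace a cached xattr entry. Readers scan oo_xattr_list under
 * rcu_read_lock() (see osd_oxc_get() above), so the writer updates the
 * list under the oo_guard spinlock and defers freeing of any replaced
 * entry to call_rcu()/osd_oxc_free().
 */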
1574 static void osd_oxc_add(struct osd_object *obj, const char *name,
1575                         const char *buf, int buflen)
1576 {
1577         struct osd_xattr_entry *oxe;
1578         struct osd_xattr_entry *old = NULL;
1579         struct osd_xattr_entry *tmp;
1580         size_t namelen = strlen(name);
1581         size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1582
1583         OBD_ALLOC(oxe, len);
1584         if (oxe == NULL)
1585                 return;
1586
1587         INIT_LIST_HEAD(&oxe->oxe_list);
1588         oxe->oxe_len = len;
1589         oxe->oxe_namelen = namelen;
1590         memcpy(oxe->oxe_buf, name, namelen);
1591         if (buflen > 0) {
1592                 LASSERT(buf != NULL);
1593                 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1594                 oxe->oxe_exist = true;
1595         } else {
1596                 oxe->oxe_exist = false;
1597         }
1598
1599         /* this should rarely be called: just remove the old entry and add the new */
1600         spin_lock(&obj->oo_guard);
1601         list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1602                 if (namelen == tmp->oxe_namelen &&
1603                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1604                         old = tmp;
1605                         break;
1606                 }
1607         }
1608         if (old != NULL) {
1609                 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1610                 call_rcu(&old->oxe_rcu, osd_oxc_free);
1611         } else {
1612                 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1613         }
1614         spin_unlock(&obj->oo_guard);
1615 }
1616
1617 static void osd_oxc_del(struct osd_object *obj, const char *name)
1618 {
1619         struct osd_xattr_entry *oxe;
1620         size_t namelen = strlen(name);
1621
1622         spin_lock(&obj->oo_guard);
1623         list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1624                 if (namelen == oxe->oxe_namelen &&
1625                     strncmp(name, oxe->oxe_buf, namelen) == 0) {
1626                         list_del_rcu(&oxe->oxe_list);
1627                         call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1628                         break;
1629                 }
1630         }
1631         spin_unlock(&obj->oo_guard);
1632 }
1633
1634 static void osd_oxc_fini(struct osd_object *obj)
1635 {
1636         struct osd_xattr_entry *oxe, *next;
1637
1638         list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1639                 list_del(&oxe->oxe_list);
1640                 OBD_FREE(oxe, oxe->oxe_len);
1641         }
1642 }
1643
1644 /*
1645  * Concurrency: no concurrent access is possible that late in object
1646  * life-cycle.
1647  */
1648 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1649 {
1650         struct osd_object *obj = osd_obj(l);
1651         struct lu_object_header *h = obj->oo_header;
1652
1653         LINVRNT(osd_invariant(obj));
1654
1655         osd_oxc_fini(obj);
1656         dt_object_fini(&obj->oo_dt);
1657         if (obj->oo_hl_head != NULL)
1658                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1659         /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
1660         OBD_FREE_PTR(obj);
1661         if (unlikely(h))
1662                 lu_object_header_free(h);
1663 }
1664
1665 /*
1666  * Concurrency: no concurrent access is possible that late in object
1667  * life-cycle.
1668  */
1669 static void osd_index_fini(struct osd_object *o)
1670 {
1671         struct iam_container *bag;
1672
1673         if (o->oo_dir != NULL) {
1674                 bag = &o->oo_dir->od_container;
1675                 if (o->oo_inode != NULL) {
1676                         if (bag->ic_object == o->oo_inode)
1677                                 iam_container_fini(bag);
1678                 }
1679                 OBD_FREE_PTR(o->oo_dir);
1680                 o->oo_dir = NULL;
1681         }
1682 }
1683
1684 enum {
1685         OSD_TXN_OI_DELETE_CREDITS    = 20,
1686         OSD_TXN_INODE_DELETE_CREDITS = 20
1687 };
1688
1689 /*
1690  * Journal
1691  */
1692
1693 #if OSD_THANDLE_STATS
1694 /**
1695  * Set time when the handle is allocated
1696  */
1697 static void osd_th_alloced(struct osd_thandle *oth)
1698 {
1699         oth->oth_alloced = ktime_get();
1700 }
1701
1702 /**
1703  * Set time when the handle started
1704  */
1705 static void osd_th_started(struct osd_thandle *oth)
1706 {
1707         oth->oth_started = ktime_get();
1708 }
1709
1710 /**
1711  * Check whether we have dealt with this handle for too long.
1712  */
1713 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1714                                 ktime_t alloced, ktime_t started,
1715                                 ktime_t closed)
1716 {
1717         ktime_t now = ktime_get();
1718
1719         LASSERT(dev != NULL);
1720
1721         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1722                             ktime_us_delta(started, alloced));
1723         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1724                             ktime_us_delta(closed, started));
1725         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1726                             ktime_us_delta(now, closed));
1727
1728         if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1729                 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1730                                 oth, now, alloced, started, closed);
1731                 libcfs_debug_dumpstack(NULL);
1732         }
1733 }
1734
1735 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
1736 {                                                                       \
1737         ktime_t __closed = ktime_get();                                 \
1738         ktime_t __alloced = oth->oth_alloced;                           \
1739         ktime_t __started = oth->oth_started;                           \
1740                                                                         \
1741         expr;                                                           \
1742         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
1743 }
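
/*
 * Usage sketch: the macro wraps an expression so that the handle's
 * alloc/start/close timestamps are passed to __osd_th_check_slow(),
 * e.g. OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl))
 * as done in osd_trans_stop() below.
 */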
1744
1745 #else /* OSD_THANDLE_STATS */
1746
1747 #define osd_th_alloced(h)                  do {} while(0)
1748 #define osd_th_started(h)                  do {} while(0)
1749 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
1750
1751 #endif /* OSD_THANDLE_STATS */
1752
1753 /*
1754  * Concurrency: doesn't access mutable data.
1755  */
1756 static int osd_param_is_not_sane(const struct osd_device *dev,
1757                                  const struct thandle *th)
1758 {
1759         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1760
1761         return oh->ot_credits > osd_transaction_size(dev);
1762 }
1763
1764 /*
1765  * Concurrency: shouldn't matter.
1766  */
1767 static void osd_trans_commit_cb(struct super_block *sb,
1768                                 struct ldiskfs_journal_cb_entry *jcb, int error)
1769 {
1770         struct osd_thandle *oh = container_of(jcb, struct osd_thandle, ot_jcb);
1771         struct thandle *th = &oh->ot_super;
1772         struct lu_device *lud = &th->th_dev->dd_lu_dev;
1773         struct osd_device *osd = osd_dev(lud);
1774         struct dt_txn_commit_cb *dcb, *tmp;
1775
1776         LASSERT(oh->ot_handle == NULL);
1777
1778         if (error)
1779                 CERROR("transaction @0x%p commit error: %d\n", th, error);
1780
1781         OBD_FAIL_TIMEOUT(OBD_FAIL_OST_DELAY_TRANS, 40);
1782         /* call per-transaction callbacks if any */
1783         list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1784                                  dcb_linkage) {
1785                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1786                          "commit callback entry: magic=%x name='%s'\n",
1787                          dcb->dcb_magic, dcb->dcb_name);
1788                 list_del_init(&dcb->dcb_linkage);
1789                 dcb->dcb_func(NULL, th, dcb, error);
1790         }
1791
1792         lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1793         if (atomic_dec_and_test(&osd->od_commit_cb_in_flight))
1794                 wake_up(&osd->od_commit_cb_done);
1795         th->th_dev = NULL;
1796
1797         OBD_FREE_PTR(oh);
1798 }
1799
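/*
 * Rough transaction life cycle for this OSD: osd_trans_create() allocates
 * the osd_thandle, callers declare operations which accumulate ot_credits,
 * osd_trans_start() opens the ldiskfs journal handle with those credits,
 * osd_trans_stop() closes the handle, and osd_trans_commit_cb() runs once
 * the journal commits, invoking the registered commit callbacks.
 */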
1800 static struct thandle *osd_trans_create(const struct lu_env *env,
1801                                         struct dt_device *d)
1802 {
1803         struct osd_thread_info *oti = osd_oti_get(env);
1804         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1805         struct osd_thandle *oh;
1806         struct thandle *th;
1807
1808         ENTRY;
1809
1810         if (d->dd_rdonly) {
1811                 CERROR("%s: someone tries to start a transaction under "
1812                        "readonly mode, this should be disabled.\n",
1813                        osd_name(osd_dt_dev(d)));
1814                 dump_stack();
1815                 RETURN(ERR_PTR(-EROFS));
1816         }
1817
1818         /* no pending IO in this thread should be left from the previous request */
1819         LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1820
1821         sb_start_write(osd_sb(osd_dt_dev(d)));
1822
1823         OBD_ALLOC_GFP(oh, sizeof(*oh), GFP_NOFS);
1824         if (!oh) {
1825                 sb_end_write(osd_sb(osd_dt_dev(d)));
1826                 RETURN(ERR_PTR(-ENOMEM));
1827         }
1828
1829         oh->ot_quota_trans = &oti->oti_quota_trans;
1830         memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1831         th = &oh->ot_super;
1832         th->th_dev = d;
1833         th->th_result = 0;
1834         oh->ot_credits = 0;
1835         oh->oh_declared_ext = 0;
1836         INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1837         INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1838         INIT_LIST_HEAD(&oh->ot_trunc_locks);
1839         osd_th_alloced(oh);
1840
1841         memset(oti->oti_declare_ops, 0,
1842                sizeof(oti->oti_declare_ops));
1843         memset(oti->oti_declare_ops_cred, 0,
1844                sizeof(oti->oti_declare_ops_cred));
1845         memset(oti->oti_declare_ops_used, 0,
1846                sizeof(oti->oti_declare_ops_used));
1847
1848         oti->oti_ins_cache_depth++;
1849
1850         RETURN(th);
1851 }
1852
1853 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1854 {
1855         struct osd_thread_info *oti = osd_oti_get(env);
1856         struct osd_thandle *oh;
1857
1858         oh = container_of(th, struct osd_thandle, ot_super);
1859         LASSERT(oh != NULL);
1860
1861         CWARN("  create: %u/%u/%u, destroy: %u/%u/%u\n",
1862               oti->oti_declare_ops[OSD_OT_CREATE],
1863               oti->oti_declare_ops_cred[OSD_OT_CREATE],
1864               oti->oti_declare_ops_used[OSD_OT_CREATE],
1865               oti->oti_declare_ops[OSD_OT_DESTROY],
1866               oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1867               oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1868         CWARN("  attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1869               oti->oti_declare_ops[OSD_OT_ATTR_SET],
1870               oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1871               oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1872               oti->oti_declare_ops[OSD_OT_XATTR_SET],
1873               oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1874               oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1875         CWARN("  write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1876               oti->oti_declare_ops[OSD_OT_WRITE],
1877               oti->oti_declare_ops_cred[OSD_OT_WRITE],
1878               oti->oti_declare_ops_used[OSD_OT_WRITE],
1879               oti->oti_declare_ops[OSD_OT_PUNCH],
1880               oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1881               oti->oti_declare_ops_used[OSD_OT_PUNCH],
1882               oti->oti_declare_ops[OSD_OT_QUOTA],
1883               oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1884               oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1885         CWARN("  insert: %u/%u/%u, delete: %u/%u/%u\n",
1886               oti->oti_declare_ops[OSD_OT_INSERT],
1887               oti->oti_declare_ops_cred[OSD_OT_INSERT],
1888               oti->oti_declare_ops_used[OSD_OT_INSERT],
1889               oti->oti_declare_ops[OSD_OT_DELETE],
1890               oti->oti_declare_ops_cred[OSD_OT_DELETE],
1891               oti->oti_declare_ops_used[OSD_OT_DELETE]);
1892         CWARN("  ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1893               oti->oti_declare_ops[OSD_OT_REF_ADD],
1894               oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1895               oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1896               oti->oti_declare_ops[OSD_OT_REF_DEL],
1897               oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1898               oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1899 }
1900
1901 /*
1902  * Concurrency: shouldn't matter.
1903  */
1904 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1905                            struct thandle *th)
1906 {
1907         struct osd_thread_info *oti = osd_oti_get(env);
1908         struct osd_device *dev = osd_dt_dev(d);
1909         handle_t *jh;
1910         struct osd_thandle *oh;
1911         int rc;
1912
1913         ENTRY;
1914
1915         LASSERT(current->journal_info == NULL);
1916
1917         oh = container_of(th, struct osd_thandle, ot_super);
1918         LASSERT(oh != NULL);
1919         LASSERT(oh->ot_handle == NULL);
1920
1921         rc = dt_txn_hook_start(env, d, th);
1922         if (rc != 0)
1923                 GOTO(out, rc);
1924
1925         if (unlikely(osd_param_is_not_sane(dev, th))) {
1926                 static unsigned long last_printed;
1927                 static int last_credits;
1928
1929                 /*
1930                  * don't make noise on tiny testing systems;
1931                  * actual credits misuse will be caught anyway
1932                  */
1933                 if (last_credits != oh->ot_credits &&
1934                     time_after(jiffies, last_printed +
1935                                cfs_time_seconds(60)) &&
1936                     osd_transaction_size(dev) > 512) {
1937                         CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1938                               oh->ot_credits, osd_transaction_size(dev));
1939                         osd_trans_dump_creds(env, th);
1940                         libcfs_debug_dumpstack(NULL);
1941                         last_credits = oh->ot_credits;
1942                         last_printed = jiffies;
1943                 }
1944                 /*
1945                  * XXX Limit the credits to 'max_transaction_buffers', and
1946                  *     let the underlying filesystem catch the error if
1947                  *     we really need so many credits.
1948                  *
1949                  *     This should be removed when we can calculate the
1950                  *     credits precisely.
1951                  */
1952                 oh->ot_credits = osd_transaction_size(dev);
1953         } else if (ldiskfs_track_declares_assert != 0) {
1954                 /*
1955                  * reserve a few credits to prevent an assertion in JBD;
1956                  * our debugging mechanism will be able to detect
1957                  * overuse. This can help to debug single-update
1958                  * transactions
1959                  */
1960                 oh->ot_credits += 10;
1961                 if (unlikely(osd_param_is_not_sane(dev, th)))
1962                         oh->ot_credits = osd_transaction_size(dev);
1963         }
1964
1965         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_TXN_START))
1966                 GOTO(out, rc = -EIO);
1967
1968         /*
1969          * XXX temporary stuff. Some abstraction layer should
1970          * be used.
1971          */
1972         jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1973         osd_th_started(oh);
1974         if (!IS_ERR(jh)) {
1975                 oh->ot_handle = jh;
1976                 LASSERT(oti->oti_txns == 0);
1977
1978                 atomic_inc(&dev->od_commit_cb_in_flight);
1979                 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1980                               "osd-tx", th);
1981                 oti->oti_txns++;
1982                 rc = 0;
1983         } else {
1984                 rc = PTR_ERR(jh);
1985         }
1986 out:
1987         RETURN(rc);
1988 }
1989
1990 static int osd_seq_exists(const struct lu_env *env,
1991                           struct osd_device *osd, u64 seq)
1992 {
1993         struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1994         struct seq_server_site *ss = osd_seq_site(osd);
1995         int rc;
1996
1997         ENTRY;
1998
1999         LASSERT(ss != NULL);
2000         LASSERT(ss->ss_server_fld != NULL);
2001
2002         rc = osd_fld_lookup(env, osd, seq, range);
2003         if (rc != 0) {
2004                 if (rc != -ENOENT)
2005                         CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
2006                                osd_name(osd), seq, rc);
2007                 RETURN(0);
2008         }
2009
2010         RETURN(ss->ss_node_id == range->lsr_index);
2011 }
2012
2013 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
2014 {
2015         struct dt_txn_commit_cb *dcb;
2016         struct dt_txn_commit_cb *tmp;
2017
2018         /* call per-transaction stop callbacks if any */
2019         list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
2020                                  dcb_linkage) {
2021                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
2022                          "commit callback entry: magic=%x name='%s'\n",
2023                          dcb->dcb_magic, dcb->dcb_name);
2024                 list_del_init(&dcb->dcb_linkage);
2025                 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
2026         }
2027 }
2028
2029 /*
2030  * Concurrency: shouldn't matter.
2031  */
2032 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
2033                           struct thandle *th)
2034 {
2035         struct osd_thread_info *oti = osd_oti_get(env);
2036         struct osd_thandle *oh;
2037         struct osd_iobuf *iobuf = &oti->oti_iobuf;
2038         struct osd_device *osd = osd_dt_dev(th->th_dev);
2039         struct qsd_instance *qsd = osd_def_qsd(osd);
2040         struct lquota_trans *qtrans;
2041         LIST_HEAD(truncates);
2042         int rc = 0, remove_agents = 0;
2043
2044         ENTRY;
2045
2046         oh = container_of(th, struct osd_thandle, ot_super);
2047
2048         remove_agents = oh->ot_remove_agents;
2049
2050         qtrans = oh->ot_quota_trans;
2051         oh->ot_quota_trans = NULL;
2052
2053         /* move locks to local list, stop tx, execute truncates */
2054         list_splice(&oh->ot_trunc_locks, &truncates);
2055
2056         if (oh->ot_handle != NULL) {
2057                 int rc2;
2058
2059                 handle_t *hdl = oh->ot_handle;
2060
2061                 /*
2062                  * add commit callback;
2063                  * notice we don't do this in osd_trans_start()
2064                  * as the underlying transaction can change during truncate
2065                  */
2066                 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
2067                                              &oh->ot_jcb);
2068
2069                 LASSERT(oti->oti_txns == 1);
2070                 oti->oti_txns--;
2071
2072                 rc = dt_txn_hook_stop(env, th);
2073                 if (rc != 0)
2074                         CERROR("%s: failed in transaction hook: rc = %d\n",
2075                                osd_name(osd), rc);
2076
2077                 osd_trans_stop_cb(oh, rc);
2078                 /* hook functions might modify th_sync */
2079                 hdl->h_sync = th->th_sync;
2080
2081                 oh->ot_handle = NULL;
2082                 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
2083                 if (rc2 != 0)
2084                         CERROR("%s: failed to stop transaction: rc = %d\n",
2085                                osd_name(osd), rc2);
2086                 if (!rc)
2087                         rc = rc2;
2088
2089                 osd_process_truncates(&truncates);
2090         } else {
2091                 osd_trans_stop_cb(oh, th->th_result);
2092                 OBD_FREE_PTR(oh);
2093         }
2094
2095         osd_trunc_unlock_all(env, &truncates);
2096
2097         /* inform the quota slave device that the transaction is stopping */
2098         qsd_op_end(env, qsd, qtrans);
2099
2100         /*
2101          * as we want IO to the journal and data IO to be concurrent, we don't
2102          * block awaiting data IO completion in osd_do_bio(); instead we wait
2103          * here once the transaction is submitted to the journal. all regular
2104          * requests don't do direct IO (except read/write), thus this wait_event
2105          * becomes a no-op for them.
2106          *
2107          * IMPORTANT: we have to wait till any IO submitted by the thread is
2108          * completed, otherwise iobuf may be corrupted by a different request
2109          */
2110         wait_event(iobuf->dr_wait,
2111                        atomic_read(&iobuf->dr_numreqs) == 0);
2112         osd_fini_iobuf(osd, iobuf);
2113         if (!rc)
2114                 rc = iobuf->dr_error;
2115
2116         if (unlikely(remove_agents != 0))
2117                 osd_process_scheduled_agent_removals(env, osd);
2118
2119         LASSERT(oti->oti_ins_cache_depth > 0);
2120         oti->oti_ins_cache_depth--;
2121         /* reset OI cache for safety */
2122         if (oti->oti_ins_cache_depth == 0)
2123                 oti->oti_ins_cache_used = 0;
2124
2125         sb_end_write(osd_sb(osd));
2126
2127         RETURN(rc);
2128 }
2129
2130 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
2131 {
2132         struct osd_thandle *oh = container_of(th, struct osd_thandle,
2133                                               ot_super);
2134
2135         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
2136         LASSERT(dcb->dcb_func != NULL);
2137         if (dcb->dcb_flags & DCB_TRANS_STOP)
2138                 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
2139         else
2140                 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
2141
2142         return 0;
2143 }
2144
2145 /*
2146  * Called just before object is freed. Releases all resources except for
2147  * object itself (that is released by osd_object_free()).
2148  *
2149  * Concurrency: no concurrent access is possible that late in object
2150  * life-cycle.
2151  */
2152 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
2153 {
2154         struct osd_object *obj = osd_obj(l);
2155         struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
2156         struct inode *inode = obj->oo_inode;
2157         __u64 projid;
2158         qid_t uid;
2159         qid_t gid;
2160
2161         LINVRNT(osd_invariant(obj));
2162
2163         /*
2164          * If object is unlinked remove fid->ino mapping from object index.
2165          */
2166
2167         osd_index_fini(obj);
2168
2169         if (!inode)
2170                 return;
2171
2172         if (osd_has_index(obj) && obj->oo_dt.do_index_ops == &osd_index_iam_ops)
2173                 ldiskfs_set_inode_flag(inode, LDISKFS_INODE_JOURNAL_DATA);
2174
2175         uid = i_uid_read(inode);
2176         gid = i_gid_read(inode);
2177         projid = i_projid_read(inode);
2178
2179         obj->oo_inode = NULL;
2180         iput(inode);
2181
2182         /* do not rebalance quota if the caller needs to release memory,
2183          * otherwise qsd_refresh_usage() may enter a new ldiskfs
2184          * transaction and risk a deadlock - LU-12178 */
2185         if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
2186                 return;
2187
2188         if (!obj->oo_header && qsd) {
2189                 struct osd_thread_info *info = osd_oti_get(env);
2190                 struct lquota_id_info *qi = &info->oti_qi;
2191
2192                 /* Release granted quota to master if necessary */
2193                 qi->lqi_id.qid_uid = uid;
2194                 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
2195
2196                 qi->lqi_id.qid_uid = gid;
2197                 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
2198
2199                 qi->lqi_id.qid_uid = projid;
2200                 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
2201         }
2202 }
2203
2204 /*
2205  * Concurrency: ->loo_object_release() is called under site spin-lock.
2206  */
2207 static void osd_object_release(const struct lu_env *env,
2208                                struct lu_object *l)
2209 {
2210         struct osd_object *o = osd_obj(l);
2211
2212         /*
2213          * nobody should be releasing a non-destroyed object with nlink=0;
2214          * the API allows this, but ldiskfs doesn't like it and then reports
2215          * this inode as deleted
2216          */
2217         LASSERT(!(o->oo_destroyed == 0 && o->oo_inode &&
2218                   o->oo_inode->i_nlink == 0));
2219 }
2220
2221 /*
2222  * Concurrency: shouldn't matter.
2223  */
2224 static int osd_object_print(const struct lu_env *env, void *cookie,
2225                             lu_printer_t p, const struct lu_object *l)
2226 {
2227         struct osd_object *o = osd_obj(l);
2228         struct iam_descr *d;
2229
2230         if (o->oo_dir != NULL)
2231                 d = o->oo_dir->od_container.ic_descr;
2232         else
2233                 d = NULL;
2234         return (*p)(env, cookie,
2235                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
2236                     o, o->oo_inode,
2237                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
2238                     o->oo_inode ? o->oo_inode->i_generation : 0,
2239                     d ? d->id_ops->id_name : "plain");
2240 }
2241
2242 /*
2243  * Concurrency: shouldn't matter.
2244  */
2245 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2246                 struct obd_statfs *sfs, struct obd_statfs_info *info)
2247 {
2248         struct osd_device *osd = osd_dt_dev(d);
2249         struct super_block *sb = osd_sb(osd);
2250         struct kstatfs *ksfs;
2251         __u64 reserved;
2252         int result = 0;
2253
2254         if (unlikely(osd->od_mnt == NULL))
2255                 return -EINPROGRESS;
2256
2257         /* osd_lproc.c calls this without env, allocate ksfs for that case */
2258         if (unlikely(env == NULL)) {
2259                 OBD_ALLOC_PTR(ksfs);
2260                 if (ksfs == NULL)
2261                         return -ENOMEM;
2262         } else {
2263                 ksfs = &osd_oti_get(env)->oti_ksfs;
2264         }
2265
2266         result = sb->s_op->statfs(sb->s_root, ksfs);
2267         if (result)
2268                 goto out;
2269
2270         statfs_pack(sfs, ksfs);
2271         if (unlikely(sb->s_flags & SB_RDONLY))
2272                 sfs->os_state |= OS_STATFS_READONLY;
2273
2274         sfs->os_state |= osd->od_nonrotational ? OS_STATFS_NONROT : 0;
2275
2276         if (ldiskfs_has_feature_extents(sb))
2277                 sfs->os_maxbytes = sb->s_maxbytes;
2278         else
2279                 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2280
2281         /*
2282          * Reserve some space so as to avoid fragmenting the filesystem too much.
2283          * Fragmentation not only impacts performance, but can also increase
2284          * metadata overhead significantly, causing grant calculation to be
2285          * wrong.
2286          *
2287          * Reserve 0.78% of total space, at least 8MB for small filesystems.
2288          */
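
        /*
         * For illustration: 0.78% is roughly 1/128, so assuming
         * OSD_STATFS_RESERVED_SHIFT is 7, the reserve below is
         * os_blocks >> 7, clamped to no less than OSD_STATFS_RESERVED
         * worth of blocks for small filesystems.
         */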
2289         BUILD_BUG_ON(OSD_STATFS_RESERVED <= LDISKFS_MAX_BLOCK_SIZE);
2290         reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2291         if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2292                 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2293
2294         sfs->os_blocks -= reserved;
2295         sfs->os_bfree  -= min(reserved, sfs->os_bfree);
2296         sfs->os_bavail -= min(reserved, sfs->os_bavail);
2297
2298 out:
2299         if (unlikely(env == NULL))
2300                 OBD_FREE_PTR(ksfs);
2301         return result;
2302 }
2303
2304 /**
2305  * Estimate space needed for file creations. We assume the largest filename
2306  * is the decimal representation of 2^64 - 1, hence a filename of 20 chars.
2307  * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
2308  */
2309 #ifdef __LDISKFS_DIR_REC_LEN
2310 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2311 #else
2312 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2313 #endif
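
/*
 * For reference, assuming the usual ldiskfs directory entry formula
 * rec_len = (name_len + 8 + 3) & ~3, a 20-character name yields
 * (20 + 8 + 3) & ~3 = 28 bytes, i.e. the 28 bytes per object quoted above.
 */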
2314
2315 /*
2316  * Concurrency: doesn't access mutable data.
2317  */
2318 static void osd_conf_get(const struct lu_env *env,
2319                          const struct dt_device *dev,
2320                          struct dt_device_param *param)
2321 {
2322         struct osd_device *d = osd_dt_dev(dev);
2323         struct super_block *sb = osd_sb(d);
2324         struct blk_integrity *bi = bdev_get_integrity(sb->s_bdev);
2325         const char *name;
2326         int ea_overhead;
2327
2328         /*
2329          * XXX should be taken from not-yet-existing fs abstraction layer.
2330          */
2331         param->ddp_max_name_len = LDISKFS_NAME_LEN;
2332         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
2333         param->ddp_symlink_max  = sb->s_blocksize;
2334         param->ddp_mount_type   = LDD_MT_LDISKFS;
2335         if (ldiskfs_has_feature_extents(sb))
2336                 param->ddp_maxbytes = sb->s_maxbytes;
2337         else
2338                 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2339         /*
2340          * inodes are statically allocated, so per-inode space consumption
2341          * is the space consumed by the directory entry
2342          */
2343         param->ddp_inodespace     = PER_OBJ_USAGE;
2344         /*
2345          * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2346          * is 128MB) which is unlikely to be hit in real life. Report a smaller
2347          * maximum length to not under-count the actual number of extents
2348          * needed for writing a file if there are sub-optimal block allocations.
2349          */
2350         param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
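        /*
         * i.e. with the figures above (32k blocks == 128MB at a 4KB block
         * size), the reported per-extent maximum is 16k blocks, about 64MB.
         */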
2351         /* worst-case extent insertion metadata overhead */
2352         param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2353         param->ddp_mntopts = 0;
2354         if (test_opt(sb, XATTR_USER))
2355                 param->ddp_mntopts |= MNTOPT_USERXATTR;
2356         if (test_opt(sb, POSIX_ACL))
2357                 param->ddp_mntopts |= MNTOPT_ACL;
2358
2359         /*
2360          * LOD might calculate the max stripe count based on max_ea_size,
2361          * so we need to take the overhead into account as well:
2362          * xattr_header + magic + xattr_entry_head
2363          */
2364         ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2365                       LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2366
2367 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2368         if (ldiskfs_has_feature_ea_inode(sb))
2369                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2370                                                                 ea_overhead;
2371         else
2372 #endif
2373                 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2374
2375         if (param->ddp_max_ea_size > OBD_MAX_EA_SIZE)
2376                 param->ddp_max_ea_size = OBD_MAX_EA_SIZE;
2377
2378         /*
2379          * Preferred RPC size for efficient disk IO.  4MB shows good
2380          * all-around performance for ldiskfs, but use bigalloc chunk size
2381          * by default if larger.
2382          */
2383 #if defined(LDISKFS_CLUSTER_SIZE)
2384         if (LDISKFS_CLUSTER_SIZE(sb) > DT_DEF_BRW_SIZE)
2385                 param->ddp_brw_size = LDISKFS_CLUSTER_SIZE(sb);
2386         else
2387 #endif
2388                 param->ddp_brw_size = DT_DEF_BRW_SIZE;
2389
2390         param->ddp_t10_cksum_type = 0;
2391         if (bi) {
2392                 unsigned short interval = blk_integrity_interval(bi);
2393                 name = blk_integrity_name(bi);
2394                 /*
2395                  * Expected values:
2396                  * T10-DIF-TYPE1-CRC
2397                  * T10-DIF-TYPE3-CRC
2398                  * T10-DIF-TYPE1-IP
2399                  * T10-DIF-TYPE3-IP
2400                  */
2401                 if (strncmp(name, "T10-DIF-TYPE",
2402                             sizeof("T10-DIF-TYPE") - 1) == 0) {
2403                         /* type_off also skips the "1-" or "3-" after "TYPE" */
2404                         const int type_off = sizeof("T10-DIF-TYPE.");
2405                         char type_number = name[type_off - 2];
2406
2407                         if (interval != 512 && interval != 4096) {
2408                                 CERROR("%s: unsupported T10PI sector size %u\n",
2409                                        d->od_svname, interval);
2410                         } else if (type_number != '1' && type_number != '3') {
2411                                 CERROR("%s: unsupported T10PI type %s\n",
2412                                        d->od_svname, name);
2413                         } else if (strcmp(name + type_off, "CRC") == 0) {
2414                                 d->od_t10_type = type_number == '1' ?
2415                                         OSD_T10_TYPE1_CRC : OSD_T10_TYPE3_CRC;
2416                                 param->ddp_t10_cksum_type = interval == 512 ?
2417                                         OBD_CKSUM_T10CRC512 :
2418                                         OBD_CKSUM_T10CRC4K;
2419                         } else if (strcmp(name + type_off, "IP") == 0) {
2420                                 d->od_t10_type = type_number == '1' ?
2421                                         OSD_T10_TYPE1_IP : OSD_T10_TYPE3_IP;
2422                                 param->ddp_t10_cksum_type = interval == 512 ?
2423                                         OBD_CKSUM_T10IP512 :
2424                                         OBD_CKSUM_T10IP4K;
2425                         } else {
2426                                 CERROR("%s: unsupported checksum type of "
2427                                        "T10PI type '%s'",
2428                                        d->od_svname, name);
2429                         }
2430
2431                 } else {
2432                         CERROR("%s: unsupported T10PI type '%s'",
2433                                d->od_svname, name);
2434                 }
2435         }
2436
2437         param->ddp_has_lseek_data_hole = true;
2438 }
2439
2440 static struct super_block *osd_mnt_sb_get(const struct dt_device *d)
2441 {
2442         return osd_sb(osd_dt_dev(d));
2443 }
2444
2445 /*
2446  * Concurrency: shouldn't matter.
2447  */
2448 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2449 {
2450         int rc;
2451         struct super_block *s = osd_sb(osd_dt_dev(d));
2452         ENTRY;
2453
2454         down_read(&s->s_umount);
2455         rc = s->s_op->sync_fs(s, 1);
2456         up_read(&s->s_umount);
2457
2458         CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
2459                rc);
2460
2461         return rc;
2462 }
2463
2464 /**
2465  * Start commit for OSD device.
2466  *
2467  * An implementation of dt_commit_async method for OSD device.
2468  * Asynchronously starts the underlying fs sync and thereby a transaction
2469  * commit.
2470  *
2471  * \param env environment
2472  * \param d dt device
2473  *
2474  * \see dt_device_operations
2475  */
2476 static int osd_commit_async(const struct lu_env *env,
2477                             struct dt_device *d)
2478 {
2479         struct super_block *s = osd_sb(osd_dt_dev(d));
2480         int rc;
2481
2482         ENTRY;
2483
2484         CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
2485         down_read(&s->s_umount);
2486         rc = s->s_op->sync_fs(s, 0);
2487         up_read(&s->s_umount);
2488
2489         RETURN(rc);
2490 }
2491
2492 static int (*priv_security_file_alloc)(struct file *file);
2493
2494 int osd_security_file_alloc(struct file *file)
2495 {
2496         if (priv_security_file_alloc)
2497                 return priv_security_file_alloc(file);
2498         return 0;
2499 }
2500
2501 /*
2502  * Concurrency: shouldn't matter.
2503  */
2504 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2505 {
2506         struct super_block *sb = osd_sb(osd_dt_dev(d));
2507         struct block_device *dev = sb->s_bdev;
2508         int rc = -EOPNOTSUPP;
2509
2510         ENTRY;
2511
2512         CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2513                osd_dt_dev(d)->od_svname, (long)dev, rc);
2514
2515         RETURN(rc);
2516 }
2517
2518 /**
2519  * Note: quota credits are not counted here.
2520  * If we mount with --data_journal we may need more.
2521  */
2522 const int osd_dto_credits_noquota[DTO_NR] = {
2523         /**
2524          * Insert.
2525          * INDEX_EXTRA_TRANS_BLOCKS(8) +
2526          * SINGLEDATA_TRANS_BLOCKS(8)
2527          * XXX Note: maybe iam needs more, since iam has more levels than
2528          *           the EXT3 htree.
2529          */
2530         [DTO_INDEX_INSERT]  = 16,
2531         /**
2532          * Delete
2533          * just modify a single entry, probably merging a few within a block
2534          */
2535         [DTO_INDEX_DELETE]  = 1,
2536         /**
2537          * Used for OI scrub
2538          */
2539         [DTO_INDEX_UPDATE]  = 16,
2540         /**
2541          * 4(inode, inode bits, groups, GDT)
2542          *   notice: OI updates are counted separately with DTO_INDEX_INSERT
2543          */
2544         [DTO_OBJECT_CREATE] = 4,
2545         /**
2546          * 4(inode, inode bits, groups, GDT)
2547          *   notice: OI updates are counted separately with DTO_INDEX_DELETE
2548          */
2549         [DTO_OBJECT_DELETE] = 4,
2550         /**
2551          * Attr set credits (inode)
2552          */
2553         [DTO_ATTR_SET_BASE] = 1,
2554         /**
2555          * Xattr set. The same as xattr of EXT3.
2556          * DATA_TRANS_BLOCKS(14)
2557          * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2558          * are also counted in. Not sure why.
2559          */
2560         [DTO_XATTR_SET]     = 14,
2561         /**
2562          * credits for inode change during write.
2563          */
2564         [DTO_WRITE_BASE]    = 3,
2565         /**
2566          * credits for single block write.
2567          */
2568         [DTO_WRITE_BLOCK]   = 14,
2569         /**
2570          * Attr set credits for chown.
2571          * These are extra credits for setattr, and they are zero without quota
2572          */
2573         [DTO_ATTR_SET_CHOWN] = 0
2574 };
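
/*
 * For illustration: declaring an object creation together with its OI
 * mapping insert would reserve roughly
 * osd_dto_credits_noquota[DTO_OBJECT_CREATE] +
 * osd_dto_credits_noquota[DTO_INDEX_INSERT] = 4 + 16 = 20 credits,
 * before any quota overhead.
 */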
2575
2576 static const struct dt_device_operations osd_dt_ops = {
2577         .dt_root_get       = osd_root_get,
2578         .dt_statfs         = osd_statfs,
2579         .dt_trans_create   = osd_trans_create,
2580         .dt_trans_start    = osd_trans_start,
2581         .dt_trans_stop     = osd_trans_stop,
2582         .dt_trans_cb_add   = osd_trans_cb_add,
2583         .dt_conf_get       = osd_conf_get,
2584         .dt_mnt_sb_get     = osd_mnt_sb_get,
2585         .dt_sync           = osd_sync,
2586         .dt_ro             = osd_ro,
2587         .dt_commit_async   = osd_commit_async,
2588 };
2589
2590 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
2591                           unsigned int role)
2592 {
2593         struct osd_object *obj = osd_dt_obj(dt);
2594         struct osd_thread_info *oti = osd_oti_get(env);
2595
2596         LINVRNT(osd_invariant(obj));
2597
2598         LASSERT(obj->oo_owner != env);
2599         down_read_nested(&obj->oo_sem, role);
2600
2601         LASSERT(obj->oo_owner == NULL);
2602         oti->oti_r_locks++;
2603 }
2604
2605 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
2606                            unsigned int role)
2607 {
2608         struct osd_object *obj = osd_dt_obj(dt);
2609         struct osd_thread_info *oti = osd_oti_get(env);
2610
2611         LINVRNT(osd_invariant(obj));
2612
2613         LASSERT(obj->oo_owner != env);
2614         down_write_nested(&obj->oo_sem, role);
2615
2616         LASSERT(obj->oo_owner == NULL);
2617         obj->oo_owner = env;
2618         oti->oti_w_locks++;
2619 }
2620
2621 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
2622 {
2623         struct osd_object *obj = osd_dt_obj(dt);
2624         struct osd_thread_info *oti = osd_oti_get(env);
2625
2626         LINVRNT(osd_invariant(obj));
2627
2628         LASSERT(oti->oti_r_locks > 0);
2629         oti->oti_r_locks--;
2630         up_read(&obj->oo_sem);
2631 }
2632
2633 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
2634 {
2635         struct osd_object *obj = osd_dt_obj(dt);
2636         struct osd_thread_info *oti = osd_oti_get(env);
2637
2638         LINVRNT(osd_invariant(obj));
2639
2640         LASSERT(obj->oo_owner == env);
2641         LASSERT(oti->oti_w_locks > 0);
2642         oti->oti_w_locks--;
2643         obj->oo_owner = NULL;
2644         up_write(&obj->oo_sem);
2645 }
2646
2647 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
2648 {
2649         struct osd_object *obj = osd_dt_obj(dt);
2650
2651         LINVRNT(osd_invariant(obj));
2652
2653         return obj->oo_owner == env;
2654 }
2655
2656 static void osd_inode_getattr(const struct lu_env *env,
2657                               struct inode *inode, struct lu_attr *attr)
2658 {
2659         attr->la_valid  |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2660                            LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2661                            LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2662                            LA_BLKSIZE | LA_TYPE | LA_BTIME;
2663
2664         attr->la_atime = inode->i_atime.tv_sec;
2665         attr->la_mtime = inode->i_mtime.tv_sec;
2666         attr->la_ctime = inode->i_ctime.tv_sec;
2667         attr->la_btime = LDISKFS_I(inode)->i_crtime.tv_sec;
2668         attr->la_mode    = inode->i_mode;
2669         attr->la_size    = i_size_read(inode);
2670         attr->la_blocks  = inode->i_blocks;
2671         attr->la_uid     = i_uid_read(inode);
2672         attr->la_gid     = i_gid_read(inode);
2673         attr->la_projid  = i_projid_read(inode);
2674         attr->la_flags   = ll_inode_to_ext_flags(inode->i_flags);
2675         attr->la_nlink   = inode->i_nlink;
2676         attr->la_rdev    = inode->i_rdev;
2677         attr->la_blksize = 1 << inode->i_blkbits;
2678         attr->la_blkbits = inode->i_blkbits;
2679         /*
2680          * Ext4 does not transfer the inherit flag from the raw inode
2681          * to the inode flags, and ext4 internally tests the raw inode
2682          * @i_flags directly. Instead of patching ext4, we do it here.
2683          */
2684         if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
2685                 attr->la_flags |= LUSTRE_PROJINHERIT_FL;
2686 }
2687
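/*
 * Count the entries of a namespace-visible directory by walking its index
 * iterator, caching the result in oo_dirent_count so that later
 * osd_attr_get() calls can reuse it.
 */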
2688 static int osd_dirent_count(const struct lu_env *env, struct dt_object *dt,
2689                             u64 *count)
2690 {
2691         struct osd_object *obj = osd_dt_obj(dt);
2692         const struct dt_it_ops *iops;
2693         struct dt_it *it;
2694         int rc;
2695
2696         ENTRY;
2697
2698         LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2699         LASSERT(fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)));
2700
2701         if (obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
2702                 *count = obj->oo_dirent_count;
2703                 RETURN(0);
2704         }
2705
2706         /* directory not initialized yet */
2707         if (!dt->do_index_ops) {
2708                 *count = 0;
2709                 RETURN(0);
2710         }
2711
2712         iops = &dt->do_index_ops->dio_it;
2713         it = iops->init(env, dt, LUDA_64BITHASH);
2714         if (IS_ERR(it))
2715                 RETURN(PTR_ERR(it));
2716
2717         rc = iops->load(env, it, 0);
2718         if (rc < 0) {
2719                 if (rc == -ENODATA) {
2720                         rc = 0;
2721                         *count = 0;
2722                 }
2723                 GOTO(out, rc);
2724         }
2725         if (rc > 0)
2726                 rc = iops->next(env, it);
2727
2728         for (*count = 0; rc == 0 || rc == -ESTALE; rc = iops->next(env, it)) {
2729                 if (rc == -ESTALE)
2730                         continue;
2731
2732                 if (iops->key_size(env, it) == 0)
2733                         continue;
2734
2735                 (*count)++;
2736         }
2737         if (rc == 1) {
2738                 obj->oo_dirent_count = *count;
2739                 rc = 0;
2740         }
2741 out:
2742         iops->put(env, it);
2743         iops->fini(env, it);
2744
2745         RETURN(rc);
2746 }
2747
2748 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
2749                         struct lu_attr *attr)
2750 {
2751         struct osd_object *obj = osd_dt_obj(dt);
2752         int rc = 0;
2753
2754         if (unlikely(!dt_object_exists(dt)))
2755                 return -ENOENT;
2756         if (unlikely(obj->oo_destroyed))
2757                 return -ENOENT;
2758
2759         LASSERT(!dt_object_remote(dt));
2760         LINVRNT(osd_invariant(obj));
2761
2762         spin_lock(&obj->oo_guard);
2763         osd_inode_getattr(env, obj->oo_inode, attr);
2764         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
2765                 attr->la_valid |= LA_FLAGS;
2766                 attr->la_flags |= LUSTRE_ORPHAN_FL;
2767         }
2768         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
2769                 attr->la_valid |= LA_FLAGS;
2770                 attr->la_flags |= LUSTRE_ENCRYPT_FL;
2771         }
2772         spin_unlock(&obj->oo_guard);
2773
2774         if (S_ISDIR(obj->oo_inode->i_mode) &&
2775             fid_is_namespace_visible(lu_object_fid(&dt->do_lu)))
2776                 rc = osd_dirent_count(env, dt, &attr->la_dirent_count);
2777
2778         return rc;
2779 }
2780
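/*
 * Declare the quota updates needed when ownership of this object moves
 * from old_id to new_id for the given quota type: one inode and "bspace"
 * blocks are charged to the new id and released from the old id; EDQUOT
 * and EINPROGRESS may be ignored when the change is made administratively.
 */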
2781 static int osd_declare_attr_qid(const struct lu_env *env,
2782                                 struct osd_object *obj,
2783                                 struct osd_thandle *oh, long long bspace,
2784                                 qid_t old_id, qid_t new_id, bool enforce,
2785                                 unsigned int type, bool ignore_edquot)
2786 {
2787         int rc;
2788         struct osd_thread_info *info = osd_oti_get(env);
2789         struct lquota_id_info  *qi = &info->oti_qi;
2790
2791         qi->lqi_type = type;
2792         /* inode accounting */
2793         qi->lqi_is_blk = false;
2794
2795         /* one more inode for the new id ... */
2796         qi->lqi_id.qid_uid = new_id;
2797         qi->lqi_space      = 1;
2798         /* Reserve credits for the new id */
2799         rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2800         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2801                 rc = 0;
2802         if (rc)
2803                 RETURN(rc);
2804
2805         /* and one less inode for the current id */
2806         qi->lqi_id.qid_uid = old_id;
2807         qi->lqi_space = -1;
2808         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2809         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2810                 rc = 0;
2811         if (rc)
2812                 RETURN(rc);
2813
2814         /* block accounting */
2815         qi->lqi_is_blk = true;
2816
2817         /* more blocks for the new id ... */
2818         qi->lqi_id.qid_uid = new_id;
2819         qi->lqi_space      = bspace;
2820         /*
2821          * Credits for the new uid have been reserved; re-use "obj"
2822          * to save credit reservation.
2823          */
2824         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2825         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2826                 rc = 0;
2827         if (rc)
2828                 RETURN(rc);
2829
2830         /* and finally fewer blocks for the current uid */
2831         qi->lqi_id.qid_uid = old_id;
2832         qi->lqi_space      = -bspace;
2833         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2834         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
2835                 rc = 0;
2836
2837         RETURN(rc);
2838 }
2839
2840 static int osd_declare_attr_set(const struct lu_env *env,
2841                                 struct dt_object *dt,
2842                                 const struct lu_attr *attr,
2843                                 struct thandle *handle)
2844 {
2845         struct osd_thandle *oh;
2846         struct osd_object *obj;
2847         qid_t uid;
2848         qid_t gid;
2849         long long bspace;
2850         int rc = 0;
2851         bool enforce;
2852
2853         ENTRY;
2854
2855         LASSERT(dt != NULL);
2856         LASSERT(handle != NULL);
2857
2858         obj = osd_dt_obj(dt);
2859         LASSERT(osd_invariant(obj));
2860
2861         oh = container_of(handle, struct osd_thandle, ot_super);
2862         LASSERT(oh->ot_handle == NULL);
2863
2864         osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2865                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2866
2867         osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2868                              osd_dto_credits_noquota[DTO_XATTR_SET]);
2869
2870         if (attr == NULL || obj->oo_inode == NULL)
2871                 RETURN(rc);
2872
2873         bspace   = obj->oo_inode->i_blocks << 9;
2874         bspace   = toqb(bspace);
2875
2876         /*
2877          * Changing ownership is always performed by the superuser; it should
2878          * not fail with EDQUOT unless required explicitly.
2879          *
2880          * We still need to call the osd_declare_qid() to calculate the journal
2881          * credits for updating quota accounting files and to trigger quota
2882          * space adjustment once the operation is completed.
2883          */
2884         if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2885                 bool ignore_edquot = !(attr->la_flags & LUSTRE_SET_SYNC_FL);
2886
2887                 if (!ignore_edquot)
2888                         CDEBUG(D_QUOTA,
2889                                "%s: enforce quota on UID %u, GID %u (quota space is %lld)\n",
2890                                osd_ino2name(obj->oo_inode), attr->la_uid,
2891                                attr->la_gid, bspace);
2892
2893                 /* USERQUOTA */
2894                 uid = i_uid_read(obj->oo_inode);
2895                 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2896                 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2897                                           attr->la_uid, enforce, USRQUOTA,
2898                                           true);
2899                 if (rc)
2900                         RETURN(rc);
2901
2902                 gid = i_gid_read(obj->oo_inode);
2903                 CDEBUG(D_QUOTA, "declare uid %d -> %d gid %d -> %d\n", uid,
2904                        attr->la_uid, gid, attr->la_gid);
2905                 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2906                 rc = osd_declare_attr_qid(env, obj, oh, bspace, gid,
2907                                           attr->la_gid, enforce, GRPQUOTA,
2908                                           ignore_edquot);
2909                 if (rc)
2910                         RETURN(rc);
2911
2912         }
2913 #ifdef HAVE_PROJECT_QUOTA
2914         if (attr->la_valid & LA_PROJID) {
2915                 __u32 projid = i_projid_read(obj->oo_inode);
2916
2917                 enforce = (attr->la_valid & LA_PROJID) &&
2918                                         (attr->la_projid != projid);
2919                 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2920                                           (qid_t)projid, (qid_t)attr->la_projid,
2921                                           enforce, PRJQUOTA, true);
2922                 if (rc)
2923                         RETURN(rc);
2924         }
2925 #endif
2926         RETURN(rc);
2927 }
2928
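/*
 * Copy the attributes in @attr into the in-core inode.  Size updates are
 * restricted to regular files, and i_blocks is deliberately left alone
 * because it is maintained by ldiskfs and used for quota accounting.
 */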
2929 static int osd_inode_setattr(const struct lu_env *env,
2930                              struct inode *inode, const struct lu_attr *attr)
2931 {
2932         __u64 bits = attr->la_valid;
2933
2934         /* Only allow setting the size on regular files */
2935         if (!S_ISREG(inode->i_mode))
2936                 bits &= ~(LA_SIZE | LA_BLOCKS);
2937
2938         if (bits == 0)
2939                 return 0;
2940
2941         if (bits & LA_ATIME)
2942                 inode->i_atime = osd_inode_time(inode, attr->la_atime);
2943         if (bits & LA_CTIME)
2944                 inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
2945         if (bits & LA_MTIME)
2946                 inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
2947         if (bits & LA_SIZE) {
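                /*
                 * Update both the in-core size and ldiskfs's on-disk
                 * i_disksize under i_lock so the two stay consistent.
                 */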
2948                 spin_lock(&inode->i_lock);
2949                 LDISKFS_I(inode)->i_disksize = attr->la_size;
2950                 i_size_write(inode, attr->la_size);
2951                 spin_unlock(&inode->i_lock);
2952         }
2953
2954         /*
2955          * The OSD should not change "i_blocks", which is used by quota;
2956          * only ldiskfs should modify it.
2957          */
2958         if (bits & LA_MODE)
2959                 inode->i_mode = (inode->i_mode & S_IFMT) |
2960                                 (attr->la_mode & ~S_IFMT);
2961         if (bits & LA_UID)
2962                 i_uid_write(inode, attr->la_uid);
2963         if (bits & LA_GID)
2964                 i_gid_write(inode, attr->la_gid);
2965         if (bits & LA_PROJID)
2966                 i_projid_write(inode, attr->la_projid);
2967         if (bits & LA_NLINK)
2968                 set_nlink(inode, attr->la_nlink);
2969         if (bits & LA_RDEV)
2970                 inode->i_rdev = attr->la_rdev;
2971
2972         if (bits & LA_FLAGS) {
2973                 /* always keep S_NOCMTIME */
2974                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2975                                  S_NOCMTIME;
2976 #if defined(S_ENCRYPTED)
2977                 /* Always remove S_ENCRYPTED, because ldiskfs must not be
2978                  * aware of the encryption status. It is only stored in the
2979                  * LMA so that it can be forwarded to the client side.
2980                  */
2981                 inode->i_flags &= ~S_ENCRYPTED;
2982 #endif
2983                 /*
2984                  * Ext4 does not transfer the inherit flags from
2985                  * @inode->i_flags to the raw inode's i_flags when writing
2986                  * flags, so we do it explicitly here.
2987                  */
2988                 if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
2989                         LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
2990                 else
2991                         LDISKFS_I(inode)->i_flags &= ~LUSTRE_PROJINHERIT_FL;
2992         }
2993         return 0;
2994 }
2995
2996 #ifdef HAVE_PROJECT_QUOTA
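/*
 * Transfer quota for a project ID change on @inode, broadly similar to
 * ext4's own project-change path: verify that the filesystem supports the
 * project feature and that the on-disk inode can hold i_projid, then move
 * the inode's usage to the new project dquot.
 */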
2997 static int osd_transfer_project(struct inode *inode, __u32 projid,
2998                                 struct thandle *handle)
2999 {
3000         struct super_block *sb = inode->i_sb;
3001         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
3002         int err;
3003         kprojid_t kprojid;
3004         struct ldiskfs_iloc iloc;
3005         struct ldiskfs_inode *raw_inode;
3006         struct dquot *transfer_to[LDISKFS_MAXQUOTAS] = { };
3007
3008         if (!ldiskfs_has_feature_project(sb)) {
3009                 LASSERT(__kprojid_val(LDISKFS_I(inode)->i_projid)
3010                         == LDISKFS_DEF_PROJID);
3011                 if (projid != LDISKFS_DEF_PROJID)
3012                         return -EOPNOTSUPP;
3013                 else
3014                         return 0;
3015         }
3016
3017         if (LDISKFS_INODE_SIZE(sb) <= LDISKFS_GOOD_OLD_INODE_SIZE)
3018                 return -EOPNOTSUPP;
3019
3020         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3021         if (projid_eq(kprojid, LDISKFS_I(inode)->i_projid))
3022                 return 0;
3023
3024         err = ldiskfs_get_inode_loc(inode, &iloc);
3025         if (err)
3026                 return err;
3027
3028         raw_inode = ldiskfs_raw_inode(&iloc);
3029         if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
3030                 struct osd_thandle *oh = container_of(handle,
3031                                                       struct osd_thandle,
3032                                                       ot_super);
3033                 /*
3034                  * Try to expand the inode size automatically.
3035                  */
3036                 ldiskfs_mark_inode_dirty(oh->ot_handle, inode);
3037                 if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
3038                         err = -EOVERFLOW;
3039                         brelse(iloc.bh);
3040                         return err;
3041                 }
3042         }
3043         brelse(iloc.bh);
3044
3045         dquot_initialize(inode);
3046         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3047         if (transfer_to[PRJQUOTA]) {
3048                 err = __dquot_transfer(inode, transfer_to);
3049                 dqput(transfer_to[PRJQUOTA]);
3050                 if (err)
3051                         return err;
3052         }
3053
3054         return err;
3055 }
3056 #endif
3057
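/*
 * Transfer quota usage when the ownership of @inode changes.  UID/GID
 * changes go through the generic dquot_transfer(), while project ID
 * changes use osd_transfer_project() when project quota is supported.
 */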
3058 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr,
3059                               struct thandle *handle)
3060 {
3061         int rc;
3062
3063         if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
3064             (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
3065                 struct iattr iattr;
3066
3067                 CDEBUG(D_QUOTA,
3068                        "executing dquot_transfer inode %ld uid %d -> %d gid %d -> %d\n",
3069                        inode->i_ino, i_uid_read(inode), attr->la_uid,
3070                        i_gid_read(inode), attr->la_gid);
3071
3072                 dquot_initialize(inode);
3073                 iattr.ia_valid = 0;
3074                 if (attr->la_valid & LA_UID)
3075                         iattr.ia_valid |= ATTR_UID;
3076                 if (attr->la_valid & LA_GID)
3077                         iattr.ia_valid |= ATTR_GID;
3078                 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
3079                 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
3080
3081                 rc = dquot_transfer(inode, &iattr);
3082                 if (rc) {
3083                         CERROR("%s: quota transfer failed. Is quota enforcement enabled on the ldiskfs filesystem? rc = %d\n",
3084                                osd_ino2name(inode), rc);
3085                         return rc;
3086                 }
3087         }
3088
3089         /* Project ID changes are transferred separately from UID/GID */
3090         if (attr->la_valid & LA_PROJID &&
3091             attr->la_projid != i_projid_read(inode)) {
3092 #ifdef HAVE_PROJECT_QUOTA
3093                 rc = osd_transfer_project(inode, attr->la_projid, handle);
3094 #else
3095                 rc = -ENOTSUPP;
3096 #endif
3097                 if (rc) {
3098                         CERROR("%s: quota transfer failed. Is project enforcement enabled on the ldiskfs filesystem? rc = %d\n",
3099                                osd_ino2name(inode), rc);
3100                         return rc;
3101                 }
3102         }
3103         return 0;
3104 }
3105
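/*
 * Apply @attr to the object's inode and mark the inode dirty.  The quota
 * transfer is performed first, before any inode fields are updated, and
 * flags covered by LUSTRE_LMA_FL_MASKS are additionally mirrored into the
 * LMA xattr.
 */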
3106 static int osd_attr_set(const struct lu_env *env,
3107                         struct dt_object *dt,
3108                         const struct lu_attr *attr,
3109                         struct thandle *handle)
3110 {
3111         struct osd_object *obj = osd_dt_obj(dt);
3112         struct inode *inode;
3113         int rc;
3114
3115         if (!dt_object_exists(dt))
3116                 return -ENOENT;
3117
3118         LASSERT(handle != NULL);
3119         LASSERT(!dt_object_remote(dt));
3120         LASSERT(osd_invariant(obj));
3121
3122         osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
3123
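        /*
         * Fault-injection path: when OBD_FAIL_OSD_FID_MAPPING is set on an
         * MDT, overwrite the object's OI mapping with a bogus osd_inode_id
         * (all bytes memset to 0x01) so that the FID-mapping repair paths
         * (e.g. OI scrub) can be exercised by tests.
         */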
3124         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) &&
3125             !osd_obj2dev(obj)->od_is_ost) {
3126                 struct osd_thread_info *oti = osd_oti_get(env);
3127                 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
3128                 struct lu_fid *fid1 = &oti->oti_fid;
3129                 struct osd_inode_id *id = &oti->oti_id;
3130                 struct iam_path_descr *ipd;
3131                 struct iam_container *bag;
3132                 struct osd_thandle *oh;
3133                 int rc;
3134
3135                 fid_cpu_to_be(fid1, fid0);
3136                 memset(id, 1, sizeof(*id));
3137                 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
3138                                   fid0)->oi_dir.od_container;
3139                 ipd = osd_idx_ipd_get(env, bag);
3140                 if (unlikely(ipd == NULL))
3141                         RETURN(-ENOMEM);
3142
3143                 oh = container_of(handle, struct osd_thandle, ot_super);
3144                 rc = iam_update(oh->ot_handle, bag,
3145                                 (const struct iam_key *)fid1,
3146                                 (const struct iam_rec *)id, ipd);
3147                 osd_ipd_put(env, bag, ipd);
3148                 return(rc > 0 ? 0 : rc);
3149         }
3150
3151         inode = obj->oo_inode;
3152
3153         rc = osd_quota_transfer(inode, attr, handle);
3154         if (rc)
3155                 return rc;
3156
3157         spin_lock(&obj->oo_guard);
3158         rc = osd_inode_setattr(env, inode, attr);
3159         spin_unlock(&obj->oo_guard);
3160         if (rc != 0)
3161                 GOTO(out, rc);
3162
3163         osd_dirty_inode(inode, I_DIRTY_DATASYNC);
3164
3165         osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
3166
3167         if (!(attr->la_valid & LA_FLAGS))
3168                 GOTO(out, rc);
3169
3170         /* Check whether extra flags need to be set into the LMA */
3171         if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
3172                 struct osd_thread_info *info = osd_oti_get(env);
3173                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
3174
3175                 LASSERT(!obj->oo_pfid_in_lma);
3176
3177                 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
3178                                  &info->oti_ost_attrs);
3179                 if (rc)
3180                         GOTO(out, rc);
3181
3182                 lma->lma_incompat |=
3183                         lustre_to_lma_flags(attr->la_flags);
3184                 lustre_lma_swab(lma);
3185