LU-15880 quota: fix issues in reserving quota
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_handler.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osd/osd_handler.c
32  *
33  * Top-level entry points into osd module
34  *
35  * Author: Nikita Danilov <nikita@clusterfs.com>
36  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <linux/fs_struct.h>
42 #include <linux/module.h>
43 #include <linux/user_namespace.h>
44 #include <linux/uidgid.h>
45
46 /* prerequisite for linux/xattr.h */
47 #include <linux/types.h>
48 /* prerequisite for linux/xattr.h */
49 #include <linux/fs.h>
50 /* XATTR_{REPLACE,CREATE} */
51 #include <linux/xattr.h>
52
53 #include <ldiskfs/ldiskfs.h>
54 #include <ldiskfs/xattr.h>
55 #include <ldiskfs/ldiskfs_extents.h>
56 #undef ENTRY
57 /*
58  * struct OBD_{ALLOC,FREE}*()
59  * OBD_FAIL_CHECK
60  */
61 #include <obd_support.h>
62 /* struct ptlrpc_thread */
63 #include <lustre_net.h>
64 #include <lustre_fid.h>
65 /* process_config */
66 #include <uapi/linux/lustre/lustre_param.h>
67
68 #include "osd_internal.h"
69 #include "osd_dynlocks.h"
70
71 /* llo_* api support */
72 #include <md_object.h>
73 #include <lustre_quota.h>
74
75 #include <lustre_linkea.h>
76
77 /* encoding routines */
78 #include <lustre_crypto.h>
79
80 /* Maximum EA size is limited by LNET_MTU for remote objects */
81 #define OSD_MAX_EA_SIZE 1048364
82
83 int ldiskfs_pdo = 1;
84 module_param(ldiskfs_pdo, int, 0644);
85 MODULE_PARM_DESC(ldiskfs_pdo, "ldiskfs with parallel directory operations");
86
87 int ldiskfs_track_declares_assert;
88 module_param(ldiskfs_track_declares_assert, int, 0644);
89 MODULE_PARM_DESC(ldiskfs_track_declares_assert, "LBUG during tracking of declares");
90
91 /* Slab to allocate dynlocks */
92 struct kmem_cache *dynlock_cachep;
93
94 /* Slab to allocate osd_it_ea */
95 struct kmem_cache *osd_itea_cachep;
96
97 static struct lu_kmem_descr ldiskfs_caches[] = {
98         {
99                 .ckd_cache = &dynlock_cachep,
100                 .ckd_name  = "dynlock_cache",
101                 .ckd_size  = sizeof(struct dynlock_handle)
102         },
103         {
104                 .ckd_cache = &osd_itea_cachep,
105                 .ckd_name  = "osd_itea_cache",
106                 .ckd_size  = sizeof(struct osd_it_ea)
107         },
108         {
109                 .ckd_cache = NULL
110         }
111 };
112
113 static const char dot[] = ".";
114 static const char dotdot[] = "..";
115
116 static const struct lu_object_operations      osd_lu_obj_ops;
117 static const struct dt_object_operations      osd_obj_ops;
118 static const struct dt_object_operations      osd_obj_otable_it_ops;
119 static const struct dt_index_operations       osd_index_iam_ops;
120 static const struct dt_index_operations       osd_index_ea_ops;
121
122 static int osd_remote_fid(const struct lu_env *env, struct osd_device *osd,
123                           const struct lu_fid *fid);
124 static int osd_process_scheduled_agent_removals(const struct lu_env *env,
125                                                 struct osd_device *osd);
126
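/*
 * Map each OSD operation type to the operation type that would undo it
 * (e.g. OSD_OT_CREATE is rolled back by OSD_OT_DESTROY); OSD_OT_MAX marks
 * operations with no rollback counterpart.
 */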
127 int osd_trans_declare_op2rb[] = {
128         [OSD_OT_ATTR_SET]       = OSD_OT_ATTR_SET,
129         [OSD_OT_PUNCH]          = OSD_OT_MAX,
130         [OSD_OT_XATTR_SET]      = OSD_OT_XATTR_SET,
131         [OSD_OT_CREATE]         = OSD_OT_DESTROY,
132         [OSD_OT_DESTROY]        = OSD_OT_CREATE,
133         [OSD_OT_REF_ADD]        = OSD_OT_REF_DEL,
134         [OSD_OT_REF_DEL]        = OSD_OT_REF_ADD,
135         [OSD_OT_WRITE]          = OSD_OT_WRITE,
136         [OSD_OT_INSERT]         = OSD_OT_DELETE,
137         [OSD_OT_DELETE]         = OSD_OT_INSERT,
138         [OSD_OT_QUOTA]          = OSD_OT_MAX,
139 };
140
141 static int osd_has_index(const struct osd_object *obj)
142 {
143         return obj->oo_dt.do_index_ops != NULL;
144 }
145
146 static int osd_object_invariant(const struct lu_object *l)
147 {
148         return osd_invariant(osd_obj(l));
149 }
150
151 /*
152  * Concurrency: doesn't matter
153  */
154 static int osd_is_write_locked(const struct lu_env *env, struct osd_object *o)
155 {
156         struct osd_thread_info *oti = osd_oti_get(env);
157
158         return oti->oti_w_locks > 0 && o->oo_owner == env;
159 }
160
161 /*
162  * Concurrency: doesn't access mutable data
163  */
164 static int osd_root_get(const struct lu_env *env,
165                         struct dt_device *dev, struct lu_fid *f)
166 {
167         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
168         return 0;
169 }
170
171 /*
172  * the following set of functions is used to maintain a per-thread
173  * cache of FID->ino mappings. this mechanism is needed to resolve
174  * a FID to an inode at dt_insert(), which in turn stores the ino in
175  * the directory entries to keep ldiskfs compatible with ext[34].
176  * due to locking-originated restrictions we can't look up the ino
177  * using the LU cache (a deadlock is possible). lookup using OI is
178  * quite expensive, so instead we maintain this cache and methods
179  * like dt_create() fill it. so in the majority of cases dt_insert()
180  * is able to find the needed mapping in a lockless manner.
181  */
182 static struct osd_idmap_cache *
183 osd_idc_find(const struct lu_env *env, struct osd_device *osd,
184              const struct lu_fid *fid)
185 {
186         struct osd_thread_info *oti = osd_oti_get(env);
187         struct osd_idmap_cache *idc = oti->oti_ins_cache;
188         int i;
189
190         for (i = 0; i < oti->oti_ins_cache_used; i++) {
191                 if (!lu_fid_eq(&idc[i].oic_fid, fid))
192                         continue;
193                 if (idc[i].oic_dev != osd)
194                         continue;
195
196                 return idc + i;
197         }
198
199         return NULL;
200 }
201
202 static struct osd_idmap_cache *
203 osd_idc_add(const struct lu_env *env, struct osd_device *osd,
204             const struct lu_fid *fid)
205 {
206         struct osd_thread_info *oti   = osd_oti_get(env);
207         struct osd_idmap_cache *idc;
208         int i;
209
210         if (unlikely(oti->oti_ins_cache_used >= oti->oti_ins_cache_size)) {
211                 i = oti->oti_ins_cache_size * 2;
212                 if (i == 0)
213                         i = OSD_INS_CACHE_SIZE;
214                 OBD_ALLOC_PTR_ARRAY_LARGE(idc, i);
215                 if (idc == NULL)
216                         return ERR_PTR(-ENOMEM);
217                 if (oti->oti_ins_cache != NULL) {
218                         memcpy(idc, oti->oti_ins_cache,
219                                oti->oti_ins_cache_used * sizeof(*idc));
220                         OBD_FREE_PTR_ARRAY_LARGE(oti->oti_ins_cache,
221                                            oti->oti_ins_cache_used);
222                 }
223                 oti->oti_ins_cache = idc;
224                 oti->oti_ins_cache_size = i;
225         }
226
227         idc = oti->oti_ins_cache + oti->oti_ins_cache_used++;
228         idc->oic_fid = *fid;
229         idc->oic_dev = osd;
230         idc->oic_lid.oii_ino = 0;
231         idc->oic_lid.oii_gen = 0;
232         idc->oic_remote = 0;
233
234         return idc;
235 }
236
237 /*
238  * lookup mapping for the given fid in the cache, initialize a
239  * new one if not found. the initialization checks whether the
240  * object is local or remote. for local objects, OI is used to
241  * learn ino/generation. the function is used when the caller
242  * has no information about the object, e.g. at dt_insert().
243  */
244 static struct osd_idmap_cache *
245 osd_idc_find_or_init(const struct lu_env *env, struct osd_device *osd,
246                      const struct lu_fid *fid)
247 {
248         struct osd_idmap_cache *idc;
249         int rc;
250
251         idc = osd_idc_find(env, osd, fid);
252         LASSERT(!IS_ERR(idc));
253         if (idc != NULL)
254                 return idc;
255
256         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
257                osd->od_svname, PFID(fid));
258
259         /* new mapping is needed */
260         idc = osd_idc_add(env, osd, fid);
261         if (IS_ERR(idc)) {
262                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
263                        osd->od_svname, PFID(fid), PTR_ERR(idc));
264                 return idc;
265         }
266
267         /* initialize it */
268         rc = osd_remote_fid(env, osd, fid);
269         if (unlikely(rc < 0))
270                 return ERR_PTR(rc);
271
272         if (rc == 0) {
273                 /* the object is local, lookup in OI */
274                 /* XXX: probably cheaper to lookup in LU first? */
275                 rc = osd_oi_lookup(osd_oti_get(env), osd, fid,
276                                    &idc->oic_lid, 0);
277                 if (unlikely(rc < 0)) {
278                         CERROR("can't lookup: rc = %d\n", rc);
279                         return ERR_PTR(rc);
280                 }
281         } else {
282                 /* the object is remote */
283                 idc->oic_remote = 1;
284         }
285
286         return idc;
287 }
288
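/*
 * Debug helper: load the inode by number, read its LMA to find the self
 * FID and log it; optionally also look that FID up in the OI to show
 * which ino/generation the OI maps it to.
 */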
289 static void osd_idc_dump_lma(const struct lu_env *env,
290                                 struct osd_device *osd,
291                                 unsigned long ino,
292                                 bool check_in_oi)
293 {
294         struct osd_thread_info *info = osd_oti_get(env);
295         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
296         const struct lu_fid *fid;
297         struct osd_inode_id lid;
298         struct inode *inode;
299         int rc;
300
301         inode = osd_ldiskfs_iget(osd_sb(osd), ino);
302         if (IS_ERR(inode)) {
303                 CERROR("%s: can't get inode %lu: rc = %d\n",
304                        osd->od_svname, ino, (int)PTR_ERR(inode));
305                 return;
306         }
307         if (is_bad_inode(inode)) {
308                 CERROR("%s: bad inode %lu\n", osd->od_svname, ino);
309                 goto put;
310         }
311         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
312         if (rc) {
313                 CERROR("%s: can't get LMA for %lu: rc = %d\n",
314                        osd->od_svname, ino, rc);
315                 goto put;
316         }
317         fid = &loa->loa_lma.lma_self_fid;
318         LCONSOLE(D_INFO, "%s: "DFID" in inode %lu/%u\n", osd->od_svname,
319                       PFID(fid), ino, (unsigned)inode->i_generation);
320         if (!check_in_oi)
321                 goto put;
322         rc = osd_oi_lookup(osd_oti_get(env), osd, fid, &lid, 0);
323         if (rc) {
324                 CERROR("%s: can't lookup "DFID": rc = %d\n",
325                        osd->od_svname, PFID(fid), rc);
326                 goto put;
327         }
328         LCONSOLE(D_INFO, "%s: "DFID" maps to %u/%u\n", osd->od_svname,
329                       PFID(fid), lid.oii_ino, lid.oii_gen);
330 put:
331         iput(inode);
332 }
333
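/*
 * Debug helper used on FID/inode mismatch: dump the current OI mapping
 * for the FID and the LMA contents of up to two suspect inodes.
 */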
334 static void osd_idc_dump_debug(const struct lu_env *env,
335                                 struct osd_device *osd,
336                                 const struct lu_fid *fid,
337                                 unsigned long ino1,
338                                 unsigned long ino2)
339 {
340         struct osd_inode_id lid;
341
342         int rc;
343
344         rc = osd_oi_lookup(osd_oti_get(env), osd, fid, &lid, 0);
345         if (!rc) {
346                 LCONSOLE(D_INFO, "%s: "DFID" maps to %u/%u\n",
347                         osd->od_svname, PFID(fid), lid.oii_ino, lid.oii_gen);
348                 osd_idc_dump_lma(env, osd, lid.oii_ino, false);
349         } else {
350                 CERROR("%s: can't lookup "DFID": rc = %d\n",
351                        osd->od_svname, PFID(fid), rc);
352         }
353         if (ino1)
354                 osd_idc_dump_lma(env, osd, ino1, true);
355         if (ino2)
356                 osd_idc_dump_lma(env, osd, ino2, true);
357 }
358
359 /*
360  * lookup mapping for the given FID and fill it from the given object.
361  * the object is local by definition.
362  */
363 static int osd_idc_find_and_init(const struct lu_env *env,
364                                  struct osd_device *osd,
365                                  struct osd_object *obj)
366 {
367         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
368         struct osd_idmap_cache *idc;
369
370         idc = osd_idc_find(env, osd, fid);
371         LASSERT(!IS_ERR(idc));
372         if (idc != NULL) {
373                 if (obj->oo_inode == NULL)
374                         return 0;
375                 if (idc->oic_lid.oii_ino != obj->oo_inode->i_ino) {
376                         if (idc->oic_lid.oii_ino) {
377                                 osd_idc_dump_debug(env, osd, fid,
378                                                    idc->oic_lid.oii_ino,
379                                                    obj->oo_inode->i_ino);
380                                 return -EINVAL;
381                         }
382                         idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
383                         idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
384                 }
385                 return 0;
386         }
387
388         CDEBUG(D_INODE, "%s: FID "DFID" not in the id map cache\n",
389                osd->od_svname, PFID(fid));
390
391         /* new mapping is needed */
392         idc = osd_idc_add(env, osd, fid);
393         if (IS_ERR(idc)) {
394                 CERROR("%s: FID "DFID" add id map cache failed: %ld\n",
395                        osd->od_svname, PFID(fid), PTR_ERR(idc));
396                 return PTR_ERR(idc);
397         }
398
399         if (obj->oo_inode != NULL) {
400                 idc->oic_lid.oii_ino = obj->oo_inode->i_ino;
401                 idc->oic_lid.oii_gen = obj->oo_inode->i_generation;
402         }
403         return 0;
404 }
405
406 /*
407  * OSD object methods.
408  */
409
410 /*
411  * Concurrency: no concurrent access is possible that early in object
412  * life-cycle.
413  */
414 static struct lu_object *osd_object_alloc(const struct lu_env *env,
415                                           const struct lu_object_header *hdr,
416                                           struct lu_device *d)
417 {
418         struct osd_object *mo;
419
420         OBD_ALLOC_PTR(mo);
421         if (mo != NULL) {
422                 struct lu_object *l;
423                 struct lu_object_header *h;
424                 struct osd_device *o = osd_dev(d);
425
426                 l = &mo->oo_dt.do_lu;
427                 if (unlikely(o->od_in_init)) {
428                         OBD_ALLOC_PTR(h);
429                         if (!h) {
430                                 OBD_FREE_PTR(mo);
431                                 return NULL;
432                         }
433
434                         lu_object_header_init(h);
435                         lu_object_init(l, h, d);
436                         lu_object_add_top(h, l);
437                         mo->oo_header = h;
438                 } else {
439                         dt_object_init(&mo->oo_dt, NULL, d);
440                         mo->oo_header = NULL;
441                 }
442
443                 mo->oo_dt.do_ops = &osd_obj_ops;
444                 l->lo_ops = &osd_lu_obj_ops;
445                 init_rwsem(&mo->oo_sem);
446                 init_rwsem(&mo->oo_ext_idx_sem);
447                 spin_lock_init(&mo->oo_guard);
448                 INIT_LIST_HEAD(&mo->oo_xattr_list);
449                 return l;
450         }
451         return NULL;
452 }
453
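/*
 * Read and byte-swap the LMA xattr of @inode into @loa.
 * Returns 0 on success, -ENODATA if the inode has no LMA, -EOPNOTSUPP
 * if the LMA carries unsupported incompatible features, or another
 * negative errno on failure.
 */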
454 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
455                 struct dentry *dentry, struct lustre_ost_attrs *loa)
456 {
457         int rc;
458
459         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
460                              (void *)loa, sizeof(*loa));
461         if (rc > 0) {
462                 struct lustre_mdt_attrs *lma = &loa->loa_lma;
463
464                 if (rc < sizeof(*lma))
465                         return -EINVAL;
466
467                 rc = 0;
468                 lustre_loa_swab(loa, true);
469                 /* Check LMA compatibility */
470                 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
471                         rc = -EOPNOTSUPP;
472                         CWARN("%s: unsupported incompat LMA feature(s) %#x for fid = "DFID", ino = %lu: rc = %d\n",
473                               osd_ino2name(inode),
474                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
475                               PFID(&lma->lma_self_fid), inode->i_ino, rc);
476                 }
477         } else if (rc == 0) {
478                 rc = -ENODATA;
479         }
480
481         return rc;
482 }
483
484 /*
485  * retrieve object from backend ext fs.
486  */
487 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
488                        struct osd_inode_id *id)
489 {
490         int rc;
491         struct inode *inode = NULL;
492
493         /*
494          * if we look for an inode within a running
495          * transaction, then we risk a deadlock;
496          * osd_dirent_check_repair() breaks this
497          */
498          /* LASSERT(current->journal_info == NULL); */
499
500         inode = osd_ldiskfs_iget(osd_sb(dev), id->oii_ino);
501         if (IS_ERR(inode)) {
502                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
503                        id->oii_ino, PTR_ERR(inode));
504         } else if (id->oii_gen != OSD_OII_NOGEN &&
505                    inode->i_generation != id->oii_gen) {
506                 CDEBUG(D_INODE, "unmatched inode: ino = %u, oii_gen = %u, "
507                        "i_generation = %u\n",
508                        id->oii_ino, id->oii_gen, inode->i_generation);
509                 iput(inode);
510                 inode = ERR_PTR(-ESTALE);
511         } else if (inode->i_nlink == 0) {
512                 /*
513                  * due to parallel readdir and unlink,
514                  * we can have dead inode here.
515                  */
516                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
517                 iput(inode);
518                 inode = ERR_PTR(-ESTALE);
519         } else if (is_bad_inode(inode)) {
520                 CWARN("%s: bad inode: ino = %u: rc = %d\n",
521                       osd_dev2name(dev), id->oii_ino, -ENOENT);
522                 iput(inode);
523                 inode = ERR_PTR(-ENOENT);
524         } else  if (osd_is_ea_inode(inode)) {
525                 /*
526                  * An EA inode is an internal ldiskfs object and should
527                  * not be visible via the OSD
528                  */
529                 CDEBUG(D_INODE, "EA inode: ino = %u\n", id->oii_ino);
530                 iput(inode);
531                 inode = ERR_PTR(-ENOENT);
532         } else if ((rc = osd_attach_jinode(inode))) {
533                 iput(inode);
534                 inode = ERR_PTR(rc);
535         } else {
536                 ldiskfs_clear_inode_state(inode, LDISKFS_STATE_LUSTRE_DESTROY);
537                 if (id->oii_gen == OSD_OII_NOGEN)
538                         osd_id_gen(id, inode->i_ino, inode->i_generation);
539
540                 /*
541                  * Do not update file c/mtime in ldiskfs.
542                  * NB: we don't have any lock to protect this because we don't
543                  * have reference on osd_object now, but contention with
544                  * another lookup + attr_set can't happen in the tiny window
545                  * between if (...) and set S_NOCMTIME.
546                  */
547                 if (!(inode->i_flags & S_NOCMTIME))
548                         inode->i_flags |= S_NOCMTIME;
549         }
550         return inode;
551 }
552
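/*
 * Wrapper around __ldiskfs_add_entry() that reports when a directory is
 * approaching (-ENOBUFS, ignored) or has reached (-ENOSPC) its maximum
 * size, resolving the parent directory's FID for the warning message.
 */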
553 int osd_ldiskfs_add_entry(struct osd_thread_info *info, struct osd_device *osd,
554                           handle_t *handle, struct dentry *child,
555                           struct inode *inode, struct htree_lock *hlock)
556 {
557         int rc, rc2;
558
559         rc = __ldiskfs_add_entry(handle, child, inode, hlock);
560         if (rc == -ENOBUFS || rc == -ENOSPC) {
561                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
562                 struct inode *parent = child->d_parent->d_inode;
563                 struct lu_fid *fid = NULL;
564                 char fidstr[FID_LEN + 1] = "unknown";
565
566                 rc2 = osd_get_lma(info, parent, child->d_parent, loa);
567                 if (!rc2) {
568                         fid = &loa->loa_lma.lma_self_fid;
569                 } else if (rc2 == -ENODATA) {
570                         if (unlikely(is_root_inode(parent))) {
571                                 fid = &info->oti_fid3;
572                                 lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
573                         } else if (!osd->od_is_ost && osd->od_index == 0) {
574                                 fid = &info->oti_fid3;
575                                 lu_igif_build(fid, parent->i_ino,
576                                               parent->i_generation);
577                         }
578                 }
579
580                 if (fid != NULL)
581                         snprintf(fidstr, sizeof(fidstr), DFID, PFID(fid));
582
583                 /* below message is checked in sanity.sh test_129 */
584                 if (rc == -ENOSPC) {
585                         CWARN("%s: directory (inode: %lu, FID: %s) has reached max size limit\n",
586                               osd_name(osd), parent->i_ino, fidstr);
587                 } else {
588                         rc = 0; /* ignore such error now */
589                         CWARN("%s: directory (inode: %lu, FID: %s) is approaching max size limit\n",
590                               osd_name(osd), parent->i_ino, fidstr);
591                 }
592
593         }
594
595         return rc;
596 }
597
598
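/*
 * Like osd_iget(), but also return the object's self FID in @fid: taken
 * from the LMA xattr when present, otherwise the root FID or an IGIF
 * built from ino/generation.
 */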
599 struct inode *
600 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
601              struct osd_inode_id *id, struct lu_fid *fid)
602 {
603         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
604         struct inode *inode;
605         int rc;
606
607         inode = osd_iget(info, dev, id);
608         if (IS_ERR(inode))
609                 return inode;
610
611         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
612         if (!rc) {
613                 *fid = loa->loa_lma.lma_self_fid;
614         } else if (rc == -ENODATA) {
615                 if (unlikely(is_root_inode(inode)))
616                         lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
617                 else
618                         lu_igif_build(fid, inode->i_ino, inode->i_generation);
619         } else {
620                 iput(inode);
621                 inode = ERR_PTR(rc);
622         }
623         return inode;
624 }
625
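/*
 * Look up the inode for @fid using the OI mapping in @id. If the mapping
 * is not trusted and iget() fails with -ENOENT/-ESTALE, re-check the OI
 * file: retry with a refreshed mapping, or return -EREMCHG/-ENOENT so the
 * caller can trigger OI scrub or treat the object as missing.
 */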
626 static struct inode *osd_iget_check(struct osd_thread_info *info,
627                                     struct osd_device *dev,
628                                     const struct lu_fid *fid,
629                                     struct osd_inode_id *id,
630                                     bool trusted)
631 {
632         struct inode *inode;
633         int rc = 0;
634
635         ENTRY;
636
637         /*
638          * The cached OI mapping is trustworthy. If we cannot locate the inode
639          * via the cached OI mapping, then return the failure to the caller
640          * directly without further OI checking.
641          */
642
643 again:
644         inode = osd_iget(info, dev, id);
645         if (IS_ERR(inode)) {
646                 rc = PTR_ERR(inode);
647                 if (!trusted && (rc == -ENOENT || rc == -ESTALE))
648                         goto check_oi;
649
650                 CDEBUG(D_INODE, "no inode for FID: "DFID", ino = %u, rc = %d\n",
651                        PFID(fid), id->oii_ino, rc);
652                 GOTO(put, rc);
653         }
654
655 check_oi:
656         if (rc != 0) {
657                 __u32 saved_ino = id->oii_ino;
658                 __u32 saved_gen = id->oii_gen;
659
660                 LASSERT(!trusted);
661                 LASSERTF(rc == -ESTALE || rc == -ENOENT, "rc = %d\n", rc);
662
663                 rc = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
664                 /*
665                  * XXX: There are four possible cases:
666                  *      1. rc = 0.
667                  *         Backup/restore caused the OI mapping to be invalid.
668                  *      2. rc = 0.
669                  *         Someone unlinked the object but did NOT remove
670                  *         the OI mapping, e.g. by mounting the target device
671                  *         as ldiskfs and modifying something directly.
672                  *      3. rc = -ENOENT.
673                  *         Someone just removed the object between the
674                  *         former oi_lookup and the iget. That is normal.
675                  *      4. Other failure cases.
676                  *
677                  *      Generally, when the device is mounted, it will
678                  *      automatically check whether the system was restored
679                  *      from a file-level backup or not. We trust that
680                  *      check to distinguish the 1st case from the 2nd:
681                  *      the OI files are consistent but may contain stale
682                  *      OI mappings because of case 2, so if iget()
683                  *      returns -ENOENT or -ESTALE, then it should be
684                  *      case 2.
685                  */
686                 if (rc != 0)
687                         /*
688                          * If the OI mapping was in the OI file before
689                          * osd_iget_check(), but now it has disappeared,
690                          * then it must have been removed by a race. That
691                          * is a normal race case.
692                          */
693                         GOTO(put, rc);
694
695                 /*
696                  * It is the OI scrub that updated the OI mapping by race.
697                  * The new OI mapping must be valid.
698                  */
699                 if (saved_ino != id->oii_ino ||
700                     (saved_gen != id->oii_gen && saved_gen != OSD_OII_NOGEN)) {
701                         if (!IS_ERR(inode))
702                                 iput(inode);
703
704                         trusted = true;
705                         goto again;
706                 }
707
708                 if (IS_ERR(inode)) {
709                         if (dev->od_scrub.os_scrub.os_file.sf_flags &
710                             SF_INCONSISTENT)
711                                 /*
712                                  * It still can be the case 2, but we cannot
713                                  * distinguish it from the case 1. So return
714                                  * -EREMCHG to block current operation until
715                                  *  OI scrub rebuilt the OI mappings.
716                                  */
717                                 rc = -EREMCHG;
718                         else
719                                 rc = -ENOENT;
720
721                         GOTO(put, rc);
722                 }
723
724                 if (inode->i_generation == id->oii_gen)
725                         rc = -ENOENT;
726                 else
727                         rc = -EREMCHG;
728         }
729
730         GOTO(put, rc);
731
732 put:
733         if (rc != 0) {
734                 if (!IS_ERR(inode))
735                         iput(inode);
736
737                 inode = ERR_PTR(rc);
738         }
739
740         return inode;
741 }
742
743 /**
744  * \retval +v: new filter_fid does not contain self-fid
745  * \retval 0:  filter_fid_18_23, contains self-fid
746  * \retval -v: other failure cases
747  */
748 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
749                  struct dentry *dentry, struct lu_fid *fid)
750 {
751         struct filter_fid *ff = &info->oti_ff;
752         struct ost_id *ostid = &info->oti_ostid;
753         int rc;
754
755         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
756         if (rc == sizeof(struct filter_fid_18_23)) {
757                 struct filter_fid_18_23 *ff_old = (void *)ff;
758
759                 ostid_set_seq(ostid, le64_to_cpu(ff_old->ff_seq));
760                 rc = ostid_set_id(ostid, le64_to_cpu(ff_old->ff_objid));
761                 /*
762                  * XXX: use 0 as the index for compatibility, the caller will
763                  * handle index related issues when necessary.
764                  */
765                 if (!rc)
766                         ostid_to_fid(fid, ostid, 0);
767         } else if (rc >= (int)sizeof(struct filter_fid_24_29)) {
768                 rc = 1;
769         } else if (rc >= 0) {
770                 rc = -EINVAL;
771         }
772
773         return rc;
774 }
775
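/*
 * Rewrite the LMA xattr with the trusted @fid (and @compat flags) in a
 * small standalone journal transaction; used to repair objects whose
 * LMA is missing or stale.
 */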
776 static int osd_lma_self_repair(struct osd_thread_info *info,
777                                struct osd_device *osd, struct inode *inode,
778                                const struct lu_fid *fid, __u32 compat)
779 {
780         handle_t *jh;
781         int rc;
782
783         LASSERT(current->journal_info == NULL);
784
785         jh = osd_journal_start_sb(osd_sb(osd), LDISKFS_HT_MISC,
786                                   osd_dto_credits_noquota[DTO_XATTR_SET]);
787         if (IS_ERR(jh)) {
788                 rc = PTR_ERR(jh);
789                 CWARN("%s: cannot start journal for lma_self_repair: rc = %d\n",
790                       osd_name(osd), rc);
791                 return rc;
792         }
793
794         rc = osd_ea_fid_set(info, inode, fid, compat, 0);
795         if (rc != 0)
796                 CWARN("%s: cannot self repair the LMA: rc = %d\n",
797                       osd_name(osd), rc);
798         ldiskfs_journal_stop(jh);
799         return rc;
800 }
801
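/*
 * Verify that the FID stored in the object's LMA (or, for OST objects
 * without an LMA, in the old filter_fid) matches the FID the object was
 * looked up with. Self-repairs a missing LMA where possible and returns
 * -EREMCHG on mismatch so the caller can trigger OI scrub.
 */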
802 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
803 {
804         struct osd_thread_info *info = osd_oti_get(env);
805         struct osd_device *osd = osd_obj2dev(obj);
806         struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
807         struct lustre_mdt_attrs *lma = &loa->loa_lma;
808         struct inode *inode = obj->oo_inode;
809         struct dentry *dentry = &info->oti_obj_dentry;
810         struct lu_fid *fid = NULL;
811         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
812         int rc;
813
814         ENTRY;
815
816         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
817                              (void *)loa, sizeof(*loa));
818         if (rc == -ENODATA && !fid_is_igif(rfid) && osd->od_check_ff) {
819                 fid = &lma->lma_self_fid;
820                 rc = osd_get_idif(info, inode, dentry, fid);
821                 if (rc > 0 || (rc == -ENODATA && osd->od_index_in_idif)) {
822                         /*
823                          * For the given OST-object, if it has neither LMA nor
824                          * FID in XATTR_NAME_FID, then the given FID (which is
825                          * contained in the @obj, from client RPC for locating
826                          * the OST-object) is trusted. We use it to generate
827                          * the LMA.
828                          */
829                         osd_lma_self_repair(info, osd, inode, rfid,
830                                             LMAC_FID_ON_OST);
831                         RETURN(0);
832                 }
833         }
834
835         if (rc < 0)
836                 RETURN(rc);
837
838         if (rc > 0) {
839                 rc = 0;
840                 lustre_lma_swab(lma);
841                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
842                              (CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT) &&
843                               S_ISREG(inode->i_mode)))) {
844                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
845                               "fid = "DFID", ino = %lu\n", osd_name(osd),
846                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
847                               PFID(rfid), inode->i_ino);
848                         rc = -EOPNOTSUPP;
849                 } else {
850                         fid = &lma->lma_self_fid;
851                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
852                             osd->od_is_ost)
853                                 obj->oo_pfid_in_lma = 1;
854                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
855                             !osd->od_is_ost)
856                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
857                 }
858         }
859
860         if (fid != NULL && unlikely(!lu_fid_eq(rfid, fid))) {
861                 if (fid_is_idif(rfid) && fid_is_idif(fid)) {
862                         struct ost_id   *oi   = &info->oti_ostid;
863                         struct lu_fid   *fid1 = &info->oti_fid3;
864                         __u32            idx  = fid_idif_ost_idx(rfid);
865
866                         /*
867                          * For an old IDIF, the OST index is not part of the
868                          * IDIF, which means different OSTs may have the same
869                          * IDIFs. In such a case we need a compatibility check
870                          * to make sure the OI scrub is triggered properly.
871                          */
872                         if (idx != 0 && fid_idif_ost_idx(fid) == 0) {
873                                 /* Given @rfid is new, LMA is old. */
874                                 fid_to_ostid(fid, oi);
875                                 ostid_to_fid(fid1, oi, idx);
876                                 if (lu_fid_eq(fid1, rfid)) {
877                                         if (osd->od_index_in_idif)
878                                                 osd_lma_self_repair(info, osd,
879                                                         inode, rfid,
880                                                         LMAC_FID_ON_OST);
881                                         RETURN(0);
882                                 }
883                         }
884                 }
885
886                 rc = -EREMCHG;
887         }
888
889         RETURN(rc);
890 }
891
892 struct osd_check_lmv_buf {
893         /* please keep it as first member */
894         struct dir_context ctx;
895         struct osd_thread_info *oclb_info;
896         struct osd_device *oclb_dev;
897         int oclb_items;
898         bool oclb_found;
899 };
900
901 /**
902  * It is called internally by ->iterate*() to find the FID of the
903  * striped directory's local slave object.
904  *
905  * \retval      1 found the local slave's FID
906  * \retval      0 continue to check next item
907  * \retval      -ve for failure
908  */
909 #ifdef HAVE_FILLDIR_USE_CTX
910 static int osd_stripe_dir_filldir(struct dir_context *buf,
911 #else
912 static int osd_stripe_dir_filldir(void *buf,
913 #endif
914                                   const char *name, int namelen,
915                                   loff_t offset, __u64 ino, unsigned int d_type)
916 {
917         struct osd_check_lmv_buf *oclb = (struct osd_check_lmv_buf *)buf;
918         struct osd_thread_info *oti = oclb->oclb_info;
919         struct lu_fid *fid = &oti->oti_fid3;
920         struct osd_inode_id *id = &oti->oti_id3;
921         struct osd_device *dev = oclb->oclb_dev;
922         struct inode *inode;
923
924         oclb->oclb_items++;
925
926         if (name[0] == '.')
927                 return 0;
928
929         fid_zero(fid);
930         sscanf(name + 1, SFID, RFID(fid));
931         if (!fid_is_sane(fid))
932                 return 0;
933
934         if (osd_remote_fid(oti->oti_env, dev, fid))
935                 return 0;
936
937         osd_id_gen(id, ino, OSD_OII_NOGEN);
938         inode = osd_iget(oti, dev, id);
939         if (IS_ERR(inode))
940                 return PTR_ERR(inode);
941
942         iput(inode);
943         osd_add_oi_cache(oti, dev, id, fid);
944         osd_oii_insert(dev, fid, id, true);
945         oclb->oclb_found = true;
946
947         return 1;
948 }
949
950 /*
951  * When looking up an item under a striped directory, we need to locate the
952  * master MDT-object of the striped directory first; then the client sends a
953  * lookup (getattr_by_name) RPC to the MDT with some slave MDT-object's FID
954  * and the item's name. If the system is restored from an MDT file-level
955  * backup, then before the OI scrub has completely rebuilt the OI files, the
956  * OI mappings of the master MDT-object and slave MDT-object may be invalid.
957  * Usually this is not a problem for the master MDT-object, because when
958  * locating the master MDT-object we do a name-based lookup (for the striped
959  * directory itself) first, and during that process we can set up the correct
960  * OI mapping for the master MDT-object. But it is trouble for the slave
961  * MDT-object, because the client will not trigger a name-based lookup on the
962  * MDT to locate the slave MDT-object before locating items under the striped
963  * directory. Then osd_fid_lookup() will find that the OI mapping for the
964  * slave MDT-object is invalid without knowing what the right OI mapping is,
965  * so the MDT has to return -EINPROGRESS to the client to notify that the OI
966  * scrub is rebuilding the OI file, the related OI mapping is unknown yet,
967  * please try again later. The client will then retry the RPC again and again
968  * until the related OI mapping has been updated. That is quite inefficient.
969  *
970  * To resolve the above trouble, we handle it as the following two cases:
971  *
972  * 1) The slave MDT-object and the master MDT-object are on different MDTs.
973  *    This is relatively easy. As one of the remote MDT-objects, the slave
974  *    MDT-object is linked under /REMOTE_PARENT_DIR with its FID string as
975  *    the name. We can locate the slave MDT-object via a lookup in
976  *    /REMOTE_PARENT_DIR directly. Please check osd_fid_lookup().
977  *
978  * 2) The slave MDT-object and the master MDT-object reside on the same MDT.
979  *    In that case, while looking up the master MDT-object, we look up the
980  *    slave MDT-object via readdir against the master MDT-object, because the
981  *    slave MDT-objects' information is stored as sub-directories with the
982  *    name "${FID}:${index}". When the local slave MDT-object is found, its OI
983  *    mapping is recorded, so subsequent osd_fid_lookup() calls will know
984  *    the correct OI mapping for the slave MDT-object.
985  */
986 static int osd_check_lmv(struct osd_thread_info *oti, struct osd_device *dev,
987                          struct inode *inode)
988 {
989         struct lu_buf *buf = &oti->oti_big_buf;
990         struct file *filp;
991         struct lmv_mds_md_v1 *lmv1;
992         struct osd_check_lmv_buf oclb = {
993                 .ctx.actor = osd_stripe_dir_filldir,
994                 .oclb_info = oti,
995                 .oclb_dev = dev,
996                 .oclb_found = false,
997         };
998         int rc = 0;
999
1000         ENTRY;
1001         /* We should use the VFS layer to create a real dentry. */
1002         oti->oti_obj_dentry.d_inode = inode;
1003         oti->oti_obj_dentry.d_sb = inode->i_sb;
1004
1005         filp = alloc_file_pseudo(inode, dev->od_mnt, "/", O_NOATIME,
1006                                  inode->i_fop);
1007         if (IS_ERR(filp))
1008                 RETURN(-ENOMEM);
1009
1010         filp->f_mode |= FMODE_64BITHASH;
1011         filp->f_pos = 0;
1012         ihold(inode);
1013 again:
1014         rc = __osd_xattr_get(inode, filp->f_path.dentry, XATTR_NAME_LMV,
1015                              buf->lb_buf, buf->lb_len);
1016         if (rc == -ERANGE) {
1017                 rc = __osd_xattr_get(inode, filp->f_path.dentry,
1018                                      XATTR_NAME_LMV, NULL, 0);
1019                 if (rc > 0) {
1020                         lu_buf_realloc(buf, rc);
1021                         if (buf->lb_buf == NULL)
1022                                 GOTO(out, rc = -ENOMEM);
1023
1024                         goto again;
1025                 }
1026         }
1027
1028         if (unlikely(rc == 0 || rc == -ENODATA))
1029                 GOTO(out, rc = 0);
1030
1031         if (rc < 0)
1032                 GOTO(out, rc);
1033
1034         if (unlikely(buf->lb_buf == NULL)) {
1035                 lu_buf_realloc(buf, rc);
1036                 if (buf->lb_buf == NULL)
1037                         GOTO(out, rc = -ENOMEM);
1038
1039                 goto again;
1040         }
1041
1042         lmv1 = buf->lb_buf;
1043         if (le32_to_cpu(lmv1->lmv_magic) != LMV_MAGIC_V1)
1044                 GOTO(out, rc = 0);
1045
1046         do {
1047                 oclb.oclb_items = 0;
1048                 rc = iterate_dir(filp, &oclb.ctx);
1049         } while (rc >= 0 && oclb.oclb_items > 0 && !oclb.oclb_found &&
1050                  filp->f_pos != LDISKFS_HTREE_EOF_64BIT);
1051 out:
1052         fput(filp);
1053         if (rc < 0)
1054                 CDEBUG(D_LFSCK,
1055                        "%s: cannot check LMV, ino = %lu/%u: rc = %d\n",
1056                        osd_ino2name(inode), inode->i_ino, inode->i_generation,
1057                        rc);
1058         else
1059                 rc = 0;
1060
1061         RETURN(rc);
1062 }
1063
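/*
 * Resolve @fid to an inode and attach it to @obj. The OI mapping is
 * searched in order: per-thread cache, OI scrub pending list, OI files.
 * On a FID/inode mismatch the OI scrub may be triggered and -EINPROGRESS
 * or -EREMCHG returned; a missing object is not an error (oo_inode stays
 * NULL and 0 is returned).
 */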
1064 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
1065                           const struct lu_fid *fid,
1066                           const struct lu_object_conf *conf)
1067 {
1068         struct osd_thread_info *info;
1069         struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1070         struct osd_device *dev;
1071         struct osd_idmap_cache *oic;
1072         struct osd_inode_id *id;
1073         struct inode *inode = NULL;
1074         struct lustre_scrub *scrub;
1075         struct scrub_file *sf;
1076         __u32 flags = SS_CLEAR_DRYRUN | SS_CLEAR_FAILOUT | SS_AUTO_FULL;
1077         __u32 saved_ino;
1078         __u32 saved_gen;
1079         int result = 0;
1080         int rc1 = 0;
1081         bool remote = false;
1082         bool trusted = true;
1083         bool updated = false;
1084         bool checked = false;
1085
1086         ENTRY;
1087
1088         LINVRNT(osd_invariant(obj));
1089         LASSERT(obj->oo_inode == NULL);
1090
1091         if (fid_is_sane(fid) == 0) {
1092                 CERROR("%s: invalid FID "DFID"\n", ldev->ld_obd->obd_name,
1093                        PFID(fid));
1094                 dump_stack();
1095                 RETURN(-EINVAL);
1096         }
1097
1098         dev = osd_dev(ldev);
1099         scrub = &dev->od_scrub.os_scrub;
1100         sf = &scrub->os_file;
1101         info = osd_oti_get(env);
1102         LASSERT(info);
1103         oic = &info->oti_cache;
1104
1105         if (OBD_FAIL_CHECK(OBD_FAIL_SRV_ENOENT))
1106                 RETURN(-ENOENT);
1107
1108         /*
1109          * The object is being created as a locking anchor, or is about to
1110          * be created on disk. There is no need for osd_oi_lookup() here
1111          * because a FID should never be re-used; if it really is a duplicate
1112          * FID for some unexpected reason, we should be able to detect it
1113          * later by calling do_create->osd_oi_insert().
1114          */
1115         if (conf && conf->loc_flags & LOC_F_NEW)
1116                 GOTO(out, result = 0);
1117
1118         /* Search order: 1. per-thread cache. */
1119         if (lu_fid_eq(fid, &oic->oic_fid) && likely(oic->oic_dev == dev)) {
1120                 id = &oic->oic_lid;
1121                 goto iget;
1122         }
1123
1124         id = &info->oti_id;
1125         memset(id, 0, sizeof(struct osd_inode_id));
1126         if (!list_empty(&scrub->os_inconsistent_items)) {
1127                 /* Search order: 2. OI scrub pending list. */
1128                 result = osd_oii_lookup(dev, fid, id);
1129                 if (!result)
1130                         goto iget;
1131         }
1132
1133         /*
1134          * The OI mapping in the OI file can be updated by the OI scrub
1135          * when we locate the inode via FID. So it may not be trustworthy.
1136          */
1137         trusted = false;
1138
1139         /* Search order: 3. OI files. */
1140         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1141         if (result == -ENOENT) {
1142                 if (!fid_is_norm(fid) ||
1143                     fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
1144                     !ldiskfs_test_bit(osd_oi_fid2idx(dev, fid),
1145                                       sf->sf_oi_bitmap))
1146                         GOTO(out, result = 0);
1147
1148                 goto trigger;
1149         }
1150
1151         /* -ESTALE is returned if the inode of the OST object doesn't exist */
1152         if (result == -ESTALE &&
1153             fid_is_on_ost(info, dev, fid, OI_CHECK_FLD)) {
1154                 GOTO(out, result = 0);
1155         }
1156
1157         if (result)
1158                 GOTO(out, result);
1159
1160 iget:
1161         obj->oo_inode = NULL;
1162         /* for later passes through checks, not true on first pass */
1163         if (!IS_ERR_OR_NULL(inode))
1164                 iput(inode);
1165
1166         inode = osd_iget_check(info, dev, fid, id, trusted);
1167         if (!IS_ERR(inode)) {
1168                 obj->oo_inode = inode;
1169                 result = 0;
1170                 if (remote)
1171                         goto trigger;
1172
1173                 goto check_lma;
1174         }
1175
1176         result = PTR_ERR(inode);
1177         if (result == -ENOENT || result == -ESTALE)
1178                 GOTO(out, result = 0);
1179
1180         if (result != -EREMCHG)
1181                 GOTO(out, result);
1182
1183 trigger:
1184         /*
1185          * We still have a chance to get a valid inode: for an
1186          * object which is referenced by a remote name entry, the
1187          * object on the local MDT will be linked under the dir
1188          * "/REMOTE_PARENT_DIR" with its FID string as the name.
1189          *
1190          * We do not know whether the object for the given FID
1191          * is referenced by some remote name entry or not, and
1192          * especially for DNE II, a multiple-linked object may
1193          * have many name entries residing on many MDTs.
1194          *
1195          * To simplify the operation, OSD will not distinguish
1196          * more, just look up "/REMOTE_PARENT_DIR". Usually this
1197          * only happens for an RPC from another MDT during the
1198          * OI scrub, or for a client-side RPC with FID only,
1199          * such as FID to path, or from an old connected client.
1200          */
1201         if (!remote) {
1202                 rc1 = osd_lookup_in_remote_parent(info, dev, fid, id);
1203                 if (!rc1) {
1204                         remote = true;
1205                         trusted = true;
1206                         flags |= SS_AUTO_PARTIAL;
1207                         flags &= ~SS_AUTO_FULL;
1208                         goto iget;
1209                 }
1210         }
1211
1212         if (scrub->os_running) {
1213                 if (scrub->os_partial_scan && !scrub->os_in_join)
1214                         goto join;
1215
1216                 if (IS_ERR_OR_NULL(inode) || result) {
1217                         osd_oii_insert(dev, fid, id, result == -ENOENT);
1218                         GOTO(out, result = -EINPROGRESS);
1219                 }
1220
1221                 LASSERT(remote);
1222                 LASSERT(obj->oo_inode == inode);
1223
1224                 osd_oii_insert(dev, fid, id, true);
1225                 goto found;
1226         }
1227
1228         if (dev->od_scrub.os_scrub.os_auto_scrub_interval == AS_NEVER) {
1229                 if (!remote)
1230                         GOTO(out, result = -EREMCHG);
1231
1232                 LASSERT(!result);
1233                 LASSERT(obj->oo_inode == inode);
1234
1235                 osd_add_oi_cache(info, dev, id, fid);
1236                 goto found;
1237         }
1238
1239 join:
1240         rc1 = osd_scrub_start(env, dev, flags);
1241         CDEBUG_LIMIT(D_LFSCK | D_CONSOLE | D_WARNING,
1242                      "%s: trigger OI scrub by RPC for "DFID"/%u with flags %#x: rc = %d\n",
1243                      osd_name(dev), PFID(fid), id->oii_ino, flags, rc1);
1244         if (rc1 && rc1 != -EALREADY)
1245                 GOTO(out, result = -EREMCHG);
1246
1247         if (IS_ERR_OR_NULL(inode) || result) {
1248                 osd_oii_insert(dev, fid, id, result == -ENOENT);
1249                 GOTO(out, result = -EINPROGRESS);
1250         }
1251
1252         LASSERT(remote);
1253         LASSERT(obj->oo_inode == inode);
1254
1255         osd_oii_insert(dev, fid, id, true);
1256         goto found;
1257
1258 check_lma:
1259         checked = true;
1260         if (unlikely(obj->oo_header))
1261                 goto found;
1262
1263         result = osd_check_lma(env, obj);
1264         if (!result)
1265                 goto found;
1266
1267         LASSERTF(id->oii_ino == inode->i_ino &&
1268                  id->oii_gen == inode->i_generation,
1269                  "locate wrong inode for FID: "DFID", %u/%u => %ld/%u\n",
1270                  PFID(fid), id->oii_ino, id->oii_gen,
1271                  inode->i_ino, inode->i_generation);
1272
1273         saved_ino = inode->i_ino;
1274         saved_gen = inode->i_generation;
1275
1276         if (unlikely(result == -ENODATA)) {
1277                 /*
1278                  * If the OI scrub updated the OI mapping by race, it
1279                  * must be valid. Trust the inode that has no LMA EA.
1280                  */
1281                 if (updated)
1282                         goto found;
1283
1284                 result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1285                 if (!result) {
1286                         /*
1287                          * The OI mapping is still there, the inode is still
1288                          * valid. It is just becaues the inode has no LMA EA.
1289                          */
1290                         if (saved_ino == id->oii_ino &&
1291                             saved_gen == id->oii_gen)
1292                                 goto found;
1293
1294                         /*
1295                          * It is the OI scrub that updated the OI mapping by race.
1296                          * The new OI mapping must be valid.
1297                          */
1298                         trusted = true;
1299                         updated = true;
1300                         goto iget;
1301                 }
1302
1303                 /*
1304                  * "result == -ENOENT" means that the OI mappinghas been
1305                  * removed by race, so the inode belongs to other object.
1306                  *
1307                  * Others error can be returned  directly.
1308                  */
1309                 if (result == -ENOENT) {
1310                         LASSERT(trusted);
1311
1312                         obj->oo_inode = NULL;
1313                         result = 0;
1314                 }
1315         }
1316
1317         if (result != -EREMCHG)
1318                 GOTO(out, result);
1319
1320         LASSERT(!updated);
1321
1322         /*
1323          * If two OST objects map to the same inode, and the inode mode is
1324          * (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666), which means it is
1325          * reserved by precreate and not written yet, then don't set the
1326          * inode for the object whose FID mismatches, so that it can create
1327          * its own inode and not block precreate.
1328          */
1329         if (fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) &&
1330             inode->i_mode == (S_IFREG | S_ISUID | S_ISGID | S_ISVTX | 0666)) {
1331                 obj->oo_inode = NULL;
1332                 GOTO(out, result = 0);
1333         }
1334
1335         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
1336         /*
1337          * "result == -ENOENT" means the cached OI mapping has been removed
1338          * from the OI file by a race; the above inode belongs to another object.
1339          */
1340         if (result == -ENOENT) {
1341                 LASSERT(trusted);
1342
1343                 obj->oo_inode = NULL;
1344                 GOTO(out, result = 0);
1345         }
1346
1347         if (result)
1348                 GOTO(out, result);
1349
1350         if (saved_ino == id->oii_ino && saved_gen == id->oii_gen) {
1351                 result = -EREMCHG;
1352                 osd_scrub_refresh_mapping(info, dev, fid, id, DTO_INDEX_DELETE,
1353                                           true, 0, NULL);
1354                 goto trigger;
1355         }
1356
1357         /*
1358          * It is the OI scrub that updated the OI mapping by race.
1359          * The new OI mapping must be valid.
1360          */
1361         trusted = true;
1362         updated = true;
1363         goto iget;
1364
1365 found:
1366         if (!checked) {
1367                 struct lustre_ost_attrs *loa = &info->oti_ost_attrs;
1368                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
1369
1370                 result = osd_get_lma(info, inode, &info->oti_obj_dentry, loa);
1371                 if (!result) {
1372                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
1373                             dev->od_is_ost)
1374                                 obj->oo_pfid_in_lma = 1;
1375                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
1376                             !dev->od_is_ost)
1377                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
1378                 } else if (result != -ENODATA) {
1379                         GOTO(out, result);
1380                 }
1381         }
1382
1383         obj->oo_compat_dot_created = 1;
1384         obj->oo_compat_dotdot_created = 1;
1385
1386         if (S_ISDIR(inode->i_mode) &&
1387             (flags & SS_AUTO_PARTIAL || sf->sf_status == SS_SCANNING))
1388                 osd_check_lmv(info, dev, inode);
1389
1390         result = osd_attach_jinode(inode);
1391         if (result)
1392                 GOTO(out, result);
1393
1394         if (!ldiskfs_pdo)
1395                 GOTO(out, result = 0);
1396
1397         LASSERT(!obj->oo_hl_head);
1398         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1399
1400         GOTO(out, result = (!obj->oo_hl_head ? -ENOMEM : 0));
1401
1402 out:
1403         if (result || !obj->oo_inode) {
1404                 if (!IS_ERR_OR_NULL(inode))
1405                         iput(inode);
1406
1407                 obj->oo_inode = NULL;
1408                 if (trusted)
1409                         fid_zero(&oic->oic_fid);
1410         }
1411
1412         LINVRNT(osd_invariant(obj));
1413         return result;
1414 }
1415
1416 /*
1417  * Concurrency: shouldn't matter.
1418  */
1419 static void osd_object_init0(struct osd_object *obj)
1420 {
1421         LASSERT(obj->oo_inode != NULL);
1422         obj->oo_dt.do_body_ops = &osd_body_ops;
1423         obj->oo_dt.do_lu.lo_header->loh_attr |=
1424                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
1425 }
1426
1427 /*
1428  * Concurrency: no concurrent access is possible that early in object
1429  * life-cycle.
1430  */
1431 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
1432                            const struct lu_object_conf *conf)
1433 {
1434         struct osd_object *obj = osd_obj(l);
1435         int result;
1436
1437         LINVRNT(osd_invariant(obj));
1438
1439         if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_LLOG_UMOUNT_RACE) &&
1440             cfs_fail_val == 2) {
1441                 struct osd_thread_info *info = osd_oti_get(env);
1442                 struct osd_idmap_cache *oic = &info->oti_cache;
1443                 /* invalidate thread cache */
1444                 memset(&oic->oic_fid, 0, sizeof(oic->oic_fid));
1445         }
1446         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
1447                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
1448                 l->lo_header->loh_attr |= LOHA_EXISTS;
1449                 return 0;
1450         }
1451
1452         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
1453         obj->oo_dt.do_body_ops = &osd_body_ops_new;
1454         if (result == 0 && obj->oo_inode != NULL) {
1455                 struct osd_thread_info *oti = osd_oti_get(env);
1456                 struct lustre_ost_attrs *loa = &oti->oti_ost_attrs;
1457
1458                 osd_object_init0(obj);
1459                 if (unlikely(obj->oo_header))
1460                         return 0;
1461
1462                 result = osd_get_lma(oti, obj->oo_inode,
1463                                      &oti->oti_obj_dentry, loa);
1464                 if (!result) {
1465                         /*
1466                          * Convert LMAI flags to lustre LMA flags
1467                          * and cache it to oo_lma_flags
1468                          * and cache them in oo_lma_flags
1469                         obj->oo_lma_flags =
1470                                 lma_to_lustre_flags(loa->loa_lma.lma_incompat);
1471                 } else if (result == -ENODATA) {
1472                         result = 0;
1473                 }
1474         }
1475         obj->oo_dirent_count = LU_DIRENT_COUNT_UNSET;
1476
1477         LINVRNT(osd_invariant(obj));
1478         return result;
1479 }
1480
1481 /*
1482  * The first part of oxe_buf is the xattr name, '\0' terminated.
1483  * The remaining part holds the value, in binary form.
1484  */
1485 struct osd_xattr_entry {
1486         struct list_head        oxe_list;
1487         size_t                  oxe_len;
1488         size_t                  oxe_namelen;
1489         bool                    oxe_exist;
1490         struct rcu_head         oxe_rcu;
1491         char                    oxe_buf[0];
1492 };
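
/*
 * Illustrative sketch (not part of the original code): given the layout
 * described above, the value of a cached entry starts right after the name's
 * terminating '\0', and its length can be recovered from oxe_len.  This
 * mirrors the arithmetic used by osd_oxc_get() below.
 */
#if 0	/* example only */
static inline void *oxe_value(struct osd_xattr_entry *oxe)
{
        /* the name occupies oxe_buf[0 .. oxe_namelen], including the '\0' */
        return oxe->oxe_buf + oxe->oxe_namelen + 1;
}

static inline size_t oxe_value_len(struct osd_xattr_entry *oxe)
{
        /* total allocation minus struct header, name and its '\0' */
        return oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
}
#endif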
1493
1494 static int osd_oxc_get(struct osd_object *obj, const char *name,
1495                        struct lu_buf *buf)
1496 {
1497         struct osd_xattr_entry *tmp;
1498         struct osd_xattr_entry *oxe = NULL;
1499         size_t namelen = strlen(name);
1500         int rc;
1501
1502         rcu_read_lock();
1503         list_for_each_entry_rcu(tmp, &obj->oo_xattr_list, oxe_list) {
1504                 if (namelen == tmp->oxe_namelen &&
1505                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1506                         oxe = tmp;
1507                         break;
1508                 }
1509         }
1510
1511         if (oxe == NULL)
1512                 GOTO(out, rc = -ENOENT);
1513
1514         if (!oxe->oxe_exist)
1515                 GOTO(out, rc = -ENODATA);
1516
1517         /* value length */
1518         rc = oxe->oxe_len - sizeof(*oxe) - oxe->oxe_namelen - 1;
1519         LASSERT(rc > 0);
1520
1521         if (buf->lb_buf == NULL)
1522                 GOTO(out, rc);
1523
1524         if (buf->lb_len < rc)
1525                 GOTO(out, rc = -ERANGE);
1526
1527         memcpy(buf->lb_buf, &oxe->oxe_buf[namelen + 1], rc);
1528 out:
1529         rcu_read_unlock();
1530
1531         return rc;
1532 }
1533
1534 static void osd_oxc_free(struct rcu_head *head)
1535 {
1536         struct osd_xattr_entry *oxe;
1537
1538         oxe = container_of(head, struct osd_xattr_entry, oxe_rcu);
1539         OBD_FREE(oxe, oxe->oxe_len);
1540 }
1541
1542 static void osd_oxc_add(struct osd_object *obj, const char *name,
1543                         const char *buf, int buflen)
1544 {
1545         struct osd_xattr_entry *oxe;
1546         struct osd_xattr_entry *old = NULL;
1547         struct osd_xattr_entry *tmp;
1548         size_t namelen = strlen(name);
1549         size_t len = sizeof(*oxe) + namelen + 1 + buflen;
1550
1551         OBD_ALLOC(oxe, len);
1552         if (oxe == NULL)
1553                 return;
1554
1555         INIT_LIST_HEAD(&oxe->oxe_list);
1556         oxe->oxe_len = len;
1557         oxe->oxe_namelen = namelen;
1558         memcpy(oxe->oxe_buf, name, namelen);
1559         if (buflen > 0) {
1560                 LASSERT(buf != NULL);
1561                 memcpy(oxe->oxe_buf + namelen + 1, buf, buflen);
1562                 oxe->oxe_exist = true;
1563         } else {
1564                 oxe->oxe_exist = false;
1565         }
1566
1567         /* this should rarely be called; just remove the old entry and add the new */
1568         spin_lock(&obj->oo_guard);
1569         list_for_each_entry(tmp, &obj->oo_xattr_list, oxe_list) {
1570                 if (namelen == tmp->oxe_namelen &&
1571                     strncmp(name, tmp->oxe_buf, namelen) == 0) {
1572                         old = tmp;
1573                         break;
1574                 }
1575         }
1576         if (old != NULL) {
1577                 list_replace_rcu(&old->oxe_list, &oxe->oxe_list);
1578                 call_rcu(&old->oxe_rcu, osd_oxc_free);
1579         } else {
1580                 list_add_tail_rcu(&oxe->oxe_list, &obj->oo_xattr_list);
1581         }
1582         spin_unlock(&obj->oo_guard);
1583 }
1584
1585 static void osd_oxc_del(struct osd_object *obj, const char *name)
1586 {
1587         struct osd_xattr_entry *oxe;
1588         size_t namelen = strlen(name);
1589
1590         spin_lock(&obj->oo_guard);
1591         list_for_each_entry(oxe, &obj->oo_xattr_list, oxe_list) {
1592                 if (namelen == oxe->oxe_namelen &&
1593                     strncmp(name, oxe->oxe_buf, namelen) == 0) {
1594                         list_del_rcu(&oxe->oxe_list);
1595                         call_rcu(&oxe->oxe_rcu, osd_oxc_free);
1596                         break;
1597                 }
1598         }
1599         spin_unlock(&obj->oo_guard);
1600 }
1601
1602 static void osd_oxc_fini(struct osd_object *obj)
1603 {
1604         struct osd_xattr_entry *oxe, *next;
1605
1606         list_for_each_entry_safe(oxe, next, &obj->oo_xattr_list, oxe_list) {
1607                 list_del(&oxe->oxe_list);
1608                 OBD_FREE(oxe, oxe->oxe_len);
1609         }
1610 }
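
/*
 * Illustrative sketch (not part of the original code): the xattr cache above
 * follows the usual RCU pattern - lookups in osd_oxc_get() are lock-free
 * under rcu_read_lock(), while updaters in osd_oxc_add()/osd_oxc_del() hold
 * oo_guard and defer freeing replaced entries through call_rcu(), so a
 * concurrent reader never touches freed memory.  A hypothetical caller would
 * pair the cache with the on-disk xattr lookup roughly as follows; the
 * on-disk getxattr itself is elided.
 */
#if 0	/* example only */
static int example_xattr_get_cached(struct osd_object *obj, const char *name,
                                    struct lu_buf *buf)
{
        int rc = osd_oxc_get(obj, name, buf);

        if (rc != -ENOENT)
                return rc;      /* cache hit: value length, -ENODATA or -ERANGE */

        /* cache miss: do the real on-disk getxattr here (elided), then ... */
        rc = -EOPNOTSUPP;
        if (rc >= 0)
                osd_oxc_add(obj, name, buf->lb_buf, rc);
        return rc;
}
#endif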
1611
1612 /*
1613  * Concurrency: no concurrent access is possible that late in object
1614  * life-cycle.
1615  */
1616 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
1617 {
1618         struct osd_object *obj = osd_obj(l);
1619         struct lu_object_header *h = obj->oo_header;
1620
1621         LINVRNT(osd_invariant(obj));
1622
1623         osd_oxc_fini(obj);
1624         dt_object_fini(&obj->oo_dt);
1625         if (obj->oo_hl_head != NULL)
1626                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1627         /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
1628         OBD_FREE_PTR(obj);
1629         if (unlikely(h))
1630                 lu_object_header_free(h);
1631 }
1632
1633 /*
1634  * Concurrency: no concurrent access is possible that late in object
1635  * life-cycle.
1636  */
1637 static void osd_index_fini(struct osd_object *o)
1638 {
1639         struct iam_container *bag;
1640
1641         if (o->oo_dir != NULL) {
1642                 bag = &o->oo_dir->od_container;
1643                 if (o->oo_inode != NULL) {
1644                         if (bag->ic_object == o->oo_inode)
1645                                 iam_container_fini(bag);
1646                 }
1647                 OBD_FREE_PTR(o->oo_dir);
1648                 o->oo_dir = NULL;
1649         }
1650 }
1651
1652 enum {
1653         OSD_TXN_OI_DELETE_CREDITS    = 20,
1654         OSD_TXN_INODE_DELETE_CREDITS = 20
1655 };
1656
1657 /*
1658  * Journal
1659  */
1660
1661 #if OSD_THANDLE_STATS
1662 /**
1663  * Set time when the handle is allocated
1664  */
1665 static void osd_th_alloced(struct osd_thandle *oth)
1666 {
1667         oth->oth_alloced = ktime_get();
1668 }
1669
1670 /**
1671  * Set time when the handle started
1672  */
1673 static void osd_th_started(struct osd_thandle *oth)
1674 {
1675         oth->oth_started = ktime_get();
1676 }
1677
1678 /**
1679  * Check whether we have been dealing with this handle for too long.
1680  */
1681 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
1682                                 ktime_t alloced, ktime_t started,
1683                                 ktime_t closed)
1684 {
1685         ktime_t now = ktime_get();
1686
1687         LASSERT(dev != NULL);
1688
1689         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
1690                             ktime_us_delta(started, alloced));
1691         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
1692                             ktime_us_delta(closed, started));
1693         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
1694                             ktime_us_delta(now, closed));
1695
1696         if (ktime_before(ktime_add_ns(alloced, 30 * NSEC_PER_SEC), now)) {
1697                 CWARN("transaction handle %p was open for too long: now %lld, alloced %lld, started %lld, closed %lld\n",
1698                                 oth, now, alloced, started, closed);
1699                 libcfs_debug_dumpstack(NULL);
1700         }
1701 }
1702
1703 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
1704 {                                                                       \
1705         ktime_t __closed = ktime_get();                                 \
1706         ktime_t __alloced = oth->oth_alloced;                           \
1707         ktime_t __started = oth->oth_started;                           \
1708                                                                         \
1709         expr;                                                           \
1710         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
1711 }
1712
1713 #else /* OSD_THANDLE_STATS */
1714
1715 #define osd_th_alloced(h)                  do {} while(0)
1716 #define osd_th_started(h)                  do {} while(0)
1717 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
1718
1719 #endif /* OSD_THANDLE_STATS */
1720
1721 /*
1722  * Concurrency: doesn't access mutable data.
1723  */
1724 static int osd_param_is_not_sane(const struct osd_device *dev,
1725                                  const struct thandle *th)
1726 {
1727         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
1728
1729         return oh->ot_credits > osd_transaction_size(dev);
1730 }
1731
1732 /*
1733  * Concurrency: shouldn't matter.
1734  */
1735 static void osd_trans_commit_cb(struct super_block *sb,
1736                                 struct ldiskfs_journal_cb_entry *jcb, int error)
1737 {
1738         struct osd_thandle *oh = container_of(jcb, struct osd_thandle, ot_jcb);
1739         struct thandle *th = &oh->ot_super;
1740         struct lu_device *lud = &th->th_dev->dd_lu_dev;
1741         struct osd_device *osd = osd_dev(lud);
1742         struct dt_txn_commit_cb *dcb, *tmp;
1743
1744         LASSERT(oh->ot_handle == NULL);
1745
1746         if (error)
1747                 CERROR("transaction @0x%p commit error: %d\n", th, error);
1748
1749         /* call per-transaction callbacks if any */
1750         list_for_each_entry_safe(dcb, tmp, &oh->ot_commit_dcb_list,
1751                                  dcb_linkage) {
1752                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1753                          "commit callback entry: magic=%x name='%s'\n",
1754                          dcb->dcb_magic, dcb->dcb_name);
1755                 list_del_init(&dcb->dcb_linkage);
1756                 dcb->dcb_func(NULL, th, dcb, error);
1757         }
1758
1759         lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
1760         if (atomic_dec_and_test(&osd->od_commit_cb_in_flight))
1761                 wake_up(&osd->od_commit_cb_done);
1762         th->th_dev = NULL;
1763
1764         OBD_FREE_PTR(oh);
1765 }
1766
1767 static struct thandle *osd_trans_create(const struct lu_env *env,
1768                                         struct dt_device *d)
1769 {
1770         struct osd_thread_info *oti = osd_oti_get(env);
1771         struct osd_iobuf *iobuf = &oti->oti_iobuf;
1772         struct osd_thandle *oh;
1773         struct thandle *th;
1774
1775         ENTRY;
1776
1777         if (d->dd_rdonly) {
1778                 CERROR("%s: someone tried to start a transaction under "
1779                        "readonly mode, this should be disabled.\n",
1780                        osd_name(osd_dt_dev(d)));
1781                 dump_stack();
1782                 RETURN(ERR_PTR(-EROFS));
1783         }
1784
1785         /* no pending IO in this thread should be left from the previous request */
1786         LASSERT(atomic_read(&iobuf->dr_numreqs) == 0);
1787
1788         sb_start_write(osd_sb(osd_dt_dev(d)));
1789
1790         OBD_ALLOC_GFP(oh, sizeof(*oh), GFP_NOFS);
1791         if (!oh) {
1792                 sb_end_write(osd_sb(osd_dt_dev(d)));
1793                 RETURN(ERR_PTR(-ENOMEM));
1794         }
1795
1796         oh->ot_quota_trans = &oti->oti_quota_trans;
1797         memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
1798         th = &oh->ot_super;
1799         th->th_dev = d;
1800         th->th_result = 0;
1801         oh->ot_credits = 0;
1802         oh->oh_declared_ext = 0;
1803         INIT_LIST_HEAD(&oh->ot_commit_dcb_list);
1804         INIT_LIST_HEAD(&oh->ot_stop_dcb_list);
1805         INIT_LIST_HEAD(&oh->ot_trunc_locks);
1806         osd_th_alloced(oh);
1807
1808         memset(oti->oti_declare_ops, 0,
1809                sizeof(oti->oti_declare_ops));
1810         memset(oti->oti_declare_ops_cred, 0,
1811                sizeof(oti->oti_declare_ops_cred));
1812         memset(oti->oti_declare_ops_used, 0,
1813                sizeof(oti->oti_declare_ops_used));
1814
1815         oti->oti_ins_cache_depth++;
1816
1817         RETURN(th);
1818 }
1819
1820 void osd_trans_dump_creds(const struct lu_env *env, struct thandle *th)
1821 {
1822         struct osd_thread_info *oti = osd_oti_get(env);
1823         struct osd_thandle *oh;
1824
1825         oh = container_of(th, struct osd_thandle, ot_super);
1826         LASSERT(oh != NULL);
1827
1828         CWARN("  create: %u/%u/%u, destroy: %u/%u/%u\n",
1829               oti->oti_declare_ops[OSD_OT_CREATE],
1830               oti->oti_declare_ops_cred[OSD_OT_CREATE],
1831               oti->oti_declare_ops_used[OSD_OT_CREATE],
1832               oti->oti_declare_ops[OSD_OT_DESTROY],
1833               oti->oti_declare_ops_cred[OSD_OT_DESTROY],
1834               oti->oti_declare_ops_used[OSD_OT_DESTROY]);
1835         CWARN("  attr_set: %u/%u/%u, xattr_set: %u/%u/%u\n",
1836               oti->oti_declare_ops[OSD_OT_ATTR_SET],
1837               oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
1838               oti->oti_declare_ops_used[OSD_OT_ATTR_SET],
1839               oti->oti_declare_ops[OSD_OT_XATTR_SET],
1840               oti->oti_declare_ops_cred[OSD_OT_XATTR_SET],
1841               oti->oti_declare_ops_used[OSD_OT_XATTR_SET]);
1842         CWARN("  write: %u/%u/%u, punch: %u/%u/%u, quota %u/%u/%u\n",
1843               oti->oti_declare_ops[OSD_OT_WRITE],
1844               oti->oti_declare_ops_cred[OSD_OT_WRITE],
1845               oti->oti_declare_ops_used[OSD_OT_WRITE],
1846               oti->oti_declare_ops[OSD_OT_PUNCH],
1847               oti->oti_declare_ops_cred[OSD_OT_PUNCH],
1848               oti->oti_declare_ops_used[OSD_OT_PUNCH],
1849               oti->oti_declare_ops[OSD_OT_QUOTA],
1850               oti->oti_declare_ops_cred[OSD_OT_QUOTA],
1851               oti->oti_declare_ops_used[OSD_OT_QUOTA]);
1852         CWARN("  insert: %u/%u/%u, delete: %u/%u/%u\n",
1853               oti->oti_declare_ops[OSD_OT_INSERT],
1854               oti->oti_declare_ops_cred[OSD_OT_INSERT],
1855               oti->oti_declare_ops_used[OSD_OT_INSERT],
1856               oti->oti_declare_ops[OSD_OT_DELETE],
1857               oti->oti_declare_ops_cred[OSD_OT_DELETE],
1858               oti->oti_declare_ops_used[OSD_OT_DELETE]);
1859         CWARN("  ref_add: %u/%u/%u, ref_del: %u/%u/%u\n",
1860               oti->oti_declare_ops[OSD_OT_REF_ADD],
1861               oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
1862               oti->oti_declare_ops_used[OSD_OT_REF_ADD],
1863               oti->oti_declare_ops[OSD_OT_REF_DEL],
1864               oti->oti_declare_ops_cred[OSD_OT_REF_DEL],
1865               oti->oti_declare_ops_used[OSD_OT_REF_DEL]);
1866 }
1867
1868 /*
1869  * Concurrency: shouldn't matter.
1870  */
1871 static int osd_trans_start(const struct lu_env *env, struct dt_device *d,
1872                            struct thandle *th)
1873 {
1874         struct osd_thread_info *oti = osd_oti_get(env);
1875         struct osd_device *dev = osd_dt_dev(d);
1876         handle_t *jh;
1877         struct osd_thandle *oh;
1878         int rc;
1879
1880         ENTRY;
1881
1882         LASSERT(current->journal_info == NULL);
1883
1884         oh = container_of(th, struct osd_thandle, ot_super);
1885         LASSERT(oh != NULL);
1886         LASSERT(oh->ot_handle == NULL);
1887
1888         rc = dt_txn_hook_start(env, d, th);
1889         if (rc != 0)
1890                 GOTO(out, rc);
1891
1892         if (unlikely(osd_param_is_not_sane(dev, th))) {
1893                 static unsigned long last_printed;
1894                 static int last_credits;
1895
1896                 /*
1897                  * don't make noise on tiny testing systems;
1898                  * actual credit misuse will be caught anyway
1899                  */
1900                 if (last_credits != oh->ot_credits &&
1901                     time_after(jiffies, last_printed +
1902                                cfs_time_seconds(60)) &&
1903                     osd_transaction_size(dev) > 512) {
1904                         CWARN("%s: credits %u > trans_max %u\n", osd_name(dev),
1905                               oh->ot_credits, osd_transaction_size(dev));
1906                         osd_trans_dump_creds(env, th);
1907                         libcfs_debug_dumpstack(NULL);
1908                         last_credits = oh->ot_credits;
1909                         last_printed = jiffies;
1910                 }
1911                 /*
1912                  * XXX Limit the credits to 'max_transaction_buffers', and
1913                  *     let the underlying filesystem catch the error if
1914                  *     we really need so many credits.
1915                  *
1916                  *     This should be removed when we can calculate the
1917                  *     credits precisely.
1918                  */
1919                 oh->ot_credits = osd_transaction_size(dev);
1920         } else if (ldiskfs_track_declares_assert != 0) {
1921                 /*
1922                  * reserve a few credits to prevent an assertion in JBD;
1923                  * our debugging mechanism will still be able to detect
1924                  * overuse. This can help to debug single-update
1925                  * transactions.
1926                  */
1927                 oh->ot_credits += 10;
1928                 if (unlikely(osd_param_is_not_sane(dev, th)))
1929                         oh->ot_credits = osd_transaction_size(dev);
1930         }
1931
1932         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_TXN_START))
1933                 GOTO(out, rc = -EIO);
1934
1935         /*
1936          * XXX temporary stuff. Some abstraction layer should
1937          * be used.
1938          */
1939         jh = osd_journal_start_sb(osd_sb(dev), LDISKFS_HT_MISC, oh->ot_credits);
1940         osd_th_started(oh);
1941         if (!IS_ERR(jh)) {
1942                 oh->ot_handle = jh;
1943                 LASSERT(oti->oti_txns == 0);
1944
1945                 atomic_inc(&dev->od_commit_cb_in_flight);
1946                 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
1947                               "osd-tx", th);
1948                 oti->oti_txns++;
1949                 rc = 0;
1950         } else {
1951                 rc = PTR_ERR(jh);
1952         }
1953 out:
1954         RETURN(rc);
1955 }
1956
1957 static int osd_seq_exists(const struct lu_env *env,
1958                           struct osd_device *osd, u64 seq)
1959 {
1960         struct lu_seq_range *range = &osd_oti_get(env)->oti_seq_range;
1961         struct seq_server_site *ss = osd_seq_site(osd);
1962         int rc;
1963
1964         ENTRY;
1965
1966         LASSERT(ss != NULL);
1967         LASSERT(ss->ss_server_fld != NULL);
1968
1969         rc = osd_fld_lookup(env, osd, seq, range);
1970         if (rc != 0) {
1971                 if (rc != -ENOENT)
1972                         CERROR("%s: can't lookup FLD sequence %#llx: rc = %d\n",
1973                                osd_name(osd), seq, rc);
1974                 RETURN(0);
1975         }
1976
1977         RETURN(ss->ss_node_id == range->lsr_index);
1978 }
1979
1980 static void osd_trans_stop_cb(struct osd_thandle *oth, int result)
1981 {
1982         struct dt_txn_commit_cb *dcb;
1983         struct dt_txn_commit_cb *tmp;
1984
1985         /* call per-transaction stop callbacks if any */
1986         list_for_each_entry_safe(dcb, tmp, &oth->ot_stop_dcb_list,
1987                                  dcb_linkage) {
1988                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
1989                          "commit callback entry: magic=%x name='%s'\n",
1990                          dcb->dcb_magic, dcb->dcb_name);
1991                 list_del_init(&dcb->dcb_linkage);
1992                 dcb->dcb_func(NULL, &oth->ot_super, dcb, result);
1993         }
1994 }
1995
1996 /*
1997  * Concurrency: shouldn't matter.
1998  */
1999 static int osd_trans_stop(const struct lu_env *env, struct dt_device *dt,
2000                           struct thandle *th)
2001 {
2002         struct osd_thread_info *oti = osd_oti_get(env);
2003         struct osd_thandle *oh;
2004         struct osd_iobuf *iobuf = &oti->oti_iobuf;
2005         struct osd_device *osd = osd_dt_dev(th->th_dev);
2006         struct qsd_instance *qsd = osd_def_qsd(osd);
2007         struct lquota_trans *qtrans;
2008         LIST_HEAD(truncates);
2009         int rc = 0, remove_agents = 0;
2010
2011         ENTRY;
2012
2013         oh = container_of(th, struct osd_thandle, ot_super);
2014
2015         remove_agents = oh->ot_remove_agents;
2016
2017         qtrans = oh->ot_quota_trans;
2018         oh->ot_quota_trans = NULL;
2019
2020         /* move locks to local list, stop tx, execute truncates */
2021         list_splice(&oh->ot_trunc_locks, &truncates);
2022
2023         if (oh->ot_handle != NULL) {
2024                 int rc2;
2025
2026                 handle_t *hdl = oh->ot_handle;
2027
2028                 /*
2029                  * add commit callback
2030                  * notice we don't do this in osd_trans_start()
2031                  * as the underlying transaction can change during truncate
2032                  */
2033                 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
2034                                              &oh->ot_jcb);
2035
2036                 LASSERT(oti->oti_txns == 1);
2037                 oti->oti_txns--;
2038
2039                 rc = dt_txn_hook_stop(env, th);
2040                 if (rc != 0)
2041                         CERROR("%s: failed in transaction hook: rc = %d\n",
2042                                osd_name(osd), rc);
2043
2044                 osd_trans_stop_cb(oh, rc);
2045                 /* hook functions might modify th_sync */
2046                 hdl->h_sync = th->th_sync;
2047
2048                 oh->ot_handle = NULL;
2049                 OSD_CHECK_SLOW_TH(oh, osd, rc2 = ldiskfs_journal_stop(hdl));
2050                 if (rc2 != 0)
2051                         CERROR("%s: failed to stop transaction: rc = %d\n",
2052                                osd_name(osd), rc2);
2053                 if (!rc)
2054                         rc = rc2;
2055
2056                 /* We preserve the original behavior of ignoring any
2057                  * failures of the underlying punch / truncate
2058                  * operation. If an error does occur, we record it
2059                  * in the lctl dk logs for debugging.
2060                  */
2061                 rc2 = osd_process_truncates(env, &truncates);
2062                 if (rc2 != 0)
2063                         CERROR("%s: failed truncate process: rc = %d\n",
2064                                osd_name(osd), rc2);
2065         } else {
2066                 osd_trans_stop_cb(oh, th->th_result);
2067                 OBD_FREE_PTR(oh);
2068         }
2069
2070         osd_trunc_unlock_all(env, &truncates);
2071
2072         /* inform the quota slave device that the transaction is stopping */
2073         qsd_op_end(env, qsd, qtrans);
2074
2075         /*
2076          * as we want journal IO and data IO to be concurrent, we don't block
2077          * awaiting data IO completion in osd_do_bio(); instead we wait here
2078          * once the transaction is submitted to the journal. All regular
2079          * requests don't do direct IO (except read/write), thus this
2080          * wait_event becomes a no-op for them.
2081          *
2082          * IMPORTANT: we have to wait until any IO submitted by the thread is
2083          * completed, otherwise the iobuf may be corrupted by a different request
2084          */
2085         wait_event(iobuf->dr_wait,
2086                        atomic_read(&iobuf->dr_numreqs) == 0);
2087         osd_fini_iobuf(osd, iobuf);
2088         if (!rc)
2089                 rc = iobuf->dr_error;
2090
2091         if (unlikely(remove_agents != 0))
2092                 osd_process_scheduled_agent_removals(env, osd);
2093
2094         LASSERT(oti->oti_ins_cache_depth > 0);
2095         oti->oti_ins_cache_depth--;
2096         /* reset OI cache for safety */
2097         if (oti->oti_ins_cache_depth == 0)
2098                 oti->oti_ins_cache_used = 0;
2099
2100         sb_end_write(osd_sb(osd));
2101
2102         RETURN(rc);
2103 }
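
/*
 * Illustrative sketch (not part of the original code): the expected
 * caller-side lifecycle of the handles managed above is create -> declare
 * (accumulating ot_credits) -> start (opening the journal handle) -> apply
 * updates -> stop.  A minimal sketch through the generic dt_* wrappers,
 * assuming the usual dt_object.h convenience API, could look like this.
 */
#if 0	/* example only */
static int example_txn(const struct lu_env *env, struct dt_device *dev,
                       struct dt_object *obj, const struct lu_attr *attr)
{
        struct thandle *th;
        int rc, rc2;

        th = dt_trans_create(env, dev);                 /* osd_trans_create() */
        if (IS_ERR(th))
                return PTR_ERR(th);

        rc = dt_declare_attr_set(env, obj, attr, th);   /* reserves credits */
        if (rc == 0)
                rc = dt_trans_start(env, dev, th);      /* osd_trans_start() */
        if (rc == 0)
                rc = dt_attr_set(env, obj, attr, th);   /* the actual update */

        /* stop always runs: journal stop, stop/commit callbacks, qsd_op_end */
        rc2 = dt_trans_stop(env, dev, th);              /* osd_trans_stop() */
        return rc ? rc : rc2;
}
#endif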
2104
2105 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
2106 {
2107         struct osd_thandle *oh = container_of(th, struct osd_thandle,
2108                                               ot_super);
2109
2110         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
2111         LASSERT(dcb->dcb_func != NULL);
2112         if (dcb->dcb_flags & DCB_TRANS_STOP)
2113                 list_add(&dcb->dcb_linkage, &oh->ot_stop_dcb_list);
2114         else
2115                 list_add(&dcb->dcb_linkage, &oh->ot_commit_dcb_list);
2116
2117         return 0;
2118 }
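
/*
 * Illustrative sketch (not part of the original code): a callback is filed on
 * the commit list (run from osd_trans_commit_cb() once the journal commits)
 * or, with DCB_TRANS_STOP, on the stop list (run from osd_trans_stop_cb()
 * when the handle is stopped).  A hypothetical caller registering a commit
 * callback might look like the following; the exact dt_cb_t prototype and the
 * generic dt_trans_cb_add() wrapper are assumed from dt_object.h.
 */
#if 0	/* example only */
static void example_commit_cb(struct lu_env *env, struct thandle *th,
                              struct dt_txn_commit_cb *dcb, int err)
{
        /* runs from osd_trans_commit_cb() after the journal commit */
}

static int example_register_cb(struct thandle *th,
                               struct dt_txn_commit_cb *dcb)
{
        dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC; /* checked by LASSERTF above */
        dcb->dcb_func  = example_commit_cb;
        dcb->dcb_flags = 0;             /* or DCB_TRANS_STOP for the stop list */
        /* dcb_name should also be filled in for debugging messages */
        return dt_trans_cb_add(th, dcb);        /* ends up in osd_trans_cb_add() */
}
#endif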
2119
2120 /*
2121  * Called just before object is freed. Releases all resources except for
2122  * object itself (that is released by osd_object_free()).
2123  *
2124  * Concurrency: no concurrent access is possible that late in object
2125  * life-cycle.
2126  */
2127 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
2128 {
2129         struct osd_object *obj = osd_obj(l);
2130         struct qsd_instance *qsd = osd_def_qsd(osd_obj2dev(obj));
2131         struct inode *inode = obj->oo_inode;
2132         __u64 projid;
2133         qid_t uid;
2134         qid_t gid;
2135
2136         LINVRNT(osd_invariant(obj));
2137
2138         /*
2139          * If the object is unlinked, remove the fid->ino mapping from the object index.
2140          */
2141
2142         osd_index_fini(obj);
2143
2144         if (!inode)
2145                 return;
2146
2147         if (osd_has_index(obj) && obj->oo_dt.do_index_ops == &osd_index_iam_ops)
2148                 ldiskfs_set_inode_flag(inode, LDISKFS_INODE_JOURNAL_DATA);
2149
2150         uid = i_uid_read(inode);
2151         gid = i_gid_read(inode);
2152         projid = i_projid_read(inode);
2153
2154         obj->oo_inode = NULL;
2155         iput(inode);
2156
2157         /* do not rebalance quota if the caller needs to release memory,
2158          * otherwise qsd_refresh_usage() may start a new ldiskfs
2159          * transaction and risk a deadlock - LU-12178 */
2160         if (current->flags & (PF_MEMALLOC | PF_KSWAPD))
2161                 return;
2162
2163         if (!obj->oo_header && qsd) {
2164                 struct osd_thread_info *info = osd_oti_get(env);
2165                 struct lquota_id_info *qi = &info->oti_qi;
2166
2167                 /* Release granted quota to master if necessary */
2168                 qi->lqi_id.qid_uid = uid;
2169                 qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
2170
2171                 qi->lqi_id.qid_uid = gid;
2172                 qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
2173
2174                 qi->lqi_id.qid_uid = projid;
2175                 qsd_op_adjust(env, qsd, &qi->lqi_id, PRJQUOTA);
2176         }
2177 }
2178
2179 /*
2180  * Concurrency: ->loo_object_release() is called under site spin-lock.
2181  */
2182 static void osd_object_release(const struct lu_env *env,
2183                                struct lu_object *l)
2184 {
2185         struct osd_object *o = osd_obj(l);
2186
2187         /*
2188          * nobody should be releasing a non-destroyed object with nlink=0;
2189          * the API allows this, but ldiskfs doesn't like it and then reports
2190          * this inode as deleted
2191          */
2192         LASSERT(!(o->oo_destroyed == 0 && o->oo_inode &&
2193                   o->oo_inode->i_nlink == 0));
2194 }
2195
2196 /*
2197  * Concurrency: shouldn't matter.
2198  */
2199 static int osd_object_print(const struct lu_env *env, void *cookie,
2200                             lu_printer_t p, const struct lu_object *l)
2201 {
2202         struct osd_object *o = osd_obj(l);
2203         struct iam_descr *d;
2204
2205         if (o->oo_dir != NULL)
2206                 d = o->oo_dir->od_container.ic_descr;
2207         else
2208                 d = NULL;
2209         return (*p)(env, cookie,
2210                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
2211                     o, o->oo_inode,
2212                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
2213                     o->oo_inode ? o->oo_inode->i_generation : 0,
2214                     d ? d->id_ops->id_name : "plain");
2215 }
2216
2217 /*
2218  * Concurrency: shouldn't matter.
2219  */
2220 int osd_statfs(const struct lu_env *env, struct dt_device *d,
2221                 struct obd_statfs *sfs, struct obd_statfs_info *info)
2222 {
2223         struct osd_device *osd = osd_dt_dev(d);
2224         struct super_block *sb = osd_sb(osd);
2225         struct kstatfs *ksfs;
2226         __u64 reserved;
2227         int result = 0;
2228
2229         if (unlikely(osd->od_mnt == NULL))
2230                 return -EINPROGRESS;
2231
2232         /* osd_lproc.c calls this without an env, allocate ksfs for that case */
2233         if (unlikely(env == NULL)) {
2234                 OBD_ALLOC_PTR(ksfs);
2235                 if (ksfs == NULL)
2236                         return -ENOMEM;
2237         } else {
2238                 ksfs = &osd_oti_get(env)->oti_ksfs;
2239         }
2240
2241         result = sb->s_op->statfs(sb->s_root, ksfs);
2242         if (result)
2243                 goto out;
2244
2245         statfs_pack(sfs, ksfs);
2246         if (unlikely(sb->s_flags & SB_RDONLY))
2247                 sfs->os_state |= OS_STATFS_READONLY;
2248
2249         sfs->os_state |= osd->od_nonrotational ? OS_STATFS_NONROT : 0;
2250
2251         if (ldiskfs_has_feature_extents(sb))
2252                 sfs->os_maxbytes = sb->s_maxbytes;
2253         else
2254                 sfs->os_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2255
2256         /*
2257          * Reserve some space to avoid fragmenting the filesystem too much.
2258          * Fragmentation not only impacts performance, but can also increase
2259          * metadata overhead significantly, causing grant calculation to be
2260          * wrong.
2261          *
2262          * Reserve 0.78% of total space, at least 8MB for small filesystems.
2263          */
2264         BUILD_BUG_ON(OSD_STATFS_RESERVED <= LDISKFS_MAX_BLOCK_SIZE);
2265         reserved = OSD_STATFS_RESERVED >> sb->s_blocksize_bits;
2266         if (likely(sfs->os_blocks >= reserved << OSD_STATFS_RESERVED_SHIFT))
2267                 reserved = sfs->os_blocks >> OSD_STATFS_RESERVED_SHIFT;
2268
2269         sfs->os_blocks -= reserved;
2270         sfs->os_bfree  -= min(reserved, sfs->os_bfree);
2271         sfs->os_bavail -= min(reserved, sfs->os_bavail);
2272
2273 out:
2274         if (unlikely(env == NULL))
2275                 OBD_FREE_PTR(ksfs);
2276         return result;
2277 }
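
/*
 * Worked example (not part of the original code), assuming
 * OSD_STATFS_RESERVED is 8MB and OSD_STATFS_RESERVED_SHIFT is 7
 * (1/128 ~= 0.78%); the actual values are defined in the OSD headers.
 * With 4KB blocks the floor is 8MB >> 12 = 2048 blocks.  On a 2TB
 * filesystem (536870912 blocks) the fractional reservation
 * 536870912 >> 7 = 4194304 blocks (~16GB, 0.78%) is larger, so that is
 * what gets subtracted from os_blocks/os_bfree/os_bavail above.
 */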
2278
2279 /**
2280  * Estimate space needed for file creations. We assume the largest filename,
2281  * the decimal form of 2^64 - 1, hence a filename of 20 chars.
2282  * This is 28 bytes per object, which is 28MB for 1M objects ... not so bad.
2283  */
2284 #ifdef __LDISKFS_DIR_REC_LEN
2285 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
2286 #else
2287 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
2288 #endif
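
/*
 * For reference (assuming the usual ldiskfs dirent record-length formula,
 * which rounds 8 bytes of header plus the name length up to a multiple of 4):
 * a 20-character name gives (8 + 20) rounded to 28 bytes, which is where the
 * 28MB-per-million-objects estimate above comes from.
 */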
2289
2290 /*
2291  * Concurrency: doesn't access mutable data.
2292  */
2293 static void osd_conf_get(const struct lu_env *env,
2294                          const struct dt_device *dev,
2295                          struct dt_device_param *param)
2296 {
2297         struct osd_device *d = osd_dt_dev(dev);
2298         struct super_block *sb = osd_sb(d);
2299         struct blk_integrity *bi = bdev_get_integrity(sb->s_bdev);
2300         const char *name;
2301         int ea_overhead;
2302
2303         /*
2304          * XXX should be taken from not-yet-existing fs abstraction layer.
2305          */
2306         param->ddp_max_name_len = LDISKFS_NAME_LEN;
2307         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
2308         param->ddp_symlink_max  = sb->s_blocksize;
2309         param->ddp_mount_type   = LDD_MT_LDISKFS;
2310         if (ldiskfs_has_feature_extents(sb))
2311                 param->ddp_maxbytes = sb->s_maxbytes;
2312         else
2313                 param->ddp_maxbytes = LDISKFS_SB(sb)->s_bitmap_maxbytes;
2314         /*
2315          * inodes are statically allocated, so per-inode space consumption
2316          * is the space consumed by the directory entry
2317          */
2318         param->ddp_inodespace     = PER_OBJ_USAGE;
2319         /*
2320          * EXT_INIT_MAX_LEN is the theoretical maximum extent size (32k blocks
2321          * is 128MB) which is unlikely to be hit in real life. Report a smaller
2322          * maximum length to not under-count the actual number of extents
2323          * needed for writing a file if there are sub-optimal block allocations.
2324          */
2325         param->ddp_max_extent_blks = EXT_INIT_MAX_LEN >> 1;
2326         /* worst-case extent insertion metadata overhead */
2327         param->ddp_extent_tax = 6 * LDISKFS_BLOCK_SIZE(sb);
2328         param->ddp_mntopts = 0;
2329         if (test_opt(sb, XATTR_USER))
2330                 param->ddp_mntopts |= MNTOPT_USERXATTR;
2331         if (test_opt(sb, POSIX_ACL))
2332                 param->ddp_mntopts |= MNTOPT_ACL;
2333
2334         /*
2335          * LOD might calculate the max stripe count based on max_ea_size,
2336          * so we need to take the overhead into account as well:
2337          * xattr_header + magic + xattr_entry_head
2338          */
2339         ea_overhead = sizeof(struct ldiskfs_xattr_header) + sizeof(__u32) +
2340                       LDISKFS_XATTR_LEN(XATTR_NAME_MAX_LEN);
2341
2342 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
2343         if (ldiskfs_has_feature_ea_inode(sb))
2344                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE -
2345                                                                 ea_overhead;
2346         else
2347 #endif
2348                 param->ddp_max_ea_size = sb->s_blocksize - ea_overhead;
2349
2350         if (param->ddp_max_ea_size > OSD_MAX_EA_SIZE)
2351                 param->ddp_max_ea_size = OSD_MAX_EA_SIZE;
2352
2353         /*
2354          * Preferred RPC size for efficient disk IO.  4MB shows good
2355          * all-around performance for ldiskfs, but use bigalloc chunk size
2356          * by default if larger.
2357          */
2358 #if defined(LDISKFS_CLUSTER_SIZE)
2359         if (LDISKFS_CLUSTER_SIZE(sb) > DT_DEF_BRW_SIZE)
2360                 param->ddp_brw_size = LDISKFS_CLUSTER_SIZE(sb);
2361         else
2362 #endif
2363                 param->ddp_brw_size = DT_DEF_BRW_SIZE;
2364
2365         param->ddp_t10_cksum_type = 0;
2366         if (bi) {
2367                 unsigned short interval = blk_integrity_interval(bi);
2368                 name = blk_integrity_name(bi);
2369                 /*
2370                  * Expected values:
2371                  * T10-DIF-TYPE1-CRC
2372                  * T10-DIF-TYPE3-CRC
2373                  * T10-DIF-TYPE1-IP
2374                  * T10-DIF-TYPE3-IP
2375                  */
2376                 if (strncmp(name, "T10-DIF-TYPE",
2377                             sizeof("T10-DIF-TYPE") - 1) == 0) {
2378                         /* also skip "1/3-" at end */
2379                         const int type_off = sizeof("T10-DIF-TYPE.");
2380                         char type_number = name[type_off - 2];
2381
2382                         if (interval != 512 && interval != 4096) {
2383                                 CERROR("%s: unsupported T10PI sector size %u\n",
2384                                        d->od_svname, interval);
2385                         } else if (type_number != '1' && type_number != '3') {
2386                                 CERROR("%s: unsupported T10PI type %s\n",
2387                                        d->od_svname, name);
2388                         } else if (strcmp(name + type_off, "CRC") == 0) {
2389                                 d->od_t10_type = type_number == '1' ?
2390                                         OSD_T10_TYPE1_CRC : OSD_T10_TYPE3_CRC;
2391                                 param->ddp_t10_cksum_type = interval == 512 ?
2392                                         OBD_CKSUM_T10CRC512 :
2393                                         OBD_CKSUM_T10CRC4K;
2394                         } else if (strcmp(name + type_off, "IP") == 0) {
2395                                 d->od_t10_type = type_number == '1' ?
2396                                         OSD_T10_TYPE1_IP : OSD_T10_TYPE3_IP;
2397                                 param->ddp_t10_cksum_type = interval == 512 ?
2398                                         OBD_CKSUM_T10IP512 :
2399                                         OBD_CKSUM_T10IP4K;
2400                         } else {
2401                                 CERROR("%s: unsupported checksum type of T10PI type '%s'\n",
2402                                        d->od_svname, name);
2403                         }
2404
2405                 } else {
2406                         CERROR("%s: unsupported T10PI type '%s'\n",
2407                                d->od_svname, name);
2408                 }
2409         }
2410
2411         param->ddp_has_lseek_data_hole = true;
2412 }
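
/*
 * For example (not part of the original code): with an integrity profile
 * named "T10-DIF-TYPE1-CRC" and a 512-byte interval, type_off is
 * sizeof("T10-DIF-TYPE.") == 14, name[type_off - 2] is '1' and
 * name + type_off is "CRC", so od_t10_type becomes OSD_T10_TYPE1_CRC and
 * ddp_t10_cksum_type becomes OBD_CKSUM_T10CRC512.
 */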
2413
2414 static struct super_block *osd_mnt_sb_get(const struct dt_device *d)
2415 {
2416         return osd_sb(osd_dt_dev(d));
2417 }
2418
2419 /*
2420  * Concurrency: shouldn't matter.
2421  */
2422 static int osd_sync(const struct lu_env *env, struct dt_device *d)
2423 {
2424         int rc;
2425         struct super_block *s = osd_sb(osd_dt_dev(d));
2426         ENTRY;
2427
2428         down_read(&s->s_umount);
2429         rc = s->s_op->sync_fs(s, 1);
2430         up_read(&s->s_umount);
2431
2432         CDEBUG(D_CACHE, "%s: synced OSD: rc = %d\n", osd_dt_dev(d)->od_svname,
2433                rc);
2434
2435         return rc;
2436 }
2437
2438 /**
2439  * Start commit for OSD device.
2440  *
2441  * An implementation of dt_commit_async method for OSD device.
2442  * Asynchronously starts the underlying fs sync and thereby a transaction
2443  * commit.
2444  *
2445  * \param env environment
2446  * \param d dt device
2447  *
2448  * \see dt_device_operations
2449  */
2450 static int osd_commit_async(const struct lu_env *env,
2451                             struct dt_device *d)
2452 {
2453         struct super_block *s = osd_sb(osd_dt_dev(d));
2454         int rc;
2455
2456         ENTRY;
2457
2458         CDEBUG(D_HA, "%s: async commit OSD\n", osd_dt_dev(d)->od_svname);
2459         down_read(&s->s_umount);
2460         rc = s->s_op->sync_fs(s, 0);
2461         up_read(&s->s_umount);
2462
2463         RETURN(rc);
2464 }
2465
2466 /*
2467  * Concurrency: shouldn't matter.
2468  */
2469 static int osd_ro(const struct lu_env *env, struct dt_device *d)
2470 {
2471         struct super_block *sb = osd_sb(osd_dt_dev(d));
2472         struct block_device *dev = sb->s_bdev;
2473         int rc = -EOPNOTSUPP;
2474
2475         ENTRY;
2476
2477         CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
2478                osd_dt_dev(d)->od_svname, (long)dev, rc);
2479
2480         RETURN(rc);
2481 }
2482
2483 /**
2484  * Note: quota credits are not counted here.
2485  * If we mount with --data_journal we may need more.
2486  */
2487 const int osd_dto_credits_noquota[DTO_NR] = {
2488         /**
2489          * Insert.
2490          * INDEX_EXTRA_TRANS_BLOCKS(8) +
2491          * SINGLEDATA_TRANS_BLOCKS(8)
2492          * XXX Note: maybe iam needs more, since iam has more levels than
2493          *           the EXT3 htree.
2494          */
2495         [DTO_INDEX_INSERT]  = 16,
2496         /**
2497          * Delete
2498          * just modify a single entry, probably merge few within a block
2499          * just modify a single entry, probably merge a few within a block
2500         [DTO_INDEX_DELETE]  = 1,
2501         /**
2502          * Used for OI scrub
2503          */
2504         [DTO_INDEX_UPDATE]  = 16,
2505         /**
2506          * 4(inode, inode bits, groups, GDT)
2507          *   notice: OI updates are counted separately with DTO_INDEX_INSERT
2508          */
2509         [DTO_OBJECT_CREATE] = 4,
2510         /**
2511          * 4(inode, inode bits, groups, GDT)
2512          *   notice: OI updates are counted separately with DTO_INDEX_DELETE
2513          */
2514         [DTO_OBJECT_DELETE] = 4,
2515         /**
2516          * Attr set credits (inode)
2517          */
2518         [DTO_ATTR_SET_BASE] = 1,
2519         /**
2520          * Xattr set. The same as xattr of EXT3.
2521          * DATA_TRANS_BLOCKS(14)
2522          * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
2523          * are also counted in. It is not clear why.
2524          */
2525         [DTO_XATTR_SET]     = 14,
2526         /**
2527          * credits for inode change during write.
2528          */
2529         [DTO_WRITE_BASE]    = 3,
2530         /**
2531          * credits for single block write.
2532          */
2533         [DTO_WRITE_BLOCK]   = 14,
2534         /**
2535          * Attr set credits for chown.
2536          * These are extra credits for setattr, and zero without quota
2537          */
2538         [DTO_ATTR_SET_CHOWN] = 0
2539 };
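
/*
 * For instance (not part of the original code): osd_declare_attr_set() below
 * reserves osd_dto_credits_noquota[DTO_ATTR_SET_BASE] (1) plus
 * osd_dto_credits_noquota[DTO_XATTR_SET] (14) credits, and quota-related
 * credits are then reserved on top via osd_declare_attr_qid().
 */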
2540
2541 /* reserve or free quota for some operation */
2542 static int osd_reserve_or_free_quota(const struct lu_env *env,
2543                                      struct dt_device *dev,
2544                                      struct lquota_id_info *qi)
2545 {
2546         struct osd_device       *osd = osd_dt_dev(dev);
2547         struct qsd_instance     *qsd = NULL;
2548         int rc;
2549
2550         ENTRY;
2551
2552         if (qi->lqi_is_blk)
2553                 qsd = osd->od_quota_slave_dt;
2554         else
2555                 qsd = osd->od_quota_slave_md;
2556
2557         rc = qsd_reserve_or_free_quota(env, qsd, qi);
2558         RETURN(rc);
2559 }
2560
2561 static const struct dt_device_operations osd_dt_ops = {
2562         .dt_root_get              = osd_root_get,
2563         .dt_statfs                = osd_statfs,
2564         .dt_trans_create          = osd_trans_create,
2565         .dt_trans_start           = osd_trans_start,
2566         .dt_trans_stop            = osd_trans_stop,
2567         .dt_trans_cb_add          = osd_trans_cb_add,
2568         .dt_conf_get              = osd_conf_get,
2569         .dt_mnt_sb_get            = osd_mnt_sb_get,
2570         .dt_sync                  = osd_sync,
2571         .dt_ro                    = osd_ro,
2572         .dt_commit_async          = osd_commit_async,
2573         .dt_reserve_or_free_quota = osd_reserve_or_free_quota,
2574 };
2575
2576 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
2577                           unsigned int role)
2578 {
2579         struct osd_object *obj = osd_dt_obj(dt);
2580         struct osd_thread_info *oti = osd_oti_get(env);
2581
2582         LINVRNT(osd_invariant(obj));
2583
2584         LASSERT(obj->oo_owner != env);
2585         down_read_nested(&obj->oo_sem, role);
2586
2587         LASSERT(obj->oo_owner == NULL);
2588         oti->oti_r_locks++;
2589 }
2590
2591 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
2592                            unsigned int role)
2593 {
2594         struct osd_object *obj = osd_dt_obj(dt);
2595         struct osd_thread_info *oti = osd_oti_get(env);
2596
2597         LINVRNT(osd_invariant(obj));
2598
2599         LASSERT(obj->oo_owner != env);
2600         down_write_nested(&obj->oo_sem, role);
2601
2602         LASSERT(obj->oo_owner == NULL);
2603         obj->oo_owner = env;
2604         oti->oti_w_locks++;
2605 }
2606
2607 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
2608 {
2609         struct osd_object *obj = osd_dt_obj(dt);
2610         struct osd_thread_info *oti = osd_oti_get(env);
2611
2612         LINVRNT(osd_invariant(obj));
2613
2614         LASSERT(oti->oti_r_locks > 0);
2615         oti->oti_r_locks--;
2616         up_read(&obj->oo_sem);
2617 }
2618
2619 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
2620 {
2621         struct osd_object *obj = osd_dt_obj(dt);
2622         struct osd_thread_info *oti = osd_oti_get(env);
2623
2624         LINVRNT(osd_invariant(obj));
2625
2626         LASSERT(obj->oo_owner == env);
2627         LASSERT(oti->oti_w_locks > 0);
2628         oti->oti_w_locks--;
2629         obj->oo_owner = NULL;
2630         up_write(&obj->oo_sem);
2631 }
2632
2633 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
2634 {
2635         struct osd_object *obj = osd_dt_obj(dt);
2636
2637         LINVRNT(osd_invariant(obj));
2638
2639         return obj->oo_owner == env;
2640 }
2641
2642 static void osd_inode_getattr(const struct lu_env *env,
2643                               struct inode *inode, struct lu_attr *attr)
2644 {
2645         attr->la_valid  |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2646                            LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2647                            LA_PROJID | LA_FLAGS | LA_NLINK | LA_RDEV |
2648                            LA_BLKSIZE | LA_TYPE | LA_BTIME;
2649
2650         attr->la_atime = inode->i_atime.tv_sec;
2651         attr->la_mtime = inode->i_mtime.tv_sec;
2652         attr->la_ctime = inode->i_ctime.tv_sec;
2653         attr->la_btime = LDISKFS_I(inode)->i_crtime.tv_sec;
2654         attr->la_mode    = inode->i_mode;
2655         attr->la_size    = i_size_read(inode);
2656         attr->la_blocks  = inode->i_blocks;
2657         attr->la_uid     = i_uid_read(inode);
2658         attr->la_gid     = i_gid_read(inode);
2659         attr->la_projid  = i_projid_read(inode);
2660         attr->la_flags   = ll_inode_to_ext_flags(inode->i_flags);
2661         attr->la_nlink   = inode->i_nlink;
2662         attr->la_rdev    = inode->i_rdev;
2663         attr->la_blksize = 1 << inode->i_blkbits;
2664         attr->la_blkbits = inode->i_blkbits;
2665         /*
2666          * Ext4 did not transfer inherit flags from raw inode
2667          * to inode flags, and ext4 internally test raw inode
2668          * @i_flags directly. Instead of patching ext4, we do it here.
2669          */
2670         if (LDISKFS_I(inode)->i_flags & LUSTRE_PROJINHERIT_FL)
2671                 attr->la_flags |= LUSTRE_PROJINHERIT_FL;
2672 }
2673
2674 static int osd_dirent_count(const struct lu_env *env, struct dt_object *dt,
2675                             u64 *count)
2676 {
2677         struct osd_object *obj = osd_dt_obj(dt);
2678         const struct dt_it_ops *iops;
2679         struct dt_it *it;
2680         int rc;
2681
2682         ENTRY;
2683
2684         LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2685         LASSERT(fid_is_namespace_visible(lu_object_fid(&obj->oo_dt.do_lu)));
2686
2687         if (obj->oo_dirent_count != LU_DIRENT_COUNT_UNSET) {
2688                 *count = obj->oo_dirent_count;
2689                 RETURN(0);
2690         }
2691
2692         /* directory not initialized yet */
2693         if (!dt->do_index_ops) {
2694                 *count = 0;
2695                 RETURN(0);
2696         }
2697
2698         iops = &dt->do_index_ops->dio_it;
2699         it = iops->init(env, dt, LUDA_64BITHASH);
2700         if (IS_ERR(it))
2701                 RETURN(PTR_ERR(it));
2702
2703         rc = iops->load(env, it, 0);
2704         if (rc < 0) {
2705                 if (rc == -ENODATA) {
2706                         rc = 0;
2707                         *count = 0;
2708                 }
2709                 GOTO(out, rc);
2710         }
2711         if (rc > 0)
2712                 rc = iops->next(env, it);
2713
2714         for (*count = 0; rc == 0 || rc == -ESTALE; rc = iops->next(env, it)) {
2715                 if (rc == -ESTALE)
2716                         continue;
2717
2718                 if (iops->key_size(env, it) == 0)
2719                         continue;
2720
2721                 (*count)++;
2722         }
2723         if (rc == 1) {
2724                 obj->oo_dirent_count = *count;
2725                 rc = 0;
2726         }
2727 out:
2728         iops->put(env, it);
2729         iops->fini(env, it);
2730
2731         RETURN(rc);
2732 }
2733
2734 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
2735                         struct lu_attr *attr)
2736 {
2737         struct osd_object *obj = osd_dt_obj(dt);
2738         int rc = 0;
2739
2740         if (unlikely(!dt_object_exists(dt)))
2741                 return -ENOENT;
2742         if (unlikely(obj->oo_destroyed))
2743                 return -ENOENT;
2744
2745         LASSERT(!dt_object_remote(dt));
2746         LINVRNT(osd_invariant(obj));
2747
2748         spin_lock(&obj->oo_guard);
2749         osd_inode_getattr(env, obj->oo_inode, attr);
2750         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
2751                 attr->la_valid |= LA_FLAGS;
2752                 attr->la_flags |= LUSTRE_ORPHAN_FL;
2753         }
2754         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
2755                 attr->la_valid |= LA_FLAGS;
2756                 attr->la_flags |= LUSTRE_ENCRYPT_FL;
2757         }
2758         spin_unlock(&obj->oo_guard);
2759
2760         if (S_ISDIR(obj->oo_inode->i_mode) &&
2761             fid_is_namespace_visible(lu_object_fid(&dt->do_lu)))
2762                 rc = osd_dirent_count(env, dt, &attr->la_dirent_count);
2763
2764         return rc;
2765 }
2766
2767 static int osd_declare_attr_qid(const struct lu_env *env,
2768                                 struct osd_object *obj,
2769                                 struct osd_thandle *oh, long long bspace,
2770                                 qid_t old_id, qid_t new_id, bool enforce,
2771                                 unsigned int type)
2772 {
2773         int rc;
2774         struct osd_thread_info *info = osd_oti_get(env);
2775         struct lquota_id_info  *qi = &info->oti_qi;
2776
2777         qi->lqi_type = type;
2778         /* inode accounting */
2779         qi->lqi_is_blk = false;
2780
2781         /* one more inode for the new id ... */
2782         qi->lqi_id.qid_uid = new_id;
2783         qi->lqi_space      = 1;
2784         /* Reserve credits for the new id */
2785         rc = osd_declare_qid(env, oh, qi, NULL, enforce, NULL);
2786         if (rc == -EDQUOT || rc == -EINPROGRESS)
2787                 rc = 0;
2788         if (rc)
2789                 RETURN(rc);
2790
2791         /* and one less inode for the current id */
2792         qi->lqi_id.qid_uid = old_id;
2793         qi->lqi_space = -1;
2794         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2795         if (rc == -EDQUOT || rc == -EINPROGRESS)
2796                 rc = 0;
2797         if (rc)
2798                 RETURN(rc);
2799
2800         /* block accounting */
2801         qi->lqi_is_blk = true;
2802
2803         /* more blocks for the new id ... */
2804         qi->lqi_id.qid_uid = new_id;
2805         qi->lqi_space      = bspace;
2806         /*
2807          * Credits for the new uid have been reserved, re-use "obj"
2808          * to save credit reservation.
2809          */
2810         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2811         if (rc == -EDQUOT || rc == -EINPROGRESS)
2812                 rc = 0;
2813         if (rc)
2814                 RETURN(rc);
2815
2816         /* and finally less blocks for the current uid */
2817         qi->lqi_id.qid_uid = old_id;
2818         qi->lqi_space      = -bspace;
2819         rc = osd_declare_qid(env, oh, qi, obj, enforce, NULL);
2820         if (rc == -EDQUOT || rc == -EINPROGRESS)
2821                 rc = 0;
2822
2823         RETURN(rc);
2824 }
2825
2826 static int osd_declare_attr_set(const struct lu_env *env,
2827                                 struct dt_object *dt,
2828                                 const struct lu_attr *attr,
2829                                 struct thandle *handle)
2830 {
2831         struct osd_thandle *oh;
2832         struct osd_object *obj;
2833         qid_t uid;
2834         qid_t gid;
2835         long long bspace;
2836         int rc = 0;
2837         bool enforce;
2838
2839         ENTRY;
2840
2841         LASSERT(dt != NULL);
2842         LASSERT(handle != NULL);
2843
2844         obj = osd_dt_obj(dt);
2845         LASSERT(osd_invariant(obj));
2846
2847         oh = container_of(handle, struct osd_thandle, ot_super);
2848         LASSERT(oh->ot_handle == NULL);
2849
2850         osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
2851                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2852
2853         osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2854                              osd_dto_credits_noquota[DTO_XATTR_SET]);
2855
2856         if (attr == NULL || obj->oo_inode == NULL)
2857                 RETURN(rc);
2858
2859         bspace   = obj->oo_inode->i_blocks << 9;
2860         bspace   = toqb(bspace);
2861
2862         /*
2863          * Changing ownership is always performed by the super user, so it
2864          * should not fail with EDQUOT unless required explicitly.
2865          *
2866          * We still need to call the osd_declare_qid() to calculate the journal
2867          * credits for updating quota accounting files and to trigger quota
2868          * space adjustment once the operation is completed.
2869          */
2870         if (attr->la_valid & LA_UID || attr->la_valid & LA_GID) {
2871                 /* USERQUOTA */
2872                 uid = i_uid_read(obj->oo_inode);
2873                 enforce = (attr->la_valid & LA_UID) && (attr->la_uid != uid);
2874                 rc = osd_declare_attr_qid(env, obj, oh, bspace, uid,
2875                                           attr->la_uid, enforce, USRQUOTA);
2876                 if (rc)
2877                         RETURN(rc);
2878
2879                 gid = i_gid_read(obj->oo_inode);
2880                 CDEBUG(D_QUOTA, "declare uid %d -> %d gid %d -> %d\n", uid,
2881                        attr->la_uid, gid, attr->la_gid);
2882                 enforce = (attr->la_valid & LA_GID) && (attr->la_gid != gid);
2883                 rc = osd_declare_attr_qid(env, obj, oh, bspace, gid,
2884                                           attr->la_gid, enforce, GRPQUOTA);
2885                 if (rc)
2886                         RETURN(rc);
2887
2888         }
2889 #ifdef HAVE_PROJECT_QUOTA
2890         if (attr->la_valid & LA_PROJID) {
2891                 __u32 projid = i_projid_read(obj->oo_inode);
2892
2893                 enforce = (attr->la_valid & LA_PROJID) &&
2894                                         (attr->la_projid != projid);
2895                 rc = osd_declare_attr_qid(env, obj, oh, bspace,
2896                                           (qid_t)projid, (qid_t)attr->la_projid,
2897                                           enforce, PRJQUOTA);
2898                 if (rc)
2899                         RETURN(rc);
2900         }
2901 #endif
2902         RETURN(rc);
2903 }
2904
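/*
 * osd_inode_setattr() - apply the valid fields of @attr to the in-core
 * @inode.  Setting the size is only honoured for regular files, and
 * i_blocks is never modified here since it is maintained by ldiskfs and
 * used directly for quota accounting.
 */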
2905 static int osd_inode_setattr(const struct lu_env *env,
2906                              struct inode *inode, const struct lu_attr *attr)
2907 {
2908         __u64 bits = attr->la_valid;
2909
2910         /* Only allow setting the size on regular files */
2911         if (!S_ISREG(inode->i_mode))
2912                 bits &= ~(LA_SIZE | LA_BLOCKS);
2913
2914         if (bits == 0)
2915                 return 0;
2916
2917         if (bits & LA_ATIME)
2918                 inode->i_atime = osd_inode_time(inode, attr->la_atime);
2919         if (bits & LA_CTIME)
2920                 inode->i_ctime = osd_inode_time(inode, attr->la_ctime);
2921         if (bits & LA_MTIME)
2922                 inode->i_mtime = osd_inode_time(inode, attr->la_mtime);
2923         if (bits & LA_SIZE) {
2924                 spin_lock(&inode->i_lock);
2925                 LDISKFS_I(inode)->i_disksize = attr->la_size;
2926                 i_size_write(inode, attr->la_size);
2927                 spin_unlock(&inode->i_lock);
2928         }
2929
2930         /*
2931          * OSD should not change "i_blocks", which is used by quota;
2932          * "i_blocks" should be changed by ldiskfs only.
2933          */
2934         if (bits & LA_MODE)
2935                 inode->i_mode = (inode->i_mode & S_IFMT) |
2936                                 (attr->la_mode & ~S_IFMT);
2937         if (bits & LA_UID)
2938                 i_uid_write(inode, attr->la_uid);
2939         if (bits & LA_GID)
2940                 i_gid_write(inode, attr->la_gid);
2941         if (bits & LA_PROJID)
2942                 i_projid_write(inode, attr->la_projid);
2943         if (bits & LA_NLINK)
2944                 set_nlink(inode, attr->la_nlink);
2945         if (bits & LA_RDEV)
2946                 inode->i_rdev = attr->la_rdev;
2947
2948         if (bits & LA_FLAGS) {
2949                 /* always keep S_NOCMTIME */
2950                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
2951                                  S_NOCMTIME;
2952 #if defined(S_ENCRYPTED)
2953                 /* Always remove S_ENCRYPTED, because ldiskfs must not be
2954                  * aware of encryption status. It is just stored into LMA
2955                  * so it can be forwarded to the client side.
2956                  */
2957                 inode->i_flags &= ~S_ENCRYPTED;
2958 #endif
2959                 /*
2960                  * Ext4 does not transfer the inherit flags from
2961                  * @inode->i_flags to the raw inode i_flags when writing
2962                  * flags, so we do it explicitly here.
2963                  */
2964                 if (attr->la_flags & LUSTRE_PROJINHERIT_FL)
2965                         LDISKFS_I(inode)->i_flags |= LUSTRE_PROJINHERIT_FL;
2966                 else
2967                         LDISKFS_I(inode)->i_flags &= ~LUSTRE_PROJINHERIT_FL;
2968         }
2969         return 0;
2970 }
2971
2972 #ifdef HAVE_PROJECT_QUOTA
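/*
 * osd_transfer_project() - move @inode to project @projid, transferring
 * the charged usage from the old project's dquot to the new one.  This
 * requires the ldiskfs "project" feature and an inode large enough to
 * hold i_projid.
 */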
2973 static int osd_transfer_project(struct inode *inode, __u32 projid,
2974                                 struct thandle *handle)
2975 {
2976         struct super_block *sb = inode->i_sb;
2977         struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2978         int err;
2979         kprojid_t kprojid;
2980         struct ldiskfs_iloc iloc;
2981         struct ldiskfs_inode *raw_inode;
2982         struct dquot *transfer_to[LDISKFS_MAXQUOTAS] = { };
2983
2984         if (!ldiskfs_has_feature_project(sb)) {
2985                 LASSERT(__kprojid_val(LDISKFS_I(inode)->i_projid)
2986                         == LDISKFS_DEF_PROJID);
2987                 if (projid != LDISKFS_DEF_PROJID)
2988                         return -EOPNOTSUPP;
2989                 else
2990                         return 0;
2991         }
2992
2993         if (LDISKFS_INODE_SIZE(sb) <= LDISKFS_GOOD_OLD_INODE_SIZE)
2994                 return -EOPNOTSUPP;
2995
2996         kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2997         if (projid_eq(kprojid, LDISKFS_I(inode)->i_projid))
2998                 return 0;
2999
3000         err = ldiskfs_get_inode_loc(inode, &iloc);
3001         if (err)
3002                 return err;
3003
3004         raw_inode = ldiskfs_raw_inode(&iloc);
3005         if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
3006                 struct osd_thandle *oh = container_of(handle,
3007                                                       struct osd_thandle,
3008                                                       ot_super);
3009                 /* Mark the inode dirty so that ldiskfs tries to expand
3010                  * the in-inode extra space and make room for i_projid.
3011                  */
3012                 ldiskfs_mark_inode_dirty(oh->ot_handle, inode);
3013                 if (!LDISKFS_FITS_IN_INODE(raw_inode, ei, i_projid)) {
3014                         err = -EOVERFLOW;
3015                         brelse(iloc.bh);
3016                         return err;
3017                 }
3018         }
3019         brelse(iloc.bh);
3020
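        /* Transfer the inode's charged usage to the new project's dquot,
         * if one could be obtained from dqget().
         */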
3021         dquot_initialize(inode);
3022         transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3023         if (transfer_to[PRJQUOTA]) {
3024                 lock_dquot_transfer(inode);
3025                 err = __dquot_transfer(inode, transfer_to);
3026                 unlock_dquot_transfer(inode);
3027                 dqput(transfer_to[PRJQUOTA]);
3028                 if (err)
3029                         return err;
3030         }
3031
3032         return err;
3033 }
3034 #endif
3035
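/*
 * osd_quota_transfer() - switch quota accounting for @inode when its
 * ownership is about to change: uid/gid changes go through
 * dquot_transfer(), project id changes through osd_transfer_project()
 * when project quota is supported.
 */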
3036 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr,
3037                               struct thandle *handle)
3038 {
3039         int rc;
3040
3041         if ((attr->la_valid & LA_UID && attr->la_uid != i_uid_read(inode)) ||
3042             (attr->la_valid & LA_GID && attr->la_gid != i_gid_read(inode))) {
3043                 struct iattr iattr;
3044
3045                 CDEBUG(D_QUOTA,
3046                        "executing dquot_transfer inode %ld uid %d -> %d gid %d -> %d\n",
3047                        inode->i_ino, i_uid_read(inode), attr->la_uid,
3048                        i_gid_read(inode), attr->la_gid);
3049
3050                 dquot_initialize(inode);
3051                 iattr.ia_valid = 0;
3052                 if (attr->la_valid & LA_UID)
3053                         iattr.ia_valid |= ATTR_UID;
3054                 if (attr->la_valid & LA_GID)
3055                         iattr.ia_valid |= ATTR_GID;
3056                 iattr.ia_uid = make_kuid(&init_user_ns, attr->la_uid);
3057                 iattr.ia_gid = make_kgid(&init_user_ns, attr->la_gid);
3058
3059                 lock_dquot_transfer(inode);
3060                 rc = dquot_transfer(inode, &iattr);
3061                 unlock_dquot_transfer(inode);
3062                 if (rc) {
3063                         CERROR("%s: quota transfer failed. Is quota enforcement enabled on the ldiskfs filesystem? rc = %d\n",
3064                                osd_ino2name(inode), rc);
3065                         return rc;
3066                 }
3067         }
3068
3069         /* Transfer the project id separately, if it is being changed */
3070         if (attr->la_valid & LA_PROJID &&
3071             attr->la_projid != i_projid_read(inode)) {
3072                 if (!projid_valid(make_kprojid(&init_user_ns, attr->la_projid)))
3073                         return -EINVAL;
3074 #ifdef HAVE_PROJECT_QUOTA
3075                 rc = osd_transfer_project(inode, attr->la_projid, handle);
3076 #else
3077                 rc = -ENOTSUPP;
3078 #endif
3079                 if (rc) {
3080                         CERROR("%s: quota transfer failed. Is project enforcement enabled on the ldiskfs filesystem? rc = %d\n",
3081                                osd_ino2name(inode), rc);
3082                         return rc;
3083                 }
3084         }
3085         return 0;
3086 }
3087
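/*
 * osd_attr_set() - apply the attribute changes declared earlier by
 * osd_declare_attr_set(): transfer quota if the ownership changes,
 * update the in-core inode, mark it dirty, and reflect any extra
 * Lustre flags in the LMA xattr.
 */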
3088 static int osd_attr_set(const struct lu_env *env,
3089                         struct dt_object *dt,
3090                         const struct lu_attr *attr,
3091                         struct thandle *handle)
3092 {
3093         struct osd_object *obj = osd_dt_obj(dt);
3094         struct inode *inode;
3095         int rc;
3096
3097         if (!dt_object_exists(dt))
3098                 return -ENOENT;
3099
3100         LASSERT(handle != NULL);
3101         LASSERT(!dt_object_remote(dt));
3102         LASSERT(osd_invariant(obj));
3103
3104         osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
3105
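        /* Fault injection: deliberately overwrite this FID's OI mapping
         * with bogus data so that a corrupted mapping can be exercised
         * (e.g. by OI scrub tests).
         */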
3106         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) &&
3107             !osd_obj2dev(obj)->od_is_ost) {
3108                 struct osd_thread_info *oti = osd_oti_get(env);
3109                 const struct lu_fid *fid0 = lu_object_fid(&dt->do_lu);
3110                 struct lu_fid *fid1 = &oti->oti_fid;
3111                 struct osd_inode_id *id = &oti->oti_id;
3112                 struct iam_path_descr *ipd;
3113                 struct iam_container *bag;
3114                 struct osd_thandle *oh;
3115                 int rc;
3116
3117                 fid_cpu_to_be(fid1, fid0);
3118                 memset(id, 1, sizeof(*id));
3119                 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
3120                                   fid0)->oi_dir.od_container;
3121                 ipd = osd_idx_ipd_get(env, bag);
3122                 if (unlikely(ipd == NULL))
3123                         RETURN(-ENOMEM);
3124
3125                 oh = container_of(handle, struct osd_thandle, ot_super);
3126                 rc = iam_update(oh->ot_handle, bag,
3127                                 (const struct iam_key *)fid1,
3128                                 (const struct iam_rec *)id, ipd);
3129                 osd_ipd_put(env, bag, ipd);
3130                 return(rc > 0 ? 0 : rc);
3131         }
3132
3133         inode = obj->oo_inode;
3134
3135         rc = osd_quota_transfer(inode, attr, handle);
3136         if (rc)
3137                 return rc;
3138
3139         spin_lock(&obj->oo_guard);
3140         rc = osd_inode_setattr(env, inode, attr);
3141         spin_unlock(&obj->oo_guard);
3142         if (rc != 0)
3143                 GOTO(out, rc);
3144
3145         osd_dirty_inode(inode, I_DIRTY_DATASYNC);
3146
3147         osd_trans_exec_check(env, handle, OSD_OT_ATTR_SET);
3148
3149         if (!(attr->la_valid & LA_FLAGS))
3150                 GOTO(out, rc);
3151
3152         /* Check whether there are extra flags that need to be set into LMA */
3153         if (attr->la_flags & LUSTRE_LMA_FL_MASKS) {
3154                 struct osd_thread_info *info = osd_oti_get(env);
3155                 struct lustre_mdt_attrs *lma = &info->oti_ost_attrs.loa_lma;
3156
3157                 LASSERT(!obj->oo_pfid_in_lma);
3158
3159                 rc = osd_get_lma(info, inode, &info->oti_obj_dentry,
3160                                  &info->oti_ost_attrs);
3161                 if (rc)
3162                         GOTO(out, rc);
3163
3164                 lma->lma_incompat |=
3165                         lustre_to_lma_flags(attr->la_flags);
3166                 lustre_lma_swab(lma);
3167
3168                 osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
3169
3170                 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA,
3171                                      lma, sizeof(*lma), XATTR_REPLACE);
3172                 if (rc != 0) {
3173                         struct osd_device *osd = osd_obj2dev(obj);
3174
3175                         CWARN("%s: set "DFID" lma flags %u failed: rc = %d\n",
3176                               osd_name(osd), PFID(lu_object_fid(&dt->do_lu)),
3177                               lma->lma_incompat, rc);
3178                 } else {
3179                         obj->oo_lma_flags =
3180