LU-3934 scrub: detect upgraded from 1.8 correctly
lustre/osd-ldiskfs/osd_handler.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osd/osd_handler.c
37  *
38  * Top-level entry points into osd module
39  *
40  * Author: Nikita Danilov <nikita@clusterfs.com>
41  *         Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
42  */
43
44 #define DEBUG_SUBSYSTEM S_MDS
45
46 #include <linux/module.h>
47
48 /* LUSTRE_VERSION_CODE */
49 #include <lustre_ver.h>
50 /* prerequisite for linux/xattr.h */
51 #include <linux/types.h>
52 /* prerequisite for linux/xattr.h */
53 #include <linux/fs.h>
54 /* XATTR_{REPLACE,CREATE} */
55 #include <linux/xattr.h>
56
57 /*
58  * struct OBD_{ALLOC,FREE}*()
59  * OBD_FAIL_CHECK
60  */
61 #include <obd_support.h>
62 /* struct ptlrpc_thread */
63 #include <lustre_net.h>
64 #include <lustre_fid.h>
65
66 #include "osd_internal.h"
67 #include "osd_dynlocks.h"
68
69 /* llo_* api support */
70 #include <md_object.h>
71 #include <lustre_quota.h>
72
73 int ldiskfs_pdo = 1;
74 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
75                 "ldiskfs with parallel directory operations");
76
77 int ldiskfs_track_declares_assert;
78 CFS_MODULE_PARM(ldiskfs_track_declares_assert, "i", int, 0644,
79                 "LBUG during tracking of declares");
80
81 /* Slab to allocate dynlocks */
82 struct kmem_cache *dynlock_cachep;
83
84 static struct lu_kmem_descr ldiskfs_caches[] = {
85         {
86                 .ckd_cache = &dynlock_cachep,
87                 .ckd_name  = "dynlock_cache",
88                 .ckd_size  = sizeof(struct dynlock_handle)
89         },
90         {
91                 .ckd_cache = NULL
92         }
93 };
94
95 static const char dot[] = ".";
96 static const char dotdot[] = "..";
97 static const char remote_obj_dir[] = "REM_OBJ_DIR";
98
99 static const struct lu_object_operations      osd_lu_obj_ops;
100 static const struct dt_object_operations      osd_obj_ops;
101 static const struct dt_object_operations      osd_obj_ea_ops;
102 static const struct dt_object_operations      osd_obj_otable_it_ops;
103 static const struct dt_index_operations       osd_index_iam_ops;
104 static const struct dt_index_operations       osd_index_ea_ops;
105
106 int osd_trans_declare_op2rb[] = {
107         [OSD_OT_ATTR_SET]       = OSD_OT_ATTR_SET,
108         [OSD_OT_PUNCH]          = OSD_OT_MAX,
109         [OSD_OT_XATTR_SET]      = OSD_OT_XATTR_SET,
110         [OSD_OT_CREATE]         = OSD_OT_DESTROY,
111         [OSD_OT_DESTROY]        = OSD_OT_CREATE,
112         [OSD_OT_REF_ADD]        = OSD_OT_REF_DEL,
113         [OSD_OT_REF_DEL]        = OSD_OT_REF_ADD,
114         [OSD_OT_WRITE]          = OSD_OT_WRITE,
115         [OSD_OT_INSERT]         = OSD_OT_DELETE,
116         [OSD_OT_DELETE]         = OSD_OT_INSERT,
117         [OSD_OT_UPDATE]         = OSD_OT_MAX,
118         [OSD_OT_QUOTA]          = OSD_OT_MAX,
119 };
120
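/*
 * Note: osd_trans_declare_op2rb[] above maps each declared operation type to
 * the operation whose reserved credits can be reused if the original one has
 * to be rolled back (a failed create is undone by a destroy, an insert by a
 * delete, and so on); OSD_OT_MAX marks operations with no rollback
 * accounting.  A minimal, hypothetical sketch of consulting the table
 * (illustration only, not part of the original file):
 *
 *	static inline int osd_op_to_rb(int op)
 *	{
 *		LASSERT(op < OSD_OT_MAX);
 *		return osd_trans_declare_op2rb[op];
 *	}
 */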
121 static int osd_has_index(const struct osd_object *obj)
122 {
123         return obj->oo_dt.do_index_ops != NULL;
124 }
125
126 static int osd_object_invariant(const struct lu_object *l)
127 {
128         return osd_invariant(osd_obj(l));
129 }
130
131 /*
132  * Concurrency: doesn't matter
133  */
134 static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
135 {
136         return osd_oti_get(env)->oti_r_locks > 0;
137 }
138
139 /*
140  * Concurrency: doesn't matter
141  */
142 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
143 {
144         struct osd_thread_info *oti = osd_oti_get(env);
145         return oti->oti_w_locks > 0 && o->oo_owner == env;
146 }
147
148 /*
149  * Concurrency: doesn't access mutable data
150  */
151 static int osd_root_get(const struct lu_env *env,
152                         struct dt_device *dev, struct lu_fid *f)
153 {
154         lu_local_obj_fid(f, OSD_FS_ROOT_OID);
155         return 0;
156 }
157
158 /*
159  * OSD object methods.
160  */
161
162 /*
163  * Concurrency: no concurrent access is possible that early in object
164  * life-cycle.
165  */
166 static struct lu_object *osd_object_alloc(const struct lu_env *env,
167                                           const struct lu_object_header *hdr,
168                                           struct lu_device *d)
169 {
170         struct osd_object *mo;
171
172         OBD_ALLOC_PTR(mo);
173         if (mo != NULL) {
174                 struct lu_object *l;
175
176                 l = &mo->oo_dt.do_lu;
177                 dt_object_init(&mo->oo_dt, NULL, d);
178                 mo->oo_dt.do_ops = &osd_obj_ea_ops;
179                 l->lo_ops = &osd_lu_obj_ops;
180                 init_rwsem(&mo->oo_sem);
181                 init_rwsem(&mo->oo_ext_idx_sem);
182                 spin_lock_init(&mo->oo_guard);
183                 return l;
184         } else {
185                 return NULL;
186         }
187 }
188
189 int osd_get_lma(struct osd_thread_info *info, struct inode *inode,
190                 struct dentry *dentry, struct lustre_mdt_attrs *lma)
191 {
192         int rc;
193
194         CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
195         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
196                              info->oti_mdt_attrs_old, LMA_OLD_SIZE);
197         if (rc > 0) {
198                 if ((void *)lma != (void *)info->oti_mdt_attrs_old)
199                         memcpy(lma, info->oti_mdt_attrs_old, sizeof(*lma));
200                 rc = 0;
201                 lustre_lma_swab(lma);
202                 /* Check LMA compatibility */
203                 if (lma->lma_incompat & ~LMA_INCOMPAT_SUPP) {
204                         CWARN("%.16s: unsupported incompat LMA feature(s) %#x "
205                               "for fid = "DFID", ino = %lu\n",
206                               LDISKFS_SB(inode->i_sb)->s_es->s_volume_name,
207                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
208                               PFID(&lma->lma_self_fid), inode->i_ino);
209                         rc = -EOPNOTSUPP;
210                 }
211         } else if (rc == 0) {
212                 rc = -ENODATA;
213         }
214
215         return rc;
216 }
217
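/*
 * A hedged caller-side sketch of osd_get_lma() (mirroring its use in
 * osd_iget_fid() below; "info" and "inode" are assumed to already exist;
 * illustration only, not part of the original file):
 *
 *	struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
 *	struct lu_fid fid;
 *	int rc;
 *
 *	rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
 *	if (rc == 0)
 *		fid = lma->lma_self_fid;	// LMA present, already swabbed
 *	else if (rc == -ENODATA)
 *		;				// no LMA EA: fall back to an IGIF fid
 */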
218 /*
219  * Retrieve an object's inode from the backend (ldiskfs) filesystem.
220  */
221 struct inode *osd_iget(struct osd_thread_info *info, struct osd_device *dev,
222                        struct osd_inode_id *id)
223 {
224         struct inode *inode = NULL;
225
226         inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
227         if (IS_ERR(inode)) {
228                 CDEBUG(D_INODE, "no inode: ino = %u, rc = %ld\n",
229                        id->oii_ino, PTR_ERR(inode));
230         } else if (id->oii_gen != OSD_OII_NOGEN &&
231                    inode->i_generation != id->oii_gen) {
232                 CDEBUG(D_INODE, "unmatched inode: ino = %u, gen0 = %u, "
233                        "gen1 = %u\n",
234                        id->oii_ino, id->oii_gen, inode->i_generation);
235                 iput(inode);
236                 inode = ERR_PTR(-ESTALE);
237         } else if (inode->i_nlink == 0) {
238                 /* Due to parallel readdir and unlink,
239                  * we can have a dead inode here. */
240                 CDEBUG(D_INODE, "stale inode: ino = %u\n", id->oii_ino);
241                 make_bad_inode(inode);
242                 iput(inode);
243                 inode = ERR_PTR(-ESTALE);
244         } else if (is_bad_inode(inode)) {
245                 CWARN("%.16s: bad inode: ino = %u\n",
246                 LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name, id->oii_ino);
247                 iput(inode);
248                 inode = ERR_PTR(-ENOENT);
249         } else {
250                 if (id->oii_gen == OSD_OII_NOGEN)
251                         osd_id_gen(id, inode->i_ino, inode->i_generation);
252
253                 /* Do not update file c/mtime in ldiskfs.
254                  * NB: we don't have any lock to protect this because we don't
255                  * have a reference on the osd_object yet, but contention with
256                  * another lookup + attr_set can't happen in the tiny window
257                  * between the check below and setting S_NOCMTIME. */
258                 if (!(inode->i_flags & S_NOCMTIME))
259                         inode->i_flags |= S_NOCMTIME;
260         }
261         return inode;
262 }
263
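/*
 * Typical osd_iget() usage: fill an osd_inode_id and let the generation check
 * (skipped when oii_gen is OSD_OII_NOGEN) reject stale mappings.  A minimal
 * sketch (illustration only; "ino" is an assumed inode number):
 *
 *	struct osd_inode_id *id = &info->oti_id;
 *	struct inode *inode;
 *
 *	osd_id_gen(id, ino, OSD_OII_NOGEN);	// accept any generation
 *	inode = osd_iget(info, dev, id);	// fills id->oii_gen on success
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */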
264 static struct inode *
265 osd_iget_fid(struct osd_thread_info *info, struct osd_device *dev,
266              struct osd_inode_id *id, struct lu_fid *fid)
267 {
268         struct lustre_mdt_attrs *lma   = &info->oti_mdt_attrs;
269         struct inode            *inode;
270         int                      rc;
271
272         inode = osd_iget(info, dev, id);
273         if (IS_ERR(inode))
274                 return inode;
275
276         rc = osd_get_lma(info, inode, &info->oti_obj_dentry, lma);
277         if (rc == 0) {
278                 *fid = lma->lma_self_fid;
279         } else if (rc == -ENODATA) {
280                 if (unlikely(inode == osd_sb(dev)->s_root->d_inode))
281                         lu_local_obj_fid(fid, OSD_FS_ROOT_OID);
282                 else
283                         lu_igif_build(fid, inode->i_ino, inode->i_generation);
284         } else {
285                 iput(inode);
286                 inode = ERR_PTR(rc);
287         }
288         return inode;
289 }
290
291 /**
292  * \retval +v: new filter_fid, does not contain self-fid
293  * \retval 0:  filter_fid_old, contains self-fid
294  * \retval -v: other failure cases
295  */
296 int osd_get_idif(struct osd_thread_info *info, struct inode *inode,
297                  struct dentry *dentry, struct lu_fid *fid)
298 {
299         struct filter_fid_old   *ff     = &info->oti_ff;
300         struct ost_id           *ostid  = &info->oti_ostid;
301         int                      rc;
302
303         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_FID, ff, sizeof(*ff));
304         if (rc == sizeof(*ff)) {
305                 rc = 0;
306                 ostid_set_seq(ostid, le64_to_cpu(ff->ff_seq));
307                 ostid_set_id(ostid, le64_to_cpu(ff->ff_objid));
308                 /* XXX: should use real OST index in the future. LU-3569 */
309                 ostid_to_fid(fid, ostid, 0);
310         } else if (rc == sizeof(struct filter_fid)) {
311                 rc = 1;
312         } else if (rc >= 0) {
313                 rc = -EINVAL;
314         }
315
316         return rc;
317 }
318
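/*
 * The return convention above mirrors how osd_check_lma() below consumes it:
 * a positive value means the new filter_fid format (no self-fid stored),
 * zero means the old filter_fid_old format and *fid was rebuilt from the
 * stored seq/objid.  A hedged caller sketch (illustration only;
 * "verify_fid" is a hypothetical consistency check):
 *
 *	rc = osd_get_idif(info, inode, dentry, fid);
 *	if (rc > 0)
 *		return 0;		// nothing to verify against
 *	else if (rc == 0)
 *		rc = verify_fid(fid);	// hypothetical comparison with expected FID
 */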
319 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
320 {
321         struct osd_thread_info  *info   = osd_oti_get(env);
322         struct lustre_mdt_attrs *lma    = &info->oti_mdt_attrs;
323         struct inode            *inode  = obj->oo_inode;
324         struct dentry           *dentry = &info->oti_obj_dentry;
325         struct lu_fid           *fid    = NULL;
326         int                      rc;
327         ENTRY;
328
329         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
330                 RETURN(0);
331
332         CLASSERT(LMA_OLD_SIZE >= sizeof(*lma));
333         rc = __osd_xattr_get(inode, dentry, XATTR_NAME_LMA,
334                              info->oti_mdt_attrs_old, LMA_OLD_SIZE);
335         if (rc == -ENODATA && !fid_is_igif(lu_object_fid(&obj->oo_dt.do_lu)) &&
336             osd_obj2dev(obj)->od_check_ff) {
337                 fid = &lma->lma_self_fid;
338                 rc = osd_get_idif(info, inode, dentry, fid);
339                 if (rc > 0)
340                         RETURN(0);
341         }
342
343         if (unlikely(rc == -ENODATA))
344                 RETURN(0);
345
346         if (rc < 0)
347                 RETURN(rc);
348
349         if (rc > 0) {
350                 rc = 0;
351                 lustre_lma_swab(lma);
352                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
353                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
354                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
355                               "fid = "DFID", ino = %lu\n",
356                               osd_obj2dev(obj)->od_svname,
357                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
358                               PFID(lu_object_fid(&obj->oo_dt.do_lu)),
359                               inode->i_ino);
360                         rc = -EOPNOTSUPP;
361                 } else if (!(lma->lma_compat & LMAC_NOT_IN_OI)) {
362                         fid = &lma->lma_self_fid;
363                 }
364         }
365
366         if (fid != NULL &&
367             unlikely(!lu_fid_eq(lu_object_fid(&obj->oo_dt.do_lu), fid))) {
368                 CDEBUG(D_INODE, "%s: FID "DFID" != self_fid "DFID"\n",
369                        osd_obj2dev(obj)->od_svname,
370                        PFID(lu_object_fid(&obj->oo_dt.do_lu)),
371                        PFID(&lma->lma_self_fid));
372                 rc = -EREMCHG;
373         }
374
375         RETURN(rc);
376 }
377
378 static int osd_fid_lookup(const struct lu_env *env, struct osd_object *obj,
379                           const struct lu_fid *fid,
380                           const struct lu_object_conf *conf)
381 {
382         struct osd_thread_info *info;
383         struct lu_device       *ldev   = obj->oo_dt.do_lu.lo_dev;
384         struct osd_device      *dev;
385         struct osd_idmap_cache *oic;
386         struct osd_inode_id    *id;
387         struct inode           *inode;
388         struct osd_scrub       *scrub;
389         struct scrub_file      *sf;
390         int                     result;
391         int                     saved  = 0;
392         bool                    in_oi  = false;
393         bool                    triggered = false;
394         ENTRY;
395
396         LINVRNT(osd_invariant(obj));
397         LASSERT(obj->oo_inode == NULL);
398         LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID, PFID(fid));
399
400         dev = osd_dev(ldev);
401         scrub = &dev->od_scrub;
402         sf = &scrub->os_file;
403         info = osd_oti_get(env);
404         LASSERT(info);
405         oic = &info->oti_cache;
406
407         if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
408                 RETURN(-ENOENT);
409
410         /* The object is either created as a locking anchor or is about to be
411          * created on disk. There is no need to call osd_oi_lookup() here
412          * because a FID should never be re-used; if it really is a duplicate
413          * FID for some unexpected reason, we will be able to detect it later
414          * in do_create->osd_oi_insert(). */
415         if (conf != NULL && conf->loc_flags & LOC_F_NEW)
416                 GOTO(out, result = 0);
417
418         /* Search order: 1. per-thread cache. */
419         if (lu_fid_eq(fid, &oic->oic_fid) &&
420             likely(oic->oic_dev == dev)) {
421                 id = &oic->oic_lid;
422                 goto iget;
423         }
424
425         id = &info->oti_id;
426         if (!cfs_list_empty(&scrub->os_inconsistent_items)) {
427                 /* Search order: 2. OI scrub pending list. */
428                 result = osd_oii_lookup(dev, fid, id);
429                 if (result == 0)
430                         goto iget;
431         }
432
433         /* Search order: 3. OI files. */
434         result = osd_oi_lookup(info, dev, fid, id, OI_CHECK_FLD);
435         if (result == -ENOENT) {
436                 if (!fid_is_norm(fid) ||
437                     fid_is_on_ost(info, dev, fid, OI_CHECK_FLD) ||
438                     !ldiskfs_test_bit(osd_oi_fid2idx(dev,fid),
439                                       sf->sf_oi_bitmap))
440                         GOTO(out, result = 0);
441
442                 goto trigger;
443         }
444
445         if (result != 0)
446                 GOTO(out, result);
447
448         in_oi = true;
449
450 iget:
451         inode = osd_iget(info, dev, id);
452         if (IS_ERR(inode)) {
453                 result = PTR_ERR(inode);
454                 if (result == -ENOENT || result == -ESTALE) {
455                         if (!in_oi) {
456                                 fid_zero(&oic->oic_fid);
457                                 GOTO(out, result = -ENOENT);
458                         }
459
460                         /* XXX: There are three possible cases:
461                          *      1. Backup/restore made the OI invalid.
462                          *      2. Someone unlinked the object but did NOT
463                          *         remove the OI mapping, e.g. by mounting the
464                          *         target device as ldiskfs and modifying it
465                          *         directly.
466                          *      3. Someone just removed the object between the
467                          *         former oi_lookup and the iget. It is normal.
468                          *
469                          *      It is difficult to distinguish the 2nd case from
470                          *      the 1st; the 1st is more common, so trigger OI scrub. */
471                         result = osd_oi_lookup(info, dev, fid, id, true);
472                         if (result == 0)
473                                 /* It is the case 1 or 2. */
474                                 goto trigger;
475                 } else if (result == -EREMCHG) {
476
477 trigger:
478                         if (unlikely(triggered))
479                                 GOTO(out, result = saved);
480
481                         triggered = true;
482                         if (thread_is_running(&scrub->os_thread)) {
483                                 result = -EINPROGRESS;
484                         } else if (!dev->od_noscrub) {
485                                 result = osd_scrub_start(dev);
486                                 LCONSOLE_ERROR("%.16s: trigger OI scrub by RPC "
487                                                "for "DFID", rc = %d [1]\n",
488                                                LDISKFS_SB(osd_sb(dev))->s_es->\
489                                                s_volume_name,PFID(fid), result);
490                                 if (result == 0 || result == -EALREADY)
491                                         result = -EINPROGRESS;
492                                 else
493                                         result = -EREMCHG;
494                         }
495
496                         /* We still have a chance to get a valid inode: for an
497                          * object referenced by a remote name entry, the object
498                          * on the local MDT is linked under the directory
499                          * "/REMOTE_PARENT_DIR" with its FID string as the name.
500                          *
501                          * We do not know whether the object for the given FID
502                          * is referenced by some remote name entry or not, and
503                          * especially for DNE II, a multiply-linked object may
504                          * have many name entries residing on many MDTs.
505                          *
506                          * To simplify the operation, the OSD does not try to
507                          * distinguish further; it just looks up
508                          * "/REMOTE_PARENT_DIR". Usually this only happens for
509                          * RPCs from other MDTs during OI scrub, or for client
510                          * RPCs with only a FID (FID-to-path, old clients). */
511                         saved = result;
512                         result = osd_lookup_in_remote_parent(info, dev,
513                                                              fid, id);
514                         if (result == 0) {
515                                 in_oi = false;
516                                 goto iget;
517                         }
518
519                         result = saved;
520                 }
521
522                 GOTO(out, result);
523         }
524
525         obj->oo_inode = inode;
526         LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
527
528         result = osd_check_lma(env, obj);
529         if (result != 0) {
530                 iput(inode);
531                 obj->oo_inode = NULL;
532                 if (result == -EREMCHG)
533                         goto trigger;
534
535                 GOTO(out, result);
536         }
537
538         obj->oo_compat_dot_created = 1;
539         obj->oo_compat_dotdot_created = 1;
540
541         if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
542                 GOTO(out, result = 0);
543
544         LASSERT(obj->oo_hl_head == NULL);
545         obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
546         if (obj->oo_hl_head == NULL) {
547                 obj->oo_inode = NULL;
548                 iput(inode);
549                 GOTO(out, result = -ENOMEM);
550         }
551         GOTO(out, result = 0);
552
553 out:
554         LINVRNT(osd_invariant(obj));
555         return result;
556 }
557
558 /*
559  * Concurrency: shouldn't matter.
560  */
561 static void osd_object_init0(struct osd_object *obj)
562 {
563         LASSERT(obj->oo_inode != NULL);
564         obj->oo_dt.do_body_ops = &osd_body_ops;
565         obj->oo_dt.do_lu.lo_header->loh_attr |=
566                 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
567 }
568
569 /*
570  * Concurrency: no concurrent access is possible that early in object
571  * life-cycle.
572  */
573 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
574                            const struct lu_object_conf *conf)
575 {
576         struct osd_object *obj = osd_obj(l);
577         int result;
578
579         LINVRNT(osd_invariant(obj));
580
581         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
582                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
583                 l->lo_header->loh_attr |= LOHA_EXISTS;
584                 return 0;
585         }
586
587         result = osd_fid_lookup(env, obj, lu_object_fid(l), conf);
588         obj->oo_dt.do_body_ops = &osd_body_ops_new;
589         if (result == 0 && obj->oo_inode != NULL)
590                 osd_object_init0(obj);
591
592         LINVRNT(osd_invariant(obj));
593         return result;
594 }
595
596 /*
597  * Concurrency: no concurrent access is possible that late in object
598  * life-cycle.
599  */
600 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
601 {
602         struct osd_object *obj = osd_obj(l);
603
604         LINVRNT(osd_invariant(obj));
605
606         dt_object_fini(&obj->oo_dt);
607         if (obj->oo_hl_head != NULL)
608                 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
609         OBD_FREE_PTR(obj);
610 }
611
612 /*
613  * Concurrency: no concurrent access is possible that late in object
614  * life-cycle.
615  */
616 static void osd_index_fini(struct osd_object *o)
617 {
618         struct iam_container *bag;
619
620         if (o->oo_dir != NULL) {
621                 bag = &o->oo_dir->od_container;
622                 if (o->oo_inode != NULL) {
623                         if (bag->ic_object == o->oo_inode)
624                                 iam_container_fini(bag);
625                 }
626                 OBD_FREE_PTR(o->oo_dir);
627                 o->oo_dir = NULL;
628         }
629 }
630
631 /*
632  * Concurrency: no concurrent access is possible that late in object
633  * life-cycle (for all existing callers, that is. New callers have to provide
634  * their own locking.)
635  */
636 static int osd_inode_unlinked(const struct inode *inode)
637 {
638         return inode->i_nlink == 0;
639 }
640
641 enum {
642         OSD_TXN_OI_DELETE_CREDITS    = 20,
643         OSD_TXN_INODE_DELETE_CREDITS = 20
644 };
645
646 /*
647  * Journal
648  */
649
650 #if OSD_THANDLE_STATS
651 /**
652  * Set time when the handle is allocated
653  */
654 static void osd_th_alloced(struct osd_thandle *oth)
655 {
656         oth->oth_alloced = cfs_time_current();
657 }
658
659 /**
660  * Set time when the handle started
661  */
662 static void osd_th_started(struct osd_thandle *oth)
663 {
664         oth->oth_started = cfs_time_current();
665 }
666
667 /**
668  * Helper function to convert time interval to microseconds packed in
669  * long int.
670  */
671 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
672 {
673         struct timeval val;
674
675         cfs_duration_usec(cfs_time_sub(end, start), &val);
676         return val.tv_sec * 1000000 + val.tv_usec;
677 }
678
679 /**
680  * Check whether we have been dealing with this handle for too long.
681  */
682 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
683                                 cfs_time_t alloced, cfs_time_t started,
684                                 cfs_time_t closed)
685 {
686         cfs_time_t now = cfs_time_current();
687
688         LASSERT(dev != NULL);
689
690         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
691                             interval_to_usec(alloced, started));
692         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
693                             interval_to_usec(started, closed));
694         lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
695                             interval_to_usec(closed, now));
696
697         if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
698                 CWARN("transaction handle %p was open for too long: "
699                       "now "CFS_TIME_T" ,"
700                       "alloced "CFS_TIME_T" ,"
701                       "started "CFS_TIME_T" ,"
702                       "closed "CFS_TIME_T"\n",
703                       oth, now, alloced, started, closed);
704                 libcfs_debug_dumpstack(NULL);
705         }
706 }
707
708 #define OSD_CHECK_SLOW_TH(oth, dev, expr)                               \
709 {                                                                       \
710         cfs_time_t __closed = cfs_time_current();                       \
711         cfs_time_t __alloced = oth->oth_alloced;                        \
712         cfs_time_t __started = oth->oth_started;                        \
713                                                                         \
714         expr;                                                           \
715         __osd_th_check_slow(oth, dev, __alloced, __started, __closed);  \
716 }
717
718 #else /* OSD_THANDLE_STATS */
719
720 #define osd_th_alloced(h)                  do {} while(0)
721 #define osd_th_started(h)                  do {} while(0)
722 #define OSD_CHECK_SLOW_TH(oth, dev, expr)  expr
723
724 #endif /* OSD_THANDLE_STATS */
725
726 /*
727  * Concurrency: doesn't access mutable data.
728  */
729 static int osd_param_is_not_sane(const struct osd_device *dev,
730                                  const struct thandle *th)
731 {
732         struct osd_thandle *oh = container_of(th, typeof(*oh), ot_super);
733
734         return oh->ot_credits > osd_journal(dev)->j_max_transaction_buffers;
735 }
736
737 /*
738  * Concurrency: shouldn't matter.
739  */
740 static void osd_trans_commit_cb(struct super_block *sb,
741                                 struct ldiskfs_journal_cb_entry *jcb, int error)
742 {
743         struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
744         struct thandle     *th  = &oh->ot_super;
745         struct lu_device   *lud = &th->th_dev->dd_lu_dev;
746         struct dt_txn_commit_cb *dcb, *tmp;
747
748         LASSERT(oh->ot_handle == NULL);
749
750         if (error)
751                 CERROR("transaction @0x%p commit error: %d\n", th, error);
752
753         dt_txn_hook_commit(th);
754
755         /* call per-transaction callbacks if any */
756         cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage) {
757                 LASSERTF(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC,
758                          "commit callback entry: magic=%x name='%s'\n",
759                          dcb->dcb_magic, dcb->dcb_name);
760                 cfs_list_del_init(&dcb->dcb_linkage);
761                 dcb->dcb_func(NULL, th, dcb, error);
762         }
763
764         lu_ref_del_at(&lud->ld_reference, &oh->ot_dev_link, "osd-tx", th);
765         lu_device_put(lud);
766         th->th_dev = NULL;
767
768         lu_context_exit(&th->th_ctx);
769         lu_context_fini(&th->th_ctx);
770         OBD_FREE_PTR(oh);
771 }
772
773 static struct thandle *osd_trans_create(const struct lu_env *env,
774                                         struct dt_device *d)
775 {
776         struct osd_thread_info *oti = osd_oti_get(env);
777         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
778         struct osd_thandle     *oh;
779         struct thandle         *th;
780         ENTRY;
781
782         /* no pending IO in this thread should be left over from a previous request */
783         LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
784
785         th = ERR_PTR(-ENOMEM);
786         OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
787         if (oh != NULL) {
788                 oh->ot_quota_trans = &oti->oti_quota_trans;
789                 memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
790                 th = &oh->ot_super;
791                 th->th_dev = d;
792                 th->th_result = 0;
793                 th->th_tags = LCT_TX_HANDLE;
794                 oh->ot_credits = 0;
795                 oti->oti_dev = osd_dt_dev(d);
796                 CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
797                 osd_th_alloced(oh);
798
799                 memset(oti->oti_declare_ops, 0,
800                                         sizeof(oti->oti_declare_ops));
801                 memset(oti->oti_declare_ops_rb, 0,
802                                         sizeof(oti->oti_declare_ops_rb));
803                 memset(oti->oti_declare_ops_cred, 0,
804                                         sizeof(oti->oti_declare_ops_cred));
805                 oti->oti_rollback = false;
806         }
807         RETURN(th);
808 }
809
810 /*
811  * Concurrency: shouldn't matter.
812  */
813 int osd_trans_start(const struct lu_env *env, struct dt_device *d,
814                     struct thandle *th)
815 {
816         struct osd_thread_info *oti = osd_oti_get(env);
817         struct osd_device  *dev = osd_dt_dev(d);
818         handle_t           *jh;
819         struct osd_thandle *oh;
820         int rc;
821
822         ENTRY;
823
824         LASSERT(current->journal_info == NULL);
825
826         oh = container_of0(th, struct osd_thandle, ot_super);
827         LASSERT(oh != NULL);
828         LASSERT(oh->ot_handle == NULL);
829
830         rc = dt_txn_hook_start(env, d, th);
831         if (rc != 0)
832                 GOTO(out, rc);
833
834         if (unlikely(osd_param_is_not_sane(dev, th))) {
835                 static unsigned long last_printed;
836                 static int last_credits;
837
838                 CWARN("%.16s: too many transaction credits (%d > %d)\n",
839                       LDISKFS_SB(osd_sb(dev))->s_es->s_volume_name,
840                       oh->ot_credits,
841                       osd_journal(dev)->j_max_transaction_buffers);
842                 CWARN("  create: %u/%u, delete: %u/%u, destroy: %u/%u\n",
843                       oti->oti_declare_ops[OSD_OT_CREATE],
844                       oti->oti_declare_ops_cred[OSD_OT_CREATE],
845                       oti->oti_declare_ops[OSD_OT_DELETE],
846                       oti->oti_declare_ops_cred[OSD_OT_DELETE],
847                       oti->oti_declare_ops[OSD_OT_DESTROY],
848                       oti->oti_declare_ops_cred[OSD_OT_DESTROY]);
849                 CWARN("  attr_set: %u/%u, xattr_set: %u/%u\n",
850                       oti->oti_declare_ops[OSD_OT_ATTR_SET],
851                       oti->oti_declare_ops_cred[OSD_OT_ATTR_SET],
852                       oti->oti_declare_ops[OSD_OT_XATTR_SET],
853                       oti->oti_declare_ops_cred[OSD_OT_XATTR_SET]);
854                 CWARN("  write: %u/%u, punch: %u/%u, quota %u/%u\n",
855                       oti->oti_declare_ops[OSD_OT_WRITE],
856                       oti->oti_declare_ops_cred[OSD_OT_WRITE],
857                       oti->oti_declare_ops[OSD_OT_PUNCH],
858                       oti->oti_declare_ops_cred[OSD_OT_PUNCH],
859                       oti->oti_declare_ops[OSD_OT_QUOTA],
860                       oti->oti_declare_ops_cred[OSD_OT_QUOTA]);
861                 CWARN("  insert: %u/%u, delete: %u/%u\n",
862                       oti->oti_declare_ops[OSD_OT_INSERT],
863                       oti->oti_declare_ops_cred[OSD_OT_INSERT],
864                       oti->oti_declare_ops[OSD_OT_DELETE],
865                       oti->oti_declare_ops_cred[OSD_OT_DELETE]);
866                 CWARN("  ref_add: %u/%u, ref_del: %u/%u\n",
867                       oti->oti_declare_ops[OSD_OT_REF_ADD],
868                       oti->oti_declare_ops_cred[OSD_OT_REF_ADD],
869                       oti->oti_declare_ops[OSD_OT_REF_DEL],
870                       oti->oti_declare_ops_cred[OSD_OT_REF_DEL]);
871
872                 if (last_credits != oh->ot_credits &&
873                     time_after(jiffies, last_printed + 60 * HZ)) {
874                         libcfs_debug_dumpstack(NULL);
875                         last_credits = oh->ot_credits;
876                         last_printed = jiffies;
877                 }
878                 /* XXX Limit the credits to 'max_transaction_buffers', and
879                  *     let the underlying filesystem catch the error if
880                  *     we really need so many credits.
881                  *
882                  *     This should be removed when we can calculate the
883                  *     credits precisely. */
884                 oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
885         }
886
887         /*
888          * XXX temporary stuff. Some abstraction layer should
889          * be used.
890          */
891         jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
892         osd_th_started(oh);
893         if (!IS_ERR(jh)) {
894                 oh->ot_handle = jh;
895                 LASSERT(oti->oti_txns == 0);
896                 lu_context_init(&th->th_ctx, th->th_tags);
897                 lu_context_enter(&th->th_ctx);
898
899                 lu_device_get(&d->dd_lu_dev);
900                 lu_ref_add_at(&d->dd_lu_dev.ld_reference, &oh->ot_dev_link,
901                               "osd-tx", th);
902                 oti->oti_txns++;
903                 rc = 0;
904         } else {
905                 rc = PTR_ERR(jh);
906         }
907 out:
908         RETURN(rc);
909 }
910
911 /*
912  * Concurrency: shouldn't matter.
913  */
914 static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
915 {
916         int                     rc = 0;
917         struct osd_thandle     *oh;
918         struct osd_thread_info *oti = osd_oti_get(env);
919         struct osd_iobuf       *iobuf = &oti->oti_iobuf;
920         struct qsd_instance    *qsd = oti->oti_dev->od_quota_slave;
921         ENTRY;
922
923         oh = container_of0(th, struct osd_thandle, ot_super);
924
925         if (qsd != NULL)
926                 /* inform the quota slave device that the transaction is
927                  * stopping */
928                 qsd_op_end(env, qsd, oh->ot_quota_trans);
929         oh->ot_quota_trans = NULL;
930
931         if (oh->ot_handle != NULL) {
932                 handle_t *hdl = oh->ot_handle;
933
934                 /*
935                  * add commit callback
936                  * notice we don't do this in osd_trans_start()
937                  * as the underlying transaction can change during truncate
938                  */
939                 ldiskfs_journal_callback_add(hdl, osd_trans_commit_cb,
940                                          &oh->ot_jcb);
941
942                 LASSERT(oti->oti_txns == 1);
943                 oti->oti_txns--;
944                 rc = dt_txn_hook_stop(env, th);
945                 if (rc != 0)
946                         CERROR("Failure in transaction hook: %d\n", rc);
947
948                 /* hook functions might modify th_sync */
949                 hdl->h_sync = th->th_sync;
950
951                 oh->ot_handle = NULL;
952                 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
953                                   rc = ldiskfs_journal_stop(hdl));
954                 if (rc != 0)
955                         CERROR("Failure to stop transaction: %d\n", rc);
956         } else {
957                 OBD_FREE_PTR(oh);
958         }
959
960         /* As we want journal IO and data IO to be concurrent, we don't block
961          * awaiting data IO completion in osd_do_bio(); instead we wait here
962          * once the transaction is submitted to the journal. All regular
963          * requests don't do direct IO (except read/write), so this wait_event
964          * becomes a no-op for them.
965          *
966          * IMPORTANT: we have to wait until any IO submitted by this thread is
967          * completed, otherwise the iobuf may be corrupted by a different request
968          */
969         wait_event(iobuf->dr_wait,
970                        cfs_atomic_read(&iobuf->dr_numreqs) == 0);
971         if (!rc)
972                 rc = iobuf->dr_error;
973
974         RETURN(rc);
975 }
976
977 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
978 {
979         struct osd_thandle *oh = container_of0(th, struct osd_thandle,
980                                                ot_super);
981
982         LASSERT(dcb->dcb_magic == TRANS_COMMIT_CB_MAGIC);
983         LASSERT(&dcb->dcb_func != NULL);
984         cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
985
986         return 0;
987 }
988
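/*
 * Commit callbacks added through osd_trans_cb_add() are run from
 * osd_trans_commit_cb() once the journal commits the transaction.  A hedged
 * sketch of what a caller is expected to set up (field names are from
 * struct dt_txn_commit_cb; "my_commit_cb" is a hypothetical function):
 *
 *	dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;	// checked in the commit path
 *	dcb->dcb_func  = my_commit_cb;		// invoked as (NULL, th, dcb, err)
 *	rc = dt_trans_cb_add(th, dcb);		// dispatches to osd_trans_cb_add()
 */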
989 /*
990  * Called just before the object is freed. Releases all resources except for
991  * the object itself (which is released by osd_object_free()).
992  *
993  * Concurrency: no concurrent access is possible that late in object
994  * life-cycle.
995  */
996 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
997 {
998         struct osd_object *obj   = osd_obj(l);
999         struct inode      *inode = obj->oo_inode;
1000
1001         LINVRNT(osd_invariant(obj));
1002
1003         /*
1004          * If the object is unlinked, remove the fid->ino mapping from the object index.
1005          */
1006
1007         osd_index_fini(obj);
1008         if (inode != NULL) {
1009                 struct qsd_instance     *qsd = osd_obj2dev(obj)->od_quota_slave;
1010                 qid_t                    uid = inode->i_uid;
1011                 qid_t                    gid = inode->i_gid;
1012
1013                 iput(inode);
1014                 obj->oo_inode = NULL;
1015
1016                 if (qsd != NULL) {
1017                         struct osd_thread_info  *info = osd_oti_get(env);
1018                         struct lquota_id_info   *qi = &info->oti_qi;
1019
1020                         /* Release granted quota to master if necessary */
1021                         qi->lqi_id.qid_uid = uid;
1022                         qsd_op_adjust(env, qsd, &qi->lqi_id, USRQUOTA);
1023
1024                         qi->lqi_id.qid_uid = gid;
1025                         qsd_op_adjust(env, qsd, &qi->lqi_id, GRPQUOTA);
1026                 }
1027         }
1028 }
1029
1030 /*
1031  * Concurrency: ->loo_object_release() is called under site spin-lock.
1032  */
1033 static void osd_object_release(const struct lu_env *env,
1034                                struct lu_object *l)
1035 {
1036 }
1037
1038 /*
1039  * Concurrency: shouldn't matter.
1040  */
1041 static int osd_object_print(const struct lu_env *env, void *cookie,
1042                             lu_printer_t p, const struct lu_object *l)
1043 {
1044         struct osd_object *o = osd_obj(l);
1045         struct iam_descr  *d;
1046
1047         if (o->oo_dir != NULL)
1048                 d = o->oo_dir->od_container.ic_descr;
1049         else
1050                 d = NULL;
1051         return (*p)(env, cookie,
1052                     LUSTRE_OSD_LDISKFS_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1053                     o, o->oo_inode,
1054                     o->oo_inode ? o->oo_inode->i_ino : 0UL,
1055                     o->oo_inode ? o->oo_inode->i_generation : 0,
1056                     d ? d->id_ops->id_name : "plain");
1057 }
1058
1059 /*
1060  * Concurrency: shouldn't matter.
1061  */
1062 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1063                struct obd_statfs *sfs)
1064 {
1065         struct osd_device  *osd = osd_dt_dev(d);
1066         struct super_block *sb = osd_sb(osd);
1067         struct kstatfs     *ksfs;
1068         int result = 0;
1069
1070         if (unlikely(osd->od_mnt == NULL))
1071                 return -EINPROGRESS;
1072
1073         /* osd_lproc.c calls this without an env; allocate ksfs for that case */
1074         if (unlikely(env == NULL)) {
1075                 OBD_ALLOC_PTR(ksfs);
1076                 if (ksfs == NULL)
1077                         return -ENOMEM;
1078         } else {
1079                 ksfs = &osd_oti_get(env)->oti_ksfs;
1080         }
1081
1082         spin_lock(&osd->od_osfs_lock);
1083         /* cache 1 second */
1084         if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
1085                 result = sb->s_op->statfs(sb->s_root, ksfs);
1086                 if (likely(result == 0)) { /* N.B. statfs can't really fail */
1087                         osd->od_osfs_age = cfs_time_current_64();
1088                         statfs_pack(&osd->od_statfs, ksfs);
1089                         if (sb->s_flags & MS_RDONLY)
1090                                 sfs->os_state = OS_STATE_READONLY;
1091                 }
1092         }
1093
1094         if (likely(result == 0))
1095                 *sfs = osd->od_statfs;
1096         spin_unlock(&osd->od_osfs_lock);
1097
1098         if (unlikely(env == NULL))
1099                 OBD_FREE_PTR(ksfs);
1100
1101         return result;
1102 }
1103
1104 /**
1105  * Estimate space needed for file creations. We assume the largest object ID,
1106  * 2^64 - 1, which encodes as a filename of 20 characters.
1107  * This is 28 bytes per object, i.e. 28MB for 1M objects ... not so bad.
1108  */
1109 #ifdef __LDISKFS_DIR_REC_LEN
1110 #define PER_OBJ_USAGE __LDISKFS_DIR_REC_LEN(20)
1111 #else
1112 #define PER_OBJ_USAGE LDISKFS_DIR_REC_LEN(20)
1113 #endif
1114
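/*
 * Worked example for the estimate above: in ldiskfs (as in ext4) a directory
 * entry is an 8-byte header plus the name, rounded up to a multiple of 4, so
 * a 20-character name costs (20 + 8 + 3) & ~3 = 28 bytes, i.e. the 28 bytes
 * per object quoted in the comment.
 */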
1115 /*
1116  * Concurrency: doesn't access mutable data.
1117  */
1118 static void osd_conf_get(const struct lu_env *env,
1119                          const struct dt_device *dev,
1120                          struct dt_device_param *param)
1121 {
1122         struct super_block *sb = osd_sb(osd_dt_dev(dev));
1123
1124         /*
1125          * XXX should be taken from not-yet-existing fs abstraction layer.
1126          */
1127         param->ddp_mnt = osd_dt_dev(dev)->od_mnt;
1128         param->ddp_max_name_len = LDISKFS_NAME_LEN;
1129         param->ddp_max_nlink    = LDISKFS_LINK_MAX;
1130         param->ddp_block_shift  = sb->s_blocksize_bits;
1131         param->ddp_mount_type     = LDD_MT_LDISKFS;
1132         param->ddp_maxbytes       = sb->s_maxbytes;
1133         /* Overhead estimate should be fairly accurate, so we really take a tiny
1134          * error margin which also avoids fragmenting the filesystem too much */
1135         param->ddp_grant_reserved = 2; /* end up to be 1.9% after conversion */
1136         /* inodes are statically allocated, so per-inode space consumption
1137          * is the space consumed by the directory entry */
1138         param->ddp_inodespace     = PER_OBJ_USAGE;
1139         /* per-fragment overhead to be used by the client code */
1140         param->ddp_grant_frag     = 6 * LDISKFS_BLOCK_SIZE(sb);
1141         param->ddp_mntopts      = 0;
1142         if (test_opt(sb, XATTR_USER))
1143                 param->ddp_mntopts |= MNTOPT_USERXATTR;
1144         if (test_opt(sb, POSIX_ACL))
1145                 param->ddp_mntopts |= MNTOPT_ACL;
1146
1147 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
1148         if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
1149                 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
1150         else
1151 #endif
1152                 param->ddp_max_ea_size = sb->s_blocksize;
1153
1154 }
1155
1156 /*
1157  * Concurrency: shouldn't matter.
1158  */
1159 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1160 {
1161         CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1162         return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1163 }
1164
1165 /**
1166  * Start commit for OSD device.
1167  *
1168  * An implementation of the dt_commit_async method for the OSD device.
1169  * Asynchronously starts the underlying fs sync and thereby a transaction
1170  * commit.
1171  *
1172  * \param env environment
1173  * \param d dt device
1174  *
1175  * \see dt_device_operations
1176  */
1177 static int osd_commit_async(const struct lu_env *env,
1178                             struct dt_device *d)
1179 {
1180         struct super_block *s = osd_sb(osd_dt_dev(d));
1181         ENTRY;
1182
1183         CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_LDISKFS_NAME);
1184         RETURN(s->s_op->sync_fs(s, 0));
1185 }
1186
1187 /*
1188  * Concurrency: shouldn't matter.
1189  */
1190
1191 static int osd_ro(const struct lu_env *env, struct dt_device *d)
1192 {
1193         struct super_block *sb = osd_sb(osd_dt_dev(d));
1194         struct block_device *dev = sb->s_bdev;
1195 #ifdef HAVE_DEV_SET_RDONLY
1196         struct block_device *jdev = LDISKFS_SB(sb)->journal_bdev;
1197         int rc = 0;
1198 #else
1199         int rc = -EOPNOTSUPP;
1200 #endif
1201         ENTRY;
1202
1203 #ifdef HAVE_DEV_SET_RDONLY
1204         CERROR("*** setting %s read-only ***\n", osd_dt_dev(d)->od_svname);
1205
1206         if (jdev && (jdev != dev)) {
1207                 CDEBUG(D_IOCTL | D_HA, "set journal dev %lx rdonly\n",
1208                        (long)jdev);
1209                 dev_set_rdonly(jdev);
1210         }
1211         CDEBUG(D_IOCTL | D_HA, "set dev %lx rdonly\n", (long)dev);
1212         dev_set_rdonly(dev);
1213 #else
1214         CERROR("%s: %lx CANNOT BE SET READONLY: rc = %d\n",
1215                osd_dt_dev(d)->od_svname, (long)dev, rc);
1216 #endif
1217         RETURN(rc);
1218 }
1219
1220 /*
1221  * Concurrency: serialization provided by callers.
1222  */
1223 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
1224                               int mode, unsigned long timeout, __u32 alg,
1225                               struct lustre_capa_key *keys)
1226 {
1227         struct osd_device *dev = osd_dt_dev(d);
1228         ENTRY;
1229
1230         dev->od_fl_capa = mode;
1231         dev->od_capa_timeout = timeout;
1232         dev->od_capa_alg = alg;
1233         dev->od_capa_keys = keys;
1234         RETURN(0);
1235 }
1236
1237 /**
1238  * Note: quota credits are not counted here.
1239  * If we mount with --data_journal we may need more.
1240  */
1241 const int osd_dto_credits_noquota[DTO_NR] = {
1242         /**
1243          * Insert/Delete.
1244          * INDEX_EXTRA_TRANS_BLOCKS(8) +
1245          * SINGLEDATA_TRANS_BLOCKS(8)
1246          * XXX Note: maybe IAM needs more, since IAM has more levels than
1247          *           the EXT3 htree.
1248          */
1249         [DTO_INDEX_INSERT]  = 16,
1250         [DTO_INDEX_DELETE]  = 16,
1251         /**
1252          * Used for OI scrub
1253          */
1254         [DTO_INDEX_UPDATE]  = 16,
1255         /**
1256          * Create an object. The same as creating an object in EXT3.
1257          * DATA_TRANS_BLOCKS(14) +
1258          * INDEX_EXTRA_BLOCKS(8) +
1259          * 3(inode bits, groups, GDT)
1260          */
1261         [DTO_OBJECT_CREATE] = 25,
1262         /**
1263          * XXX: real credits to be fixed
1264          */
1265         [DTO_OBJECT_DELETE] = 25,
1266         /**
1267          * Attr set credits (inode)
1268          */
1269         [DTO_ATTR_SET_BASE] = 1,
1270         /**
1271          * Xattr set. The same as xattr of EXT3.
1272          * DATA_TRANS_BLOCKS(14)
1273          * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
1274          * were also counted in. It is not clear why.
1275          */
1276         [DTO_XATTR_SET]     = 14,
1277         [DTO_LOG_REC]       = 14,
1278         /**
1279          * credits for inode change during write.
1280          */
1281         [DTO_WRITE_BASE]    = 3,
1282         /**
1283          * credits for single block write.
1284          */
1285         [DTO_WRITE_BLOCK]   = 14,
1286         /**
1287          * Attr set credits for chown.
1288          * This is extra credits for setattr, and it is null without quota
1289          */
1290         [DTO_ATTR_SET_CHOWN]= 0
1291 };
1292
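/*
 * These per-operation constants are what the declare phase adds to a
 * transaction handle before osd_trans_start() compares the total with the
 * journal limit.  A hedged sketch of the accounting pattern (see e.g.
 * osd_declare_attr_set() further below; "oh" is a struct osd_thandle):
 *
 *	osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
 *			     osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
 *	// oh->ot_credits accumulates and is later checked against
 *	// osd_journal(dev)->j_max_transaction_buffers in osd_param_is_not_sane()
 */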
1293 static const struct dt_device_operations osd_dt_ops = {
1294         .dt_root_get       = osd_root_get,
1295         .dt_statfs         = osd_statfs,
1296         .dt_trans_create   = osd_trans_create,
1297         .dt_trans_start    = osd_trans_start,
1298         .dt_trans_stop     = osd_trans_stop,
1299         .dt_trans_cb_add   = osd_trans_cb_add,
1300         .dt_conf_get       = osd_conf_get,
1301         .dt_sync           = osd_sync,
1302         .dt_ro             = osd_ro,
1303         .dt_commit_async   = osd_commit_async,
1304         .dt_init_capa_ctxt = osd_init_capa_ctxt,
1305 };
1306
1307 static void osd_object_read_lock(const struct lu_env *env,
1308                                  struct dt_object *dt, unsigned role)
1309 {
1310         struct osd_object *obj = osd_dt_obj(dt);
1311         struct osd_thread_info *oti = osd_oti_get(env);
1312
1313         LINVRNT(osd_invariant(obj));
1314
1315         LASSERT(obj->oo_owner != env);
1316         down_read_nested(&obj->oo_sem, role);
1317
1318         LASSERT(obj->oo_owner == NULL);
1319         oti->oti_r_locks++;
1320 }
1321
1322 static void osd_object_write_lock(const struct lu_env *env,
1323                                   struct dt_object *dt, unsigned role)
1324 {
1325         struct osd_object *obj = osd_dt_obj(dt);
1326         struct osd_thread_info *oti = osd_oti_get(env);
1327
1328         LINVRNT(osd_invariant(obj));
1329
1330         LASSERT(obj->oo_owner != env);
1331         down_write_nested(&obj->oo_sem, role);
1332
1333         LASSERT(obj->oo_owner == NULL);
1334         obj->oo_owner = env;
1335         oti->oti_w_locks++;
1336 }
1337
1338 static void osd_object_read_unlock(const struct lu_env *env,
1339                                    struct dt_object *dt)
1340 {
1341         struct osd_object *obj = osd_dt_obj(dt);
1342         struct osd_thread_info *oti = osd_oti_get(env);
1343
1344         LINVRNT(osd_invariant(obj));
1345
1346         LASSERT(oti->oti_r_locks > 0);
1347         oti->oti_r_locks--;
1348         up_read(&obj->oo_sem);
1349 }
1350
1351 static void osd_object_write_unlock(const struct lu_env *env,
1352                                     struct dt_object *dt)
1353 {
1354         struct osd_object *obj = osd_dt_obj(dt);
1355         struct osd_thread_info *oti = osd_oti_get(env);
1356
1357         LINVRNT(osd_invariant(obj));
1358
1359         LASSERT(obj->oo_owner == env);
1360         LASSERT(oti->oti_w_locks > 0);
1361         oti->oti_w_locks--;
1362         obj->oo_owner = NULL;
1363         up_write(&obj->oo_sem);
1364 }
1365
1366 static int osd_object_write_locked(const struct lu_env *env,
1367                                    struct dt_object *dt)
1368 {
1369         struct osd_object *obj = osd_dt_obj(dt);
1370
1371         LINVRNT(osd_invariant(obj));
1372
1373         return obj->oo_owner == env;
1374 }
1375
1376 static int capa_is_sane(const struct lu_env *env,
1377                         struct osd_device *dev,
1378                         struct lustre_capa *capa,
1379                         struct lustre_capa_key *keys)
1380 {
1381         struct osd_thread_info *oti = osd_oti_get(env);
1382         struct lustre_capa *tcapa = &oti->oti_capa;
1383         struct obd_capa *oc;
1384         int i, rc = 0;
1385         ENTRY;
1386
1387         oc = capa_lookup(dev->od_capa_hash, capa, 0);
1388         if (oc) {
1389                 if (capa_is_expired(oc)) {
1390                         DEBUG_CAPA(D_ERROR, capa, "expired");
1391                         rc = -ESTALE;
1392                 }
1393                 capa_put(oc);
1394                 RETURN(rc);
1395         }
1396
1397         if (capa_is_expired_sec(capa)) {
1398                 DEBUG_CAPA(D_ERROR, capa, "expired");
1399                 RETURN(-ESTALE);
1400         }
1401
1402         spin_lock(&capa_lock);
1403         for (i = 0; i < 2; i++) {
1404                 if (keys[i].lk_keyid == capa->lc_keyid) {
1405                         oti->oti_capa_key = keys[i];
1406                         break;
1407                 }
1408         }
1409         spin_unlock(&capa_lock);
1410
1411         if (i == 2) {
1412                 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1413                 RETURN(-ESTALE);
1414         }
1415
1416         rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1417         if (rc)
1418                 RETURN(rc);
1419
1420         if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1421                 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1422                 RETURN(-EACCES);
1423         }
1424
1425         oc = capa_add(dev->od_capa_hash, capa);
1426         capa_put(oc);
1427
1428         RETURN(0);
1429 }
1430
1431 int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1432                     struct lustre_capa *capa, __u64 opc)
1433 {
1434         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1435         struct osd_device *osd = osd_dev(dt->do_lu.lo_dev);
1436         struct lu_capainfo *lci;
1437         int rc;
1438
1439         if (!osd->od_fl_capa)
1440                 return 0;
1441
1442         if (capa == BYPASS_CAPA)
1443                 return 0;
1444
1445         lci = lu_capainfo_get(env);
1446         if (unlikely(lci == NULL))
1447                 return 0;
1448
1449         if (lci->lci_auth == LC_ID_NONE)
1450                 return 0;
1451
1452         if (capa == NULL) {
1453                 CERROR("%s: no capability provided for FID "DFID": rc = %d\n",
1454                        osd_name(osd), PFID(fid), -EACCES);
1455                 return -EACCES;
1456         }
1457
1458         if (!lu_fid_eq(fid, &capa->lc_fid)) {
1459                 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1460                            PFID(fid));
1461                 return -EACCES;
1462         }
1463
1464         if (!capa_opc_supported(capa, opc)) {
1465                 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1466                 return -EACCES;
1467         }
1468
1469         rc = capa_is_sane(env, osd, capa, osd->od_capa_keys);
1470         if (rc != 0) {
1471                 DEBUG_CAPA(D_ERROR, capa, "insane: rc = %d", rc);
1472                 return -EACCES;
1473         }
1474
1475         return 0;
1476 }
1477
1478 static struct timespec *osd_inode_time(const struct lu_env *env,
1479                                        struct inode *inode, __u64 seconds)
1480 {
1481         struct osd_thread_info  *oti = osd_oti_get(env);
1482         struct timespec         *t   = &oti->oti_time;
1483
1484         t->tv_sec = seconds;
1485         t->tv_nsec = 0;
1486         *t = timespec_trunc(*t, inode->i_sb->s_time_gran);
1487         return t;
1488 }
1489
1490
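/*
 * Copy the standard attributes from the backing inode into @attr and mark
 * the corresponding LA_* bits valid.  osd_attr_get() takes oo_guard around
 * the call so that the snapshot is self-consistent.
 */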
1491 static void osd_inode_getattr(const struct lu_env *env,
1492                               struct inode *inode, struct lu_attr *attr)
1493 {
1494         attr->la_valid      |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1495                                LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1496                                LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE |
1497                                LA_TYPE;
1498
1499         attr->la_atime      = LTIME_S(inode->i_atime);
1500         attr->la_mtime      = LTIME_S(inode->i_mtime);
1501         attr->la_ctime      = LTIME_S(inode->i_ctime);
1502         attr->la_mode       = inode->i_mode;
1503         attr->la_size       = i_size_read(inode);
1504         attr->la_blocks     = inode->i_blocks;
1505         attr->la_uid        = inode->i_uid;
1506         attr->la_gid        = inode->i_gid;
1507         attr->la_flags      = LDISKFS_I(inode)->i_flags;
1508         attr->la_nlink      = inode->i_nlink;
1509         attr->la_rdev       = inode->i_rdev;
1510         attr->la_blksize    = 1 << inode->i_blkbits;
1511         attr->la_blkbits    = inode->i_blkbits;
1512 }
1513
1514 static int osd_attr_get(const struct lu_env *env,
1515                         struct dt_object *dt,
1516                         struct lu_attr *attr,
1517                         struct lustre_capa *capa)
1518 {
1519         struct osd_object *obj = osd_dt_obj(dt);
1520
1521         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
1522         LINVRNT(osd_invariant(obj));
1523
1524         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1525                 return -EACCES;
1526
1527         spin_lock(&obj->oo_guard);
1528         osd_inode_getattr(env, obj->oo_inode, attr);
1529         spin_unlock(&obj->oo_guard);
1530         return 0;
1531 }
1532
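/*
 * Declare the credits needed for an attribute update.  When the UID or GID
 * is going to change, the inode and block usage must be transferred between
 * the old and the new owner, so osd_declare_qid() is called four times per
 * changed id: +1/-1 inode and +bspace/-bspace blocks.  -EDQUOT and
 * -EINPROGRESS from those declarations are ignored here, since the chown
 * itself is performed by the superuser (see the comment below).
 */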
1533 static int osd_declare_attr_set(const struct lu_env *env,
1534                                 struct dt_object *dt,
1535                                 const struct lu_attr *attr,
1536                                 struct thandle *handle)
1537 {
1538         struct osd_thandle     *oh;
1539         struct osd_object      *obj;
1540         struct osd_thread_info *info = osd_oti_get(env);
1541         struct lquota_id_info  *qi = &info->oti_qi;
1542         long long               bspace;
1543         int                     rc = 0;
1544         bool                    allocated;
1545         ENTRY;
1546
1547         LASSERT(dt != NULL);
1548         LASSERT(handle != NULL);
1549
1550         obj = osd_dt_obj(dt);
1551         LASSERT(osd_invariant(obj));
1552
1553         oh = container_of0(handle, struct osd_thandle, ot_super);
1554         LASSERT(oh->ot_handle == NULL);
1555
1556         osd_trans_declare_op(env, oh, OSD_OT_ATTR_SET,
1557                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
1558
1559         if (attr == NULL || obj->oo_inode == NULL)
1560                 RETURN(rc);
1561
1562         bspace   = obj->oo_inode->i_blocks;
1563         bspace <<= obj->oo_inode->i_sb->s_blocksize_bits;
1564         bspace   = toqb(bspace);
1565
1566         /* Changing ownership is always performed by the superuser, so it
1567          * should not fail with EDQUOT.
1568          *
1569          * We still need to call osd_declare_qid() to calculate the journal
1570          * credits for updating the quota accounting files and to trigger
1571          * the quota space adjustment once the operation is completed. */
1572         if ((attr->la_valid & LA_UID) != 0 &&
1573              attr->la_uid != obj->oo_inode->i_uid) {
1574                 qi->lqi_type = USRQUOTA;
1575
1576                 /* inode accounting */
1577                 qi->lqi_is_blk = false;
1578
1579                 /* one more inode for the new owner ... */
1580                 qi->lqi_id.qid_uid = attr->la_uid;
1581                 qi->lqi_space      = 1;
1582                 allocated = (attr->la_uid == 0) ? true : false;
1583                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1584                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1585                         rc = 0;
1586                 if (rc)
1587                         RETURN(rc);
1588
1589                 /* and one less inode for the current uid */
1590                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1591                 qi->lqi_space      = -1;
1592                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1593                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1594                         rc = 0;
1595                 if (rc)
1596                         RETURN(rc);
1597
1598                 /* block accounting */
1599                 qi->lqi_is_blk = true;
1600
1601                 /* more blocks for the new owner ... */
1602                 qi->lqi_id.qid_uid = attr->la_uid;
1603                 qi->lqi_space      = bspace;
1604                 allocated = (attr->la_uid == 0) ? true : false;
1605                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1606                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1607                         rc = 0;
1608                 if (rc)
1609                         RETURN(rc);
1610
1611                 /* and finally less blocks for the current owner */
1612                 qi->lqi_id.qid_uid = obj->oo_inode->i_uid;
1613                 qi->lqi_space      = -bspace;
1614                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1615                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1616                         rc = 0;
1617                 if (rc)
1618                         RETURN(rc);
1619         }
1620
1621         if (attr->la_valid & LA_GID &&
1622             attr->la_gid != obj->oo_inode->i_gid) {
1623                 qi->lqi_type = GRPQUOTA;
1624
1625                 /* inode accounting */
1626                 qi->lqi_is_blk = false;
1627
1628                 /* one more inode for the new group owner ... */
1629                 qi->lqi_id.qid_gid = attr->la_gid;
1630                 qi->lqi_space      = 1;
1631                 allocated = (attr->la_gid == 0) ? true : false;
1632                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1633                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1634                         rc = 0;
1635                 if (rc)
1636                         RETURN(rc);
1637
1638                 /* and one less inode for the current gid */
1639                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1640                 qi->lqi_space      = -1;
1641                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1642                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1643                         rc = 0;
1644                 if (rc)
1645                         RETURN(rc);
1646
1647                 /* block accounting */
1648                 qi->lqi_is_blk = true;
1649
1650                 /* more blocks for the new owner ... */
1651                 qi->lqi_id.qid_gid = attr->la_gid;
1652                 qi->lqi_space      = bspace;
1653                 allocated = (attr->la_gid == 0) ? true : false;
1654                 rc = osd_declare_qid(env, oh, qi, allocated, NULL);
1655                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1656                         rc = 0;
1657                 if (rc)
1658                         RETURN(rc);
1659
1660                 /* and finally less blocks for the current owner */
1661                 qi->lqi_id.qid_gid = obj->oo_inode->i_gid;
1662                 qi->lqi_space      = -bspace;
1663                 rc = osd_declare_qid(env, oh, qi, true, NULL);
1664                 if (rc == -EDQUOT || rc == -EINPROGRESS)
1665                         rc = 0;
1666                 if (rc)
1667                         RETURN(rc);
1668         }
1669
1670         RETURN(rc);
1671 }
1672
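/*
 * Apply the valid fields of @attr to the in-memory inode.  Only the bits
 * set in la_valid are copied; timestamps are truncated to the filesystem
 * time granularity, and i_blocks is deliberately left untouched (see the
 * #if 0 block below) because it is maintained by ldiskfs and used by quota.
 */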
1673 static int osd_inode_setattr(const struct lu_env *env,
1674                              struct inode *inode, const struct lu_attr *attr)
1675 {
1676         __u64 bits;
1677
1678         bits = attr->la_valid;
1679
1680         if (bits & LA_ATIME)
1681                 inode->i_atime  = *osd_inode_time(env, inode, attr->la_atime);
1682         if (bits & LA_CTIME)
1683                 inode->i_ctime  = *osd_inode_time(env, inode, attr->la_ctime);
1684         if (bits & LA_MTIME)
1685                 inode->i_mtime  = *osd_inode_time(env, inode, attr->la_mtime);
1686         if (bits & LA_SIZE) {
1687                 LDISKFS_I(inode)->i_disksize = attr->la_size;
1688                 i_size_write(inode, attr->la_size);
1689         }
1690
1691 #if 0
1692         /* OSD should not change "i_blocks" which is used by quota.
1693          * "i_blocks" should be changed by ldiskfs only. */
1694         if (bits & LA_BLOCKS)
1695                 inode->i_blocks = attr->la_blocks;
1696 #endif
1697         if (bits & LA_MODE)
1698                 inode->i_mode   = (inode->i_mode & S_IFMT) |
1699                         (attr->la_mode & ~S_IFMT);
1700         if (bits & LA_UID)
1701                 inode->i_uid    = attr->la_uid;
1702         if (bits & LA_GID)
1703                 inode->i_gid    = attr->la_gid;
1704         if (bits & LA_NLINK)
1705                 set_nlink(inode, attr->la_nlink);
1706         if (bits & LA_RDEV)
1707                 inode->i_rdev   = attr->la_rdev;
1708
1709         if (bits & LA_FLAGS) {
1710                 /* always keep S_NOCMTIME */
1711                 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1712                                  S_NOCMTIME;
1713         }
1714         return 0;
1715 }
1716
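/*
 * If the UID or GID is being changed, transfer the quota charges for the
 * inode from the old owner to the new one via ll_vfs_dq_transfer(); this is
 * a no-op when neither id actually changes.
 */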
1717 static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
1718 {
1719         if ((attr->la_valid & LA_UID && attr->la_uid != inode->i_uid) ||
1720             (attr->la_valid & LA_GID && attr->la_gid != inode->i_gid)) {
1721                 struct iattr    iattr;
1722                 int             rc;
1723
1724                 iattr.ia_valid = 0;
1725                 if (attr->la_valid & LA_UID)
1726                         iattr.ia_valid |= ATTR_UID;
1727                 if (attr->la_valid & LA_GID)
1728                         iattr.ia_valid |= ATTR_GID;
1729                 iattr.ia_uid = attr->la_uid;
1730                 iattr.ia_gid = attr->la_gid;
1731
1732                 rc = ll_vfs_dq_transfer(inode, &iattr);
1733                 if (rc) {
1734                         CERROR("%s: quota transfer failed: rc = %d. Is quota "
1735                                "enforcement enabled on the ldiskfs "
1736                                "filesystem?\n", inode->i_sb->s_id, rc);
1737                         return rc;
1738                 }
1739         }
1740         return 0;
1741 }
1742
1743 static int osd_attr_set(const struct lu_env *env,
1744                         struct dt_object *dt,
1745                         const struct lu_attr *attr,
1746                         struct thandle *handle,
1747                         struct lustre_capa *capa)
1748 {
1749         struct osd_object *obj = osd_dt_obj(dt);
1750         struct inode      *inode;
1751         int rc;
1752
1753         LASSERT(handle != NULL);
1754         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
1755         LASSERT(osd_invariant(obj));
1756
1757         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1758                 return -EACCES;
1759
1760         osd_trans_exec_op(env, handle, OSD_OT_ATTR_SET);
1761
1762         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING)) {
1763                 struct osd_thread_info  *oti  = osd_oti_get(env);
1764                 const struct lu_fid     *fid0 = lu_object_fid(&dt->do_lu);
1765                 struct lu_fid           *fid1 = &oti->oti_fid;
1766                 struct osd_inode_id     *id   = &oti->oti_id;
1767                 struct iam_path_descr   *ipd;
1768                 struct iam_container    *bag;
1769                 struct osd_thandle      *oh;
1770                 int                      rc;
1771
1772                 fid_cpu_to_be(fid1, fid0);
1773                 memset(id, 1, sizeof(*id));
1774                 bag = &osd_fid2oi(osd_dev(dt->do_lu.lo_dev),
1775                                   fid0)->oi_dir.od_container;
1776                 ipd = osd_idx_ipd_get(env, bag);
1777                 if (unlikely(ipd == NULL))
1778                         RETURN(-ENOMEM);
1779
1780                 oh = container_of0(handle, struct osd_thandle, ot_super);
1781                 rc = iam_update(oh->ot_handle, bag, (const struct iam_key *)fid1,
1782                                 (const struct iam_rec *)id, ipd);
1783                 osd_ipd_put(env, bag, ipd);
1784                 return(rc > 0 ? 0 : rc);
1785         }
1786
1787         inode = obj->oo_inode;
1788         ll_vfs_dq_init(inode);
1789
1790         rc = osd_quota_transfer(inode, attr);
1791         if (rc)
1792                 return rc;
1793
1794         spin_lock(&obj->oo_guard);
1795         rc = osd_inode_setattr(env, inode, attr);
1796         spin_unlock(&obj->oo_guard);
1797
1798         if (!rc)
1799                 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
1800         return rc;
1801 }
1802
1803 struct dentry *osd_child_dentry_get(const struct lu_env *env,
1804                                     struct osd_object *obj,
1805                                     const char *name, const int namelen)
1806 {
1807         return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
1808 }
1809
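/*
 * Allocate a new ldiskfs inode for @obj with the given @mode.  For
 * directories with parallel directory operations enabled an htree lock head
 * is allocated first.  The parent inode from the allocation hint (or the
 * filesystem root when no hint is given) is passed to
 * ldiskfs_create_inode(), presumably to guide inode placement; c/mtime
 * updates are disabled with S_NOCMTIME, and the new inode is flagged
 * LDISKFS_STATE_LUSTRE_NOSCRUB since it is known to be consistent.
 */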
1810 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1811                       umode_t mode, struct dt_allocation_hint *hint,
1812                       struct thandle *th)
1813 {
1814         int result;
1815         struct osd_device  *osd = osd_obj2dev(obj);
1816         struct osd_thandle *oth;
1817         struct dt_object   *parent = NULL;
1818         struct inode       *inode;
1819
1820         LINVRNT(osd_invariant(obj));
1821         LASSERT(obj->oo_inode == NULL);
1822         LASSERT(obj->oo_hl_head == NULL);
1823
1824         if (S_ISDIR(mode) && ldiskfs_pdo) {
1825                 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1826                 if (obj->oo_hl_head == NULL)
1827                         return -ENOMEM;
1828         }
1829
1830         oth = container_of(th, struct osd_thandle, ot_super);
1831         LASSERT(oth->ot_handle->h_transaction != NULL);
1832
1833         if (hint && hint->dah_parent)
1834                 parent = hint->dah_parent;
1835
1836         inode = ldiskfs_create_inode(oth->ot_handle,
1837                                      parent ? osd_dt_obj(parent)->oo_inode :
1838                                               osd_sb(osd)->s_root->d_inode,
1839                                      mode);
1840         if (!IS_ERR(inode)) {
1841                 /* Do not update the file c/mtime in ldiskfs.
1842                  * NB: no lock is needed because there is no contention at
1843                  * this early stage. */
1844                 inode->i_flags |= S_NOCMTIME;
1845
1846                 /* A newly created object is known to be consistent, so it
1847                  * is unnecessary to scrub it. */
1848                 ldiskfs_set_inode_state(inode, LDISKFS_STATE_LUSTRE_NOSCRUB);
1849                 obj->oo_inode = inode;
1850                 result = 0;
1851         } else {
1852                 if (obj->oo_hl_head != NULL) {
1853                         ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1854                         obj->oo_hl_head = NULL;
1855                 }
1856                 result = PTR_ERR(inode);
1857         }
1858         LINVRNT(osd_invariant(obj));
1859         return result;
1860 }
1861
1862 enum {
1863         OSD_NAME_LEN = 255
1864 };
1865
1866 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1867                      struct lu_attr *attr,
1868                      struct dt_allocation_hint *hint,
1869                      struct dt_object_format *dof,
1870                      struct thandle *th)
1871 {
1872         int result;
1873         struct osd_thandle *oth;
1874         __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1875
1876         LASSERT(S_ISDIR(attr->la_mode));
1877
1878         oth = container_of(th, struct osd_thandle, ot_super);
1879         LASSERT(oth->ot_handle->h_transaction != NULL);
1880         result = osd_mkfile(info, obj, mode, hint, th);
1881
1882         return result;
1883 }
1884
1885 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
1886                         struct lu_attr *attr,
1887                         struct dt_allocation_hint *hint,
1888                         struct dt_object_format *dof,
1889                         struct thandle *th)
1890 {
1891         int result;
1892         struct osd_thandle *oth;
1893         const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
1894
1895         __u32 mode = (attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX));
1896
1897         LASSERT(S_ISREG(attr->la_mode));
1898
1899         oth = container_of(th, struct osd_thandle, ot_super);
1900         LASSERT(oth->ot_handle->h_transaction != NULL);
1901
1902         result = osd_mkfile(info, obj, mode, hint, th);
1903         if (result == 0) {
1904                 LASSERT(obj->oo_inode != NULL);
1905                 if (feat->dif_flags & DT_IND_VARKEY)
1906                         result = iam_lvar_create(obj->oo_inode,
1907                                                  feat->dif_keysize_max,
1908                                                  feat->dif_ptrsize,
1909                                                  feat->dif_recsize_max,
1910                                                  oth->ot_handle);
1911                 else
1912                         result = iam_lfix_create(obj->oo_inode,
1913                                                  feat->dif_keysize_max,
1914                                                  feat->dif_ptrsize,
1915                                                  feat->dif_recsize_max,
1916                                                  oth->ot_handle);
1917
1918         }
1919         return result;
1920 }
1921
1922 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1923                      struct lu_attr *attr,
1924                      struct dt_allocation_hint *hint,
1925                      struct dt_object_format *dof,
1926                      struct thandle *th)
1927 {
1928         LASSERT(S_ISREG(attr->la_mode));
1929         return osd_mkfile(info, obj, (attr->la_mode &
1930                                (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1931 }
1932
1933 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1934                      struct lu_attr *attr,
1935                      struct dt_allocation_hint *hint,
1936                      struct dt_object_format *dof,
1937                      struct thandle *th)
1938 {
1939         LASSERT(S_ISLNK(attr->la_mode));
1940         return osd_mkfile(info, obj, (attr->la_mode &
1941                               (S_IFMT | S_IALLUGO | S_ISVTX)), hint, th);
1942 }
1943
1944 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1945                      struct lu_attr *attr,
1946                      struct dt_allocation_hint *hint,
1947                      struct dt_object_format *dof,
1948                      struct thandle *th)
1949 {
1950         umode_t mode = attr->la_mode & (S_IFMT | S_IALLUGO | S_ISVTX);
1951         int result;
1952
1953         LINVRNT(osd_invariant(obj));
1954         LASSERT(obj->oo_inode == NULL);
1955         LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1956                 S_ISFIFO(mode) || S_ISSOCK(mode));
1957
1958         result = osd_mkfile(info, obj, mode, hint, th);
1959         if (result == 0) {
1960                 LASSERT(obj->oo_inode != NULL);
1961                 /*
1962                  * This inode should be marked dirty for i_rdev.  Currently
1963                  * that is done in the osd_attr_init().
1964                  */
1965                 init_special_inode(obj->oo_inode, obj->oo_inode->i_mode,
1966                                    attr->la_rdev);
1967         }
1968         LINVRNT(osd_invariant(obj));
1969         return result;
1970 }
1971
1972 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1973                               struct lu_attr *,
1974                               struct dt_allocation_hint *hint,
1975                               struct dt_object_format *dof,
1976                               struct thandle *);
1977
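/*
 * Map a dt_format_type onto the helper that creates the corresponding
 * on-disk object, e.g. osd_create_type_f(DFT_DIR) returns osd_mkdir and
 * osd_create_type_f(DFT_INDEX) returns osd_mk_index.
 */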
1978 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1979 {
1980         osd_obj_type_f result;
1981
1982         switch (type) {
1983         case DFT_DIR:
1984                 result = osd_mkdir;
1985                 break;
1986         case DFT_REGULAR:
1987                 result = osd_mkreg;
1988                 break;
1989         case DFT_SYM:
1990                 result = osd_mksym;
1991                 break;
1992         case DFT_NODE:
1993                 result = osd_mknod;
1994                 break;
1995         case DFT_INDEX:
1996                 result = osd_mk_index;
1997                 break;
1998
1999         default:
2000                 LBUG();
2001                 break;
2002         }
2003         return result;
2004 }
2005
2006
2007 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
2008                         struct dt_object *parent, struct dt_object *child,
2009                         umode_t child_mode)
2010 {
2011         LASSERT(ah);
2012
2013         memset(ah, 0, sizeof(*ah));
2014         ah->dah_parent = parent;
2015         ah->dah_mode = child_mode;
2016 }
2017
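/*
 * Initialize the attributes of a just-created inode.  Bits that inode
 * creation already established (type, mode, rdev for non-device objects,
 * timestamps that match the current inode values) are masked out, so only
 * the remaining attributes are applied with osd_inode_setattr() after the
 * quota has been transferred to the requested owner.
 */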
2018 static void osd_attr_init(struct osd_thread_info *info, struct osd_object *obj,
2019                           struct lu_attr *attr, struct dt_object_format *dof)
2020 {
2021         struct inode   *inode = obj->oo_inode;
2022         __u64           valid = attr->la_valid;
2023         int             result;
2024
2025         attr->la_valid &= ~(LA_TYPE | LA_MODE);
2026
2027         if (dof->dof_type != DFT_NODE)
2028                 attr->la_valid &= ~LA_RDEV;
2029         if ((valid & LA_ATIME) && (attr->la_atime == LTIME_S(inode->i_atime)))
2030                 attr->la_valid &= ~LA_ATIME;
2031         if ((valid & LA_CTIME) && (attr->la_ctime == LTIME_S(inode->i_ctime)))
2032                 attr->la_valid &= ~LA_CTIME;
2033         if ((valid & LA_MTIME) && (attr->la_mtime == LTIME_S(inode->i_mtime)))
2034                 attr->la_valid &= ~LA_MTIME;
2035
2036         result = osd_quota_transfer(inode, attr);
2037         if (result)
2038                 return;
2039
2040         if (attr->la_valid != 0) {
2041                 result = osd_inode_setattr(info->oti_env, inode, attr);
2042                 /*
2043                  * The osd_inode_setattr() should always succeed here.  The
2044                  * only error that could be returned is EDQUOT when we are
2045                  * trying to change the UID or GID of the inode. However, this
2046                  * should not happen since quota enforcement is no longer
2047                  * enabled on ldiskfs (lquota takes care of it).
2048                  */
2049                 LASSERTF(result == 0, "%d", result);
2050                 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2051         }
2052
2053         attr->la_valid = valid;
2054 }
2055
2056 /**
2057  * Helper function for osd_object_create()
2058  *
2059  * \retval 0, on success
2060  */
2061 static int __osd_object_create(struct osd_thread_info *info,
2062                                struct osd_object *obj, struct lu_attr *attr,
2063                                struct dt_allocation_hint *hint,
2064                                struct dt_object_format *dof,
2065                                struct thandle *th)
2066 {
2067         int     result;
2068         __u32   umask;
2069
2070         /* drop the umask so that the permissions we pass are not affected */
2071         umask = current->fs->umask;
2072         current->fs->umask = 0;
2073
2074         result = osd_create_type_f(dof->dof_type)(info, obj, attr, hint, dof,
2075                                                   th);
2076         if (result == 0) {
2077                 osd_attr_init(info, obj, attr, dof);
2078                 osd_object_init0(obj);
2079                 /* bz 24037 */
2080                 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
2081                         unlock_new_inode(obj->oo_inode);
2082         }
2083
2084         /* restore previous umask value */
2085         current->fs->umask = umask;
2086
2087         return result;
2088 }
2089
2090 /**
2091  * Helper function for osd_object_create()
2092  *
2093  * \retval 0, on success
2094  */
2095 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
2096                            const struct lu_fid *fid, struct thandle *th)
2097 {
2098         struct osd_thread_info *info = osd_oti_get(env);
2099         struct osd_inode_id    *id   = &info->oti_id;
2100         struct osd_device      *osd  = osd_obj2dev(obj);
2101
2102         LASSERT(obj->oo_inode != NULL);
2103
2104         osd_id_gen(id, obj->oo_inode->i_ino, obj->oo_inode->i_generation);
2105         return osd_oi_insert(info, osd, fid, id, th, OI_CHECK_FLD);
2106 }
2107
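/*
 * Resolve which server a FID sequence belongs to: IDIF sequences map
 * directly to an OST index, sequences outside the FLD database are assumed
 * to be local to this MDT (using ss_node_id), and everything else is looked
 * up in the FLD server.
 */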
2108 int osd_fld_lookup(const struct lu_env *env, struct osd_device *osd,
2109                    obd_seq seq, struct lu_seq_range *range)
2110 {
2111         struct seq_server_site  *ss = osd_seq_site(osd);
2112         int                     rc;
2113
2114         if (fid_seq_is_idif(seq)) {
2115                 fld_range_set_ost(range);
2116                 range->lsr_index = idif_ost_idx(seq);
2117                 return 0;
2118         }
2119
2120         if (!fid_seq_in_fldb(seq)) {
2121                 fld_range_set_mdt(range);
2122                 if (ss != NULL)
2123                         /* FIXME: if ss is NULL, lsr_index is not supposed
2124                          * to be looked up at all */
2125                         range->lsr_index = ss->ss_node_id;
2126                 return 0;
2127         }
2128
2129         LASSERT(ss != NULL);
2130         fld_range_set_any(range);
2131         rc = fld_server_lookup(env, ss->ss_server_fld, seq, range);
2132         if (rc != 0) {
2133                 CERROR("%s: cannot find FLD range for "LPX64": rc = %d\n",
2134                        osd_name(osd), seq, rc);
2135         }
2136         return rc;
2137 }
2138
2139 /*
2140  * Concurrency: no external locking is necessary.
2141  */
2142 static int osd_declare_object_create(const struct lu_env *env,
2143                                      struct dt_object *dt,
2144                                      struct lu_attr *attr,
2145                                      struct dt_allocation_hint *hint,
2146                                      struct dt_object_format *dof,
2147                                      struct thandle *handle)
2148 {
2149         struct lu_seq_range     *range = &osd_oti_get(env)->oti_seq_range;
2150         struct osd_thandle      *oh;
2151         int                      rc;
2152         ENTRY;
2153
2154         LASSERT(handle != NULL);
2155
2156         oh = container_of0(handle, struct osd_thandle, ot_super);
2157         LASSERT(oh->ot_handle == NULL);
2158
2159         osd_trans_declare_op(env, oh, OSD_OT_CREATE,
2160                              osd_dto_credits_noquota[DTO_OBJECT_CREATE]);
2161         if (!fid_is_on_ost(osd_oti_get(env), osd_dt_dev(handle->th_dev),
2162                            lu_object_fid(&dt->do_lu), OI_CHECK_FLD))
2163                 /* Reusing an idle OI block may cause one additional OI
2164                  * block to be changed. */
2165                 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2166                                 osd_dto_credits_noquota[DTO_INDEX_INSERT] + 1);
2167
2168         /* If this is a directory, then we expect "." and ".." to be
2169          * inserted as well.  The single directory block always needs to be
2170          * created for the directory, so we can use DTO_WRITE_BASE here
2171          * (GDT, block bitmap, block); there is no danger of needing a tree
2172          * for the first block. */
2173         if (attr && S_ISDIR(attr->la_mode)) {
2174                 osd_trans_declare_op(env, oh, OSD_OT_INSERT,
2175                                      osd_dto_credits_noquota[DTO_WRITE_BASE]);
2176                 osd_trans_declare_op(env, oh, OSD_OT_INSERT, 0);
2177         }
2178
2179         if (!attr)
2180                 RETURN(0);
2181
2182         rc = osd_declare_inode_qid(env, attr->la_uid, attr->la_gid, 1, oh,
2183                                    false, false, NULL, false);
2184         if (rc != 0)
2185                 RETURN(rc);
2186
2187         /* The FLD lookup is done inside declare, and the result is added
2188          * to the FLD cache, so the subsequent FLD lookup inside insert does
2189          * not need to send an RPC any more; this avoids sending an RPC
2190          * while holding the transaction open. */
2191         if (fid_is_norm(lu_object_fid(&dt->do_lu)) &&
2192                 !fid_is_last_id(lu_object_fid(&dt->do_lu)))
2193                 osd_fld_lookup(env, osd_dt_dev(handle->th_dev),
2194                                fid_seq(lu_object_fid(&dt->do_lu)), range);
2195
2196
2197         RETURN(rc);
2198 }
2199
2200 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
2201                              struct lu_attr *attr,
2202                              struct dt_allocation_hint *hint,
2203                              struct dt_object_format *dof,
2204                              struct thandle *th)
2205 {
2206         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
2207         struct osd_object      *obj    = osd_dt_obj(dt);
2208         struct osd_thread_info *info   = osd_oti_get(env);
2209         int result;
2210
2211         ENTRY;
2212
2213         LINVRNT(osd_invariant(obj));
2214         LASSERT(!dt_object_exists(dt) && !dt_object_remote(dt));
2215         LASSERT(osd_write_locked(env, obj));
2216         LASSERT(th != NULL);
2217
2218         if (unlikely(fid_is_acct(fid)))
2219                 /* Quota files can't be created from the kernel any more;
2220                  * 'tune2fs -O quota' will take care of creating them */
2221                 RETURN(-EPERM);
2222
2223         osd_trans_exec_op(env, th, OSD_OT_CREATE);
2224         osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2225
2226         result = __osd_object_create(info, obj, attr, hint, dof, th);
2227         if (result == 0)
2228                 result = __osd_oi_insert(env, obj, fid, th);
2229
2230         LASSERT(ergo(result == 0,
2231                      dt_object_exists(dt) && !dt_object_remote(dt)));
2232
2233         LASSERT(osd_invariant(obj));
2234         RETURN(result);
2235 }
2236
2237 /**
2238  * Called to destroy on-disk representation of the object
2239  *
2240  * Concurrency: must be locked
2241  */
2242 static int osd_declare_object_destroy(const struct lu_env *env,
2243                                       struct dt_object *dt,
2244                                       struct thandle *th)
2245 {
2246         struct osd_object  *obj = osd_dt_obj(dt);
2247         struct inode       *inode = obj->oo_inode;
2248         struct osd_thandle *oh;
2249         int                 rc;
2250         ENTRY;
2251
2252         oh = container_of0(th, struct osd_thandle, ot_super);
2253         LASSERT(oh->ot_handle == NULL);
2254         LASSERT(inode);
2255
2256         osd_trans_declare_op(env, oh, OSD_OT_DESTROY,
2257                              osd_dto_credits_noquota[DTO_OBJECT_DELETE]);
2258         /* Recycling an idle OI leaf may cause three additional OI blocks
2259          * to be changed. */
2260         osd_trans_declare_op(env, oh, OSD_OT_DELETE,
2261                              osd_dto_credits_noquota[DTO_INDEX_DELETE] + 3);
2262         /* one less inode */
2263         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, -1, oh,
2264                                    false, true, NULL, false);
2265         if (rc)
2266                 RETURN(rc);
2267         /* data to be truncated */
2268         rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
2269                                    true, true, NULL, false);
2270         RETURN(rc);
2271 }
2272
2273 static int osd_object_destroy(const struct lu_env *env,
2274                               struct dt_object *dt,
2275                               struct thandle *th)
2276 {
2277         const struct lu_fid    *fid = lu_object_fid(&dt->do_lu);
2278         struct osd_object      *obj = osd_dt_obj(dt);
2279         struct inode           *inode = obj->oo_inode;
2280         struct osd_device      *osd = osd_obj2dev(obj);
2281         struct osd_thandle     *oh;
2282         int                     result;
2283         ENTRY;
2284
2285         oh = container_of0(th, struct osd_thandle, ot_super);
2286         LASSERT(oh->ot_handle);
2287         LASSERT(inode);
2288         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2289
2290         if (unlikely(fid_is_acct(fid)))
2291                 RETURN(-EPERM);
2292
2293         if (S_ISDIR(inode->i_mode)) {
2294                 LASSERT(osd_inode_unlinked(inode) || inode->i_nlink == 1);
2295                 /* This checks for and deletes the inode from the remote
2296                  * parent; how to optimize it? unlink performance impact XXX */
2297                 result = osd_delete_from_remote_parent(env, osd, obj, oh);
2298                 if (result != 0 && result != -ENOENT) {
2299                         CERROR("%s: delete inode "DFID": rc = %d\n",
2300                                osd_name(osd), PFID(fid), result);
2301                 }
2302                 spin_lock(&obj->oo_guard);
2303                 clear_nlink(inode);
2304                 spin_unlock(&obj->oo_guard);
2305                 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2306         }
2307
2308         osd_trans_exec_op(env, th, OSD_OT_DESTROY);
2309
2310         result = osd_oi_delete(osd_oti_get(env), osd, fid, th, OI_CHECK_FLD);
2311
2312         /* XXX: add to ext3 orphan list */
2313         /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2314
2315         /* not needed in the cache anymore */
2316         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
2317
2318         RETURN(0);
2319 }
2320
2321 /**
2322  * Put the FID into lustre_mdt_attrs, and then store the structure in
2323  * the inode's EA.  The FID should not be altered during the lifetime
2324  * of the inode.
2325  *
2326  * \retval +ve, on success
2327  * \retval -ve, on error
2328  *
2329  * FIXME: it would be good to use ldiskfs_xattr_set_handle() here
2330  */
2331 int osd_ea_fid_set(struct osd_thread_info *info, struct inode *inode,
2332                    const struct lu_fid *fid, __u32 compat, __u32 incompat)
2333 {
2334         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
2335         int                      rc;
2336         ENTRY;
2337
2338         if (OBD_FAIL_CHECK(OBD_FAIL_FID_INLMA))
2339                 RETURN(0);
2340
2341         lustre_lma_init(lma, fid, compat, incompat);
2342         lustre_lma_swab(lma);
2343
2344         rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma, sizeof(*lma),
2345                              XATTR_CREATE);
2346         /* LMA may already exist, but we need to check that all the
2347          * desired compat/incompat flags have been added. */
2348         if (unlikely(rc == -EEXIST)) {
2349                 if (compat == 0 && incompat == 0)
2350                         RETURN(0);
2351
2352                 rc = __osd_xattr_get(inode, &info->oti_obj_dentry,
2353                                      XATTR_NAME_LMA, info->oti_mdt_attrs_old,
2354                                      LMA_OLD_SIZE);
2355                 if (rc <= 0)
2356                         RETURN(-EINVAL);
2357
2358                 lustre_lma_swab(lma);
2359                 if (!(~lma->lma_compat & compat) &&
2360                     !(~lma->lma_incompat & incompat))
2361                         RETURN(0);
2362
2363                 lma->lma_compat |= compat;
2364                 lma->lma_incompat |= incompat;
2365                 lustre_lma_swab(lma);
2366                 rc = __osd_xattr_set(info, inode, XATTR_NAME_LMA, lma,
2367                                      sizeof(*lma), XATTR_REPLACE);
2368         }
2369
2370         RETURN(rc);
2371 }
2372
2373 /**
2374  * ldiskfs supports a FID in the dirent; it is passed in dentry->d_fsdata.
2375  * Lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2376  * For compatibility with the 1.8 ldiskfs driver we need a magic number
2377  * at the start of the FID data.
2378  * struct ldiskfs_dentry_param is used only to pass the FID from OSD to
2379  * ldiskfs; it is an in-memory API.
2380  */
2381 void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2382                                   const struct dt_rec *fid)
2383 {
2384         if (!fid_is_namespace_visible((const struct lu_fid *)fid) ||
2385             OBD_FAIL_CHECK(OBD_FAIL_FID_IGIF)) {
2386                 param->edp_magic = 0;
2387                 return;
2388         }
2389
2390         param->edp_magic = LDISKFS_LUFID_MAGIC;
2391         param->edp_len =  sizeof(struct lu_fid) + 1;
2392         fid_cpu_to_be((struct lu_fid *)param->edp_data, (struct lu_fid *)fid);
2393 }
2394
2395 /**
2396  * Try to read the FID from the inode EA.
2397  *
2398  * \param fid returned object FID
2399  *
2400  * \retval 0 on success
2401  */
2402 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2403                           __u32 ino, struct lu_fid *fid,
2404                           struct osd_inode_id *id)
2405 {
2406         struct osd_thread_info *info  = osd_oti_get(env);
2407         struct inode           *inode;
2408         ENTRY;
2409
2410         osd_id_gen(id, ino, OSD_OII_NOGEN);
2411         inode = osd_iget_fid(info, osd_obj2dev(obj), id, fid);
2412         if (IS_ERR(inode))
2413                 RETURN(PTR_ERR(inode));
2414
2415         iput(inode);
2416         RETURN(0);
2417 }
2418
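/*
 * Insert the "." and ".." entries into a new directory.  The parent FID is
 * packed into an ldiskfs_dentry_param so that ".." carries the FID-in-dirent
 * data, while "." is added without a FID (edp_magic == 0).
 */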
2419 static int osd_add_dot_dotdot_internal(struct osd_thread_info *info,
2420                                         struct inode *dir,
2421                                         struct inode  *parent_dir,
2422                                         const struct dt_rec *dot_fid,
2423                                         const struct dt_rec *dot_dot_fid,
2424                                         struct osd_thandle *oth)
2425 {
2426         struct ldiskfs_dentry_param *dot_ldp;
2427         struct ldiskfs_dentry_param *dot_dot_ldp;
2428
2429         dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
2430         osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
2431
2432         dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
2433         dot_ldp->edp_magic = 0;
2434         return ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
2435                                         dir, dot_ldp, dot_dot_ldp);
2436 }
2437
2438 /**
2439  * Create a local agent inode for a remote entry
2440  */
2441 static struct inode *osd_create_local_agent_inode(const struct lu_env *env,
2442                                                   struct osd_device *osd,
2443                                                   struct osd_object *pobj,
2444                                                   const struct lu_fid *fid,
2445                                                   struct thandle *th)
2446 {
2447         struct osd_thread_info  *info = osd_oti_get(env);
2448         struct inode            *local;
2449         struct osd_thandle      *oh;
2450         int                     rc;
2451         ENTRY;
2452
2453         LASSERT(th);
2454         oh = container_of(th, struct osd_thandle, ot_super);
2455         LASSERT(oh->ot_handle->h_transaction != NULL);
2456
2457         /* FIXME: the index insert API needs to know the mode of
2458          * the remote object.  Just use S_IFDIR for now */
2459         local = ldiskfs_create_inode(oh->ot_handle, pobj->oo_inode, S_IFDIR);
2460         if (IS_ERR(local)) {
2461                 CERROR("%s: create local error %d\n", osd_name(osd),
2462                        (int)PTR_ERR(local));
2463                 RETURN(local);
2464         }
2465
2466         /* Set special LMA flag for local agent inode */
2467         rc = osd_ea_fid_set(info, local, fid, 0, LMAI_AGENT);
2468         if (rc != 0) {
2469                 CERROR("%s: set LMA for "DFID" remote inode failed: rc = %d\n",
2470                        osd_name(osd), PFID(fid), rc);
2471                 RETURN(ERR_PTR(rc));
2472         }
2473
2474         rc = osd_add_dot_dotdot_internal(info, local, pobj->oo_inode,
2475                 (const struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
2476                 (const struct dt_rec *)fid, oh);
2477         if (rc != 0) {
2478                 CERROR("%s: "DFID" add dot dotdot error: rc = %d\n",
2479                         osd_name(osd), PFID(fid), rc);
2480                 RETURN(ERR_PTR(rc));
2481         }
2482
2483         RETURN(local);
2484 }
2485
2486 /**
2487  * Delete the local agent inode for a remote entry
2488  */
2489 static int osd_delete_local_agent_inode(const struct lu_env *env,
2490                                         struct osd_device *osd,
2491                                         const struct lu_fid *fid,
2492                                         __u32 ino, struct osd_thandle *oh)
2493 {
2494         struct osd_thread_info  *oti = osd_oti_get(env);
2495         struct osd_inode_id     *id = &oti->oti_id;
2496         struct inode            *inode;
2497         ENTRY;
2498
2499         id->oii_ino = le32_to_cpu(ino);
2500         id->oii_gen = OSD_OII_NOGEN;
2501         inode = osd_iget(oti, osd, id);
2502         if (IS_ERR(inode)) {
2503                 CERROR("%s: iget error "DFID" id %u:%u\n", osd_name(osd),
2504                        PFID(fid), id->oii_ino, id->oii_gen);
2505                 RETURN(PTR_ERR(inode));
2506         }
2507
2508         clear_nlink(inode);
2509         mark_inode_dirty(inode);
2510         CDEBUG(D_INODE, "%s: delete remote inode "DFID" %lu\n",
2511                 osd_name(osd), PFID(fid), inode->i_ino);
2512         iput(inode);
2513         RETURN(0);
2514 }
2515
2516 /**
2517  * OSD layer object create function for interoperability mode (b11826).
2518  * This is mostly similar to osd_object_create(); the only difference is
2519  * that the FID is also inserted into the inode EA here.
2520  *
2521  * \retval   0, on success
2522  * \retval -ve, on error
2523  */
2524 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2525                                 struct lu_attr *attr,
2526                                 struct dt_allocation_hint *hint,
2527                                 struct dt_object_format *dof,
2528                                 struct thandle *th)
2529 {
2530         const struct lu_fid    *fid    = lu_object_fid(&dt->do_lu);
2531         struct osd_object      *obj    = osd_dt_obj(dt);
2532         struct osd_thread_info *info   = osd_oti_get(env);
2533         int                     result;
2534
2535         ENTRY;
2536
2537         LASSERT(osd_invariant(obj));
2538         LASSERT(!dt_object_exists(dt) && !dt_object_remote(dt));
2539         LASSERT(osd_write_locked(env, obj));
2540         LASSERT(th != NULL);
2541
2542         if (unlikely(fid_is_acct(fid)))
2543                 /* Quota files can't be created from the kernel any more;
2544                  * 'tune2fs -O quota' will take care of creating them */
2545                 RETURN(-EPERM);
2546
2547         osd_trans_exec_op(env, th, OSD_OT_CREATE);
2548         osd_trans_declare_rb(env, th, OSD_OT_REF_ADD);
2549
2550         result = __osd_object_create(info, obj, attr, hint, dof, th);
2551         if (result == 0)
2552                 result = osd_ea_fid_set(info, obj->oo_inode, fid,
2553                                 fid_is_on_ost(info, osd_obj2dev(obj),
2554                                               fid, OI_CHECK_FLD) ?
2555                                 LMAC_FID_ON_OST : 0, 0);
2556
2557         if (result == 0)
2558                 result = __osd_oi_insert(env, obj, fid, th);
2559
2560         LASSERT(ergo(result == 0,
2561                      dt_object_exists(dt) && !dt_object_remote(dt)));
2562         LINVRNT(osd_invariant(obj));
2563         RETURN(result);
2564 }
2565
2566 static int osd_declare_object_ref_add(const struct lu_env *env,
2567                                       struct dt_object *dt,
2568                                       struct thandle *handle)
2569 {
2570         struct osd_thandle       *oh;
2571
2572         /* it is possible that the object does not exist yet */
2573         LASSERT(handle != NULL);
2574
2575         oh = container_of0(handle, struct osd_thandle, ot_super);
2576         LASSERT(oh->ot_handle == NULL);
2577
2578         osd_trans_declare_op(env, oh, OSD_OT_REF_ADD,
2579                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2580
2581         return 0;
2582 }
2583
2584 /*
2585  * Concurrency: @dt is write locked.
2586  */
2587 static int osd_object_ref_add(const struct lu_env *env,
2588                               struct dt_object *dt, struct thandle *th)
2589 {
2590         struct osd_object *obj = osd_dt_obj(dt);
2591         struct inode      *inode = obj->oo_inode;
2592         bool               need_dirty = false;
2593         int                rc = 0;
2594
2595         LINVRNT(osd_invariant(obj));
2596         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2597         LASSERT(osd_write_locked(env, obj));
2598         LASSERT(th != NULL);
2599
2600         osd_trans_exec_op(env, th, OSD_OT_REF_ADD);
2601
2602         /* This is based on ldiskfs_inc_count(), which is not exported.
2603          *
2604          * The DIR_NLINK feature allows directories to exceed LDISKFS_LINK_MAX
2605          * (65000) subdirectories by storing "1" in i_nlink if the link count
2606          * would otherwise overflow. Directory traversal tools understand
2607          * that (st_nlink == 1) indicates that the filesystem does not track
2608          * the hard link count on the directory, and will not abort early
2609          * once (st_nlink - 2) subdirs have been found while scanning.
2610          *
2611          * This also has to properly handle the case of inodes with nlink == 0
2612          * in case they are being linked into the PENDING directory.
2613          */
2614         spin_lock(&obj->oo_guard);
2615         if (unlikely(!S_ISDIR(inode->i_mode) &&
2616                      inode->i_nlink >= LDISKFS_LINK_MAX)) {
2617                 /* MDD should have checked this, but good to be safe */
2618                 rc = -EMLINK;
2619         } else if (unlikely(inode->i_nlink == 0 ||
2620                             (S_ISDIR(inode->i_mode) &&
2621                              inode->i_nlink >= LDISKFS_LINK_MAX))) {
2622                 /* inc_nlink from 0 may cause WARN_ON */
2623                 set_nlink(inode, 1);
2624                 need_dirty = true;
2625         } else if (!S_ISDIR(inode->i_mode) ||
2626                    (S_ISDIR(inode->i_mode) && inode->i_nlink >= 2)) {
2627                 inc_nlink(inode);
2628                 need_dirty = true;
2629         } /* else (S_ISDIR(inode->i_mode) && inode->i_nlink == 1) { ; } */
2630
2631         LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
2632         spin_unlock(&obj->oo_guard);
2633
2634         if (need_dirty)
2635                 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2636
2637         LINVRNT(osd_invariant(obj));
2638
2639         return rc;
2640 }
2641
2642 static int osd_declare_object_ref_del(const struct lu_env *env,
2643                                       struct dt_object *dt,
2644                                       struct thandle *handle)
2645 {
2646         struct osd_thandle *oh;
2647
2648         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2649         LASSERT(handle != NULL);
2650
2651         oh = container_of0(handle, struct osd_thandle, ot_super);
2652         LASSERT(oh->ot_handle == NULL);
2653
2654         osd_trans_declare_op(env, oh, OSD_OT_REF_DEL,
2655                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE]);
2656
2657         return 0;
2658 }
2659
2660 /*
2661  * Concurrency: @dt is write locked.
2662  */
2663 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2664                               struct thandle *th)
2665 {
2666         struct osd_object       *obj = osd_dt_obj(dt);
2667         struct inode            *inode = obj->oo_inode;
2668         struct osd_device       *osd = osd_dev(dt->do_lu.lo_dev);
2669
2670         LINVRNT(osd_invariant(obj));
2671         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2672         LASSERT(osd_write_locked(env, obj));
2673         LASSERT(th != NULL);
2674
2675         osd_trans_exec_op(env, th, OSD_OT_REF_DEL);
2676
2677         spin_lock(&obj->oo_guard);
2678         /* This can be the result of an upgrade from an old Lustre version
2679          * and applies only to local files.  Just skip this ref_del call.
2680          * ext4_unlink() only treats this as a warning, don't LASSERT here. */
2681         if (inode->i_nlink == 0) {
2682                 CDEBUG_LIMIT(fid_is_norm(lu_object_fid(&dt->do_lu)) ?
2683                              D_ERROR : D_INODE, "%s: nlink == 0 on "DFID
2684                              ", maybe an upgraded file? (LU-3915)\n",
2685                              osd_name(osd), PFID(lu_object_fid(&dt->do_lu)));
2686                 spin_unlock(&obj->oo_guard);
2687                 return 0;
2688         }
2689
2690         /* This is based on ldiskfs_dec_count(), which is not exported.
2691          *
2692          * If a directory already has nlink == 1, then do not drop the nlink
2693          * count to 0, even temporarily, to avoid race conditions with other
2694          * threads not holding oo_guard seeing i_nlink == 0 in rare cases.
2695          *
2696          * nlink == 1 means the directory has/had > EXT4_LINK_MAX subdirs.
2697          */
2698         if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 1) {
2699                 drop_nlink(inode);
2700
2701                 spin_unlock(&obj->oo_guard);
2702                 ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2703                 LINVRNT(osd_invariant(obj));
2704         } else {
2705                 spin_unlock(&obj->oo_guard);
2706         }
2707
2708         return 0;
2709 }
2710
2711 /*
2712  * Get the 64-bit version for an inode.
2713  */
2714 static int osd_object_version_get(const struct lu_env *env,
2715                                   struct dt_object *dt, dt_obj_version_t *ver)
2716 {
2717         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2718
2719         CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2720                LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2721         *ver = LDISKFS_I(inode)->i_fs_version;
2722         return 0;
2723 }
2724
2725 /*
2726  * Concurrency: @dt is read locked.
2727  */
2728 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2729                          struct lu_buf *buf, const char *name,
2730                          struct lustre_capa *capa)
2731 {
2732         struct osd_object      *obj    = osd_dt_obj(dt);
2733         struct inode           *inode  = obj->oo_inode;
2734         struct osd_thread_info *info   = osd_oti_get(env);
2735         struct dentry          *dentry = &info->oti_obj_dentry;
2736
2737         /* the version "get" is not a real xattr but uses the xattr API */
2738         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2739                 /* for the version we are just using the xattr API, but
2740                  * the value is read from an inode field instead */
2741                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2742                 osd_object_version_get(env, dt, buf->lb_buf);
2743                 return sizeof(dt_obj_version_t);
2744         }
2745
2746         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2747         LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
2748
2749         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2750                 return -EACCES;
2751
2752         return __osd_xattr_get(inode, dentry, name, buf->lb_buf, buf->lb_len);
2753 }
2754
2755
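/*
 * Declare credits for setting an xattr.  The pseudo-xattr used for the
 * object version only touches an inode field, so it is charged as a plain
 * attribute update rather than a full xattr set.
 */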
2756 static int osd_declare_xattr_set(const struct lu_env *env,
2757                                  struct dt_object *dt,
2758                                  const struct lu_buf *buf, const char *name,
2759                                  int fl, struct thandle *handle)
2760 {
2761         struct osd_thandle *oh;
2762
2763         LASSERT(handle != NULL);
2764
2765         oh = container_of0(handle, struct osd_thandle, ot_super);
2766         LASSERT(oh->ot_handle == NULL);
2767
2768         osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2769                              strcmp(name, XATTR_NAME_VERSION) == 0 ?
2770                              osd_dto_credits_noquota[DTO_ATTR_SET_BASE] :
2771                              osd_dto_credits_noquota[DTO_XATTR_SET]);
2772
2773         return 0;
2774 }
2775
2776 /*
2777  * Set the 64-bit version for object
2778  */
2779 static void osd_object_version_set(const struct lu_env *env,
2780                                    struct dt_object *dt,
2781                                    dt_obj_version_t *new_version)
2782 {
2783         struct inode *inode = osd_dt_obj(dt)->oo_inode;
2784
2785         CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
2786                *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2787
2788         LDISKFS_I(inode)->i_fs_version = *new_version;
2789         /* The version is set after all inode operations have finished,
2790          * so we mark the inode dirty here */
2791         ll_dirty_inode(inode, I_DIRTY_DATASYNC);
2792 }
2793
2794 /*
2795  * Concurrency: @dt is write locked.
2796  */
2797 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2798                          const struct lu_buf *buf, const char *name, int fl,
2799                          struct thandle *handle, struct lustre_capa *capa)
2800 {
2801         struct osd_object      *obj      = osd_dt_obj(dt);
2802         struct inode           *inode    = obj->oo_inode;
2803         struct osd_thread_info *info     = osd_oti_get(env);
2804         int                     fs_flags = 0;
2805         ENTRY;
2806
2807         LASSERT(handle != NULL);
2808
2809         /* the version "set" is not a real xattr but uses the xattr API */
2810         if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2811                 /* for the version we are just using the xattr API, but
2812                  * an inode field is changed instead */
2813                 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2814                 osd_object_version_set(env, dt, buf->lb_buf);
2815                 return sizeof(dt_obj_version_t);
2816         }
2817
2818         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2819                 return -EACCES;
2820
2821         osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
2822         if (fl & LU_XATTR_REPLACE)
2823                 fs_flags |= XATTR_REPLACE;
2824
2825         if (fl & LU_XATTR_CREATE)
2826                 fs_flags |= XATTR_CREATE;
2827
2828         return __osd_xattr_set(info, inode, name, buf->lb_buf, buf->lb_len,
2829                                fs_flags);
2830 }
2831
2832 /*
2833  * Concurrency: @dt is read locked.
2834  */
2835 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
2836                           struct lu_buf *buf, struct lustre_capa *capa)
2837 {
2838         struct osd_object      *obj    = osd_dt_obj(dt);
2839         struct inode           *inode  = obj->oo_inode;
2840         struct osd_thread_info *info   = osd_oti_get(env);
2841         struct dentry          *dentry = &info->oti_obj_dentry;
2842
2843         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2844         LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
2845         LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2846
2847         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2848                 return -EACCES;
2849
2850         dentry->d_inode = inode;
2851         dentry->d_sb = inode->i_sb;
2852         return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
2853 }
2854
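     /*
      * Declare the credits for removing an xattr.  Removal is accounted as an
      * OSD_OT_XATTR_SET operation with DTO_XATTR_SET credits, presumably
      * because it modifies the same xattr blocks as a set.
      */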
2855 static int osd_declare_xattr_del(const struct lu_env *env,
2856                                  struct dt_object *dt, const char *name,
2857                                  struct thandle *handle)
2858 {
2859         struct osd_thandle *oh;
2860
2861         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2862         LASSERT(handle != NULL);
2863
2864         oh = container_of0(handle, struct osd_thandle, ot_super);
2865         LASSERT(oh->ot_handle == NULL);
2866
2867         osd_trans_declare_op(env, oh, OSD_OT_XATTR_SET,
2868                              osd_dto_credits_noquota[DTO_XATTR_SET]);
2869
2870         return 0;
2871 }
2872
2873 /*
2874  * Concurrency: @dt is write locked.
2875  */
2876 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
2877                          const char *name, struct thandle *handle,
2878                          struct lustre_capa *capa)
2879 {
2880         struct osd_object      *obj    = osd_dt_obj(dt);
2881         struct inode           *inode  = obj->oo_inode;
2882         struct osd_thread_info *info   = osd_oti_get(env);
2883         struct dentry          *dentry = &info->oti_obj_dentry;
2884         int                     rc;
2885
2886         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2887         LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
2888         LASSERT(handle != NULL);
2889
2890         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2891                 return -EACCES;
2892
2893         osd_trans_exec_op(env, handle, OSD_OT_XATTR_SET);
2894
2895         ll_vfs_dq_init(inode);
2896         dentry->d_inode = inode;
2897         dentry->d_sb = inode->i_sb;
2898         rc = inode->i_op->removexattr(dentry, name);
2899         return rc;
2900 }
2901
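     /*
      * Build (or renew) a capability for the object:
      *   - bail out if capabilities are disabled on this device;
      *   - for a renewal, check that the old capability authorizes @opc;
      *   - fill in the uid/gid either in the clear (LC_ID_PLAIN) or encrypted
      *     with the capability key (LC_ID_CONVERT);
      *   - reuse a cached, unexpired capability from od_capa_hash if present,
      *     otherwise sign a fresh one with HMAC and add it to the hash.
      */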
2902 static struct obd_capa *osd_capa_get(const struct lu_env *env,
2903                                      struct dt_object *dt,
2904                                      struct lustre_capa *old, __u64 opc)
2905 {
2906         struct osd_thread_info *info = osd_oti_get(env);
2907         const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2908         struct osd_object *obj = osd_dt_obj(dt);
2909         struct osd_device *osd = osd_obj2dev(obj);
2910         struct lustre_capa_key *key = &info->oti_capa_key;
2911         struct lustre_capa *capa = &info->oti_capa;
2912         struct obd_capa *oc;
2913         struct lu_capainfo *lci;
2914         int rc;
2915         ENTRY;
2916
2917         if (!osd->od_fl_capa)
2918                 RETURN(ERR_PTR(-ENOENT));
2919
2920         LASSERT(dt_object_exists(dt) && !dt_object_remote(dt));
2921         LINVRNT(osd_invariant(obj));
2922
2923         /* renewal sanity check */
2924         if (old && osd_object_auth(env, dt, old, opc))
2925                 RETURN(ERR_PTR(-EACCES));
2926
2927         lci = lu_capainfo_get(env);
2928         if (unlikely(lci == NULL))
2929                 RETURN(ERR_PTR(-ENOENT));
2930
2931         switch (lci->lci_auth) {
2932         case LC_ID_NONE:
2933                 RETURN(NULL);
2934         case LC_ID_PLAIN:
2935                 capa->lc_uid = obj->oo_inode->i_uid;
2936                 capa->lc_gid = obj->oo_inode->i_gid;
2937                 capa->lc_flags = LC_ID_PLAIN;
2938                 break;
2939         case LC_ID_CONVERT: {
2940                 __u32 d[4], s[4];
2941
2942                 s[0] = obj->oo_inode->i_uid;
2943                 cfs_get_random_bytes(&(s[1]), sizeof(__u32));
2944                 s[2] = obj->oo_inode->i_gid;
2945                 cfs_get_random_bytes(&(s[3]), sizeof(__u32));
2946                 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
2947                 if (unlikely(rc))
2948                         RETURN(ERR_PTR(rc));
2949
2950                 capa->lc_uid   = ((__u64)d[1] << 32) | d[0];
2951                 capa->lc_gid   = ((__u64)d[3] << 32) | d[2];
2952                 capa->lc_flags = LC_ID_CONVERT;
2953                 break;
2954         }
2955         default:
2956                 RETURN(ERR_PTR(-EINVAL));
2957         }
2958
2959         capa->lc_fid = *fid;
2960         capa->lc_opc = opc;
2961         capa->lc_flags |= osd->od_capa_alg << 24;
2962         capa->lc_timeout = osd->od_capa_timeout;
2963         capa->lc_expiry = 0;
2964
2965         oc = capa_lookup(osd->od_capa_hash, capa, 1);
2966         if (oc) {
2967                 LASSERT(!capa_is_expired(oc));
2968                 RETURN(oc);
2969         }
2970
2971         spin_lock(&capa_lock);
2972         *key = osd->od_capa_keys[1];
2973         spin_unlock(&capa_lock);
2974
2975         capa->lc_keyid = key->lk_keyid;
2976         capa->lc_expiry = cfs_time_current_sec() + osd->od_capa_timeout;
2977
2978         rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
2979         if (rc) {
2980                 DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
2981                 RETURN(ERR_PTR(rc));
2982         }
2983
2984         oc = capa_add(osd->od_capa_hash, capa);
2985         RETURN(oc);
2986 }
2987
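     /*
      * Flush the object to disk: a temporary file and dentry from the
      * per-thread osd_thread_info are wrapped around the inode so that the
      * generic do_fsync() helper can be used.
      */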
2988 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
2989 {
2990         struct osd_object       *obj    = osd_dt_obj(dt);
2991         struct inode            *inode  = obj->oo_inode;
2992         struct osd_thread_info  *info   = osd_oti_get(env);
2993         struct dentry           *dentry = &info->oti_obj_dentry;
2994         struct file             *file   = &info->oti_file;
2995         int                     rc;
2996
2997         ENTRY;
2998
2999         dentry->d_inode = inode;
3000         dentry->d_sb = inode->i_sb;
3001         file->f_dentry = dentry;
3002         file->f_mapping = inode->i_mapping;
3003         file->f_op = inode->i_fop;
3004 #ifndef HAVE_FILE_FSYNC_4ARGS
3005         mutex_lock(&inode->i_mutex);
3006 #endif
3007         rc = do_fsync(file, 0);
3008 #ifndef HAVE_FILE_FSYNC_4ARGS
3009         mutex_unlock(&inode->i_mutex);
3010 #endif
3011         RETURN(rc);
3012 }
3013
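     /* Hand the backing ldiskfs inode to callers needing direct access. */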
3014 static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
3015                         void **data)
3016 {
3017         struct osd_object *obj = osd_dt_obj(dt);
3018         ENTRY;
3019
3020         *data = (void *)obj->oo_inode;
3021         RETURN(0);
3022 }
3023
3024 /*
3025  * Index operations.
3026  */
3027
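     /*
      * Check whether the IAM container of @o is compatible with the requested
      * index features: directory indexes must use osd_fid_pack records, other
      * indexes must fit within the advertised key/record size ranges and must
      * not require variable-size or non-unique keys.
      */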
3028 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
3029                            const struct dt_index_features *feat)
3030 {
3031         struct iam_descr *descr;
3032
3033         if (osd_object_is_root(o))
3034                 return feat == &dt_directory_features;
3035
3036         LASSERT(o->oo_dir != NULL);
3037
3038         descr = o->oo_dir->od_container.ic_descr;
3039         if (feat == &dt_directory_features) {
3040                 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
3041                         return 1;
3042                 else
3043                         return 0;
3044         } else {
3045                 return
3046                         feat->dif_keysize_min <= descr->id_key_size &&
3047                         descr->id_key_size <= feat->dif_keysize_max &&
3048                         feat->dif_recsize_min <= descr->id_rec_size &&
3049                         descr->id_rec_size <= feat->dif_recsize_max &&
3050                         !(feat->dif_flags & (DT_IND_VARKEY |
3051                                              DT_IND_VARREC | DT_IND_NONUNQ)) &&
3052                         ergo(feat->dif_flags & DT_IND_UPDATE,
3053                              1 /* XXX check that object (and file system) is
3054                                 * writable */);
3055         }
3056 }
3057
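     /*
      * Initialize and set up the IAM container for @obj.  On success the
      * object is switched to the IAM index operations; on failure the
      * container is torn down again.
      */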
3058 static int osd_iam_container_init(const struct lu_env *env,
3059                                   struct osd_object *obj,
3060                                   struct osd_directory *dir)
3061 {
3062         struct iam_container *bag = &dir->od_container;
3063         int result;
3064
3065         result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
3066         if (result != 0)
3067                 return result;
3068
3069         result = iam_container_setup(bag);
3070         if (result == 0)
3071                 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
3072         else
3073                 iam_container_fini(bag);
3074
3075         return result;
3076 }
3077
3078
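     /*
      * Install the index operations matching the requested features: the root
      * object and plain directories use the EA (ldiskfs dirent) index, the
      * object table iterator and quota accounting objects use their dedicated
      * ops, and anything else gets an IAM container that is allocated and
      * initialized lazily under oo_ext_idx_sem.
      */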
3079 /*
3080  * Concurrency: no external locking is necessary.
3081  */
3082 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
3083                          const struct dt_index_features *feat)
3084 {
3085         int                      result;
3086         int                      skip_iam = 0;
3087         struct osd_object       *obj = osd_dt_obj(dt);
3088
3089         LINVRNT(osd_invariant(obj));
3090
3091         if (osd_object_is_root(obj)) {
3092                 dt->do_index_ops = &osd_index_ea_ops;
3093                 result = 0;
3094         } else if (feat == &dt_directory_features) {
3095                 dt->do_index_ops = &osd_index_ea_ops;
3096                 if (obj->oo_inode != NULL && S_ISDIR(obj->oo_inode->i_mode))
3097                         result = 0;
3098                 else
3099                         result = -ENOTDIR;
3100                 skip_iam = 1;
3101         } else if (unlikely(feat == &dt_otable_features)) {
3102                 dt->do_index_ops = &osd_otable_ops;
3103                 return 0;
3104         } else if (unlikely(feat == &dt_acct_features)) {
3105                 dt->do_index_ops = &osd_acct_index_ops;
3106                 result = 0;
3107                 skip_iam = 1;
3108         } else if (!osd_has_index(obj)) {
3109                 struct osd_directory *dir;
3110
3111                 OBD_ALLOC_PTR(dir);
3112                 if (dir != NULL) {
3113
3114                         spin_lock(&obj->oo_guard);
3115                         if (obj->oo_dir == NULL)
3116                                 obj->oo_dir = dir;
3117                         else
3118                                 /*
3119                                  * Another thread allocated the container data.
3120                                  */
3121                                 OBD_FREE_PTR(dir);
3122                         spin_unlock(&obj->oo_guard);
3123                         /*
3124                          * Now that we have the container data, serialize its
3125                          * initialization.
3126                          */
3127                         down_write(&obj->oo_ext_idx_sem);
3128                         /*
3129                          * Re-check under the lock.
3130                          */
3131                         if (!osd_has_index(obj))
3132                                 result = osd_iam_container_init(env, obj, dir);
3133                         else
3134                                 result = 0;
3135                         up_write(&obj->oo_ext_idx_sem);
3136                 } else {
3137                         result = -ENOMEM;
3138                 }
3139         } else {
3140                 result = 0;
3141         }
3142
3143         if (result == 0 && skip_iam == 0) {
3144                 if (!osd_iam_index_probe(env, obj, feat))
3145                         result = -ENOTDIR;
3146         }
3147         LINVRNT(osd_invariant(obj));
3148
3149         if (result == 0 && is_quota_glb_feat(feat) &&
3150             fid_seq(lu_object_fid(&dt->do_lu)) == FID_SEQ_QUOTA_GLB)
3151                 result = osd_quota_migration(env, dt, feat);
3152
3153         return result;
3154 }
3155
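     /*
      * The object table iterator carries no meaningful attributes, so report
      * an empty attribute set.
      */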
3156 static int osd_otable_it_attr_get(const struct lu_env *env,
3157                                  struct dt_object *dt,
3158                                  struct lu_attr *attr,
3159                                  struct lustre_capa *capa)
3160 {
3161         attr->la_valid = 0;
3162         return 0;
3163 }
3164
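     /*
      * Default dt_object_operations vector installed on osd objects; a second
      * vector, osd_obj_ea_ops, serves the EA-based variant.
      */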
3165 static const struct dt_object_operations osd_obj_ops = {
3166         .do_read_lock         = osd_object_read_lock,
3167         .do_write_lock        = osd_object_write_lock,
3168         .do_read_unlock       = osd_object_read_unlock,
3169         .do_write_unlock      = osd_object_write_unlock,
3170         .do_write_locked      = osd_object_write_locked,
3171         .do_attr_get          = osd_attr_get,
3172         .do_declare_attr_set  = osd_declare_attr_set,
3173         .do_attr_set          = osd_attr_set,
3174         .do_ah_init           = osd_ah_init,
3175         .do_declare_create    = osd_declare_object_create,
3176         .do_create            = osd_object_create,
3177         .do_declare_destroy   = osd_declare_object_destroy,
3178         .do_destroy           = osd_object_destroy,
3179         .do_index_try         = osd_index_try,
3180         .do_declare_ref_add   = osd_declare_object_ref_add,
3181         .do_ref_add           = osd_object_ref_add,
3182         .do_declare_ref_del   = osd_declare_object_ref_del,