LU-10189 osd: handle PFID EA in LMA properly
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd-zfs/osd_object.c
33  *
34  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35  * Author: Mike Pershin <tappro@whamcloud.com>
36  * Author: Johann Lombardi <johann@whamcloud.com>
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <libcfs/libcfs.h>
42 #include <obd_support.h>
43 #include <lustre_net.h>
44 #include <obd.h>
45 #include <obd_class.h>
46 #include <lustre_disk.h>
47 #include <lustre_fid.h>
48
49 #include "osd_internal.h"
50
51 #include <sys/dnode.h>
52 #include <sys/dbuf.h>
53 #include <sys/spa.h>
54 #include <sys/stat.h>
55 #include <sys/zap.h>
56 #include <sys/spa_impl.h>
57 #include <sys/zfs_znode.h>
58 #include <sys/dmu_tx.h>
59 #include <sys/dmu_objset.h>
60 #include <sys/dsl_prop.h>
61 #include <sys/sa_impl.h>
62 #include <sys/txg.h>
63
64 char *osd_obj_tag = "osd_object";
65
66 static struct dt_object_operations osd_obj_ops;
67 static struct lu_object_operations osd_lu_obj_ops;
68 extern struct dt_body_operations osd_body_ops;
69 static struct dt_object_operations osd_obj_otable_it_ops;
70
71 extern struct kmem_cache *osd_object_kmem;
72
73 static void
74 osd_object_sa_fini(struct osd_object *obj)
75 {
76         if (obj->oo_sa_hdl) {
77                 sa_handle_destroy(obj->oo_sa_hdl);
78                 obj->oo_sa_hdl = NULL;
79         }
80 }
81
82 static int
83 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
84 {
85         int rc;
86
87         LASSERT(obj->oo_sa_hdl == NULL);
88         LASSERT(obj->oo_dn != NULL);
89
90         rc = osd_sa_handle_get(obj);
91         if (rc)
92                 return rc;
93
94         /* Cache the xattr object id, valid for the life of the object */
95         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
96         if (rc == -ENOENT) {
97                 obj->oo_xattr = ZFS_NO_OBJECT;
98                 rc = 0;
99         } else if (rc) {
100                 osd_object_sa_fini(obj);
101         }
102
103         return rc;
104 }
105
106 /*
107  * Add object to list of dirty objects in tx handle.
108  */
109 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
110 {
111         if (!list_empty(&obj->oo_sa_linkage))
112                 return;
113
114         write_lock(&obj->oo_attr_lock);
115         if (likely(list_empty(&obj->oo_sa_linkage)))
116                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
117         write_unlock(&obj->oo_attr_lock);
118 }
119
120 /*
121  * Release spill block dbuf hold for all dirty SAs.
122  */
123 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
124 {
125         struct osd_object *obj;
126
127         while (!list_empty(&oh->ot_sa_list)) {
128                 obj = list_entry(oh->ot_sa_list.next,
129                                  struct osd_object, oo_sa_linkage);
130                 write_lock(&obj->oo_attr_lock);
131                 list_del_init(&obj->oo_sa_linkage);
132                 write_unlock(&obj->oo_attr_lock);
133                 if (obj->oo_late_xattr) {
134                         /*
135                          * take oo_guard to protect oo_sa_xattr buffer
136                          * from concurrent update by osd_xattr_set()
137                          */
138                         LASSERT(oh->ot_assigned != 0);
139                         down_write(&obj->oo_guard);
140                         if (obj->oo_late_attr_set)
141                                 __osd_sa_attr_init(env, obj, oh);
142                         else if (obj->oo_late_xattr)
143                                 __osd_sa_xattr_update(env, obj, oh);
144                         up_write(&obj->oo_guard);
145                 }
146                 sa_spill_rele(obj->oo_sa_hdl);
147         }
148 }
149
150 /*
151  * Update the SA and add the object to the dirty list.
152  */
153 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
154                          void *buf, uint32_t buflen, struct osd_thandle *oh)
155 {
156         int rc;
157
158         LASSERT(obj->oo_sa_hdl != NULL);
159         LASSERT(oh->ot_tx != NULL);
160
161         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
162         osd_object_sa_dirty_add(obj, oh);
163
164         return rc;
165 }
166
167 /*
168  * Bulk update the SA and add the object to the dirty list.
169  */
170 static int
171 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
172                           int count, struct osd_thandle *oh)
173 {
174         int rc;
175
176         LASSERT(obj->oo_sa_hdl != NULL);
177         LASSERT(oh->ot_tx != NULL);
178
179         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
180         osd_object_sa_dirty_add(obj, oh);
181
182         return rc;
183 }
184
185 /*
186  * Retrieve the attributes of a DMU object
187  */
188 int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
189                           struct osd_object *obj, struct lu_attr *la)
190 {
191         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
192         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
193         int              cnt = 0;
194         int              rc;
195         ENTRY;
196
197         LASSERT(obj->oo_dn != NULL);
198
199         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
200                         LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
201
202         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
203         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
204         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
205         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
206         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
207         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
208         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
209         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
210         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
211         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
212
213         rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
214         if (rc)
215                 GOTO(out_sa, rc);
216
217 #ifdef ZFS_PROJINHERIT
218         if (o->od_projectused_dn && osa->flags & ZFS_PROJID) {
219                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(o),
220                                 &osa->projid, 8);
221                 if (rc)
222                         GOTO(out_sa, rc);
223
224                 la->la_projid = osa->projid;
225                 la->la_valid |= LA_PROJID;
226                 obj->oo_with_projid = 1;
227         } else {
228                 la->la_projid = ZFS_DEFAULT_PROJID;
229                 la->la_valid &= ~LA_PROJID;
230         }
231 #else
232         la->la_projid = 0;
233         la->la_valid &= ~LA_PROJID;
234 #endif
235
236         la->la_atime = osa->atime[0];
237         la->la_mtime = osa->mtime[0];
238         la->la_ctime = osa->ctime[0];
239         la->la_mode = osa->mode;
240         la->la_uid = osa->uid;
241         la->la_gid = osa->gid;
242         la->la_nlink = osa->nlink;
243         la->la_flags = attrs_zfs2fs(osa->flags);
244         la->la_size = osa->size;
245
246         /* Try to get extra flags from LMA. Right now, only the LMAI_ORPHAN
247          * flag is stored in LMA, and it is only set for orphan directories */
248         if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
249                 struct osd_thread_info *info = osd_oti_get(env);
250                 struct lustre_mdt_attrs *lma;
251                 struct lu_buf buf;
252
253                 lma = (struct lustre_mdt_attrs *)info->oti_buf;
254                 buf.lb_buf = lma;
255                 buf.lb_len = sizeof(info->oti_buf);
256                 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
257                 if (rc > 0) {
258                         rc = 0;
259                         lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
260                         obj->oo_lma_flags =
261                                 lma_to_lustre_flags(lma->lma_incompat);
262
263                 } else if (rc == -ENODATA) {
264                         rc = 0;
265                 }
266         }
267
268         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
269                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
270                 if (rc)
271                         GOTO(out_sa, rc);
272                 la->la_rdev = osa->rdev;
273                 la->la_valid |= LA_RDEV;
274         }
275 out_sa:
276
277         RETURN(rc);
278 }
279
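/*
 * Resolve a DMU object number into a dnode_t.
 *
 * A hold is taken on the object's bonus buffer and DB_DNODE_ENTER() is used
 * to reach the embedded dnode pointer.  The hold is not dropped here; callers
 * keep the dnode referenced for the life of the osd_object and release it
 * later (e.g. via osd_dnode_rele(), see osd_object_delete() below).
 */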
280 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
281 {
282         dmu_buf_t *db;
283         dmu_buf_impl_t *dbi;
284         int rc;
285
286         rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
287         if (rc)
288                 return rc;
289
290         dbi = (dmu_buf_impl_t *)db;
291         DB_DNODE_ENTER(dbi);
292         *dnp = DB_DNODE(dbi);
293         LASSERT(*dnp != NULL);
294
295         return 0;
296 }
297
298 /*
299  * Concurrency: no concurrent access is possible that early in object
300  * life-cycle.
301  */
302 struct lu_object *osd_object_alloc(const struct lu_env *env,
303                                    const struct lu_object_header *hdr,
304                                    struct lu_device *d)
305 {
306         struct osd_object *mo;
307
308         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
309         if (mo != NULL) {
310                 struct lu_object *l;
311
312                 l = &mo->oo_dt.do_lu;
313                 dt_object_init(&mo->oo_dt, NULL, d);
314                 mo->oo_dt.do_ops = &osd_obj_ops;
315                 l->lo_ops = &osd_lu_obj_ops;
316                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
317                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
318                 init_rwsem(&mo->oo_sem);
319                 init_rwsem(&mo->oo_guard);
320                 rwlock_init(&mo->oo_attr_lock);
321                 mo->oo_destroy = OSD_DESTROY_NONE;
322                 return l;
323         } else {
324                 return NULL;
325         }
326 }
327
328 static void osd_obj_set_blksize(const struct lu_env *env,
329                                 struct osd_device *osd, struct osd_object *obj)
330 {
331         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
332         dmu_tx_t *tx;
333         dnode_t *dn = obj->oo_dn;
334         uint32_t blksz;
335         int rc = 0;
336         ENTRY;
337
338         LASSERT(!osd_oti_get(env)->oti_in_trans);
339
340         tx = dmu_tx_create(osd->od_os);
341         if (!tx) {
342                 CERROR("%s: fail to create tx to set blksize for "DFID"\n",
343                        osd->od_svname, PFID(fid));
344                 RETURN_EXIT;
345         }
346
347         dmu_tx_hold_bonus(tx, dn->dn_object);
348         rc = -dmu_tx_assign(tx, TXG_WAIT);
349         if (rc) {
350                 dmu_tx_abort(tx);
351                 CERROR("%s: fail to assign tx to set blksize for "DFID
352                        ": rc = %d\n", osd->od_svname, PFID(fid), rc);
353                 RETURN_EXIT;
354         }
355
356         down_write(&obj->oo_guard);
357         if (unlikely((1 << dn->dn_datablkshift) >= PAGE_SIZE))
358                 GOTO(out, rc = 1);
359
360         blksz = dn->dn_datablksz;
361         if (!is_power_of_2(blksz))
362                 blksz = size_roundup_power2(blksz);
363
364         if (blksz > osd->od_max_blksz)
365                 blksz = osd->od_max_blksz;
366         else if (blksz < PAGE_SIZE)
367                 blksz = PAGE_SIZE;
368         rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object, blksz, 0, tx);
369
370         GOTO(out, rc);
371
372 out:
373         up_write(&obj->oo_guard);
374         if (rc) {
375                 dmu_tx_abort(tx);
376                 if (unlikely(obj->oo_dn->dn_maxblkid > 0))
377                         rc = 1;
378                 if (rc < 0)
379                         CERROR("%s: fail to set blksize for "DFID": rc = %d\n",
380                                osd->od_svname, PFID(fid), rc);
381         } else {
382                 dmu_tx_commit(tx);
383                 CDEBUG(D_INODE, "%s: set blksize as %u for "DFID"\n",
384                        osd->od_svname, blksz, PFID(fid));
385         }
386 }
387
388 /*
389  * Concurrency: shouldn't matter.
390  */
391 int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
392 {
393         struct osd_device       *osd = osd_obj2dev(obj);
394         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
395         int                      rc = 0;
396         ENTRY;
397
398         LASSERT(obj->oo_dn);
399
400         rc = osd_object_sa_init(obj, osd);
401         if (rc)
402                 RETURN(rc);
403
404         /* cache attrs in object */
405         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
406         if (rc)
407                 RETURN(rc);
408
409         if (likely(!fid_is_acct(fid))) {
410                 /* no body operations for accounting objects */
411                 obj->oo_dt.do_body_ops = &osd_body_ops;
412
413                 if (S_ISREG(obj->oo_attr.la_mode) &&
414                     obj->oo_dn->dn_maxblkid == 0 &&
415                     (1 << obj->oo_dn->dn_datablkshift) < PAGE_SIZE &&
416                     (fid_is_idif(fid) || fid_is_norm(fid) ||
417                      fid_is_echo(fid)) &&
418                     osd->od_is_ost && !osd->od_dt_dev.dd_rdonly)
419                         osd_obj_set_blksize(env, osd, obj);
420         }
421
422         /*
423          * initialize object before marking it existing
424          */
425         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
426
427         smp_mb();
428         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
429
430         RETURN(0);
431 }
432
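/*
 * Sanity-check the LMA xattr of an existing object: fail with -EOPNOTSUPP if
 * it carries incompat flags we do not support, and record in oo_pfid_in_lma
 * whether an OST object stores its parent FID (PFID) inside the LMA
 * (LMAC_STRIPE_INFO).  A missing LMA xattr is not an error.
 */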
433 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
434 {
435         struct osd_thread_info  *info = osd_oti_get(env);
436         struct lu_buf           buf;
437         int                     rc;
438         struct lustre_mdt_attrs *lma;
439         ENTRY;
440
441         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
442         lma = (struct lustre_mdt_attrs *)info->oti_buf;
443         buf.lb_buf = lma;
444         buf.lb_len = sizeof(info->oti_buf);
445
446         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
447         if (rc > 0) {
448                 rc = 0;
449                 lustre_lma_swab(lma);
450                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
451                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
452                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
453                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
454                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
455                               PFID(lu_object_fid(&obj->oo_dt.do_lu)));
456                         rc = -EOPNOTSUPP;
457                 } else {
458                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
459                             osd_obj2dev(obj)->od_is_ost)
460                                 obj->oo_pfid_in_lma = 1;
461                 }
462         } else if (rc == -ENODATA) {
463                 /* LMA xattr has not been initialized yet */
464                 rc = 0;
465         }
466
467         RETURN(rc);
468 }
469
470 /**
471  * Helper function to retrieve the DMU dnode from the FID of an accounting object
472  */
473 static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
474                                   const struct lu_fid *fid)
475 {
476         dnode_t *dn = NULL;
477
478         LASSERT(fid_is_acct(fid));
479
480         switch (fid_oid(fid)) {
481         case ACCT_USER_OID:
482                 dn = osd->od_userused_dn;
483                 break;
484         case ACCT_GROUP_OID:
485                 dn = osd->od_groupused_dn;
486                 break;
487 #ifdef ZFS_PROJINHERIT
488         case ACCT_PROJECT_OID:
489                 dn = osd->od_projectused_dn;
490                 break;
491 #endif
492         default:
493                 break;
494         }
495
496         return dn;
497 }
498
499 /*
500  * Concurrency: no concurrent access is possible that early in object
501  * life-cycle.
502  */
503 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
504                            const struct lu_object_conf *conf)
505 {
506         struct osd_object *obj = osd_obj(l);
507         struct osd_device *osd = osd_obj2dev(obj);
508         const struct lu_fid *fid = lu_object_fid(l);
509         uint64_t oid;
510         int rc = 0;
511         ENTRY;
512
513         LASSERT(osd_invariant(obj));
514
515         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
516                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
517                 l->lo_header->loh_attr |= LOHA_EXISTS;
518                 RETURN(0);
519         }
520
521         if (conf != NULL && conf->loc_flags & LOC_F_NEW)
522                 GOTO(out, rc = 0);
523
524         if (unlikely(fid_is_acct(fid))) {
525                 obj->oo_dn = osd_quota_fid2dmu(osd, fid);
526                 if (obj->oo_dn) {
527                         obj->oo_dt.do_index_ops = &osd_acct_index_ops;
528                         l->lo_header->loh_attr |= LOHA_EXISTS;
529                 }
530
531                 GOTO(out, rc = 0);
532         }
533
534         rc = osd_fid_lookup(env, osd, fid, &oid);
535         if (rc == 0) {
536                 LASSERT(obj->oo_dn == NULL);
537                 rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
538                 /* EEXIST will be returned if object is being deleted in ZFS */
539                 if (rc == -EEXIST) {
540                         rc = 0;
541                         GOTO(out, rc);
542                 }
543                 if (rc != 0) {
544                         CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
545                                osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
546                         GOTO(out, rc);
547                 }
548                 rc = osd_object_init0(env, obj);
549                 if (rc != 0)
550                         GOTO(out, rc);
551
552                 rc = osd_check_lma(env, obj);
553                 if (rc != 0)
554                         GOTO(out, rc);
555         } else if (rc == -ENOENT) {
556                 rc = 0;
557         }
558         LASSERT(osd_invariant(obj));
559 out:
560         RETURN(rc);
561 }
562
563 /*
564  * Concurrency: no concurrent access is possible that late in object
565  * life-cycle.
566  */
567 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
568 {
569         struct osd_object *obj = osd_obj(l);
570
571         LASSERT(osd_invariant(obj));
572
573         dt_object_fini(&obj->oo_dt);
574         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
575 }
576
577 static int
578 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
579 {
580         int rc = -EBUSY;
581
582         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
583
584         /* the object is supposed to be exclusively locked by
585          * the caller (osd_destroy()), while the transaction
586          * (oh) is per-thread and not shared */
587         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
588                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
589                 rc = 0;
590         }
591
592         return rc;
593 }
594
595 /* Default to max data size covered by a level-1 indirect block */
596 static unsigned long osd_sync_destroy_max_size =
597         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
598 module_param(osd_sync_destroy_max_size, ulong, 0444);
599 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
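/*
 * Purely illustrative example (the 1 MiB value is hypothetical): loading the
 * module with
 *
 *     options osd_zfs osd_sync_destroy_max_size=1048576
 *
 * in a modprobe.d configuration would make osd_object_set_destroy_type()
 * below pick the asynchronous destroy path for any object larger than 1 MiB,
 * i.e. the object is queued in the "unlinked" ZAP instead of being freed in
 * the current transaction.
 */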
600
601 static inline void
602 osd_object_set_destroy_type(struct osd_object *obj)
603 {
604         /*
605          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
606          * only once and use it consistently thereafter.
607          */
608         down_write(&obj->oo_guard);
609         if (obj->oo_destroy == OSD_DESTROY_NONE) {
610                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
611                         obj->oo_destroy = OSD_DESTROY_SYNC;
612                 else /* Larger objects are destroyed asynchronously */
613                         obj->oo_destroy = OSD_DESTROY_ASYNC;
614         }
615         up_write(&obj->oo_guard);
616 }
617
618 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
619                                struct thandle *th)
620 {
621         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
622         struct osd_object       *obj = osd_dt_obj(dt);
623         struct osd_device       *osd = osd_obj2dev(obj);
624         struct osd_thandle      *oh;
625         dnode_t *dn;
626         int                      rc;
627         uint64_t                 zapid;
628         ENTRY;
629
630         LASSERT(th != NULL);
631         LASSERT(dt_object_exists(dt));
632
633         oh = container_of0(th, struct osd_thandle, ot_super);
634         LASSERT(oh->ot_tx != NULL);
635
636         /* declare that we'll remove object from fid-dnode mapping */
637         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
638         osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
639
640         osd_declare_xattrs_destroy(env, obj, oh);
641
642         /* one less inode */
643         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
644                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
645                                -1, oh, NULL, OSD_QID_INODE);
646         if (rc)
647                 RETURN(rc);
648
649         /* data to be truncated */
650         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
651                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
652                                0, oh, NULL, OSD_QID_BLK);
653         if (rc)
654                 RETURN(rc);
655
656         osd_object_set_destroy_type(obj);
657         if (obj->oo_destroy == OSD_DESTROY_SYNC)
658                 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
659                                  0, DMU_OBJECT_END);
660         else
661                 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
662                                 osd->od_unlinked, TRUE, NULL);
663
664         /* will help to find FID->ino when this object is being
665          * added to PENDING/ */
666         osd_idc_find_and_init(env, osd, obj);
667
668         RETURN(0);
669 }
670
671 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
672                        struct thandle *th)
673 {
674         struct osd_thread_info  *info = osd_oti_get(env);
675         char                    *buf = info->oti_str;
676         struct osd_object       *obj = osd_dt_obj(dt);
677         struct osd_device       *osd = osd_obj2dev(obj);
678         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
679         struct osd_thandle      *oh;
680         int                      rc;
681         uint64_t                 oid, zapid;
682         dnode_t *zdn;
683         ENTRY;
684
685         down_write(&obj->oo_guard);
686
687         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
688                 GOTO(out, rc = -ENOENT);
689
690         LASSERT(obj->oo_dn != NULL);
691
692         oh = container_of0(th, struct osd_thandle, ot_super);
693         LASSERT(oh != NULL);
694         LASSERT(oh->ot_tx != NULL);
695
696         /* remove the object's name from its index ZAP (which one depends on the FID) */
697         zapid = osd_get_name_n_idx(env, osd, fid, buf,
698                                    sizeof(info->oti_str), &zdn);
699         rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
700         if (rc) {
701                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
702                        osd->od_svname, buf, rc);
703                 GOTO(out, rc);
704         }
705
706         rc = osd_xattrs_destroy(env, obj, oh);
707         if (rc) {
708                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
709                        osd->od_svname, buf, rc);
710                 GOTO(out, rc);
711         }
712
713         oid = obj->oo_dn->dn_object;
714         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
715                 /* this may happen if the destroy wasn't declared,
716                  * e.g. when the object is created and then destroyed
717                  * in the same transaction; we don't need additional
718                  * space specifically for the destroy */
719                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
720                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
721                 if (rc)
722                         CERROR("%s: failed to free %s %llu: rc = %d\n",
723                                osd->od_svname, buf, oid, rc);
724         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
725                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
726                 if (rc)
727                         CERROR("%s: failed to free %s %llu: rc = %d\n",
728                                osd->od_svname, buf, oid, rc);
729         } else { /* asynchronous destroy */
730                 char *key = info->oti_key;
731
732                 rc = osd_object_unlinked_add(obj, oh);
733                 if (rc)
734                         GOTO(out, rc);
735
736                 snprintf(key, sizeof(info->oti_key), "%llx", oid);
737                 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
738                                  osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
739                 if (rc)
740                         CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
741                                osd->od_svname, buf, oid, rc);
742         }
743
744 out:
745         /* not needed in the cache anymore */
746         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
747         if (rc == 0)
748                 obj->oo_destroyed = 1;
749         up_write(&obj->oo_guard);
750         RETURN (0);
751 }
752
753 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
754 {
755         struct osd_object *obj = osd_obj(l);
756         const struct lu_fid *fid = lu_object_fid(l);
757
758         if (obj->oo_dn) {
759                 if (likely(!fid_is_acct(fid))) {
760                         osd_object_sa_fini(obj);
761                         if (obj->oo_sa_xattr) {
762                                 nvlist_free(obj->oo_sa_xattr);
763                                 obj->oo_sa_xattr = NULL;
764                         }
765                         osd_dnode_rele(obj->oo_dn);
766                         list_del(&obj->oo_sa_linkage);
767                 }
768                 obj->oo_dn = NULL;
769         }
770 }
771
772 /*
773  * Concurrency: ->loo_object_release() is called under site spin-lock.
774  */
775 static void osd_object_release(const struct lu_env *env,
776                                struct lu_object *l)
777 {
778 }
779
780 /*
781  * Concurrency: shouldn't matter.
782  */
783 static int osd_object_print(const struct lu_env *env, void *cookie,
784                             lu_printer_t p, const struct lu_object *l)
785 {
786         struct osd_object *o = osd_obj(l);
787
788         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
789 }
790
791 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
792                           unsigned role)
793 {
794         struct osd_object *obj = osd_dt_obj(dt);
795
796         LASSERT(osd_invariant(obj));
797
798         down_read_nested(&obj->oo_sem, role);
799 }
800
801 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
802                            unsigned role)
803 {
804         struct osd_object *obj = osd_dt_obj(dt);
805
806         LASSERT(osd_invariant(obj));
807
808         down_write_nested(&obj->oo_sem, role);
809 }
810
811 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
812 {
813         struct osd_object *obj = osd_dt_obj(dt);
814
815         LASSERT(osd_invariant(obj));
816         up_read(&obj->oo_sem);
817 }
818
819 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
820 {
821         struct osd_object *obj = osd_dt_obj(dt);
822
823         LASSERT(osd_invariant(obj));
824         up_write(&obj->oo_sem);
825 }
826
827 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
828 {
829         struct osd_object *obj = osd_dt_obj(dt);
830         int rc = 1;
831
832         LASSERT(osd_invariant(obj));
833
834         if (down_write_trylock(&obj->oo_sem)) {
835                 rc = 0;
836                 up_write(&obj->oo_sem);
837         }
838         return rc;
839 }
840
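/*
 * Return the attributes of the object.  Regular attributes come from the
 * copy cached in oo_attr, while blocks/blksize are queried from the SA layer
 * on every call; directory sizes are synthesized as 512 * blocks since we do
 * not control the size of ZAP-based indices.
 */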
841 static int osd_attr_get(const struct lu_env *env,
842                         struct dt_object *dt,
843                         struct lu_attr *attr)
844 {
845         struct osd_object       *obj = osd_dt_obj(dt);
846         uint64_t                 blocks;
847         uint32_t                 blksize;
848         int                      rc = 0;
849
850         down_read(&obj->oo_guard);
851
852         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
853                 GOTO(out, rc = -ENOENT);
854
855         if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
856                 GOTO(out, rc = 0);
857
858         LASSERT(osd_invariant(obj));
859         LASSERT(obj->oo_dn);
860
861         read_lock(&obj->oo_attr_lock);
862         *attr = obj->oo_attr;
863         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
864                 attr->la_flags |= LUSTRE_ORPHAN_FL;
865         read_unlock(&obj->oo_attr_lock);
866
867         /* with ZFS_DEBUG, zrl_add_debug() (called by DB_DNODE_ENTER()
868          * from within sa_object_size()) can block on a mutex, so
869          * we can't call sa_object_size() while holding the rwlock */
870         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
871         /* we do not control the size of indices, so always calculate
872          * it from the number of blocks reported by the DMU */
873         if (S_ISDIR(attr->la_mode))
874                 attr->la_size = 512 * blocks;
875         /* Block size may not be set yet; suggest maximal I/O transfers. */
876         if (blksize == 0)
877                 blksize = osd_spa_maxblocksize(
878                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
879
880         attr->la_blksize = blksize;
881         attr->la_blocks = blocks;
882         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
883
884 out:
885         up_read(&obj->oo_guard);
886         return rc;
887 }
888
889 /* Simple wrapper on top of the qsd API which implements quota transfers for
890  * osd setattr needs. As a reminder, only the root user can change the
891  * ownership of a file, which is why EDQUOT and EINPROGRESS errors are discarded */
892 static inline int qsd_transfer(const struct lu_env *env,
893                                struct qsd_instance *qsd,
894                                struct lquota_trans *trans, int qtype,
895                                __u64 orig_id, __u64 new_id, __u64 bspace,
896                                struct lquota_id_info *qi)
897 {
898         int     rc;
899
900         if (unlikely(qsd == NULL))
901                 return 0;
902
903         LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
904         qi->lqi_type = qtype;
905
906         /* inode accounting */
907         qi->lqi_is_blk = false;
908
909         /* one more inode for the new owner ... */
910         qi->lqi_id.qid_uid = new_id;
911         qi->lqi_space      = 1;
912         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
913         if (rc == -EDQUOT || rc == -EINPROGRESS)
914                 rc = 0;
915         if (rc)
916                 return rc;
917
918         /* and one less inode for the current id */
919         qi->lqi_id.qid_uid = orig_id;
920         qi->lqi_space      = -1;
921         /* can't get EDQUOT when reducing usage */
922         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
923         if (rc == -EINPROGRESS)
924                 rc = 0;
925         if (rc)
926                 return rc;
927
928         /* block accounting */
929         qi->lqi_is_blk = true;
930
931         /* more blocks for the new owner ... */
932         qi->lqi_id.qid_uid = new_id;
933         qi->lqi_space      = bspace;
934         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
935         if (rc == -EDQUOT || rc == -EINPROGRESS)
936                 rc = 0;
937         if (rc)
938                 return rc;
939
940         /* and finally less blocks for the current owner */
941         qi->lqi_id.qid_uid = orig_id;
942         qi->lqi_space      = -bspace;
943         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
944         /* can't get EDQUOT when reducing usage */
945         if (rc == -EINPROGRESS)
946                 rc = 0;
947         return rc;
948 }
949
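/*
 * Declare an attribute update: make sure the transaction holds the object's
 * bonus buffer (unless it already does), and when the owner, group or project
 * is about to change, reserve quota via qsd_transfer() for moving the inode
 * and the currently used blocks to the new ID.
 */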
950 static int osd_declare_attr_set(const struct lu_env *env,
951                                 struct dt_object *dt,
952                                 const struct lu_attr *attr,
953                                 struct thandle *handle)
954 {
955         struct osd_thread_info  *info = osd_oti_get(env);
956         struct osd_object       *obj = osd_dt_obj(dt);
957         struct osd_device       *osd = osd_obj2dev(obj);
958         dmu_tx_hold_t           *txh;
959         struct osd_thandle      *oh;
960         uint64_t                 bspace;
961         uint32_t                 blksize;
962         int                      rc = 0;
963         bool                     found;
964         ENTRY;
965
966
967         LASSERT(handle != NULL);
968         LASSERT(osd_invariant(obj));
969
970         oh = container_of0(handle, struct osd_thandle, ot_super);
971
972         down_read(&obj->oo_guard);
973         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
974                 GOTO(out, rc = 0);
975
976         LASSERT(obj->oo_sa_hdl != NULL);
977         LASSERT(oh->ot_tx != NULL);
978         /* regular attributes are part of the bonus buffer */
979         /* check whether this object is already part of the
980          * transaction */
981         found = false;
982         for (txh = list_head(&oh->ot_tx->tx_holds); txh;
983              txh = list_next(&oh->ot_tx->tx_holds, txh)) {
984                 if (txh->txh_dnode == NULL)
985                         continue;
986                 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
987                         continue;
988                 /* this object is already part of the transaction;
989                  * we don't need to declare the bonus again */
990                 found = true;
991                 break;
992         }
993         if (!found)
994                 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
995         if (oh->ot_tx->tx_err != 0)
996                 GOTO(out, rc = -oh->ot_tx->tx_err);
997
998         if (attr && attr->la_valid & LA_FLAGS) {
999                 /* LMA is usually a part of bonus, no need to declare
1000                  * anything else */
1001         }
1002
1003         if (attr && (attr->la_valid & (LA_UID | LA_GID | LA_PROJID))) {
1004                 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
1005                 bspace = toqb(bspace * blksize);
1006         }
1007
1008         if (attr && attr->la_valid & LA_UID) {
1009                 /* quota enforcement for user */
1010                 if (attr->la_uid != obj->oo_attr.la_uid) {
1011                         rc = qsd_transfer(env, osd->od_quota_slave,
1012                                           &oh->ot_quota_trans, USRQUOTA,
1013                                           obj->oo_attr.la_uid, attr->la_uid,
1014                                           bspace, &info->oti_qi);
1015                         if (rc)
1016                                 GOTO(out, rc);
1017                 }
1018         }
1019         if (attr && attr->la_valid & LA_GID) {
1020                 /* quota enforcement for group */
1021                 if (attr->la_gid != obj->oo_attr.la_gid) {
1022                         rc = qsd_transfer(env, osd->od_quota_slave,
1023                                           &oh->ot_quota_trans, GRPQUOTA,
1024                                           obj->oo_attr.la_gid, attr->la_gid,
1025                                           bspace, &info->oti_qi);
1026                         if (rc)
1027                                 GOTO(out, rc);
1028                 }
1029         }
1030 #ifdef ZFS_PROJINHERIT
1031         if (attr && attr->la_valid & LA_PROJID) {
1032                 if (!osd->od_projectused_dn)
1033                         GOTO(out, rc = -EOPNOTSUPP);
1034
1035                 /* Usually, if project quota is upgradable for the device,
1036                  * the upgrade is done before or while mounting the
1037                  * device, so by the time we get here the object should
1038                  * already have a project ID attribute (zero by default).
1039                  * Otherwise something went wrong during that earlier
1040                  * upgrade, so return a failure to report it.
1041                  *
1042                  * Note that, unlike other attributes, the project ID
1043                  * cannot simply be set in this case, because adding (not
1044                  * changing) the project ID attribute requires changing
1045                  * the object's attribute layout to match the ZFS backend
1046                  * quota accounting requirement. */
1047                 if (unlikely(!obj->oo_with_projid))
1048                         GOTO(out, rc = -ENXIO);
1049
1050                 /* quota enforcement for project */
1051                 if (attr->la_projid != obj->oo_attr.la_projid) {
1052                         rc = qsd_transfer(env, osd->od_quota_slave,
1053                                           &oh->ot_quota_trans, PRJQUOTA,
1054                                           obj->oo_attr.la_projid,
1055                                           attr->la_projid, bspace,
1056                                           &info->oti_qi);
1057                         if (rc)
1058                                 GOTO(out, rc);
1059                 }
1060         }
1061 #endif
1062 out:
1063         up_read(&obj->oo_guard);
1064         RETURN(rc);
1065 }
1066
1067 /*
1068  * Set the attributes of an object
1069  *
1070  * The transaction passed to this routine must have
1071  * dmu_tx_hold_bonus(tx, oid) called and then assigned
1072  * to a transaction group.
1073  */
1074 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
1075                         const struct lu_attr *la, struct thandle *handle)
1076 {
1077         struct osd_thread_info  *info = osd_oti_get(env);
1078         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
1079         struct osd_object       *obj = osd_dt_obj(dt);
1080         struct osd_device       *osd = osd_obj2dev(obj);
1081         struct osd_thandle      *oh;
1082         struct osa_attr         *osa = &info->oti_osa;
1083         __u64                    valid = la->la_valid;
1084         int                      cnt;
1085         int                      rc = 0;
1086
1087         ENTRY;
1088
1089         down_read(&obj->oo_guard);
1090         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1091                 GOTO(out, rc = -ENOENT);
1092
1093         LASSERT(handle != NULL);
1094         LASSERT(osd_invariant(obj));
1095         LASSERT(obj->oo_sa_hdl);
1096
1097         oh = container_of0(handle, struct osd_thandle, ot_super);
1098         /* Assert that the transaction has been assigned to a
1099            transaction group. */
1100         LASSERT(oh->ot_tx->tx_txg != 0);
1101
1102         /* Only allow setting the size for regular files */
1103         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
1104                 valid &= ~(LA_SIZE | LA_BLOCKS);
1105
1106         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
1107                 valid &= ~LA_CTIME;
1108
1109         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
1110                 valid &= ~LA_MTIME;
1111
1112         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
1113                 valid &= ~LA_ATIME;
1114
1115         if (valid == 0)
1116                 GOTO(out, rc = 0);
1117
1118         if (valid & LA_FLAGS) {
1119                 struct lustre_mdt_attrs *lma;
1120                 struct lu_buf buf;
1121
1122                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
1123                         LASSERT(!obj->oo_pfid_in_lma);
1124                         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
1125                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
1126                         buf.lb_buf = lma;
1127                         buf.lb_len = sizeof(info->oti_buf);
1128                         rc = osd_xattr_get(env, &obj->oo_dt, &buf,
1129                                            XATTR_NAME_LMA);
1130                         if (rc > 0) {
1131                                 lma->lma_incompat =
1132                                         le32_to_cpu(lma->lma_incompat);
1133                                 lma->lma_incompat |=
1134                                         lustre_to_lma_flags(la->la_flags);
1135                                 lma->lma_incompat =
1136                                         cpu_to_le32(lma->lma_incompat);
1137                                 buf.lb_buf = lma;
1138                                 buf.lb_len = sizeof(*lma);
1139                                 rc = osd_xattr_set_internal(env, obj, &buf,
1140                                                             XATTR_NAME_LMA,
1141                                                             LU_XATTR_REPLACE,
1142                                                             oh);
1143                         }
1144                         if (rc < 0) {
1145                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
1146                                        osd->od_svname, rc);
1147                                 GOTO(out, rc);
1148                         }
1149                 }
1150         }
1151
1152         write_lock(&obj->oo_attr_lock);
1153         cnt = 0;
1154
1155         if (valid & LA_PROJID) {
1156 #ifdef ZFS_PROJINHERIT
1157                 /* osd_declare_attr_set() must have been called first.
1158                  * If osd::od_projectused_dn is not set, then we
1159                  * cannot get here. */
1160                 LASSERT(osd->od_projectused_dn);
1161                 LASSERT(obj->oo_with_projid);
1162
1163                 osa->projid = obj->oo_attr.la_projid = la->la_projid;
1164                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1165                                  &osa->projid, 8);
1166 #else
1167                 valid &= ~LA_PROJID;
1168 #endif
1169         }
1170
1171         if (valid & LA_ATIME) {
1172                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1173                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1174                                  osa->atime, 16);
1175         }
1176         if (valid & LA_MTIME) {
1177                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1178                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1179                                  osa->mtime, 16);
1180         }
1181         if (valid & LA_CTIME) {
1182                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1183                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1184                                  osa->ctime, 16);
1185         }
1186         if (valid & LA_MODE) {
1187                 /* mode is stored along with the type, so preserve the type bits */
1188                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1189                         (la->la_mode & ~S_IFMT);
1190                 osa->mode = obj->oo_attr.la_mode;
1191                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1192                                  &osa->mode, 8);
1193         }
1194         if (valid & LA_SIZE) {
1195                 osa->size = obj->oo_attr.la_size = la->la_size;
1196                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1197                                  &osa->size, 8);
1198         }
1199         if (valid & LA_NLINK) {
1200                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1201                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1202                                  &osa->nlink, 8);
1203         }
1204         if (valid & LA_RDEV) {
1205                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1206                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1207                                  &osa->rdev, 8);
1208         }
1209         if (valid & LA_FLAGS) {
1210                 osa->flags = attrs_fs2zfs(la->la_flags);
1211                 /* many flags are not supported by ZFS, so keep only the
1212                  * supported ones in the cached copy */
1213                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1214 #ifdef ZFS_PROJINHERIT
1215                 if (obj->oo_with_projid)
1216                         osa->flags |= ZFS_PROJID;
1217 #endif
1218                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1219                                  &osa->flags, 8);
1220         }
1221         if (valid & LA_UID) {
1222                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1223                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1224                                  &osa->uid, 8);
1225         }
1226         if (valid & LA_GID) {
1227                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1228                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1229                                  &osa->gid, 8);
1230         }
1231         obj->oo_attr.la_valid |= valid;
1232         write_unlock(&obj->oo_attr_lock);
1233
1234         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1235         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1236
1237 out:
1238         up_read(&obj->oo_guard);
1239         RETURN(rc);
1240 }
1241
1242 /*
1243  * Object creation.
1244  *
1245  * XXX temporary solution.
1246  */
1247
1248 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1249                         struct dt_object *parent, struct dt_object *child,
1250                         umode_t child_mode)
1251 {
1252         LASSERT(ah);
1253
1254         ah->dah_parent = parent;
1255         ah->dah_mode = child_mode;
1256
1257         if (parent != NULL && !dt_object_remote(parent)) {
1258                 /* will help to find FID->ino at dt_insert("..") */
1259                 struct osd_object *pobj = osd_dt_obj(parent);
1260
1261                 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1262         }
1263 }
1264
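/*
 * Declare object creation: reserve transaction space for a new dnode whose
 * bonus area can hold the minimal set of Lustre EAs (LMA plus the VBR
 * version) with some slack, for a new ZAP when a directory or index is being
 * created, for an entry in the FID->dnode mapping, and for one inode worth
 * of quota for the new owner.
 */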
1265 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1266                               struct lu_attr *attr,
1267                               struct dt_allocation_hint *hint,
1268                               struct dt_object_format *dof,
1269                               struct thandle *handle)
1270 {
1271         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1272         struct osd_object       *obj = osd_dt_obj(dt);
1273         struct osd_device       *osd = osd_obj2dev(obj);
1274         struct osd_thandle      *oh;
1275         uint64_t                 zapid;
1276         dnode_t                 *dn;
1277         int                      rc, dnode_size;
1278         ENTRY;
1279
1280         LASSERT(dof);
1281
1282         switch (dof->dof_type) {
1283                 case DFT_REGULAR:
1284                 case DFT_SYM:
1285                 case DFT_NODE:
1286                         if (obj->oo_dt.do_body_ops == NULL)
1287                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1288                         break;
1289                 default:
1290                         break;
1291         }
1292
1293         LASSERT(handle != NULL);
1294         oh = container_of0(handle, struct osd_thandle, ot_super);
1295         LASSERT(oh->ot_tx != NULL);
1296
1297         /* this is the minimum set of EAs on every Lustre object */
1298         obj->oo_ea_in_bonus = ZFS_SA_BASE_ATTR_SIZE +
1299                                 sizeof(__u64) + /* VBR VERSION */
1300                                 sizeof(struct lustre_mdt_attrs); /* LMA */
1301         /* reserve 32 bytes for extra stuff like ACLs */
1302         dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1303
1304         switch (dof->dof_type) {
1305                 case DFT_DIR:
1306                         dt->do_index_ops = &osd_dir_ops; /* fall through */
1307                 case DFT_INDEX:
1308                         /* for zap create */
1309                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1310                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1311                         break;
1312                 case DFT_REGULAR:
1313                 case DFT_SYM:
1314                 case DFT_NODE:
1315                         /* first, we'll create new object */
1316                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1317                         break;
1318
1319                 default:
1320                         LBUG();
1321                         break;
1322         }
1323
1324         /* and we'll add it to some mapping */
1325         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1326         osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1327
1328         /* will help to find FID->ino mapping at dt_insert() */
1329         osd_idc_find_and_init(env, osd, obj);
1330
1331         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid,
1332                                attr->la_projid, 1, oh, NULL, OSD_QID_INODE);
1333
1334         RETURN(rc);
1335 }
1336
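/*
 * Build and apply the initial SA template for a freshly created object.
 * Attributes are added in the fixed SA_*_OFFSET order expected by the ZFS
 * space accounting callback (see the comment inside), and any initial xattrs
 * are packed into the DXATTR SA as an XDR-encoded nvlist.
 */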
1337 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1338                     struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
1339                     struct lu_attr *la, uint64_t parent,
1340                     nvlist_t *xattr)
1341 {
1342         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
1343         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1344         uint64_t         gen;
1345         uint64_t         crtime[2];
1346         timestruc_t      now;
1347         int              cnt;
1348         int              rc;
1349         char *dxattr = NULL;
1350         size_t sa_size;
1351
1352
1353         LASSERT(sa_hdl);
1354
1355         gen = dmu_tx_get_txg(tx);
1356         gethrestime(&now);
1357         ZFS_TIME_ENCODE(&now, crtime);
1358
1359         osa->atime[0] = la->la_atime;
1360         osa->ctime[0] = la->la_ctime;
1361         osa->mtime[0] = la->la_mtime;
1362         osa->mode = la->la_mode;
1363         osa->uid = la->la_uid;
1364         osa->gid = la->la_gid;
1365         osa->rdev = la->la_rdev;
1366         osa->nlink = la->la_nlink;
1367         if (la->la_valid & LA_FLAGS)
1368                 osa->flags = attrs_fs2zfs(la->la_flags);
1369         else
1370                 osa->flags = 0;
1371         osa->size  = la->la_size;
1372 #ifdef ZFS_PROJINHERIT
1373         if (osd->od_projectused_dn) {
1374                 if (la->la_valid & LA_PROJID)
1375                         osa->projid = la->la_projid;
1376                 else
1377                         osa->projid = ZFS_DEFAULT_PROJID;
1378                 osa->flags |= ZFS_PROJID;
1379                 if (obj)
1380                         obj->oo_with_projid = 1;
1381         } else {
1382                 osa->flags &= ~ZFS_PROJID;
1383         }
1384 #endif
1385
1386         /*
1387          * we need to create all SA below upon object create.
1388          *
1389          * XXX The attribute order matters since the accounting callback relies
1390          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1391          * look up the UID/GID/PROJID attributes. Moreover, the callback does
1392          * not seem to support the spill block.
1393          * We define attributes in the same order as SA_*_OFFSET in order to
1394          * work around the problem. See ORI-610.
1395          */
1396         cnt = 0;
1397         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1398         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1399         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1400         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1401         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1402         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1403         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1404         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1405         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1406         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1407         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
1408         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1409 #ifdef ZFS_PROJINHERIT
1410         if (osd->od_projectused_dn)
1411                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1412                                  &osa->projid, 8);
1413 #endif
1414         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1415         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1416
1417         if (xattr) {
1418                 rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
1419                 LASSERT(rc == 0);
1420
1421                 dxattr = osd_zio_buf_alloc(sa_size);
1422                 LASSERT(dxattr);
1423
1424                 rc = -nvlist_pack(xattr, &dxattr, &sa_size,
1425                                 NV_ENCODE_XDR, KM_SLEEP);
1426                 LASSERT(rc == 0);
1427
1428                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1429                                 NULL, dxattr, sa_size);
1430         }
1431
1432         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1433         if (dxattr)
1434                 osd_zio_buf_free(dxattr, sa_size);
1435
1436         return rc;
1437 }
1438
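/*
 * Find the dnode for @oid among the dnodes already referenced by the
 * transaction's holds, taking an extra hold on its bonus dbuf; this avoids a
 * full object-number -> dnode_t lookup for an object just created in this tx.
 * Fall back to __osd_obj2dnode() when the tx has no matching hold.
 */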
1439 static int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1440                               uint64_t oid, dnode_t **dnp)
1441 {
1442         dmu_tx_hold_t *txh;
1443         int rc = 0;
1444         int rc = 0;

        *dnp = NULL;
1445         /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1446         for (txh = list_tail(&tx->tx_holds); txh;
1447              txh = list_prev(&tx->tx_holds, txh)) {
1448                 dnode_t *dn = txh->txh_dnode;
1449                 dmu_buf_impl_t *db;
1450
1451                 if (dn == NULL)
1452                         continue;
1453                 if (dn->dn_object != oid)
1454                         continue;
1455                 db = dn->dn_bonus;
1456                 if (db == NULL) {
1457                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1458                         if (dn->dn_bonus == NULL)
1459                                 dbuf_create_bonus(dn);
1460                         rw_exit(&dn->dn_struct_rwlock);
1461                 }
1462                 db = dn->dn_bonus;
1463                 LASSERT(db);
1464                 LASSERT(dn->dn_handle);
1465                 DB_DNODE_ENTER(db);
1466                 if (refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1467                         refcount_add(&dn->dn_holds, db);
1468                         atomic_inc_32(&dn->dn_dbufs_count);
1469                 }
1470                 *dnp = dn;
1471                 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1472                 break;
1473         }
1474
1475         if (unlikely(*dnp == NULL))
1476                 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1477
1478         return rc;
1479 }
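/*
 * Note on the hold taken above (a sketch of the intent, not authoritative):
 * the loop mirrors dmu_bonus_hold() -- take a hold on the bonus dbuf and, on
 * the first such hold, a dnode hold tagged with the bonus dbuf -- but reuses
 * the dnode_t already referenced by the tx holds, so the common create path
 * avoids a second object# -> dnode_t lookup.  Tagging the dnode hold with the
 * dbuf matches the dnode_rele(dn, db) performed when the bonus buffer is
 * finally released.  Only if the dnode is not among the tx holds do we fall
 * back to __osd_obj2dnode().
 */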
1480
1481 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
1482 static int osd_find_dnsize(struct osd_object *obj)
1483 {
1484         struct osd_device *osd = osd_obj2dev(obj);
1485         int dnsize;
1486
1487         if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
1488                 dnsize = DNODE_MIN_SIZE;
1489                 do {
1490                         if (DN_BONUS_SIZE(dnsize) >= obj->oo_ea_in_bonus + 32)
1491                                 break;
1492                         dnsize <<= 1;
1493                 } while (dnsize < DNODE_MAX_SIZE);
1494                 if (dnsize > DNODE_MAX_SIZE)
1495                         dnsize = DNODE_MAX_SIZE;
1496         } else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
1497                 dnsize = 1024;
1498         } else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
1499                 dnsize = 2048;
1500         } else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
1501                 dnsize = 4096;
1502         } else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
1503                 dnsize = 8192;
1504         } else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
1505                 dnsize = 16384;
1506         } else {
1507                 dnsize = DNODE_MIN_SIZE;
1508         }
1509         return dnsize;
1510 }
1511 #else
1512 static inline int osd_find_dnsize(struct osd_object *obj)
1513 {
1514         return DN_MAX_BONUSLEN;
1515 }
1516 #endif
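/*
 * Worked example (approximate; DN_BONUS_SIZE() and the dnode size limits come
 * from the ZFS headers and may differ between releases): with
 * od_dnsize == ZFS_DNSIZE_AUTO and oo_ea_in_bonus around 800 bytes, a 512-byte
 * dnode offers roughly 320 bytes of bonus space and a 1K dnode roughly 832,
 * so the loop above settles on a 1K dnode; larger xattr payloads keep doubling
 * the size up to DNODE_MAX_SIZE.
 */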
1517
1518 /*
1519  * The transaction passed to this routine must have
1520  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1521  * to a transaction group.
1522  */
1523 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
1524                         dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1525 {
1526         struct osd_device   *osd = osd_obj2dev(obj);
1527         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1528         dmu_object_type_t    type = DMU_OT_PLAIN_FILE_CONTENTS;
1529         uint64_t oid;
1530
1531         /* Use DMU_OTN_UINT8_METADATA for local objects so that their data
1532          * blocks get an additional ditto copy. */
1533         if (unlikely(S_ISREG(la->la_mode) &&
1534                      fid_seq_is_local_file(fid_seq(fid))))
1535                 type = DMU_OTN_UINT8_METADATA;
1536
1537         /* Create a new DMU object with the dnode size from osd_find_dnsize(). */
1538         oid = osd_dmu_object_alloc(osd->od_os, type, 0,
1539                                    osd_find_dnsize(obj), tx);
1540
1541         LASSERT(la->la_valid & LA_MODE);
1542         la->la_size = 0;
1543         la->la_nlink = 1;
1544
1545         return osd_find_new_dnode(env, tx, oid, dnp);
1546 }
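/*
 * A minimal caller sketch (hypothetical, for illustration only; in the OSD
 * proper the hold and assignment are done on the osd_thandle carrying ot_tx):
 *
 *	tx = dmu_tx_create(osd->od_os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	rc = -dmu_tx_assign(tx, TXG_WAIT);
 *	if (rc == 0)
 *		rc = __osd_object_create(env, obj, &dn, tx, la);
 */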
1547
1548 /*
1549  * The transaction passed to this routine must have
1550  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1551  * to a transaction group.
1552  *
1553  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1554  * This is fine for directories today, because storing the FID in the dirent
1555  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1556  * then we might need to re-evaluate the use of this flag and instead
1557  * convert between the different internal ZAP hash formats in use. */
1558 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1559                      dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1560                      unsigned dnsize, zap_flags_t flags)
1561 {
1562         uint64_t oid;
1563
1564         /* Assert that the transaction has been assigned to a
1565          * transaction group. */
1566         LASSERT(tx->tx_txg != 0);
1567         *dnp = NULL;
1568
1569         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1570                                    DMU_OT_DIRECTORY_CONTENTS,
1571                                    14, /* == ZFS fzap_default_blockshift */
1572                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1573                                    dnsize, tx);
1574
1575         la->la_size = 2;
1576         la->la_nlink = 1;
1577
1578         return osd_find_new_dnode(env, tx, oid, dnp);
1579 }
1580
1581 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1582                           struct lu_attr *la, struct osd_thandle *oh)
1583 {
1584         dnode_t *dn;
1585         int rc;
1586
1587         /* Index files should be created as regular files in order not to
1588          * confuse ZPL, which could otherwise interpret them as directories.
1589          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to
1590          * use binary keys. */
1591         LASSERT(S_ISREG(la->la_mode));
1592         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la,
1593                               osd_find_dnsize(obj), ZAP_FLAG_UINT64_KEY);
1594         if (rc)
1595                 return ERR_PTR(rc);
1596         return dn;
1597 }
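/*
 * With ZAP_FLAG_UINT64_KEY the index is keyed by arrays of 64-bit words
 * rather than strings, so lookups and insertions on it go through the
 * uint64 ZAP interfaces, e.g. (names are illustrative only):
 *
 *	rc = -zap_lookup_uint64(osd->od_os, dn->dn_object, key, key_numints,
 *				8, 1, &value);
 *
 * instead of the string-keyed zap_lookup()/zap_add() used for directories.
 */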
1598
1599 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1600                           struct lu_attr *la, struct osd_thandle *oh)
1601 {
1602         dnode_t *dn;
1603         int rc;
1604
1605         LASSERT(S_ISDIR(la->la_mode));
1606         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la,
1607                               osd_find_dnsize(obj), 0);
1608         if (rc)
1609                 return ERR_PTR(rc);
1610         return dn;
1611 }
1612
1613 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1614                           struct lu_attr *la, struct osd_thandle *oh)
1615 {
1616         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1617         struct osd_device *osd = osd_obj2dev(obj);
1618         dnode_t *dn;
1619         int rc;
1620
1621         LASSERT(S_ISREG(la->la_mode));
1622         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1623         if (rc)
1624                 return ERR_PTR(rc);
1625
1626         if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid))) {
1627                 /* The minimum block size must be at least the page size,
1628                  * otherwise it breaks the assumption in tgt_thread_big_cache
1629                  * where the array size is PTLRPC_MAX_BRW_PAGES. It would also
1630                  * hurt RDMA due to sub-page transfer sizes. */
1631                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1632                                                PAGE_SIZE, 0, oh->ot_tx);
1633                 if (unlikely(rc)) {
1634                         CERROR("%s: can't change blocksize: %d\n",
1635                                osd->od_svname, rc);
1636                         return ERR_PTR(rc);
1637                 }
1638         }
1639
1640         return dn;
1641 }
1642
1643 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1644                           struct lu_attr *la, struct osd_thandle *oh)
1645 {
1646         dnode_t *dn;
1647         int rc;
1648
1649         LASSERT(S_ISLNK(la->la_mode));
1650         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1651         if (rc)
1652                 return ERR_PTR(rc);
1653         return dn;
1654 }
1655
1656 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1657                           struct lu_attr *la, struct osd_thandle *oh)
1658 {
1659         dnode_t *dn;
1660         int rc;
1661
1662         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1663                 la->la_valid |= LA_RDEV;
1664
1665         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1666         if (rc)
1667                 return ERR_PTR(rc);
1668         return dn;
1669 }
1670
1671 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1672                                    struct osd_object *obj,
1673                                    struct lu_attr *la,
1674                                    struct osd_thandle *oh);
1675
1676 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1677 {
1678         osd_obj_type_f result;
1679
1680         switch (type) {
1681         case DFT_DIR:
1682                 result = osd_mkdir;
1683                 break;
1684         case DFT_INDEX:
1685                 result = osd_mkidx;
1686                 break;
1687         case DFT_REGULAR:
1688                 result = osd_mkreg;
1689                 break;
1690         case DFT_SYM:
1691                 result = osd_mksym;
1692                 break;
1693         case DFT_NODE:
1694                 result = osd_mknod;
1695                 break;
1696         default:
1697                 LBUG();
1698                 break;
1699         }
1700         return result;
1701 }
1702
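/*
 * Object creation in outline: allocate the dnode via the type-specific helper
 * selected above, add the FID-keyed entry to the OI ZAP, set up the SA handle,
 * stash the initial LMA xattr in oo_sa_xattr and schedule the SA update so
 * that the xattr SAs are flushed within the same transaction.
 */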
1703 /*
1704  * Concurrency: @dt is write locked.
1705  */
1706 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1707                       struct lu_attr *attr, struct dt_allocation_hint *hint,
1708                       struct dt_object_format *dof, struct thandle *th)
1709 {
1710         struct osd_thread_info  *info = osd_oti_get(env);
1711         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1712         struct zpl_direntry     *zde = &info->oti_zde.lzd_reg;
1713         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1714         struct osd_object       *obj = osd_dt_obj(dt);
1715         struct osd_device       *osd = osd_obj2dev(obj);
1716         char                    *buf = info->oti_str;
1717         struct osd_thandle      *oh;
1718         dnode_t *dn = NULL, *zdn = NULL;
1719         uint64_t                 zapid, parent = 0;
1720         int                      rc;
1721
1722         ENTRY;
1723
1724         LASSERT(!fid_is_acct(fid));
1725
1726         /* Concurrent create declarations should not see the object in an
1727          * inconsistent state (db, attr, etc).
1728          * In regular cases the lock acquisition should be cheap. */
1729         down_write(&obj->oo_guard);
1730
1731         if (unlikely(dt_object_exists(dt)))
1732                 GOTO(out, rc = -EEXIST);
1733
1734         LASSERT(osd_invariant(obj));
1735         LASSERT(dof != NULL);
1736
1737         LASSERT(th != NULL);
1738         oh = container_of0(th, struct osd_thandle, ot_super);
1739
1740         LASSERT(obj->oo_dn == NULL);
1741
1742         /* To follow the ZFS on-disk format we need to initialize the
1743          * parent dnode properly. */
1744         if (hint != NULL && hint->dah_parent != NULL &&
1745             !dt_object_remote(hint->dah_parent))
1746                 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1747
1748         /* We may fix up some attributes; do not modify the caller's copy. */
1749         obj->oo_attr = *attr;
1750         obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1751
1752 #ifdef ZFS_PROJINHERIT
1753         if (osd->od_projectused_dn) {
1754                 if (!(obj->oo_attr.la_valid & LA_PROJID))
1755                         obj->oo_attr.la_projid = ZFS_DEFAULT_PROJID;
1756                 obj->oo_with_projid = 1;
1757         }
1758 #endif
1759
1760         dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1761         if (IS_ERR(dn)) {
1762                 rc = PTR_ERR(dn);
1763                 dn = NULL;
1764                 GOTO(out, rc);
1765         }
1766
1767         zde->zde_pad = 0;
1768         zde->zde_dnode = dn->dn_object;
1769         zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1770
1771         zapid = osd_get_name_n_idx(env, osd, fid, buf,
1772                                    sizeof(info->oti_str), &zdn);
1773         rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
1774         if (rc)
1775                 GOTO(out, rc);
1776         obj->oo_dn = dn;
1777         /* Now add in all of the "SA" attributes */
1778         rc = osd_sa_handle_get(obj);
1779         if (rc)
1780                 GOTO(out, rc);
1781
1782         rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
1783         if (rc)
1784                 GOTO(out, rc);
1785
1786         /* initialize LMA */
1787         lustre_lma_init(lma, fid, 0, 0);
1788         lustre_lma_swab(lma);
1789         rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
1790                                     (uchar_t *)lma, sizeof(*lma));
1791         if (rc)
1792                 GOTO(out, rc);
1793
1794         /* configure new osd object */
1795         obj->oo_parent = parent != 0 ? parent : zapid;
1796         obj->oo_late_attr_set = 1;
1797         rc = __osd_sa_xattr_schedule_update(env, obj, oh);
1798         if (rc)
1799                 GOTO(out, rc);
1800
1801         /* XXX: oo_lma_flags */
1802         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
1803         if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
1804                 /* no body operations for accounting objects */
1805                 obj->oo_dt.do_body_ops = &osd_body_ops;
1806
1807         osd_idc_find_and_init(env, osd, obj);
1808
1809 out:
1810         if (unlikely(rc && dn)) {
1811                 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
1812                 osd_dnode_rele(dn);
1813                 obj->oo_dn = NULL;
1814         } else if (!rc) {
1815                 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
1816         }
1817         up_write(&obj->oo_guard);
1818         RETURN(rc);
1819 }
1820
1821 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
1822                                struct thandle *th)
1823 {
1824         return osd_declare_attr_set(env, dt, NULL, th);
1825 }
1826
1827 /*
1828  * Concurrency: @dt is write locked.
1829  */
1830 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
1831                        struct thandle *handle)
1832 {
1833         struct osd_object       *obj = osd_dt_obj(dt);
1834         struct osd_thandle      *oh;
1835         struct osd_device       *osd = osd_obj2dev(obj);
1836         uint64_t                 nlink;
1837         int rc;
1838
1839         ENTRY;
1840
1841         down_read(&obj->oo_guard);
1842         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1843                 GOTO(out, rc = -ENOENT);
1844
1845         LASSERT(osd_invariant(obj));
1846         LASSERT(obj->oo_sa_hdl != NULL);
1847
1848         oh = container_of0(handle, struct osd_thandle, ot_super);
1849
1850         write_lock(&obj->oo_attr_lock);
1851         nlink = ++obj->oo_attr.la_nlink;
1852         write_unlock(&obj->oo_attr_lock);
1853
1854         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1855
1856 out:
1857         up_read(&obj->oo_guard);
1858         RETURN(rc);
1859 }
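/*
 * For both osd_ref_add() above and osd_ref_del() below, the link count is
 * adjusted in the cached attributes under oo_attr_lock and then written to
 * the on-disk SA_ZPL_LINKS attribute through osd_object_sa_update() within
 * the transaction carried by the osd_thandle.
 */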
1860
1861 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
1862                                struct thandle *handle)
1863 {
1864         return osd_declare_attr_set(env, dt, NULL, handle);
1865 }
1866
1867 /*
1868  * Concurrency: @dt is write locked.
1869  */
1870 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
1871                        struct thandle *handle)
1872 {
1873         struct osd_object       *obj = osd_dt_obj(dt);
1874         struct osd_thandle      *oh;
1875         struct osd_device       *osd = osd_obj2dev(obj);
1876         uint64_t                 nlink;
1877         int                      rc;
1878
1879         ENTRY;
1880
1881         down_read(&obj->oo_guard);
1882
1883         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1884                 GOTO(out, rc = -ENOENT);
1885
1886         LASSERT(osd_invariant(obj));
1887         LASSERT(obj->oo_sa_hdl != NULL);
1888
1889         oh = container_of0(handle, struct osd_thandle, ot_super);
1890         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
1891
1892         write_lock(&obj->oo_attr_lock);
1893         nlink = --obj->oo_attr.la_nlink;
1894         write_unlock(&obj->oo_attr_lock);
1895
1896         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1897
1898 out:
1899         up_read(&obj->oo_guard);
1900         RETURN(rc);
1901 }
1902
1903 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
1904                            __u64 start, __u64 end)
1905 {
1906         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1907         ENTRY;
1908
1909         /* XXX: no other option than syncing the whole filesystem until we
1910          * support ZIL.  If the object tracked the txg that it was last
1911          * modified in, it could pass that txg here instead of "0".  Maybe
1912          * the changes are already committed, so no wait is needed at all? */
1913         if (!osd->od_dt_dev.dd_rdonly)
1914                 txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
1915
1916         RETURN(0);
1917 }
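/*
 * A sketch of the txg-based alternative mentioned in the XXX above
 * (hypothetical: it assumes the object tracked the last txg it was modified
 * in, e.g. an oo_last_txg field that does not exist today):
 *
 *	txg_wait_synced(dmu_objset_pool(osd->od_os), obj->oo_last_txg);
 *
 * which would wait only for that txg instead of forcing out every txg.
 */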
1918
1919 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
1920 {
1921         return 0;
1922 }
1923
1924 static struct dt_object_operations osd_obj_ops = {
1925         .do_read_lock           = osd_read_lock,
1926         .do_write_lock          = osd_write_lock,
1927         .do_read_unlock         = osd_read_unlock,
1928         .do_write_unlock        = osd_write_unlock,
1929         .do_write_locked        = osd_write_locked,
1930         .do_attr_get            = osd_attr_get,
1931         .do_declare_attr_set    = osd_declare_attr_set,
1932         .do_attr_set            = osd_attr_set,
1933         .do_ah_init             = osd_ah_init,
1934         .do_declare_create      = osd_declare_create,
1935         .do_create              = osd_create,
1936         .do_declare_destroy     = osd_declare_destroy,
1937         .do_destroy             = osd_destroy,
1938         .do_index_try           = osd_index_try,
1939         .do_declare_ref_add     = osd_declare_ref_add,
1940         .do_ref_add             = osd_ref_add,
1941         .do_declare_ref_del     = osd_declare_ref_del,
1942         .do_ref_del             = osd_ref_del,
1943         .do_xattr_get           = osd_xattr_get,
1944         .do_declare_xattr_set   = osd_declare_xattr_set,
1945         .do_xattr_set           = osd_xattr_set,
1946         .do_declare_xattr_del   = osd_declare_xattr_del,
1947         .do_xattr_del           = osd_xattr_del,
1948         .do_xattr_list          = osd_xattr_list,
1949         .do_object_sync         = osd_object_sync,
1950         .do_invalidate          = osd_invalidate,
1951 };
1952
1953 static struct lu_object_operations osd_lu_obj_ops = {
1954         .loo_object_init        = osd_object_init,
1955         .loo_object_delete      = osd_object_delete,
1956         .loo_object_release     = osd_object_release,
1957         .loo_object_free        = osd_object_free,
1958         .loo_object_print       = osd_object_print,
1959         .loo_object_invariant   = osd_object_invariant,
1960 };
1961
1962 static int osd_otable_it_attr_get(const struct lu_env *env,
1963                                 struct dt_object *dt,
1964                                 struct lu_attr *attr)
1965 {
1966         attr->la_valid = 0;
1967         return 0;
1968 }
1969
1970 static struct dt_object_operations osd_obj_otable_it_ops = {
1971         .do_attr_get            = osd_otable_it_attr_get,
1972         .do_index_try           = osd_index_try,
1973 };