LU-6427 osd: race between destroy and load LMV
[fs/lustre-release.git] lustre/osd-zfs/osd_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2012, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osd-zfs/osd_object.c
37  *
38  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
39  * Author: Mike Pershin <tappro@whamcloud.com>
40  * Author: Johann Lombardi <johann@whamcloud.com>
41  */
42
43 #define DEBUG_SUBSYSTEM S_OSD
44
45 #include <lustre_ver.h>
46 #include <libcfs/libcfs.h>
47 #include <obd_support.h>
48 #include <lustre_net.h>
49 #include <obd.h>
50 #include <obd_class.h>
51 #include <lustre_disk.h>
52 #include <lustre_fid.h>
53
54 #include "osd_internal.h"
55
56 #include <sys/dnode.h>
57 #include <sys/dbuf.h>
58 #include <sys/spa.h>
59 #include <sys/stat.h>
60 #include <sys/zap.h>
61 #include <sys/spa_impl.h>
62 #include <sys/zfs_znode.h>
63 #include <sys/dmu_tx.h>
64 #include <sys/dmu_objset.h>
65 #include <sys/dsl_prop.h>
66 #include <sys/sa_impl.h>
67 #include <sys/txg.h>
68
69 char *osd_obj_tag = "osd_object";
70
71 static struct dt_object_operations osd_obj_ops;
72 static struct lu_object_operations osd_lu_obj_ops;
73 extern struct dt_body_operations osd_body_ops;
74 static struct dt_object_operations osd_obj_otable_it_ops;
75
76 extern struct kmem_cache *osd_object_kmem;
77
78 static void
79 osd_object_sa_fini(struct osd_object *obj)
80 {
81         if (obj->oo_sa_hdl) {
82                 sa_handle_destroy(obj->oo_sa_hdl);
83                 obj->oo_sa_hdl = NULL;
84         }
85 }
86
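/*
 * Set up a private SA handle for the object and cache the id of its ZPL
 * xattr directory (ZFS_NO_OBJECT if none has been created yet).
 */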
87 static int
88 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
89 {
90         int rc;
91
92         LASSERT(obj->oo_sa_hdl == NULL);
93         LASSERT(obj->oo_db != NULL);
94
95         rc = -sa_handle_get(o->od_os, obj->oo_db->db_object, obj,
96                             SA_HDL_PRIVATE, &obj->oo_sa_hdl);
97         if (rc)
98                 return rc;
99
100         /* Cache the xattr object id, valid for the life of the object */
101         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
102         if (rc == -ENOENT) {
103                 obj->oo_xattr = ZFS_NO_OBJECT;
104                 rc = 0;
105         } else if (rc) {
106                 osd_object_sa_fini(obj);
107         }
108
109         return rc;
110 }
111
112 /*
113  * Add object to list of dirty objects in tx handle.
114  */
115 static void
116 osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
117 {
118         if (!list_empty(&obj->oo_sa_linkage))
119                 return;
120
121         down(&oh->ot_sa_lock);
122         write_lock(&obj->oo_attr_lock);
123         if (likely(list_empty(&obj->oo_sa_linkage)))
124                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
125         write_unlock(&obj->oo_attr_lock);
126         up(&oh->ot_sa_lock);
127 }
128
129 /*
130  * Release spill block dbuf hold for all dirty SAs.
131  */
132 void osd_object_sa_dirty_rele(struct osd_thandle *oh)
133 {
134         struct osd_object *obj;
135
136         down(&oh->ot_sa_lock);
137         while (!list_empty(&oh->ot_sa_list)) {
138                 obj = list_entry(oh->ot_sa_list.next,
139                                  struct osd_object, oo_sa_linkage);
140                 sa_spill_rele(obj->oo_sa_hdl);
141                 write_lock(&obj->oo_attr_lock);
142                 list_del_init(&obj->oo_sa_linkage);
143                 write_unlock(&obj->oo_attr_lock);
144         }
145         up(&oh->ot_sa_lock);
146 }
147
148 /*
149  * Update the SA and add the object to the dirty list.
150  */
151 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
152                          void *buf, uint32_t buflen, struct osd_thandle *oh)
153 {
154         int rc;
155
156         LASSERT(obj->oo_sa_hdl != NULL);
157         LASSERT(oh->ot_tx != NULL);
158
159         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
160         osd_object_sa_dirty_add(obj, oh);
161
162         return rc;
163 }
164
165 /*
166  * Bulk update the SA and add the object to the dirty list.
167  */
168 static int
169 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
170                           int count, struct osd_thandle *oh)
171 {
172         int rc;
173
174         LASSERT(obj->oo_sa_hdl != NULL);
175         LASSERT(oh->ot_tx != NULL);
176
177         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
178         osd_object_sa_dirty_add(obj, oh);
179
180         return rc;
181 }
182
183 /*
184  * Retrieve the attributes of a DMU object
185  */
186 int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
187                           struct osd_object *obj, struct lu_attr *la)
188 {
189         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
190         sa_handle_t     *sa_hdl;
191         sa_bulk_attr_t  *bulk;
192         int              cnt = 0;
193         int              rc;
194         ENTRY;
195
196         LASSERT(obj->oo_db != NULL);
197
198         rc = -sa_handle_get(o->od_os, obj->oo_db->db_object, NULL,
199                             SA_HDL_PRIVATE, &sa_hdl);
200         if (rc)
201                 RETURN(rc);
202
203         OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 9);
204         if (bulk == NULL)
205                 GOTO(out_sa, rc = -ENOMEM);
206
207         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
208                         LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
209
210         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
211         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
212         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
213         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
214         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
215         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
216         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
217         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
218         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
219
220         rc = -sa_bulk_lookup(sa_hdl, bulk, cnt);
221         if (rc)
222                 GOTO(out_bulk, rc);
223
224         la->la_atime = osa->atime[0];
225         la->la_mtime = osa->mtime[0];
226         la->la_ctime = osa->ctime[0];
227         la->la_mode = osa->mode;
228         la->la_uid = osa->uid;
229         la->la_gid = osa->gid;
230         la->la_nlink = osa->nlink;
231         la->la_flags = attrs_zfs2fs(osa->flags);
232         la->la_size = osa->size;
233
234         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
235                 rc = -sa_lookup(sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
236                 if (rc)
237                         GOTO(out_bulk, rc);
238                 la->la_rdev = osa->rdev;
239                 la->la_valid |= LA_RDEV;
240         }
241 out_bulk:
242         OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 9);
243 out_sa:
244         sa_handle_destroy(sa_hdl);
245
246         RETURN(rc);
247 }
248
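/*
 * Hold the SA buffer for the given dnode and verify that its bonus area
 * really is an SA (the special user/group used objects are exempt from
 * the check).
 */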
249 int __osd_obj2dbuf(const struct lu_env *env, objset_t *os,
250                    uint64_t oid, dmu_buf_t **dbp)
251 {
252         dmu_object_info_t *doi = &osd_oti_get(env)->oti_doi;
253         int rc;
254
255         rc = -sa_buf_hold(os, oid, osd_obj_tag, dbp);
256         if (rc)
257                 return rc;
258
259         dmu_object_info_from_db(*dbp, doi);
260         if (unlikely(oid != DMU_USERUSED_OBJECT &&
261             oid != DMU_GROUPUSED_OBJECT && doi->doi_bonus_type != DMU_OT_SA)) {
262                 sa_buf_rele(*dbp, osd_obj_tag);
263                 *dbp = NULL;
264                 return -EINVAL;
265         }
266
267         LASSERT(*dbp);
268         LASSERT((*dbp)->db_object == oid);
269         LASSERT((*dbp)->db_offset == -1);
270         LASSERT((*dbp)->db_data != NULL);
271
272         return 0;
273 }
274
275 /*
276  * Concurrency: no concurrent access is possible that early in object
277  * life-cycle.
278  */
279 struct lu_object *osd_object_alloc(const struct lu_env *env,
280                                    const struct lu_object_header *hdr,
281                                    struct lu_device *d)
282 {
283         struct osd_object *mo;
284
285         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
286         if (mo != NULL) {
287                 struct lu_object *l;
288
289                 l = &mo->oo_dt.do_lu;
290                 dt_object_init(&mo->oo_dt, NULL, d);
291                 mo->oo_dt.do_ops = &osd_obj_ops;
292                 l->lo_ops = &osd_lu_obj_ops;
293                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
294                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
295                 init_rwsem(&mo->oo_sem);
296                 sema_init(&mo->oo_guard, 1);
297                 rwlock_init(&mo->oo_attr_lock);
298                 mo->oo_destroy = OSD_DESTROY_NONE;
299                 return l;
300         } else {
301                 return NULL;
302         }
303 }
304
305 /*
306  * Concurrency: shouldn't matter.
307  */
308 int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
309 {
310         struct osd_device       *osd = osd_obj2dev(obj);
311         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
312         int                      rc = 0;
313         ENTRY;
314
315         if (obj->oo_db == NULL)
316                 RETURN(0);
317
318         /* object exists on disk */
319
320         rc = osd_object_sa_init(obj, osd);
321         if (rc)
322                 RETURN(rc);
323
324         /* cache attrs in object */
325         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
326         if (rc)
327                 RETURN(rc);
328
329         if (likely(!fid_is_acct(fid)))
330                 /* no body operations for accounting objects */
331                 obj->oo_dt.do_body_ops = &osd_body_ops;
332
333         /*
334          * initialize object before marking it existing
335          */
336         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
337
338         smp_mb();
339         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
340
341         RETURN(0);
342 }
343
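/*
 * Read the LMA xattr, if present, and check it for unsupported incompat
 * features: unknown flags make the object unusable (-EOPNOTSUPP), while a
 * missing LMA is tolerated.
 */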
344 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
345 {
346         struct osd_thread_info  *info = osd_oti_get(env);
347         struct lu_buf           buf;
348         int                     rc;
349         struct lustre_mdt_attrs *lma;
350         ENTRY;
351
352         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
353         lma = (struct lustre_mdt_attrs *)info->oti_buf;
354         buf.lb_buf = lma;
355         buf.lb_len = sizeof(info->oti_buf);
356
357         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
358         if (rc > 0) {
359                 rc = 0;
360                 lustre_lma_swab(lma);
361                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
362                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
363                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
364                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
365                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
366                               PFID(lu_object_fid(&obj->oo_dt.do_lu)));
367                         rc = -EOPNOTSUPP;
368                 }
369         } else if (rc == -ENODATA) {
370                 /* the LMA xattr has not been initialized yet */
371                 rc = 0;
372         }
373
374         RETURN(rc);
375 }
376
377 /*
378  * Concurrency: no concurrent access is possible that early in object
379  * life-cycle.
380  */
381 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
382                            const struct lu_object_conf *conf)
383 {
384         struct osd_object       *obj = osd_obj(l);
385         struct osd_device       *osd = osd_obj2dev(obj);
386         uint64_t                 oid;
387         int                      rc;
388         ENTRY;
389
390         LASSERT(osd_invariant(obj));
391
392         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
393                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
394                 l->lo_header->loh_attr |= LOHA_EXISTS;
395                 RETURN(0);
396         }
397
398         rc = osd_fid_lookup(env, osd, lu_object_fid(l), &oid);
399         if (rc == 0) {
400                 LASSERT(obj->oo_db == NULL);
401                 rc = __osd_obj2dbuf(env, osd->od_os, oid, &obj->oo_db);
402                 if (rc != 0) {
403                         CERROR("%s: lookup "DFID"/"LPX64" failed: rc = %d\n",
404                                osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
405                         GOTO(out, rc);
406                 }
407                 LASSERT(obj->oo_db);
408                 rc = osd_object_init0(env, obj);
409                 if (rc != 0)
410                         GOTO(out, rc);
411
412                 rc = osd_check_lma(env, obj);
413                 if (rc != 0)
414                         GOTO(out, rc);
415         } else if (rc == -ENOENT) {
416                 rc = 0;
417         }
418         LASSERT(osd_invariant(obj));
419 out:
420         RETURN(rc);
421 }
422
423 /*
424  * Concurrency: no concurrent access is possible that late in object
425  * life-cycle.
426  */
427 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
428 {
429         struct osd_object *obj = osd_obj(l);
430
431         LASSERT(osd_invariant(obj));
432
433         dt_object_fini(&obj->oo_dt);
434         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
435 }
436
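/*
 * Queue an object being destroyed asynchronously on the transaction's
 * unlinked list; returns -EBUSY if the object is already queued.
 */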
437 static int
438 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
439 {
440         int rc = -EBUSY;
441
442         down(&obj->oo_guard);
443
444         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
445
446         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
447                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
448                 rc = 0;
449         }
450
451         up(&obj->oo_guard);
452
453         return rc;
454 }
455
456 /* Default to max data size covered by a level-1 indirect block */
457 static unsigned long osd_sync_destroy_max_size =
458         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
459 CFS_MODULE_PARM(osd_sync_destroy_max_size, "ul", ulong, 0444,
460                 "Maximum object size to use synchronous destroy.");
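/* Illustrative usage (assumption: the OSD module is loaded as osd_zfs): the
 * threshold can be set at load time, e.g. with
 *   options osd_zfs osd_sync_destroy_max_size=1048576
 * in modprobe.d; objects no larger than this are freed synchronously, while
 * larger ones go through the unlinked ZAP. */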
461
462 static inline void
463 osd_object_set_destroy_type(struct osd_object *obj)
464 {
465         /*
466          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
467          * only once and use it consistently thereafter.
468          */
469         down(&obj->oo_guard);
470         if (obj->oo_destroy == OSD_DESTROY_NONE) {
471                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
472                         obj->oo_destroy = OSD_DESTROY_SYNC;
473                 else /* Larger objects are destroyed asynchronously */
474                         obj->oo_destroy = OSD_DESTROY_ASYNC;
475         }
476         up(&obj->oo_guard);
477 }
478
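/*
 * Declare everything needed to destroy an object: removal from its FID
 * mapping ZAP, xattr destruction, updates to the user/group inode
 * accounting ZAPs, quota changes, and either a synchronous dnode free or
 * an insertion into the unlinked ZAP for asynchronous destroy.
 */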
479 static int osd_declare_object_destroy(const struct lu_env *env,
480                                       struct dt_object *dt,
481                                       struct thandle *th)
482 {
483         char                    *buf = osd_oti_get(env)->oti_str;
484         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
485         struct osd_object       *obj = osd_dt_obj(dt);
486         struct osd_device       *osd = osd_obj2dev(obj);
487         struct osd_thandle      *oh;
488         int                      rc;
489         uint64_t                 zapid;
490         ENTRY;
491
492         LASSERT(th != NULL);
493         LASSERT(dt_object_exists(dt));
494
495         oh = container_of0(th, struct osd_thandle, ot_super);
496         LASSERT(oh->ot_tx != NULL);
497
498         /* declare that we'll remove object from fid-dnode mapping */
499         zapid = osd_get_name_n_idx(env, osd, fid, buf);
500         dmu_tx_hold_bonus(oh->ot_tx, zapid);
501         dmu_tx_hold_zap(oh->ot_tx, zapid, FALSE, buf);
502
503         osd_declare_xattrs_destroy(env, obj, oh);
504
505         /* declare that we'll remove object from inode accounting ZAPs */
506         dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
507         dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, FALSE, buf);
508         dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
509         dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, FALSE, buf);
510
511         /* one less inode */
512         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
513                                obj->oo_attr.la_gid, -1, oh, false, NULL, false);
514         if (rc)
515                 RETURN(rc);
516
517         /* data to be truncated */
518         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
519                                obj->oo_attr.la_gid, 0, oh, true, NULL, false);
520         if (rc)
521                 RETURN(rc);
522
523         osd_object_set_destroy_type(obj);
524         if (obj->oo_destroy == OSD_DESTROY_SYNC)
525                 dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object,
526                                  0, DMU_OBJECT_END);
527         else
528                 dmu_tx_hold_zap(oh->ot_tx, osd->od_unlinkedid, TRUE, NULL);
529
530         RETURN(0);
531 }
532
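/*
 * Destroy an object: drop its name from the FID mapping ZAP, destroy its
 * xattrs, decrement inode accounting, then either free the dnode right
 * away (sync destroy) or record it in the unlinked ZAP for later freeing.
 */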
533 static int osd_object_destroy(const struct lu_env *env,
534                               struct dt_object *dt, struct thandle *th)
535 {
536         char                    *buf = osd_oti_get(env)->oti_str;
537         struct osd_object       *obj = osd_dt_obj(dt);
538         struct osd_device       *osd = osd_obj2dev(obj);
539         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
540         struct osd_thandle      *oh;
541         int                      rc;
542         uint64_t                 oid, zapid;
543         ENTRY;
544
545         LASSERT(obj->oo_db != NULL);
546         LASSERT(dt_object_exists(dt));
547         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
548
549         oh = container_of0(th, struct osd_thandle, ot_super);
550         LASSERT(oh != NULL);
551         LASSERT(oh->ot_tx != NULL);
552
553         /* remove the object's entry from the FID mapping ZAP */
554         zapid = osd_get_name_n_idx(env, osd, fid, buf);
555         rc = -zap_remove(osd->od_os, zapid, buf, oh->ot_tx);
556         if (rc) {
557                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
558                        osd->od_svname, buf, rc);
559                 GOTO(out, rc);
560         }
561
562         rc = osd_xattrs_destroy(env, obj, oh);
563         if (rc) {
564                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
565                        osd->od_svname, buf, rc);
566                 GOTO(out, rc);
567         }
568
569         /* Remove object from inode accounting. It is not fatal for the destroy
570          * operation if something goes wrong while updating accounting, but we
571          * still log an error message to notify the administrator */
572         rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
573                                 obj->oo_attr.la_uid, -1, oh->ot_tx);
574         if (rc)
575                 CERROR("%s: failed to remove "DFID" from accounting ZAP for usr"
576                        " %d: rc = %d\n", osd->od_svname, PFID(fid),
577                        obj->oo_attr.la_uid, rc);
578         rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
579                                 obj->oo_attr.la_gid, -1, oh->ot_tx);
580         if (rc)
581                 CERROR("%s: failed to remove "DFID" from accounting ZAP for grp"
582                        " %d: rc = %d\n", osd->od_svname, PFID(fid),
583                        obj->oo_attr.la_gid, rc);
584
585         oid = obj->oo_db->db_object;
586         if (obj->oo_destroy == OSD_DESTROY_SYNC) {
587                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
588                 if (rc)
589                         CERROR("%s: failed to free %s "LPU64": rc = %d\n",
590                                osd->od_svname, buf, oid, rc);
591         } else { /* asynchronous destroy */
592                 rc = osd_object_unlinked_add(obj, oh);
593                 if (rc)
594                         GOTO(out, rc);
595
596                 rc = -zap_add_int(osd->od_os, osd->od_unlinkedid,
597                                   oid, oh->ot_tx);
598                 if (rc)
599                         CERROR("%s: zap_add_int() failed %s "LPU64": rc = %d\n",
600                                osd->od_svname, buf, oid, rc);
601         }
602
603 out:
604         /* not needed in the cache anymore */
605         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
606         if (rc == 0)
607                 obj->oo_destroyed = 1;
608         RETURN (0);
609 }
610
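/*
 * Final per-object cleanup: release the SA handle, the cached SA xattr
 * nvlist and the dnode buffer hold taken when the object was looked up or
 * created.
 */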
611 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
612 {
613         struct osd_object *obj = osd_obj(l);
614
615         if (obj->oo_db != NULL) {
616                 osd_object_sa_fini(obj);
617                 if (obj->oo_sa_xattr) {
618                         nvlist_free(obj->oo_sa_xattr);
619                         obj->oo_sa_xattr = NULL;
620                 }
621                 sa_buf_rele(obj->oo_db, osd_obj_tag);
622                 list_del(&obj->oo_sa_linkage);
623                 obj->oo_db = NULL;
624         }
625 }
626
627 /*
628  * Concurrency: ->loo_object_release() is called under site spin-lock.
629  */
630 static void osd_object_release(const struct lu_env *env,
631                                struct lu_object *l)
632 {
633 }
634
635 /*
636  * Concurrency: shouldn't matter.
637  */
638 static int osd_object_print(const struct lu_env *env, void *cookie,
639                             lu_printer_t p, const struct lu_object *l)
640 {
641         struct osd_object *o = osd_obj(l);
642
643         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
644 }
645
646 static void osd_object_read_lock(const struct lu_env *env,
647                                  struct dt_object *dt, unsigned role)
648 {
649         struct osd_object *obj = osd_dt_obj(dt);
650
651         LASSERT(osd_invariant(obj));
652
653         down_read(&obj->oo_sem);
654 }
655
656 static void osd_object_write_lock(const struct lu_env *env,
657                                   struct dt_object *dt, unsigned role)
658 {
659         struct osd_object *obj = osd_dt_obj(dt);
660
661         LASSERT(osd_invariant(obj));
662
663         down_write(&obj->oo_sem);
664 }
665
666 static void osd_object_read_unlock(const struct lu_env *env,
667                                    struct dt_object *dt)
668 {
669         struct osd_object *obj = osd_dt_obj(dt);
670
671         LASSERT(osd_invariant(obj));
672         up_read(&obj->oo_sem);
673 }
674
675 static void osd_object_write_unlock(const struct lu_env *env,
676                                     struct dt_object *dt)
677 {
678         struct osd_object *obj = osd_dt_obj(dt);
679
680         LASSERT(osd_invariant(obj));
681         up_write(&obj->oo_sem);
682 }
683
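/*
 * Probe whether oo_sem is held: return 0 only if the write lock could be
 * taken (it is dropped again immediately), otherwise report it as locked.
 */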
684 static int osd_object_write_locked(const struct lu_env *env,
685                                    struct dt_object *dt)
686 {
687         struct osd_object *obj = osd_dt_obj(dt);
688         int rc = 1;
689
690         LASSERT(osd_invariant(obj));
691
692         if (down_write_trylock(&obj->oo_sem)) {
693                 rc = 0;
694                 up_write(&obj->oo_sem);
695         }
696         return rc;
697 }
698
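/*
 * Return the attributes cached in oo_attr; the block count and block size
 * are always refreshed from the DMU via sa_object_size().
 */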
699 static int osd_attr_get(const struct lu_env *env,
700                         struct dt_object *dt,
701                         struct lu_attr *attr)
702 {
703         struct osd_object       *obj = osd_dt_obj(dt);
704         uint64_t                 blocks;
705         uint32_t                 blksize;
706
707         LASSERT(dt_object_exists(dt));
708         LASSERT(osd_invariant(obj));
709         LASSERT(obj->oo_db);
710
711         read_lock(&obj->oo_attr_lock);
712         *attr = obj->oo_attr;
713         read_unlock(&obj->oo_attr_lock);
714
715         /* with ZFS_DEBUG zrl_add_debug() called by DB_DNODE_ENTER()
716          * from within sa_object_size() can block on a mutex, so
717          * we can't call sa_object_size() while holding the rwlock */
718         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
719         /* we do not control the size of indices, so always calculate
720          * it from the number of blocks reported by the DMU */
721         if (S_ISDIR(attr->la_mode))
722                 attr->la_size = 512 * blocks;
723         /* Block size may not be set; suggest maximal I/O transfers. */
724         if (blksize == 0)
725                 blksize = osd_spa_maxblocksize(
726                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
727
728         attr->la_blksize = blksize;
729         attr->la_blocks = blocks;
730         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
731
732         return 0;
733 }
734
735 /* Simple wrapper on top of the qsd API which implements the quota transfer for
736  * osd setattr needs. As a reminder, only the root user can change the ownership
737  * of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
738 static inline int qsd_transfer(const struct lu_env *env,
739                                struct qsd_instance *qsd,
740                                struct lquota_trans *trans, int qtype,
741                                __u64 orig_id, __u64 new_id, __u64 bspace,
742                                struct lquota_id_info *qi)
743 {
744         int     rc;
745
746         if (unlikely(qsd == NULL))
747                 return 0;
748
749         LASSERT(qtype >= 0 && qtype < MAXQUOTAS);
750         qi->lqi_type = qtype;
751
752         /* inode accounting */
753         qi->lqi_is_blk = false;
754
755         /* one more inode for the new owner ... */
756         qi->lqi_id.qid_uid = new_id;
757         qi->lqi_space      = 1;
758         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
759         if (rc == -EDQUOT || rc == -EINPROGRESS)
760                 rc = 0;
761         if (rc)
762                 return rc;
763
764         /* and one less inode for the current id */
765         qi->lqi_id.qid_uid = orig_id;
766         qi->lqi_space      = -1;
767         /* can't get EDQUOT when reducing usage */
768         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
769         if (rc == -EINPROGRESS)
770                 rc = 0;
771         if (rc)
772                 return rc;
773
774         /* block accounting */
775         qi->lqi_is_blk = true;
776
777         /* more blocks for the new owner ... */
778         qi->lqi_id.qid_uid = new_id;
779         qi->lqi_space      = bspace;
780         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
781         if (rc == -EDQUOT || rc == -EINPROGRESS)
782                 rc = 0;
783         if (rc)
784                 return rc;
785
786         /* and finally less blocks for the current owner */
787         qi->lqi_id.qid_uid = orig_id;
788         qi->lqi_space      = -bspace;
789         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
790         /* can't get EDQUOT when reducing usage */
791         if (rc == -EINPROGRESS)
792                 rc = 0;
793         return rc;
794 }
795
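/*
 * Declare an attribute update: reserve the SA change and, when the uid or
 * gid changes, the inode accounting ZAP updates plus the quota transfer
 * between the old and new owner.
 */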
796 static int osd_declare_attr_set(const struct lu_env *env,
797                                 struct dt_object *dt,
798                                 const struct lu_attr *attr,
799                                 struct thandle *handle)
800 {
801         struct osd_thread_info  *info = osd_oti_get(env);
802         char                    *buf = osd_oti_get(env)->oti_str;
803         struct osd_object       *obj = osd_dt_obj(dt);
804         struct osd_device       *osd = osd_obj2dev(obj);
805         struct osd_thandle      *oh;
806         uint64_t                 bspace;
807         uint32_t                 blksize;
808         int                      rc;
809         ENTRY;
810
811         if (!dt_object_exists(dt)) {
812                 /* XXX: sanity check that object creation is declared */
813                 RETURN(0);
814         }
815
816         LASSERT(handle != NULL);
817         LASSERT(osd_invariant(obj));
818
819         oh = container_of0(handle, struct osd_thandle, ot_super);
820
821         LASSERT(obj->oo_sa_hdl != NULL);
822         LASSERT(oh->ot_tx != NULL);
823         dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
824         if (oh->ot_tx->tx_err != 0)
825                 RETURN(-oh->ot_tx->tx_err);
826
827         sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
828         bspace = toqb(bspace * blksize);
829
830         if (attr && attr->la_valid & LA_UID) {
831                 /* account for user inode tracking ZAP update */
832                 dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
833                 dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);
834
835                 /* quota enforcement for user */
836                 if (attr->la_uid != obj->oo_attr.la_uid) {
837                         rc = qsd_transfer(env, osd->od_quota_slave,
838                                           &oh->ot_quota_trans, USRQUOTA,
839                                           obj->oo_attr.la_uid, attr->la_uid,
840                                           bspace, &info->oti_qi);
841                         if (rc)
842                                 RETURN(rc);
843                 }
844         }
845         if (attr && attr->la_valid & LA_GID) {
846                 /* account for group inode tracking ZAP update */
847                 dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
848                 dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);
849
850                 /* quota enforcement for group */
851                 if (attr->la_gid != obj->oo_attr.la_gid) {
852                         rc = qsd_transfer(env, osd->od_quota_slave,
853                                           &oh->ot_quota_trans, GRPQUOTA,
854                                           obj->oo_attr.la_gid, attr->la_gid,
855                                           bspace, &info->oti_qi);
856                         if (rc)
857                                 RETURN(rc);
858                 }
859         }
860
861         RETURN(0);
862 }
863
864 /*
865  * Set the attributes of an object
866  *
867  * The transaction passed to this routine must have
868  * dmu_tx_hold_bonus(tx, oid) called and then assigned
869  * to a transaction group.
870  */
871 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
872                         const struct lu_attr *la, struct thandle *handle)
873 {
874         struct osd_object       *obj = osd_dt_obj(dt);
875         struct osd_device       *osd = osd_obj2dev(obj);
876         struct osd_thandle      *oh;
877         struct osa_attr         *osa = &osd_oti_get(env)->oti_osa;
878         sa_bulk_attr_t          *bulk;
879         __u64                    valid = la->la_valid;
880         int                      cnt;
881         int                      rc = 0;
882
883         ENTRY;
884         LASSERT(handle != NULL);
885         LASSERT(dt_object_exists(dt));
886         LASSERT(osd_invariant(obj));
887         LASSERT(obj->oo_sa_hdl);
888
889         oh = container_of0(handle, struct osd_thandle, ot_super);
890         /* Assert that the transaction has been assigned to a
891            transaction group. */
892         LASSERT(oh->ot_tx->tx_txg != 0);
893
894         /* Only allow set size for regular file */
895         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
896                 valid &= ~(LA_SIZE | LA_BLOCKS);
897
898         if (valid == 0)
899                 RETURN(0);
900
901         OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 10);
902         if (bulk == NULL)
903                 RETURN(-ENOMEM);
904
905         /* do both accounting updates outside oo_attr_lock below */
906         if ((valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
907                 /* Update user accounting. Failure isn't fatal, but we still
908                  * log an error message */
909                 rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
910                                         la->la_uid, 1, oh->ot_tx);
911                 if (rc)
912                         CERROR("%s: failed to update accounting ZAP for user "
913                                 "%d (%d)\n", osd->od_svname, la->la_uid, rc);
914                 rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
915                                         obj->oo_attr.la_uid, -1, oh->ot_tx);
916                 if (rc)
917                         CERROR("%s: failed to update accounting ZAP for user "
918                                 "%d (%d)\n", osd->od_svname,
919                                 obj->oo_attr.la_uid, rc);
920         }
921         if ((valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
922                 /* Update group accounting. Failure isn't fatal, but we still
923                  * log an error message */
924                 rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
925                                         la->la_gid, 1, oh->ot_tx);
926                 if (rc)
927                         CERROR("%s: failed to update accounting ZAP for group "
928                                 "%d (%d)\n", osd->od_svname, la->la_gid, rc);
929                 rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
930                                         obj->oo_attr.la_gid, -1, oh->ot_tx);
931                 if (rc)
932                         CERROR("%s: failed to update accounting ZAP for group "
933                                 "%d (%d)\n", osd->od_svname,
934                                 obj->oo_attr.la_gid, rc);
935         }
936
937         write_lock(&obj->oo_attr_lock);
938         cnt = 0;
939         if (valid & LA_ATIME) {
940                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
941                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
942                                  osa->atime, 16);
943         }
944         if (valid & LA_MTIME) {
945                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
946                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
947                                  osa->mtime, 16);
948         }
949         if (valid & LA_CTIME) {
950                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
951                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
952                                  osa->ctime, 16);
953         }
954         if (valid & LA_MODE) {
955                 /* mode is stored along with the file type, so preserve the type bits */
956                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
957                         (la->la_mode & ~S_IFMT);
958                 osa->mode = obj->oo_attr.la_mode;
959                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
960                                  &osa->mode, 8);
961         }
962         if (valid & LA_SIZE) {
963                 osa->size = obj->oo_attr.la_size = la->la_size;
964                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
965                                  &osa->size, 8);
966         }
967         if (valid & LA_NLINK) {
968                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
969                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
970                                  &osa->nlink, 8);
971         }
972         if (valid & LA_RDEV) {
973                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
974                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
975                                  &osa->rdev, 8);
976         }
977         if (valid & LA_FLAGS) {
978                 osa->flags = attrs_fs2zfs(la->la_flags);
979                 /* many flags are not supported by zfs, so ensure a good cached
980                  * copy */
981                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
982                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
983                                  &osa->flags, 8);
984         }
985         if (valid & LA_UID) {
986                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
987                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
988                                  &osa->uid, 8);
989         }
990         if (valid & LA_GID) {
991                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
992                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
993                                  &osa->gid, 8);
994         }
995         obj->oo_attr.la_valid |= valid;
996         write_unlock(&obj->oo_attr_lock);
997
998         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
999
1000         OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 10);
1001         RETURN(rc);
1002 }
1003
1004 /*
1005  * Object creation.
1006  *
1007  * XXX temporary solution.
1008  */
1009
1010 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1011                         struct dt_object *parent, struct dt_object *child,
1012                         umode_t child_mode)
1013 {
1014         LASSERT(ah);
1015
1016         ah->dah_parent = parent;
1017         ah->dah_mode = child_mode;
1018 }
1019
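/*
 * Declare object creation: the new dnode or ZAP itself, its entry in the
 * FID mapping ZAP, both inode accounting ZAPs, the SA area, the initial
 * LMA xattr and quota for one more inode.
 */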
1020 static int osd_declare_object_create(const struct lu_env *env,
1021                                      struct dt_object *dt,
1022                                      struct lu_attr *attr,
1023                                      struct dt_allocation_hint *hint,
1024                                      struct dt_object_format *dof,
1025                                      struct thandle *handle)
1026 {
1027         char                    *buf = osd_oti_get(env)->oti_str;
1028         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1029         struct osd_object       *obj = osd_dt_obj(dt);
1030         struct osd_device       *osd = osd_obj2dev(obj);
1031         struct osd_thandle      *oh;
1032         uint64_t                 zapid;
1033         int                      rc;
1034         ENTRY;
1035
1036         LASSERT(dof);
1037
1038         switch (dof->dof_type) {
1039                 case DFT_REGULAR:
1040                 case DFT_SYM:
1041                 case DFT_NODE:
1042                         if (obj->oo_dt.do_body_ops == NULL)
1043                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1044                         break;
1045                 default:
1046                         break;
1047         }
1048
1049         LASSERT(handle != NULL);
1050         oh = container_of0(handle, struct osd_thandle, ot_super);
1051         LASSERT(oh->ot_tx != NULL);
1052
1053         switch (dof->dof_type) {
1054                 case DFT_DIR:
1055                         dt->do_index_ops = &osd_dir_ops;
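                        /* fall through: a directory also needs the ZAP hold below */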
1056                 case DFT_INDEX:
1057                         /* for zap create */
1058                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, 1, NULL);
1059                         break;
1060                 case DFT_REGULAR:
1061                 case DFT_SYM:
1062                 case DFT_NODE:
1063                         /* first, we'll create new object */
1064                         dmu_tx_hold_bonus(oh->ot_tx, DMU_NEW_OBJECT);
1065                         break;
1066
1067                 default:
1068                         LBUG();
1069                         break;
1070         }
1071
1072         /* and we'll add it to the appropriate FID mapping ZAP */
1073         zapid = osd_get_name_n_idx(env, osd, fid, buf);
1074         dmu_tx_hold_bonus(oh->ot_tx, zapid);
1075         dmu_tx_hold_zap(oh->ot_tx, zapid, TRUE, buf);
1076
1077         /* we will also update inode accounting ZAPs */
1078         dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
1079         dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);
1080         dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
1081         dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);
1082
1083         dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
1084
1085         __osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
1086                                 XATTR_NAME_LMA, oh);
1087
1088         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
1089                                false, NULL, false);
1090         RETURN(rc);
1091 }
1092
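/*
 * Initialize the full set of ZPL SA attributes on a freshly created dnode
 * from the given lu_attr; the creation time comes from the current time
 * and the generation from the transaction group.
 */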
1093 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1094                     uint64_t oid, dmu_tx_t *tx, struct lu_attr *la,
1095                     uint64_t parent)
1096 {
1097         sa_bulk_attr_t  *bulk;
1098         sa_handle_t     *sa_hdl;
1099         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1100         uint64_t         gen;
1101         uint64_t         crtime[2];
1102         timestruc_t      now;
1103         int              cnt;
1104         int              rc;
1105
1106         gethrestime(&now);
1107         gen = dmu_tx_get_txg(tx);
1108
1109         ZFS_TIME_ENCODE(&now, crtime);
1110
1111         osa->atime[0] = la->la_atime;
1112         osa->ctime[0] = la->la_ctime;
1113         osa->mtime[0] = la->la_mtime;
1114         osa->mode = la->la_mode;
1115         osa->uid = la->la_uid;
1116         osa->gid = la->la_gid;
1117         osa->rdev = la->la_rdev;
1118         osa->nlink = la->la_nlink;
1119         osa->flags = attrs_fs2zfs(la->la_flags);
1120         osa->size  = la->la_size;
1121
1122         /* Now add in all of the "SA" attributes */
1123         rc = -sa_handle_get(osd->od_os, oid, NULL, SA_HDL_PRIVATE, &sa_hdl);
1124         if (rc)
1125                 return rc;
1126
1127         OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 13);
1128         if (bulk == NULL) {
1129                 rc = -ENOMEM;
1130                 goto out;
1131         }
1132         /*
1133          * we need to create all of the SAs below at object creation time.
1134          *
1135          * XXX The attribute order matters since the accounting callback relies
1136          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1137          * look up the UID/GID attributes. Moreover, the callback does not seem
1138          * to support the spill block.
1139          * We define attributes in the same order as SA_*_OFFSET in order to
1140          * work around the problem. See ORI-610.
1141          */
1142         cnt = 0;
1143         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1144         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1145         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1146         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1147         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1148         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1149         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1150         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1151         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1152         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1153         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
1154         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1155         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1156
1157         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1158
1159         OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 13);
1160 out:
1161         sa_handle_destroy(sa_hdl);
1162         return rc;
1163 }
1164
1165 /*
1166  * The transaction passed to this routine must have
1167  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1168  * to a transaction group.
1169  */
1170 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
1171                         dmu_buf_t **dbp, dmu_tx_t *tx, struct lu_attr *la,
1172                         uint64_t parent)
1173 {
1174         uint64_t             oid;
1175         int                  rc;
1176         struct osd_device   *osd = osd_obj2dev(obj);
1177         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1178         dmu_object_type_t    type = DMU_OT_PLAIN_FILE_CONTENTS;
1179
1180         /* Assert that the transaction has been assigned to a
1181            transaction group. */
1182         LASSERT(tx->tx_txg != 0);
1183
1184         /* Use DMU_OTN_UINT8_METADATA for local objects so their data blocks
1185          * will get an additional ditto copy */
1186         if (unlikely(S_ISREG(la->la_mode) &&
1187                      fid_seq_is_local_file(fid_seq(fid))))
1188                 type = DMU_OTN_UINT8_METADATA;
1189
1190         /* Create a new DMU object. */
1191         oid = dmu_object_alloc(osd->od_os, type, 0,
1192                                DMU_OT_SA, DN_MAX_BONUSLEN, tx);
1193         rc = -sa_buf_hold(osd->od_os, oid, osd_obj_tag, dbp);
1194         LASSERTF(rc == 0, "sa_buf_hold "LPU64" failed: %d\n", oid, rc);
1195
1196         LASSERT(la->la_valid & LA_MODE);
1197         la->la_size = 0;
1198         la->la_nlink = 1;
1199
1200         rc = __osd_attr_init(env, osd, oid, tx, la, parent);
1201         if (rc != 0) {
1202                 sa_buf_rele(*dbp, osd_obj_tag);
1203                 *dbp = NULL;
1204                 dmu_object_free(osd->od_os, oid, tx);
1205                 return rc;
1206         }
1207
1208         return 0;
1209 }
1210
1211 /*
1212  * The transaction passed to this routine must have
1213  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1214  * to a transaction group.
1215  *
1216  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1217  * This is fine for directories today, because storing the FID in the dirent
1218  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1219  * then we might need to re-evaluate the use of this flag and instead do
1220  * a conversion from the different internal ZAP hash formats being used. */
1221 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1222                      dmu_buf_t **zap_dbp, dmu_tx_t *tx,
1223                      struct lu_attr *la, uint64_t parent, zap_flags_t flags)
1224 {
1225         uint64_t oid;
1226         int      rc;
1227
1228         /* Assert that the transaction has been assigned to a
1229            transaction group. */
1230         LASSERT(tx->tx_txg != 0);
1231
1232         oid = zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1233                                DMU_OT_DIRECTORY_CONTENTS,
1234                                14, /* == ZFS fzap_default_block_shift */
1235                                DN_MAX_INDBLKSHIFT, /* indirect block shift */
1236                                DMU_OT_SA, DN_MAX_BONUSLEN, tx);
1237
1238         rc = -sa_buf_hold(osd->od_os, oid, osd_obj_tag, zap_dbp);
1239         if (rc)
1240                 return rc;
1241
1242         LASSERT(la->la_valid & LA_MODE);
1243         la->la_size = 2;
1244         la->la_nlink = 1;
1245
1246         return __osd_attr_init(env, osd, oid, tx, la, parent);
1247 }
1248
1249 static dmu_buf_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1250                             struct lu_attr *la, uint64_t parent,
1251                             struct osd_thandle *oh)
1252 {
1253         dmu_buf_t *db;
1254         int        rc;
1255
1256         /* An index file should be created as a regular file in order not to
1257          * confuse the ZPL, which could interpret it as a directory.
1258          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1259          * binary keys */
1260         LASSERT(S_ISREG(la->la_mode));
1261         rc = __osd_zap_create(env, osd_obj2dev(obj), &db, oh->ot_tx, la, parent,
1262                               ZAP_FLAG_UINT64_KEY);
1263         if (rc)
1264                 return ERR_PTR(rc);
1265         return db;
1266 }
1267
1268 static dmu_buf_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1269                             struct lu_attr *la, uint64_t parent,
1270                             struct osd_thandle *oh)
1271 {
1272         dmu_buf_t *db;
1273         int        rc;
1274
1275         LASSERT(S_ISDIR(la->la_mode));
1276         rc = __osd_zap_create(env, osd_obj2dev(obj), &db,
1277                               oh->ot_tx, la, parent, 0);
1278         if (rc)
1279                 return ERR_PTR(rc);
1280         return db;
1281 }
1282
1283 static dmu_buf_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1284                             struct lu_attr *la, uint64_t parent,
1285                             struct osd_thandle *oh)
1286 {
1287         dmu_buf_t         *db;
1288         int                rc;
1289         struct osd_device *osd = osd_obj2dev(obj);
1290
1291         LASSERT(S_ISREG(la->la_mode));
1292         rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
1293         if (rc)
1294                 return ERR_PTR(rc);
1295
1296         /*
1297          * XXX: This heuristic is non-optimal.  It would be better to
1298          * increase the blocksize up to osd->od_max_blksz during the write.
1299          * This is exactly how the ZPL behaves and it ensures that the right
1300          * blocksize is selected based on the file size rather than
1301          * making broad assumptions based on the osd type.
1302          */
1303         if (!lu_device_is_md(osd2lu_dev(osd))) {
1304                 rc = -dmu_object_set_blocksize(osd->od_os, db->db_object,
1305                                                osd->od_max_blksz, 0, oh->ot_tx);
1306                 if (unlikely(rc)) {
1307                         CERROR("%s: can't change blocksize: %d\n",
1308                                osd->od_svname, rc);
1309                         return ERR_PTR(rc);
1310                 }
1311         }
1312
1313         return db;
1314 }
1315
1316 static dmu_buf_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1317                             struct lu_attr *la, uint64_t parent,
1318                             struct osd_thandle *oh)
1319 {
1320         dmu_buf_t *db;
1321         int        rc;
1322
1323         LASSERT(S_ISLNK(la->la_mode));
1324         rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
1325         if (rc)
1326                 return ERR_PTR(rc);
1327         return db;
1328 }
1329
1330 static dmu_buf_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1331                             struct lu_attr *la, uint64_t parent,
1332                             struct osd_thandle *oh)
1333 {
1334         dmu_buf_t *db;
1335         int        rc;
1336
1337         la->la_valid = LA_MODE;
1338         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1339                 la->la_valid |= LA_RDEV;
1340
1341         rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
1342         if (rc)
1343                 return ERR_PTR(rc);
1344         return db;
1345 }
1346
1347 typedef dmu_buf_t *(*osd_obj_type_f)(const struct lu_env *env,
1348                                      struct osd_object *obj,
1349                                      struct lu_attr *la,
1350                                      uint64_t parent,
1351                                      struct osd_thandle *oh);
1352
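/*
 * Map a dt_object_format type to the helper creating the corresponding DMU
 * object: ZAP-based directory/index, plain file, symlink or device node.
 */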
1353 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1354 {
1355         osd_obj_type_f result;
1356
1357         switch (type) {
1358         case DFT_DIR:
1359                 result = osd_mkdir;
1360                 break;
1361         case DFT_INDEX:
1362                 result = osd_mkidx;
1363                 break;
1364         case DFT_REGULAR:
1365                 result = osd_mkreg;
1366                 break;
1367         case DFT_SYM:
1368                 result = osd_mksym;
1369                 break;
1370         case DFT_NODE:
1371                 result = osd_mknod;
1372                 break;
1373         default:
1374                 LBUG();
1375                 break;
1376         }
1377         return result;
1378 }
1379
1380 /*
1381  * Primitives for directory (i.e. ZAP) handling
1382  */
1383 static inline int osd_init_lma(const struct lu_env *env, struct osd_object *obj,
1384                                const struct lu_fid *fid, struct osd_thandle *oh)
1385 {
1386         struct osd_thread_info  *info = osd_oti_get(env);
1387         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1388         struct lu_buf            buf;
1389         int rc;
1390
1391         lustre_lma_init(lma, fid, 0, 0);
1392         lustre_lma_swab(lma);
1393         buf.lb_buf = lma;
1394         buf.lb_len = sizeof(*lma);
1395
1396         rc = osd_xattr_set_internal(env, obj, &buf, XATTR_NAME_LMA,
1397                                     LU_XATTR_CREATE, oh);
1398
1399         return rc;
1400 }
1401
1402 /*
1403  * Concurrency: @dt is write locked.
1404  */
1405 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1406                              struct lu_attr *attr,
1407                              struct dt_allocation_hint *hint,
1408                              struct dt_object_format *dof,
1409                              struct thandle *th)
1410 {
1411         struct zpl_direntry     *zde = &osd_oti_get(env)->oti_zde.lzd_reg;
1412         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1413         struct osd_object       *obj = osd_dt_obj(dt);
1414         struct osd_device       *osd = osd_obj2dev(obj);
1415         char                    *buf = osd_oti_get(env)->oti_str;
1416         struct osd_thandle      *oh;
1417         dmu_buf_t               *db;
1418         uint64_t                 zapid;
1419         int                      rc;
1420
1421         ENTRY;
1422
1423         /* concurrent create declarations should not see
1424          * the object in an inconsistent state (db, attr, etc.);
1425          * in the common case acquiring oo_guard is cheap */
1426         down(&obj->oo_guard);
1427
1428         LASSERT(osd_invariant(obj));
1429         LASSERT(!dt_object_exists(dt));
1430         LASSERT(dof != NULL);
1431
1432         LASSERT(th != NULL);
1433         oh = container_of0(th, struct osd_thandle, ot_super);
1434
1435         /*
1436          * XXX missing: Quota handling.
1437          */
1438
1439         LASSERT(obj->oo_db == NULL);
1440
1441         /* to follow the ZFS on-disk format we need
1442          * to initialize the parent dnode properly */
1443         zapid = 0;
1444         if (hint != NULL && hint->dah_parent != NULL &&
1445             !dt_object_remote(hint->dah_parent))
1446                 zapid = osd_dt_obj(hint->dah_parent)->oo_db->db_object;
1447
        db = osd_create_type_f(dof->dof_type)(env, obj, attr, zapid, oh);
        if (IS_ERR(db))
                GOTO(out, rc = PTR_ERR(db));

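        /* Describe the new dnode with a ZAP directory entry; this entry
         * becomes the value stored in the object index below. */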
        zde->zde_pad = 0;
        zde->zde_dnode = db->db_object;
        zde->zde_type = IFTODT(attr->la_mode & S_IFMT);

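        /* Record the new object in the object index (OI) ZAP: the key is the
         * FID-based name produced by osd_get_name_n_idx(), the value is the
         * directory entry pointing at the new dnode. */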
        zapid = osd_get_name_n_idx(env, osd, fid, buf);

        rc = -zap_add(osd->od_os, zapid, buf, 8, 1, zde, oh->ot_tx);
        if (rc)
                GOTO(out, rc);

        /* Add the new object to inode accounting.
         * Errors are not considered fatal. */
        rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
                                (attr->la_valid & LA_UID) ? attr->la_uid : 0, 1,
                                oh->ot_tx);
        if (rc)
                CERROR("%s: failed to add "DFID" to accounting ZAP for usr %d "
                        "(%d)\n", osd->od_svname, PFID(fid), attr->la_uid, rc);
        rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
                                (attr->la_valid & LA_GID) ? attr->la_gid : 0, 1,
                                oh->ot_tx);
        if (rc)
                CERROR("%s: failed to add "DFID" to accounting ZAP for grp %d "
                        "(%d)\n", osd->od_svname, PFID(fid), attr->la_gid, rc);

        /* configure new osd object */
        obj->oo_db = db;
        rc = osd_object_init0(env, obj);
        LASSERT(ergo(rc == 0, dt_object_exists(dt)));
        LASSERT(osd_invariant(obj));

        rc = osd_init_lma(env, obj, fid, oh);
        if (rc) {
                CERROR("%s: can not set LMA on "DFID": rc = %d\n",
                       osd->od_svname, PFID(fid), rc);
                /* ignore errors during LMA initialization */
                rc = 0;
        }

out:
        up(&obj->oo_guard);
        RETURN(rc);
}

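/*
 * Reference (nlink) changes only update the SA-backed link count, so
 * declaring a ref_add/ref_del is the same as declaring an attr_set.
 */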
static int osd_declare_object_ref_add(const struct lu_env *env,
                                      struct dt_object *dt,
                                      struct thandle *th)
{
        return osd_declare_attr_set(env, dt, NULL, th);
}

/*
 * Concurrency: @dt is write locked.
 */
static int osd_object_ref_add(const struct lu_env *env,
                              struct dt_object *dt,
                              struct thandle *handle)
{
        struct osd_object       *obj = osd_dt_obj(dt);
        struct osd_thandle      *oh;
        struct osd_device       *osd = osd_obj2dev(obj);
        uint64_t                 nlink;
        int rc;

        ENTRY;

        LASSERT(osd_invariant(obj));
        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_sa_hdl != NULL);

        oh = container_of0(handle, struct osd_thandle, ot_super);

        write_lock(&obj->oo_attr_lock);
        nlink = ++obj->oo_attr.la_nlink;
        write_unlock(&obj->oo_attr_lock);

        rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
        return rc;
}

static int osd_declare_object_ref_del(const struct lu_env *env,
                                      struct dt_object *dt,
                                      struct thandle *handle)
{
        return osd_declare_attr_set(env, dt, NULL, handle);
}

/*
 * Concurrency: @dt is write locked.
 */
static int osd_object_ref_del(const struct lu_env *env,
                              struct dt_object *dt,
                              struct thandle *handle)
{
        struct osd_object       *obj = osd_dt_obj(dt);
        struct osd_thandle      *oh;
        struct osd_device       *osd = osd_obj2dev(obj);
        uint64_t                 nlink;
        int                      rc;

        ENTRY;

        LASSERT(osd_invariant(obj));
        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_sa_hdl != NULL);

        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));

        write_lock(&obj->oo_attr_lock);
        nlink = --obj->oo_attr.la_nlink;
        write_unlock(&obj->oo_attr_lock);

        rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
        return rc;
}

static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
                           __u64 start, __u64 end)
{
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        ENTRY;

        /* XXX: no other option than syncing the whole filesystem until we
         * support ZIL.  If the object tracked the txg that it was last
         * modified in, it could pass that txg here instead of "0".  Maybe
         * the changes are already committed, so no wait is needed at all? */
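        /* A possible shape for that optimization (sketch only; it assumes a
         * hypothetical oo_last_txg field updated whenever the object is
         * modified):
         *
         *      txg_wait_synced(dmu_objset_pool(osd->od_os),
         *                      osd_dt_obj(dt)->oo_last_txg);
         *
         * which would avoid forcing out dirty data of unrelated objects. */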
        txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);

        RETURN(0);
}

static struct dt_object_operations osd_obj_ops = {
        .do_read_lock           = osd_object_read_lock,
        .do_write_lock          = osd_object_write_lock,
        .do_read_unlock         = osd_object_read_unlock,
        .do_write_unlock        = osd_object_write_unlock,
        .do_write_locked        = osd_object_write_locked,
        .do_attr_get            = osd_attr_get,
        .do_declare_attr_set    = osd_declare_attr_set,
        .do_attr_set            = osd_attr_set,
        .do_ah_init             = osd_ah_init,
        .do_declare_create      = osd_declare_object_create,
        .do_create              = osd_object_create,
        .do_declare_destroy     = osd_declare_object_destroy,
        .do_destroy             = osd_object_destroy,
        .do_index_try           = osd_index_try,
        .do_declare_ref_add     = osd_declare_object_ref_add,
        .do_ref_add             = osd_object_ref_add,
        .do_declare_ref_del     = osd_declare_object_ref_del,
        .do_ref_del             = osd_object_ref_del,
        .do_xattr_get           = osd_xattr_get,
        .do_declare_xattr_set   = osd_declare_xattr_set,
        .do_xattr_set           = osd_xattr_set,
        .do_declare_xattr_del   = osd_declare_xattr_del,
        .do_xattr_del           = osd_xattr_del,
        .do_xattr_list          = osd_xattr_list,
        .do_object_sync         = osd_object_sync,
};

static struct lu_object_operations osd_lu_obj_ops = {
        .loo_object_init        = osd_object_init,
        .loo_object_delete      = osd_object_delete,
        .loo_object_release     = osd_object_release,
        .loo_object_free        = osd_object_free,
        .loo_object_print       = osd_object_print,
        .loo_object_invariant   = osd_object_invariant,
};

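/*
 * Attribute callback for the virtual otable iterator object: it has no
 * on-disk attributes of its own, so report none as valid.
 */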
static int osd_otable_it_attr_get(const struct lu_env *env,
                                  struct dt_object *dt,
                                  struct lu_attr *attr)
{
        attr->la_valid = 0;
        return 0;
}

static struct dt_object_operations osd_obj_otable_it_ops = {
        .do_attr_get    = osd_otable_it_attr_get,
        .do_index_try   = osd_index_try,
};