1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd-zfs/osd_object.c
33  *
34  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35  * Author: Mike Pershin <tappro@whamcloud.com>
36  * Author: Johann Lombardi <johann@whamcloud.com>
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <lustre_ver.h>
42 #include <libcfs/libcfs.h>
43 #include <obd_support.h>
44 #include <lustre_net.h>
45 #include <obd.h>
46 #include <obd_class.h>
47 #include <lustre_disk.h>
48 #include <lustre_fid.h>
49
50 #include "osd_internal.h"
51
52 #include <sys/dnode.h>
53 #include <sys/dbuf.h>
54 #include <sys/spa.h>
55 #include <sys/stat.h>
56 #include <sys/zap.h>
57 #include <sys/spa_impl.h>
58 #include <sys/zfs_znode.h>
59 #include <sys/dmu_tx.h>
60 #include <sys/dmu_objset.h>
61 #include <sys/dsl_prop.h>
62 #include <sys/sa_impl.h>
63 #include <sys/txg.h>
64
65 char *osd_obj_tag = "osd_object";
66
67 static struct dt_object_operations osd_obj_ops;
68 static struct lu_object_operations osd_lu_obj_ops;
69 extern struct dt_body_operations osd_body_ops;
70 static struct dt_object_operations osd_obj_otable_it_ops;
71
72 extern struct kmem_cache *osd_object_kmem;
73
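/*
 * Helpers for the per-object ZFS system-attribute (SA) handle.  Each
 * osd_object caches an sa_handle_t in oo_sa_hdl; osd_object_sa_fini()
 * below simply destroys that handle, while osd_object_sa_init() creates
 * it and caches the xattr directory object id.
 */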
74 static void
75 osd_object_sa_fini(struct osd_object *obj)
76 {
77         if (obj->oo_sa_hdl) {
78                 sa_handle_destroy(obj->oo_sa_hdl);
79                 obj->oo_sa_hdl = NULL;
80         }
81 }
82
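/*
 * Note on the error convention used throughout this file: ZFS/DMU calls
 * such as sa_lookup() return positive errno values, so they are negated
 * (e.g. "-sa_lookup(...)") to match the kernel/Lustre convention of
 * negative errnos.  Here -ENOENT just means the object has no xattr
 * directory yet, which is not treated as an error.
 */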
83 static int
84 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
85 {
86         int rc;
87
88         LASSERT(obj->oo_sa_hdl == NULL);
89         LASSERT(obj->oo_dn != NULL);
90
91         rc = osd_sa_handle_get(obj);
92         if (rc)
93                 return rc;
94
95         /* Cache the xattr object id, valid for the life of the object */
96         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
97         if (rc == -ENOENT) {
98                 obj->oo_xattr = ZFS_NO_OBJECT;
99                 rc = 0;
100         } else if (rc) {
101                 osd_object_sa_fini(obj);
102         }
103
104         return rc;
105 }
106
107 /*
108  * Add object to list of dirty objects in tx handle.
109  */
110 static void
111 osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
112 {
113         if (!list_empty(&obj->oo_sa_linkage))
114                 return;
115
116         down(&oh->ot_sa_lock);
117         write_lock(&obj->oo_attr_lock);
118         if (likely(list_empty(&obj->oo_sa_linkage)))
119                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
120         write_unlock(&obj->oo_attr_lock);
121         up(&oh->ot_sa_lock);
122 }
123
124 /*
125  * Release spill block dbuf hold for all dirty SAs.
126  */
127 void osd_object_sa_dirty_rele(struct osd_thandle *oh)
128 {
129         struct osd_object *obj;
130
131         down(&oh->ot_sa_lock);
132         while (!list_empty(&oh->ot_sa_list)) {
133                 obj = list_entry(oh->ot_sa_list.next,
134                                  struct osd_object, oo_sa_linkage);
135                 sa_spill_rele(obj->oo_sa_hdl);
136                 write_lock(&obj->oo_attr_lock);
137                 list_del_init(&obj->oo_sa_linkage);
138                 write_unlock(&obj->oo_attr_lock);
139         }
140         up(&oh->ot_sa_lock);
141 }
142
143 /*
144  * Update the SA and add the object to the dirty list.
145  */
146 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
147                          void *buf, uint32_t buflen, struct osd_thandle *oh)
148 {
149         int rc;
150
151         LASSERT(obj->oo_sa_hdl != NULL);
152         LASSERT(oh->ot_tx != NULL);
153
154         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
155         osd_object_sa_dirty_add(obj, oh);
156
157         return rc;
158 }
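
/*
 * Typical usage (a sketch based on callers in this file): update a single
 * SA from within an already-assigned transaction, e.g.
 *
 *      rc = osd_object_sa_update(obj, SA_ZPL_SIZE(osd), &size, 8, oh);
 *
 * The object is queued on oh->ot_sa_list so that the spill-block hold is
 * dropped later via osd_object_sa_dirty_rele(), presumably when the
 * transaction is stopped.
 */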
159
160 /*
161  * Bulk update the SA and add the object to the dirty list.
162  */
163 static int
164 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
165                           int count, struct osd_thandle *oh)
166 {
167         int rc;
168
169         LASSERT(obj->oo_sa_hdl != NULL);
170         LASSERT(oh->ot_tx != NULL);
171
172         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
173         osd_object_sa_dirty_add(obj, oh);
174
175         return rc;
176 }
177
178 /*
179  * Retrieve the attributes of a DMU object
180  */
181 int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
182                           struct osd_object *obj, struct lu_attr *la)
183 {
184         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
185         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
186         int              cnt = 0;
187         int              rc;
188         ENTRY;
189
190         LASSERT(obj->oo_dn != NULL);
191
192         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
193                         LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
194
195         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
196         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
197         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
198         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
199         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
200         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
201         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
202         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
203         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
204         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
205
206         rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
207         if (rc)
208                 GOTO(out_sa, rc);
209
210         la->la_atime = osa->atime[0];
211         la->la_mtime = osa->mtime[0];
212         la->la_ctime = osa->ctime[0];
213         la->la_mode = osa->mode;
214         la->la_uid = osa->uid;
215         la->la_gid = osa->gid;
216         la->la_nlink = osa->nlink;
217         la->la_flags = attrs_zfs2fs(osa->flags);
218         la->la_size = osa->size;
219
220         /* Try to get extra flags from the LMA. Right now only the LMAI_ORPHAN
221          * flag is stored in the LMA, and only for orphan directories */
222         if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
223                 struct osd_thread_info *info = osd_oti_get(env);
224                 struct lustre_mdt_attrs *lma;
225                 struct lu_buf buf;
226
227                 lma = (struct lustre_mdt_attrs *)info->oti_buf;
228                 buf.lb_buf = lma;
229                 buf.lb_len = sizeof(info->oti_buf);
230                 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
231                 if (rc > 0) {
232                         rc = 0;
233                         lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
234                         obj->oo_lma_flags =
235                                 lma_to_lustre_flags(lma->lma_incompat);
236
237                 } else if (rc == -ENODATA) {
238                         rc = 0;
239                 }
240         }
241
242         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
243                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
244                 if (rc)
245                         GOTO(out_sa, rc);
246                 la->la_rdev = osa->rdev;
247                 la->la_valid |= LA_RDEV;
248         }
249 out_sa:
250
251         RETURN(rc);
252 }
253
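/*
 * Resolve a DMU object number to its dnode_t.  A hold is taken on the
 * bonus buffer and DB_DNODE_ENTER() keeps the dnode referenced; the
 * matching release is expected to happen via osd_dnode_rele() when the
 * object is deleted (see osd_object_delete() below).
 */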
254 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
255 {
256         dmu_buf_t *db;
257         dmu_buf_impl_t *dbi;
258         int rc;
259
260         rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
261         if (rc)
262                 return rc;
263
264         dbi = (dmu_buf_impl_t *)db;
265         DB_DNODE_ENTER(dbi);
266         *dnp = DB_DNODE(dbi);
267         LASSERT(*dnp != NULL);
268
269         return 0;
270 }
271
272 /*
273  * Concurrency: no concurrent access is possible that early in object
274  * life-cycle.
275  */
276 struct lu_object *osd_object_alloc(const struct lu_env *env,
277                                    const struct lu_object_header *hdr,
278                                    struct lu_device *d)
279 {
280         struct osd_object *mo;
281
282         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
283         if (mo != NULL) {
284                 struct lu_object *l;
285
286                 l = &mo->oo_dt.do_lu;
287                 dt_object_init(&mo->oo_dt, NULL, d);
288                 mo->oo_dt.do_ops = &osd_obj_ops;
289                 l->lo_ops = &osd_lu_obj_ops;
290                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
291                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
292                 init_rwsem(&mo->oo_sem);
293                 init_rwsem(&mo->oo_guard);
294                 rwlock_init(&mo->oo_attr_lock);
295                 mo->oo_destroy = OSD_DESTROY_NONE;
296                 return l;
297         } else {
298                 return NULL;
299         }
300 }
301
302 /*
303  * Concurrency: shouldn't matter.
304  */
305 int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
306 {
307         struct osd_device       *osd = osd_obj2dev(obj);
308         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
309         int                      rc = 0;
310         ENTRY;
311
312         if (obj->oo_dn == NULL)
313                 RETURN(0);
314
315         /* object exists */
316
317         rc = osd_object_sa_init(obj, osd);
318         if (rc)
319                 RETURN(rc);
320
321         /* cache attrs in object */
322         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
323         if (rc)
324                 RETURN(rc);
325
326         if (likely(!fid_is_acct(fid)))
327                 /* no body operations for accounting objects */
328                 obj->oo_dt.do_body_ops = &osd_body_ops;
329
330         /*
331          * initialize object before marking it existing
332          */
333         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
334
335         smp_mb();
336         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
337
338         RETURN(0);
339 }
340
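/*
 * Sanity-check the LMA (Lustre Metadata Attributes) xattr of an existing
 * object: swab it and refuse to use the object if it carries incompat
 * flags this code does not understand.  A missing LMA (-ENODATA) is
 * tolerated, e.g. for objects created before the LMA was introduced.
 */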
341 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
342 {
343         struct osd_thread_info  *info = osd_oti_get(env);
344         struct lu_buf           buf;
345         int                     rc;
346         struct lustre_mdt_attrs *lma;
347         ENTRY;
348
349         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
350         lma = (struct lustre_mdt_attrs *)info->oti_buf;
351         buf.lb_buf = lma;
352         buf.lb_len = sizeof(info->oti_buf);
353
354         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
355         if (rc > 0) {
356                 rc = 0;
357                 lustre_lma_swab(lma);
358                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
359                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
360                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
361                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
362                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
363                               PFID(lu_object_fid(&obj->oo_dt.do_lu)));
364                         rc = -EOPNOTSUPP;
365                 }
366         } else if (rc == -ENODATA) {
367                 /* the LMA xattr hasn't been initialized yet */
368                 rc = 0;
369         }
370
371         RETURN(rc);
372 }
373
374 /*
375  * Concurrency: no concurrent access is possible that early in object
376  * life-cycle.
377  */
378 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
379                            const struct lu_object_conf *conf)
380 {
381         struct osd_object       *obj = osd_obj(l);
382         struct osd_device       *osd = osd_obj2dev(obj);
383         uint64_t                 oid;
384         int                      rc;
385         ENTRY;
386
387         LASSERT(osd_invariant(obj));
388
389         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
390                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
391                 l->lo_header->loh_attr |= LOHA_EXISTS;
392                 RETURN(0);
393         }
394
395         if (conf != NULL && conf->loc_flags & LOC_F_NEW)
396                 GOTO(out, rc = 0);
397
398         rc = osd_fid_lookup(env, osd, lu_object_fid(l), &oid);
399         if (rc == 0) {
400                 LASSERT(obj->oo_dn == NULL);
401                 rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
402                 /* EEXIST will be returned if object is being deleted in ZFS */
403                 if (rc == -EEXIST) {
404                         rc = 0;
405                         GOTO(out, rc);
406                 }
407                 if (rc != 0) {
408                         CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
409                                osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
410                         GOTO(out, rc);
411                 }
412                 LASSERT(obj->oo_dn);
413                 rc = osd_object_init0(env, obj);
414                 if (rc != 0)
415                         GOTO(out, rc);
416
417                 rc = osd_check_lma(env, obj);
418                 if (rc != 0)
419                         GOTO(out, rc);
420         } else if (rc == -ENOENT) {
421                 rc = 0;
422         }
423         LASSERT(osd_invariant(obj));
424 out:
425         RETURN(rc);
426 }
427
428 /*
429  * Concurrency: no concurrent access is possible that late in object
430  * life-cycle.
431  */
432 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
433 {
434         struct osd_object *obj = osd_obj(l);
435
436         LASSERT(osd_invariant(obj));
437
438         dt_object_fini(&obj->oo_dt);
439         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
440 }
441
442 static int
443 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
444 {
445         int rc = -EBUSY;
446
447         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
448
449         /* the object is supposed to be exclusively locked by
450          * the caller (osd_destroy()), while the transaction
451          * (oh) is per-thread and not shared */
452         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
453                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
454                 rc = 0;
455         }
456
457         return rc;
458 }
459
460 /* Default to max data size covered by a level-1 indirect block */
461 static unsigned long osd_sync_destroy_max_size =
462         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
463 module_param(osd_sync_destroy_max_size, ulong, 0444);
464 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
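
/*
 * The default above works out to "number of block pointers in a level-1
 * indirect block" times the maximum block size: 2^(DN_MAX_INDBLKSHIFT -
 * SPA_BLKPTRSHIFT) pointers, each covering up to 2^SPA_MAXBLOCKSHIFT bytes.
 * With the common values DN_MAX_INDBLKSHIFT = 17 and SPA_BLKPTRSHIFT = 7
 * that is 1024 block pointers, i.e. 1024 * SPA_MAXBLOCKSIZE bytes (the
 * exact figure depends on the ZFS headers this is built against).
 */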
465
466 static inline void
467 osd_object_set_destroy_type(struct osd_object *obj)
468 {
469         /*
470          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
471          * only once and use it consistently thereafter.
472          */
473         down_write(&obj->oo_guard);
474         if (obj->oo_destroy == OSD_DESTROY_NONE) {
475                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
476                         obj->oo_destroy = OSD_DESTROY_SYNC;
477                 else /* Larger objects are destroyed asynchronously */
478                         obj->oo_destroy = OSD_DESTROY_ASYNC;
479         }
480         up_write(&obj->oo_guard);
481 }
482
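/*
 * Destroy is split into the usual declare/execute pair: osd_declare_destroy()
 * only reserves transaction credits (removal from the FID index ZAP, xattr
 * destruction, quota changes, and either a full object free or an insert
 * into the "unlinked" ZAP), while osd_destroy() below does the actual work
 * once the transaction has been started.
 */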
483 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
484                                struct thandle *th)
485 {
486         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
487         struct osd_object       *obj = osd_dt_obj(dt);
488         struct osd_device       *osd = osd_obj2dev(obj);
489         struct osd_thandle      *oh;
490         dnode_t *dn;
491         int                      rc;
492         uint64_t                 zapid;
493         ENTRY;
494
495         LASSERT(th != NULL);
496         LASSERT(dt_object_exists(dt));
497
498         oh = container_of0(th, struct osd_thandle, ot_super);
499         LASSERT(oh->ot_tx != NULL);
500
501         /* declare that we'll remove object from fid-dnode mapping */
502         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
503         osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
504
505         osd_declare_xattrs_destroy(env, obj, oh);
506
507         /* one less inode */
508         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
509                                obj->oo_attr.la_gid, -1, oh, false, NULL, false);
510         if (rc)
511                 RETURN(rc);
512
513         /* data to be truncated */
514         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
515                                obj->oo_attr.la_gid, 0, oh, true, NULL, false);
516         if (rc)
517                 RETURN(rc);
518
519         osd_object_set_destroy_type(obj);
520         if (obj->oo_destroy == OSD_DESTROY_SYNC)
521                 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
522                                  0, DMU_OBJECT_END);
523         else
524                 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
525                                 osd->od_unlinked, TRUE, NULL);
526
527         /* will help to find FID->ino when this object is being
528          * added to PENDING/ */
529         osd_idc_find_and_init(env, osd, obj);
530
531         RETURN(0);
532 }
533
534 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
535                        struct thandle *th)
536 {
537         struct osd_thread_info  *info = osd_oti_get(env);
538         char                    *buf = info->oti_str;
539         struct osd_object       *obj = osd_dt_obj(dt);
540         struct osd_device       *osd = osd_obj2dev(obj);
541         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
542         struct osd_thandle      *oh;
543         int                      rc;
544         uint64_t                 oid, zapid;
545         dnode_t *zdn;
546         ENTRY;
547
548         down_write(&obj->oo_guard);
549
550         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
551                 GOTO(out, rc = -ENOENT);
552
553         LASSERT(obj->oo_dn != NULL);
554
555         oh = container_of0(th, struct osd_thandle, ot_super);
556         LASSERT(oh != NULL);
557         LASSERT(oh->ot_tx != NULL);
558
559         /* remove the object's entry from its index ZAP (which ZAP that is depends on the FID) */
560         zapid = osd_get_name_n_idx(env, osd, fid, buf,
561                                    sizeof(info->oti_str), &zdn);
562         rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
563         if (rc) {
564                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
565                        osd->od_svname, buf, rc);
566                 GOTO(out, rc);
567         }
568
569         rc = osd_xattrs_destroy(env, obj, oh);
570         if (rc) {
571                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
572                        osd->od_svname, buf, rc);
573                 GOTO(out, rc);
574         }
575
576         oid = obj->oo_dn->dn_object;
577         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
578                 /* this may happen if the destroy wasn't declared
579                  * e.g. when the object is created and then destroyed
580                  * in the same transaction - we don't need additional
581                  * space for destroy specifically */
582                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
583                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
584                 if (rc)
585                         CERROR("%s: failed to free %s %llu: rc = %d\n",
586                                osd->od_svname, buf, oid, rc);
587         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
588                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
589                 if (rc)
590                         CERROR("%s: failed to free %s %llu: rc = %d\n",
591                                osd->od_svname, buf, oid, rc);
592         } else { /* asynchronous destroy */
593                 char *key = info->oti_key;
594
595                 rc = osd_object_unlinked_add(obj, oh);
596                 if (rc)
597                         GOTO(out, rc);
598
599                 snprintf(key, sizeof(info->oti_key), "%llx", oid);
600                 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
601                                  osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
602                 if (rc)
603                         CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
604                                osd->od_svname, buf, oid, rc);
605         }
606
607 out:
608         /* not needed in the cache anymore */
609         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
610         if (rc == 0)
611                 obj->oo_destroyed = 1;
612         up_write(&obj->oo_guard);
613         RETURN (0);
614 }
615
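/*
 * Note on the asynchronous path above: objects too large for a synchronous
 * free are recorded in the per-device "unlinked" ZAP, keyed by the dnode
 * number in hex, and the dnodes themselves are presumably freed later by a
 * background cleanup mechanism outside this file.
 */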
616 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
617 {
618         struct osd_object *obj = osd_obj(l);
619
620         if (obj->oo_dn != NULL) {
621                 osd_object_sa_fini(obj);
622                 if (obj->oo_sa_xattr) {
623                         nvlist_free(obj->oo_sa_xattr);
624                         obj->oo_sa_xattr = NULL;
625                 }
626                 osd_dnode_rele(obj->oo_dn);
627                 list_del(&obj->oo_sa_linkage);
628                 obj->oo_dn = NULL;
629         }
630 }
631
632 /*
633  * Concurrency: ->loo_object_release() is called under site spin-lock.
634  */
635 static void osd_object_release(const struct lu_env *env,
636                                struct lu_object *l)
637 {
638 }
639
640 /*
641  * Concurrency: shouldn't matter.
642  */
643 static int osd_object_print(const struct lu_env *env, void *cookie,
644                             lu_printer_t p, const struct lu_object *l)
645 {
646         struct osd_object *o = osd_obj(l);
647
648         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
649 }
650
651 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
652                           unsigned role)
653 {
654         struct osd_object *obj = osd_dt_obj(dt);
655
656         LASSERT(osd_invariant(obj));
657
658         down_read_nested(&obj->oo_sem, role);
659 }
660
661 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
662                            unsigned role)
663 {
664         struct osd_object *obj = osd_dt_obj(dt);
665
666         LASSERT(osd_invariant(obj));
667
668         down_write_nested(&obj->oo_sem, role);
669 }
670
671 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
672 {
673         struct osd_object *obj = osd_dt_obj(dt);
674
675         LASSERT(osd_invariant(obj));
676         up_read(&obj->oo_sem);
677 }
678
679 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
680 {
681         struct osd_object *obj = osd_dt_obj(dt);
682
683         LASSERT(osd_invariant(obj));
684         up_write(&obj->oo_sem);
685 }
686
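/*
 * Note the inverted-looking return value of osd_write_locked(): it returns
 * 1 when the write lock cannot be taken (i.e. the object is currently
 * locked by someone), and 0 when the trylock succeeds, in which case the
 * lock is dropped again immediately.
 */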
687 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
688 {
689         struct osd_object *obj = osd_dt_obj(dt);
690         int rc = 1;
691
692         LASSERT(osd_invariant(obj));
693
694         if (down_write_trylock(&obj->oo_sem)) {
695                 rc = 0;
696                 up_write(&obj->oo_sem);
697         }
698         return rc;
699 }
700
701 static int osd_attr_get(const struct lu_env *env,
702                         struct dt_object *dt,
703                         struct lu_attr *attr)
704 {
705         struct osd_object       *obj = osd_dt_obj(dt);
706         uint64_t                 blocks;
707         uint32_t                 blksize;
708         int                      rc = 0;
709
710         down_read(&obj->oo_guard);
711
712         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
713                 GOTO(out, rc = -ENOENT);
714
715         LASSERT(osd_invariant(obj));
716         LASSERT(obj->oo_dn);
717
718         read_lock(&obj->oo_attr_lock);
719         *attr = obj->oo_attr;
720         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
721                 attr->la_flags |= LUSTRE_ORPHAN_FL;
722         read_unlock(&obj->oo_attr_lock);
723
724         /* with ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
725          * from within sa_object_size() can block on a mutex, so
726          * we can't call sa_object_size() while holding the rwlock */
727         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
728         /* we do not control size of indices, so always calculate
729          * it from number of blocks reported by DMU */
730         if (S_ISDIR(attr->la_mode))
731                 attr->la_size = 512 * blocks;
732         /* Block size may be not set; suggest maximal I/O transfers. */
733         if (blksize == 0)
734                 blksize = osd_spa_maxblocksize(
735                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
736
737         attr->la_blksize = blksize;
738         attr->la_blocks = blocks;
739         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
740
741 out:
742         up_read(&obj->oo_guard);
743         return rc;
744 }
745
746 /* Simple wrapper on top of the qsd API which implements quota transfer for
747  * osd setattr needs. As a reminder, only the root user can change ownership
748  * of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
749 static inline int qsd_transfer(const struct lu_env *env,
750                                struct qsd_instance *qsd,
751                                struct lquota_trans *trans, int qtype,
752                                __u64 orig_id, __u64 new_id, __u64 bspace,
753                                struct lquota_id_info *qi)
754 {
755         int     rc;
756
757         if (unlikely(qsd == NULL))
758                 return 0;
759
760         LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
761         qi->lqi_type = qtype;
762
763         /* inode accounting */
764         qi->lqi_is_blk = false;
765
766         /* one more inode for the new owner ... */
767         qi->lqi_id.qid_uid = new_id;
768         qi->lqi_space      = 1;
769         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
770         if (rc == -EDQUOT || rc == -EINPROGRESS)
771                 rc = 0;
772         if (rc)
773                 return rc;
774
775         /* and one less inode for the current id */
776         qi->lqi_id.qid_uid = orig_id;
777         qi->lqi_space      = -1;
778         /* can't get EDQUOT when reducing usage */
779         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
780         if (rc == -EINPROGRESS)
781                 rc = 0;
782         if (rc)
783                 return rc;
784
785         /* block accounting */
786         qi->lqi_is_blk = true;
787
788         /* more blocks for the new owner ... */
789         qi->lqi_id.qid_uid = new_id;
790         qi->lqi_space      = bspace;
791         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
792         if (rc == -EDQUOT || rc == -EINPROGRESS)
793                 rc = 0;
794         if (rc)
795                 return rc;
796
797         /* and finally less blocks for the current owner */
798         qi->lqi_id.qid_uid = orig_id;
799         qi->lqi_space      = -bspace;
800         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
801         /* can't get EDQUOT when reducing usage */
802         if (rc == -EINPROGRESS)
803                 rc = 0;
804         return rc;
805 }
806
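/*
 * osd_declare_attr_set() below walks the transaction's existing holds
 * (oh->ot_tx->tx_holds) to check whether a bonus hold was already declared
 * for this dnode; only if not does it add dmu_tx_hold_bonus(), avoiding a
 * duplicate reservation.  Ownership changes additionally declare a quota
 * transfer for both the inode count and the currently used blocks.
 */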
807 static int osd_declare_attr_set(const struct lu_env *env,
808                                 struct dt_object *dt,
809                                 const struct lu_attr *attr,
810                                 struct thandle *handle)
811 {
812         struct osd_thread_info  *info = osd_oti_get(env);
813         struct osd_object       *obj = osd_dt_obj(dt);
814         struct osd_device       *osd = osd_obj2dev(obj);
815         dmu_tx_hold_t           *txh;
816         struct osd_thandle      *oh;
817         uint64_t                 bspace;
818         uint32_t                 blksize;
819         int                      rc = 0;
820         bool                     found;
821         ENTRY;
822
823
824         LASSERT(handle != NULL);
825         LASSERT(osd_invariant(obj));
826
827         oh = container_of0(handle, struct osd_thandle, ot_super);
828
829         down_read(&obj->oo_guard);
830         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
831                 GOTO(out, rc = 0);
832
833         LASSERT(obj->oo_sa_hdl != NULL);
834         LASSERT(oh->ot_tx != NULL);
835         /* regular attributes are part of the bonus buffer */
836         /* let's check whether this object is already part of
837          * the transaction */
838         found = false;
839         for (txh = list_head(&oh->ot_tx->tx_holds); txh;
840              txh = list_next(&oh->ot_tx->tx_holds, txh)) {
841                 if (txh->txh_dnode == NULL)
842                         continue;
843                 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
844                         continue;
845                 /* this object is part of the transaction already
846                  * we don't need to declare bonus again */
847                 found = true;
848                 break;
849         }
850         if (!found)
851                 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
852         if (oh->ot_tx->tx_err != 0)
853                 GOTO(out, rc = -oh->ot_tx->tx_err);
854
855         if (attr && attr->la_valid & LA_FLAGS) {
856                 /* LMA is usually a part of bonus, no need to declare
857                  * anything else */
858         }
859
860         if (attr && (attr->la_valid & (LA_UID | LA_GID))) {
861                 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
862                 bspace = toqb(bspace * blksize);
863         }
864
865         if (attr && attr->la_valid & LA_UID) {
866                 /* quota enforcement for user */
867                 if (attr->la_uid != obj->oo_attr.la_uid) {
868                         rc = qsd_transfer(env, osd->od_quota_slave,
869                                           &oh->ot_quota_trans, USRQUOTA,
870                                           obj->oo_attr.la_uid, attr->la_uid,
871                                           bspace, &info->oti_qi);
872                         if (rc)
873                                 GOTO(out, rc);
874                 }
875         }
876         if (attr && attr->la_valid & LA_GID) {
877                 /* quota enforcement for group */
878                 if (attr->la_gid != obj->oo_attr.la_gid) {
879                         rc = qsd_transfer(env, osd->od_quota_slave,
880                                           &oh->ot_quota_trans, GRPQUOTA,
881                                           obj->oo_attr.la_gid, attr->la_gid,
882                                           bspace, &info->oti_qi);
883                         if (rc)
884                                 GOTO(out, rc);
885                 }
886         }
887
888 out:
889         up_read(&obj->oo_guard);
890         RETURN(rc);
891 }
892
893 /*
894  * Set the attributes of an object
895  *
896  * The transaction passed to this routine must have
897  * dmu_tx_hold_bonus(tx, oid) called and then assigned
898  * to a transaction group.
899  */
900 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
901                         const struct lu_attr *la, struct thandle *handle)
902 {
903         struct osd_thread_info  *info = osd_oti_get(env);
904         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
905         struct osd_object       *obj = osd_dt_obj(dt);
906         struct osd_device       *osd = osd_obj2dev(obj);
907         struct osd_thandle      *oh;
908         struct osa_attr         *osa = &info->oti_osa;
909         __u64                    valid = la->la_valid;
910         int                      cnt;
911         int                      rc = 0;
912
913         ENTRY;
914
915         down_read(&obj->oo_guard);
916         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
917                 GOTO(out, rc = -ENOENT);
918
919         LASSERT(handle != NULL);
920         LASSERT(osd_invariant(obj));
921         LASSERT(obj->oo_sa_hdl);
922
923         oh = container_of0(handle, struct osd_thandle, ot_super);
924         /* Assert that the transaction has been assigned to a
925            transaction group. */
926         LASSERT(oh->ot_tx->tx_txg != 0);
927
928         /* Only allow setting the size for regular files */
929         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
930                 valid &= ~(LA_SIZE | LA_BLOCKS);
931
932         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
933                 valid &= ~LA_CTIME;
934
935         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
936                 valid &= ~LA_MTIME;
937
938         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
939                 valid &= ~LA_ATIME;
940
941         if (valid == 0)
942                 GOTO(out, rc = 0);
943
944         if (valid & LA_FLAGS) {
945                 struct lustre_mdt_attrs *lma;
946                 struct lu_buf buf;
947
948                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
949                         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
950                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
951                         buf.lb_buf = lma;
952                         buf.lb_len = sizeof(info->oti_buf);
953                         rc = osd_xattr_get(env, &obj->oo_dt, &buf,
954                                            XATTR_NAME_LMA);
955                         if (rc > 0) {
956                                 lma->lma_incompat =
957                                         le32_to_cpu(lma->lma_incompat);
958                                 lma->lma_incompat |=
959                                         lustre_to_lma_flags(la->la_flags);
960                                 lma->lma_incompat =
961                                         cpu_to_le32(lma->lma_incompat);
962                                 buf.lb_buf = lma;
963                                 buf.lb_len = sizeof(*lma);
964                                 rc = osd_xattr_set_internal(env, obj, &buf,
965                                                             XATTR_NAME_LMA,
966                                                             LU_XATTR_REPLACE,
967                                                             oh);
968                         }
969                         if (rc < 0) {
970                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
971                                        osd->od_svname, rc);
972                                 RETURN(rc);
973                         }
974                 }
975         }
976
977         write_lock(&obj->oo_attr_lock);
978         cnt = 0;
979         if (valid & LA_ATIME) {
980                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
981                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
982                                  osa->atime, 16);
983         }
984         if (valid & LA_MTIME) {
985                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
986                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
987                                  osa->mtime, 16);
988         }
989         if (valid & LA_CTIME) {
990                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
991                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
992                                  osa->ctime, 16);
993         }
994         if (valid & LA_MODE) {
995                 /* mode is stored along with type, so read it first */
996                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
997                         (la->la_mode & ~S_IFMT);
998                 osa->mode = obj->oo_attr.la_mode;
999                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1000                                  &osa->mode, 8);
1001         }
1002         if (valid & LA_SIZE) {
1003                 osa->size = obj->oo_attr.la_size = la->la_size;
1004                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1005                                  &osa->size, 8);
1006         }
1007         if (valid & LA_NLINK) {
1008                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1009                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1010                                  &osa->nlink, 8);
1011         }
1012         if (valid & LA_RDEV) {
1013                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1014                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1015                                  &osa->rdev, 8);
1016         }
1017         if (valid & LA_FLAGS) {
1018                 osa->flags = attrs_fs2zfs(la->la_flags);
1019                 /* many flags are not supported by zfs, so ensure a good cached
1020                  * copy */
1021                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1022                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1023                                  &osa->flags, 8);
1024         }
1025         if (valid & LA_UID) {
1026                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1027                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1028                                  &osa->uid, 8);
1029         }
1030         if (valid & LA_GID) {
1031                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1032                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1033                                  &osa->gid, 8);
1034         }
1035         obj->oo_attr.la_valid |= valid;
1036         write_unlock(&obj->oo_attr_lock);
1037
1038         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1039         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1040
1041 out:
1042         up_read(&obj->oo_guard);
1043         RETURN(rc);
1044 }
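
/*
 * Note how osd_attr_set() batches changes: the in-memory copy in
 * obj->oo_attr is updated under oo_attr_lock, every modified attribute is
 * staged into the per-thread oti_attr_bulk array, and a single
 * osd_object_sa_bulk_update() call pushes them all to the SA layer.
 */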
1045
1046 /*
1047  * Object creation.
1048  *
1049  * XXX temporary solution.
1050  */
1051
1052 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1053                         struct dt_object *parent, struct dt_object *child,
1054                         umode_t child_mode)
1055 {
1056         LASSERT(ah);
1057
1058         ah->dah_parent = parent;
1059         ah->dah_mode = child_mode;
1060
1061         if (parent != NULL && !dt_object_remote(parent)) {
1062                 /* will help to find FID->ino at dt_insert("..") */
1063                 struct osd_object *pobj = osd_dt_obj(parent);
1064
1065                 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1066         }
1067 }
1068
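/*
 * osd_declare_create() reserves everything needed to materialize a new
 * object: an SA create sized from oo_ea_in_bonus (base SA attributes plus a
 * VBR version and the LMA, rounded up with ~32 bytes of headroom), a ZAP
 * hold for the FID index insertion, and quota for one more inode;
 * directories and indices also declare the ZAP creation itself.
 */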
1069 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1070                               struct lu_attr *attr,
1071                               struct dt_allocation_hint *hint,
1072                               struct dt_object_format *dof,
1073                               struct thandle *handle)
1074 {
1075         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1076         struct osd_object       *obj = osd_dt_obj(dt);
1077         struct osd_device       *osd = osd_obj2dev(obj);
1078         struct osd_thandle      *oh;
1079         uint64_t                 zapid;
1080         dnode_t                 *dn;
1081         int                      rc, dnode_size;
1082         ENTRY;
1083
1084         LASSERT(dof);
1085
1086         switch (dof->dof_type) {
1087                 case DFT_REGULAR:
1088                 case DFT_SYM:
1089                 case DFT_NODE:
1090                         if (obj->oo_dt.do_body_ops == NULL)
1091                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1092                         break;
1093                 default:
1094                         break;
1095         }
1096
1097         LASSERT(handle != NULL);
1098         oh = container_of0(handle, struct osd_thandle, ot_super);
1099         LASSERT(oh->ot_tx != NULL);
1100
1101         /* this is the minimum set of EAs on every Lustre object */
1102         obj->oo_ea_in_bonus = ZFS_SA_BASE_ATTR_SIZE +
1103                                 sizeof(__u64) + /* VBR VERSION */
1104                                 sizeof(struct lustre_mdt_attrs); /* LMA */
1105         /* reserve 32 bytes for extra stuff like ACLs */
1106         dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1107
1108         switch (dof->dof_type) {
1109                 case DFT_DIR:
1110                         dt->do_index_ops = &osd_dir_ops;
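                        /* fall through: a directory also needs the ZAP
                         * declarations below */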
1111                 case DFT_INDEX:
1112                         /* for zap create */
1113                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1114                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1115                         break;
1116                 case DFT_REGULAR:
1117                 case DFT_SYM:
1118                 case DFT_NODE:
1119                         /* first, we'll create new object */
1120                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1121                         break;
1122
1123                 default:
1124                         LBUG();
1125                         break;
1126         }
1127
1128         /* and we'll add it to some mapping */
1129         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1130         osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1131
1132         /* will help to find FID->ino mapping at dt_insert() */
1133         osd_idc_find_and_init(env, osd, obj);
1134
1135         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
1136                                false, NULL, false);
1137
1138         RETURN(rc);
1139 }
1140
1141 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1142                     sa_handle_t *sa_hdl, dmu_tx_t *tx,
1143                     struct lu_attr *la, uint64_t parent)
1144 {
1145         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
1146         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1147         uint64_t         gen;
1148         uint64_t         crtime[2];
1149         timestruc_t      now;
1150         int              cnt;
1151         int              rc;
1152
1153         LASSERT(sa_hdl);
1154
1155         gen = dmu_tx_get_txg(tx);
1156         gethrestime(&now);
1157         ZFS_TIME_ENCODE(&now, crtime);
1158
1159         osa->atime[0] = la->la_atime;
1160         osa->ctime[0] = la->la_ctime;
1161         osa->mtime[0] = la->la_mtime;
1162         osa->mode = la->la_mode;
1163         osa->uid = la->la_uid;
1164         osa->gid = la->la_gid;
1165         osa->rdev = la->la_rdev;
1166         osa->nlink = la->la_nlink;
1167         osa->flags = attrs_fs2zfs(la->la_flags);
1168         osa->size  = la->la_size;
1169
1170         /*
1171          * we need to create all SA below upon object create.
1172          *
1173          * XXX The attribute order matters since the accounting callback relies
1174          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1175          * look up the UID/GID attributes. Moreover, the callback does not seem
1176          * to support the spill block.
1177          * We define attributes in the same order as SA_*_OFFSET in order to
1178          * work around the problem. See ORI-610.
1179          */
1180         cnt = 0;
1181         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1182         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1183         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1184         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1185         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1186         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1187         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1188         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1189         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1190         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1191         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
1192         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1193         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1194         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1195
1196         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1197
1198         return rc;
1199 }
1200
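/*
 * osd_find_new_dnode() is an optimization for the just-created case: the
 * dnode_t is usually already referenced by one of the transaction's holds,
 * so the tx_holds list is scanned (newest first) instead of doing a
 * dnode-number lookup through dmu_bonus_hold(); __osd_obj2dnode() is the
 * fallback when no matching hold is found.
 */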
1201 static int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1202                               uint64_t oid, dnode_t **dnp)
1203 {
1204         dmu_tx_hold_t *txh;
1205         int rc = 0;
1206
1207         /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1208         for (txh = list_tail(&tx->tx_holds); txh;
1209              txh = list_prev(&tx->tx_holds, txh)) {
1210                 dnode_t *dn = txh->txh_dnode;
1211                 dmu_buf_impl_t *db;
1212
1213                 if (dn == NULL)
1214                         continue;
1215                 if (dn->dn_object != oid)
1216                         continue;
1217                 db = dn->dn_bonus;
1218                 if (db == NULL) {
1219                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1220                         if (dn->dn_bonus == NULL)
1221                                 dbuf_create_bonus(dn);
1222                         rw_exit(&dn->dn_struct_rwlock);
1223                 }
1224                 db = dn->dn_bonus;
1225                 LASSERT(db);
1226                 LASSERT(dn->dn_handle);
1227                 DB_DNODE_ENTER(db);
1228                 if (refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1229                         refcount_add(&dn->dn_holds, osd_obj_tag);
1230                         atomic_inc_32(&dn->dn_dbufs_count);
1231                 }
1232                 *dnp = dn;
1233                 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1234                 break;
1235         }
1236
1237         if (unlikely(*dnp == NULL))
1238                 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1239
1240         return rc;
1241 }
1242
1243 /*
1244  * The transaction passed to this routine must have
1245  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1246  * to a transaction group.
1247  */
1248 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
1249                         dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1250 {
1251         struct osd_device   *osd = osd_obj2dev(obj);
1252         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1253         dmu_object_type_t    type = DMU_OT_PLAIN_FILE_CONTENTS;
1254         uint64_t oid;
1255
1256         /* Use DMU_OTN_UINT8_METADATA for local objects so their data blocks
1257          * would get an additional ditto copy */
1258         if (unlikely(S_ISREG(la->la_mode) &&
1259                      fid_seq_is_local_file(fid_seq(fid))))
1260                 type = DMU_OTN_UINT8_METADATA;
1261
1262         /* Create a new DMU object using the default dnode size. */
1263         oid = osd_dmu_object_alloc(osd->od_os, type, 0, 0, tx);
1264
1265         LASSERT(la->la_valid & LA_MODE);
1266         la->la_size = 0;
1267         la->la_nlink = 1;
1268
1269         return osd_find_new_dnode(env, tx, oid, dnp);
1270 }
1271
1272 /*
1273  * The transaction passed to this routine must have
1274  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1275  * to a transaction group.
1276  *
1277  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1278  * This is fine for directories today, because storing the FID in the dirent
1279  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1280  * then we might need to re-evaluate the use of this flag and instead do
1281  * a conversion from the different internal ZAP hash formats being used. */
1282 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1283                      dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1284                      zap_flags_t flags)
1285 {
1286         uint64_t oid;
1287
1288         /* Assert that the transaction has been assigned to a
1289            transaction group. */
1290         LASSERT(tx->tx_txg != 0);
1291         *dnp = NULL;
1292
1293         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1294                                    DMU_OT_DIRECTORY_CONTENTS,
1295                                    14, /* == ZFS fzap_default_blockshift */
1296                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1297                                    0, tx);
1298
1299         la->la_size = 2;
1300         la->la_nlink = 1;
1301
1302         return osd_find_new_dnode(env, tx, oid, dnp);
1303 }
1304
1305 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1306                           struct lu_attr *la, struct osd_thandle *oh)
1307 {
1308         dnode_t *dn;
1309         int rc;
1310
1311         /* Index files should be created as regular files in order not to
1312          * confuse ZPL, which could interpret them as directories.
1313          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1314          * binary keys */
1315         LASSERT(S_ISREG(la->la_mode));
1316         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la,
1317                               ZAP_FLAG_UINT64_KEY);
1318         if (rc)
1319                 return ERR_PTR(rc);
1320         return dn;
1321 }
1322
1323 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1324                           struct lu_attr *la, struct osd_thandle *oh)
1325 {
1326         dnode_t *dn;
1327         int rc;
1328
1329         LASSERT(S_ISDIR(la->la_mode));
1330         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la, 0);
1331         if (rc)
1332                 return ERR_PTR(rc);
1333         return dn;
1334 }
1335
1336 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1337                           struct lu_attr *la, struct osd_thandle *oh)
1338 {
1339         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1340         struct osd_device *osd = osd_obj2dev(obj);
1341         dnode_t *dn;
1342         int rc;
1343
1344         LASSERT(S_ISREG(la->la_mode));
1345         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1346         if (rc)
1347                 return ERR_PTR(rc);
1348
1349         if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid)) &&
1350             osd->od_is_ost) {
1351                 /* The minimum block size must be at least page size otherwise
1352                  * it will break the assumption in tgt_thread_big_cache where
1353                  * the array size is PTLRPC_MAX_BRW_PAGES. It will also affect
1354                  * RDMA due to subpage transfer size */
1355                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1356                                                PAGE_SIZE, 0, oh->ot_tx);
1357                 if (unlikely(rc)) {
1358                         CERROR("%s: can't change blocksize: %d\n",
1359                                osd->od_svname, rc);
1360                         return ERR_PTR(rc);
1361                 }
1362         }
1363
1364         return dn;
1365 }
1366
1367 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1368                           struct lu_attr *la, struct osd_thandle *oh)
1369 {
1370         dnode_t *dn;
1371         int rc;
1372
1373         LASSERT(S_ISLNK(la->la_mode));
1374         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1375         if (rc)
1376                 return ERR_PTR(rc);
1377         return dn;
1378 }
1379
1380 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1381                           struct lu_attr *la, struct osd_thandle *oh)
1382 {
1383         dnode_t *dn;
1384         int rc;
1385
1386         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1387                 la->la_valid |= LA_RDEV;
1388
1389         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1390         if (rc)
1391                 return ERR_PTR(rc);
1392         return dn;
1393 }
1394
1395 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1396                                    struct osd_object *obj,
1397                                    struct lu_attr *la,
1398                                    struct osd_thandle *oh);
1399
1400 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1401 {
1402         osd_obj_type_f result;
1403
1404         switch (type) {
1405         case DFT_DIR:
1406                 result = osd_mkdir;
1407                 break;
1408         case DFT_INDEX:
1409                 result = osd_mkidx;
1410                 break;
1411         case DFT_REGULAR:
1412                 result = osd_mkreg;
1413                 break;
1414         case DFT_SYM:
1415                 result = osd_mksym;
1416                 break;
1417         case DFT_NODE:
1418                 result = osd_mknod;
1419                 break;
1420         default:
1421                 LBUG();
1422                 break;
1423         }
1424         return result;
1425 }
1426
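/*
 * Rough creation flow in osd_create() below: pick the type-specific
 * constructor via osd_create_type_f(), allocate the dnode (or ZAP), insert
 * the FID->dnode mapping into the index ZAP as a zpl_direntry, attach the
 * SA handle, write the initial attributes with __osd_attr_init(), and
 * finally store the LMA in the oo_sa_xattr nvlist.
 */
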
1427 /*
1428  * Concurrency: @dt is write locked.
1429  */
1430 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1431                       struct lu_attr *attr, struct dt_allocation_hint *hint,
1432                       struct dt_object_format *dof, struct thandle *th)
1433 {
1434         struct osd_thread_info  *info = osd_oti_get(env);
1435         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1436         struct zpl_direntry     *zde = &info->oti_zde.lzd_reg;
1437         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1438         struct osd_object       *obj = osd_dt_obj(dt);
1439         struct osd_device       *osd = osd_obj2dev(obj);
1440         char                    *buf = info->oti_str;
1441         struct osd_thandle      *oh;
1442         dnode_t *dn = NULL, *zdn = NULL;
1443         uint64_t                 zapid, parent = 0;
1444         int                      rc;
1445
1446         ENTRY;
1447
1448         /* concurrent create declarations should not see the object in an
1449          * inconsistent state (db, attr, etc).
1450          * in regular cases the acquisition should be cheap */
1451         down_write(&obj->oo_guard);
1452
1453         if (unlikely(dt_object_exists(dt)))
1454                 GOTO(out, rc = -EEXIST);
1455
1456         LASSERT(osd_invariant(obj));
1457         LASSERT(dof != NULL);
1458
1459         LASSERT(th != NULL);
1460         oh = container_of0(th, struct osd_thandle, ot_super);
1461
1462         LASSERT(obj->oo_dn == NULL);
1463
1464         /* to follow ZFS on-disk format we need
1465          * to initialize parent dnode properly */
1466         if (hint != NULL && hint->dah_parent != NULL &&
1467             !dt_object_remote(hint->dah_parent))
1468                 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1469
1470         /* we may fix some attributes, better do not change the source */
1471         obj->oo_attr = *attr;
1472         obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1473
1474         dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1475         if (IS_ERR(dn)) {
1476                 rc = PTR_ERR(dn);
1477                 dn = NULL;
1478                 GOTO(out, rc);
1479         }
1480
1481         zde->zde_pad = 0;
1482         zde->zde_dnode = dn->dn_object;
1483         zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1484
1485         zapid = osd_get_name_n_idx(env, osd, fid, buf,
1486                                    sizeof(info->oti_str), &zdn);
1487         rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
1488         if (rc)
1489                 GOTO(out, rc);
1490         obj->oo_dn = dn;
1491         /* Now add in all of the "SA" attributes */
1492         rc = osd_sa_handle_get(obj);
1493         if (rc)
1494                 GOTO(out, rc);
1495
1496         /* configure new osd object */
1497         parent = parent != 0 ? parent : zapid;
1498         rc = __osd_attr_init(env, osd, obj->oo_sa_hdl, oh->ot_tx,
1499                              &obj->oo_attr, parent);
1500         if (rc)
1501                 GOTO(out, rc);
1502
1503         /* XXX: oo_lma_flags */
1504         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
1505         if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
1506                 /* no body operations for accounting objects */
1507                 obj->oo_dt.do_body_ops = &osd_body_ops;
1508
1509         rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
1510         if (rc)
1511                 GOTO(out, rc);
1512
1513         /* initialize LMA */
1514         lustre_lma_init(lma, lu_object_fid(&obj->oo_dt.do_lu), 0, 0);
1515         lustre_lma_swab(lma);
1516         rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
1517                                     (uchar_t *)lma, sizeof(*lma));
1518         if (rc)
1519                 GOTO(out, rc);
1520         rc = __osd_sa_xattr_update(env, obj, oh);
1521         if (rc)
1522                 GOTO(out, rc);
1523         osd_idc_find_and_init(env, osd, obj);
1524
1525 out:
1526         if (unlikely(rc && dn)) {
1527                 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
1528                 osd_dnode_rele(dn);
1529                 obj->oo_dn = NULL;
1530         } else if (!rc) {
1531                 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
1532         }
1533         up_write(&obj->oo_guard);
1534         RETURN(rc);
1535 }
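
/*
 * Illustrative sketch only (not part of the original code): a dt-layer
 * caller is expected to pair the declaration and execution phases,
 * roughly as below, assuming the dt_trans_*(), dt_declare_create() and
 * dt_create() wrappers from dt_object.h (dev, dt, attr, hint and dof are
 * the caller's objects):
 *
 *      th = dt_trans_create(env, dev);
 *      if (IS_ERR(th))
 *              return PTR_ERR(th);
 *      rc = dt_declare_create(env, dt, attr, hint, dof, th);
 *      if (rc == 0)
 *              rc = dt_trans_start_local(env, dev, th);
 *      if (rc == 0)
 *              rc = dt_create(env, dt, attr, hint, dof, th);
 *      dt_trans_stop(env, dev, th);
 *
 * osd_declare_create() sizes the transaction, while osd_create() above
 * performs the dnode allocation, OI ZAP insertion and SA/LMA
 * initialization within that transaction.
 */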

static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
                               struct thandle *th)
{
        return osd_declare_attr_set(env, dt, NULL, th);
}

/*
 * Concurrency: @dt is write locked.
 */
static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
                       struct thandle *handle)
{
        struct osd_object       *obj = osd_dt_obj(dt);
        struct osd_thandle      *oh;
        struct osd_device       *osd = osd_obj2dev(obj);
        uint64_t                 nlink;
        int                      rc;

        ENTRY;

        down_read(&obj->oo_guard);
        if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
                GOTO(out, rc = -ENOENT);

        LASSERT(osd_invariant(obj));
        LASSERT(obj->oo_sa_hdl != NULL);

        oh = container_of0(handle, struct osd_thandle, ot_super);

        write_lock(&obj->oo_attr_lock);
        nlink = ++obj->oo_attr.la_nlink;
        write_unlock(&obj->oo_attr_lock);

        rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);

out:
        up_read(&obj->oo_guard);
        RETURN(rc);
}

static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
                               struct thandle *handle)
{
        return osd_declare_attr_set(env, dt, NULL, handle);
}

/*
 * Concurrency: @dt is write locked.
 */
static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
                       struct thandle *handle)
{
        struct osd_object       *obj = osd_dt_obj(dt);
        struct osd_thandle      *oh;
        struct osd_device       *osd = osd_obj2dev(obj);
        uint64_t                 nlink;
        int                      rc;

        ENTRY;

        down_read(&obj->oo_guard);

        if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
                GOTO(out, rc = -ENOENT);

        LASSERT(osd_invariant(obj));
        LASSERT(obj->oo_sa_hdl != NULL);

        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));

        write_lock(&obj->oo_attr_lock);
        nlink = --obj->oo_attr.la_nlink;
        write_unlock(&obj->oo_attr_lock);

        rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);

out:
        up_read(&obj->oo_guard);
        RETURN(rc);
}
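
/*
 * Illustrative sketch only: link count changes follow the same
 * declare/execute pattern, assuming the dt_object.h wrappers:
 *
 *      rc = dt_declare_ref_add(env, dt, th);   (declaration phase)
 *      ...
 *      rc = dt_ref_add(env, dt, th);           (after the transaction starts)
 *
 * Both osd_ref_add() and osd_ref_del() adjust the cached
 * obj->oo_attr.la_nlink under oo_attr_lock and push the new value to the
 * SA_ZPL_LINKS attribute through osd_object_sa_update() as part of the
 * same transaction.
 */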

static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
                           __u64 start, __u64 end)
{
        struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
        ENTRY;

        /* XXX: no other option than syncing the whole filesystem until we
         * support ZIL.  If the object tracked the txg that it was last
         * modified in, it could pass that txg here instead of "0".  Maybe
         * the changes are already committed, so no wait is needed at all? */
        if (!osd->od_dt_dev.dd_rdonly)
                txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);

        RETURN(0);
}
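
/*
 * Illustrative sketch only: if the object tracked the txg of its last
 * modification as the comment above suggests (oo_last_txg is a
 * hypothetical field, not present in this code), the wait could be
 * narrowed to:
 *
 *      txg_wait_synced(dmu_objset_pool(osd->od_os), obj->oo_last_txg);
 *
 * Passing 0, as done above, waits for all currently pending changes to
 * reach stable storage.
 */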

static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
{
        return 0;
}

static struct dt_object_operations osd_obj_ops = {
        .do_read_lock           = osd_read_lock,
        .do_write_lock          = osd_write_lock,
        .do_read_unlock         = osd_read_unlock,
        .do_write_unlock        = osd_write_unlock,
        .do_write_locked        = osd_write_locked,
        .do_attr_get            = osd_attr_get,
        .do_declare_attr_set    = osd_declare_attr_set,
        .do_attr_set            = osd_attr_set,
        .do_ah_init             = osd_ah_init,
        .do_declare_create      = osd_declare_create,
        .do_create              = osd_create,
        .do_declare_destroy     = osd_declare_destroy,
        .do_destroy             = osd_destroy,
        .do_index_try           = osd_index_try,
        .do_declare_ref_add     = osd_declare_ref_add,
        .do_ref_add             = osd_ref_add,
        .do_declare_ref_del     = osd_declare_ref_del,
        .do_ref_del             = osd_ref_del,
        .do_xattr_get           = osd_xattr_get,
        .do_declare_xattr_set   = osd_declare_xattr_set,
        .do_xattr_set           = osd_xattr_set,
        .do_declare_xattr_del   = osd_declare_xattr_del,
        .do_xattr_del           = osd_xattr_del,
        .do_xattr_list          = osd_xattr_list,
        .do_object_sync         = osd_object_sync,
        .do_invalidate          = osd_invalidate,
};
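
/*
 * Illustrative sketch only: upper layers do not call these methods
 * directly; they go through the dt_object wrappers in dt_object.h, which
 * dispatch via do_ops, e.g. (assuming the dt_attr_get() wrapper):
 *
 *      rc = dt_attr_get(env, dt, attr);
 *              -> dt->do_ops->do_attr_get(env, dt, attr) == osd_attr_get()
 */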

static struct lu_object_operations osd_lu_obj_ops = {
        .loo_object_init        = osd_object_init,
        .loo_object_delete      = osd_object_delete,
        .loo_object_release     = osd_object_release,
        .loo_object_free        = osd_object_free,
        .loo_object_print       = osd_object_print,
        .loo_object_invariant   = osd_object_invariant,
};

static int osd_otable_it_attr_get(const struct lu_env *env,
                                  struct dt_object *dt,
                                  struct lu_attr *attr)
{
        attr->la_valid = 0;
        return 0;
}

static struct dt_object_operations osd_obj_otable_it_ops = {
        .do_attr_get            = osd_otable_it_attr_get,
        .do_index_try           = osd_index_try,
};