LU-7899 osd: batch EA updates
fs/lustre-release.git: lustre/osd-zfs/osd_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2012, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osd-zfs/osd_object.c
37  *
38  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
39  * Author: Mike Pershin <tappro@whamcloud.com>
40  * Author: Johann Lombardi <johann@whamcloud.com>
41  */
42
43 #define DEBUG_SUBSYSTEM S_OSD
44
45 #include <lustre_ver.h>
46 #include <libcfs/libcfs.h>
47 #include <obd_support.h>
48 #include <lustre_net.h>
49 #include <obd.h>
50 #include <obd_class.h>
51 #include <lustre_disk.h>
52 #include <lustre_fid.h>
53
54 #include "osd_internal.h"
55
56 #include <sys/dnode.h>
57 #include <sys/dbuf.h>
58 #include <sys/spa.h>
59 #include <sys/stat.h>
60 #include <sys/zap.h>
61 #include <sys/spa_impl.h>
62 #include <sys/zfs_znode.h>
63 #include <sys/dmu_tx.h>
64 #include <sys/dmu_objset.h>
65 #include <sys/dsl_prop.h>
66 #include <sys/sa_impl.h>
67 #include <sys/txg.h>
68
69 char *osd_obj_tag = "osd_object";
70
71 static struct dt_object_operations osd_obj_ops;
72 static struct lu_object_operations osd_lu_obj_ops;
73 extern struct dt_body_operations osd_body_ops;
74 static struct dt_object_operations osd_obj_otable_it_ops;
75
76 extern struct kmem_cache *osd_object_kmem;
77
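/*
 * Release the SA handle attached to the object, if any.
 */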
78 static void
79 osd_object_sa_fini(struct osd_object *obj)
80 {
81         if (obj->oo_sa_hdl) {
82                 sa_handle_destroy(obj->oo_sa_hdl);
83                 obj->oo_sa_hdl = NULL;
84         }
85 }
86
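/*
 * Attach an SA handle to the object and cache the id of its xattr
 * directory (ZFS_NO_OBJECT if the object has no xattr directory yet).
 */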
87 static int
88 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
89 {
90         int rc;
91
92         LASSERT(obj->oo_sa_hdl == NULL);
93         LASSERT(obj->oo_db != NULL);
94
95         rc = -sa_handle_get(o->od_os, obj->oo_db->db_object, obj,
96                             SA_HDL_PRIVATE, &obj->oo_sa_hdl);
97         if (rc)
98                 return rc;
99
100         /* Cache the xattr object id, valid for the life of the object */
101         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
102         if (rc == -ENOENT) {
103                 obj->oo_xattr = ZFS_NO_OBJECT;
104                 rc = 0;
105         } else if (rc) {
106                 osd_object_sa_fini(obj);
107         }
108
109         return rc;
110 }
111
112 /*
113  * Add object to list of dirty objects in tx handle.
114  */
115 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
116 {
117         if (!list_empty(&obj->oo_sa_linkage))
118                 return;
119
120         write_lock(&obj->oo_attr_lock);
121         if (likely(list_empty(&obj->oo_sa_linkage)))
122                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
123         write_unlock(&obj->oo_attr_lock);
124 }
125
126 /*
127  * Release spill block dbuf hold for all dirty SAs.
128  */
129 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
130 {
131         struct osd_object *obj;
132
133         while (!list_empty(&oh->ot_sa_list)) {
134                 obj = list_entry(oh->ot_sa_list.next,
135                                  struct osd_object, oo_sa_linkage);
136                 write_lock(&obj->oo_attr_lock);
137                 list_del_init(&obj->oo_sa_linkage);
138                 if (obj->oo_late_xattr) {
139                         LASSERT(oh->ot_assigned != 0);
140                         /* we need oo_guard to protect oo_sa_xattr
141                          * consistency, but simply taking it here would
142                          * break the lock ordering. In the majority of
143                          * cases callers don't set EAs concurrently, so
144                          * hopefully we don't need to retake the locks
145                          * too often */
146                         if (down_read_trylock(&obj->oo_guard)) {
147                                 __osd_sa_xattr_update(env, obj, oh);
148                                 up_read(&obj->oo_guard);
149                         } else {
150                                 down_read(&obj->oo_guard);
151                                 __osd_sa_xattr_update(env, obj, oh);
152                                 up_read(&obj->oo_guard);
153                         }
154                 }
155                 sa_spill_rele(obj->oo_sa_hdl);
156                 write_unlock(&obj->oo_attr_lock);
157         }
158 }
159
160 /*
161  * Update the SA and add the object to the dirty list.
162  */
163 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
164                          void *buf, uint32_t buflen, struct osd_thandle *oh)
165 {
166         int rc;
167
168         LASSERT(obj->oo_sa_hdl != NULL);
169         LASSERT(oh->ot_tx != NULL);
170
171         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
172         osd_object_sa_dirty_add(obj, oh);
173
174         return rc;
175 }
176
177 /*
178  * Bulk update the SA and add the object to the dirty list.
179  */
180 static int
181 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
182                           int count, struct osd_thandle *oh)
183 {
184         int rc;
185
186         LASSERT(obj->oo_sa_hdl != NULL);
187         LASSERT(oh->ot_tx != NULL);
188
189         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
190         osd_object_sa_dirty_add(obj, oh);
191
192         return rc;
193 }
194
195 /*
196  * Retrieve the attributes of a DMU object
197  */
198 int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
199                           struct osd_object *obj, struct lu_attr *la)
200 {
201         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
202         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
203         sa_handle_t     *sa_hdl;
204         int              cnt = 0;
205         int              rc;
206         ENTRY;
207
208         LASSERT(obj->oo_db != NULL);
209
210         rc = -sa_handle_get(o->od_os, obj->oo_db->db_object, NULL,
211                             SA_HDL_PRIVATE, &sa_hdl);
212         if (rc)
213                 RETURN(rc);
214
215         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
216                         LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
217
218         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
219         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
220         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
221         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
222         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
223         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
224         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
225         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
226         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
227         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
228
229         rc = -sa_bulk_lookup(sa_hdl, bulk, cnt);
230         if (rc)
231                 GOTO(out_sa, rc);
232
233         la->la_atime = osa->atime[0];
234         la->la_mtime = osa->mtime[0];
235         la->la_ctime = osa->ctime[0];
236         la->la_mode = osa->mode;
237         la->la_uid = osa->uid;
238         la->la_gid = osa->gid;
239         la->la_nlink = osa->nlink;
240         la->la_flags = attrs_zfs2fs(osa->flags);
241         la->la_size = osa->size;
242
243         /* Try to get an extra flag from the LMA. Right now, only the
244          * LMAI_ORPHAN flag is stored there, and only for orphan directories */
245         if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
246                 struct osd_thread_info *info = osd_oti_get(env);
247                 struct lustre_mdt_attrs *lma;
248                 struct lu_buf buf;
249
250                 lma = (struct lustre_mdt_attrs *)info->oti_buf;
251                 buf.lb_buf = lma;
252                 buf.lb_len = sizeof(info->oti_buf);
253                 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
254                 if (rc > 0) {
255                         rc = 0;
256                         lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
257                         obj->oo_lma_flags =
258                                 lma_to_lustre_flags(lma->lma_incompat);
259
260                 } else if (rc == -ENODATA) {
261                         rc = 0;
262                 }
263         }
264
265         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
266                 rc = -sa_lookup(sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
267                 if (rc)
268                         GOTO(out_sa, rc);
269                 la->la_rdev = osa->rdev;
270                 la->la_valid |= LA_RDEV;
271         }
272 out_sa:
273         sa_handle_destroy(sa_hdl);
274
275         RETURN(rc);
276 }
277
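/*
 * Hold the dbuf for the given object id and verify that the dnode uses an
 * SA bonus buffer (the DMU space accounting objects are exempt).
 */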
278 int __osd_obj2dbuf(const struct lu_env *env, objset_t *os,
279                    uint64_t oid, dmu_buf_t **dbp)
280 {
281         dmu_object_info_t *doi = &osd_oti_get(env)->oti_doi;
282         int rc;
283
284         rc = -sa_buf_hold(os, oid, osd_obj_tag, dbp);
285         if (rc)
286                 return rc;
287
288         dmu_object_info_from_db(*dbp, doi);
289         if (unlikely(oid != DMU_USERUSED_OBJECT &&
290             oid != DMU_GROUPUSED_OBJECT && doi->doi_bonus_type != DMU_OT_SA)) {
291                 sa_buf_rele(*dbp, osd_obj_tag);
292                 *dbp = NULL;
293                 return -EINVAL;
294         }
295
296         LASSERT(*dbp);
297         LASSERT((*dbp)->db_object == oid);
298         LASSERT((*dbp)->db_offset == -1);
299         LASSERT((*dbp)->db_data != NULL);
300
301         return 0;
302 }
303
304 /*
305  * Concurrency: no concurrent access is possible that early in object
306  * life-cycle.
307  */
308 struct lu_object *osd_object_alloc(const struct lu_env *env,
309                                    const struct lu_object_header *hdr,
310                                    struct lu_device *d)
311 {
312         struct osd_object *mo;
313
314         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
315         if (mo != NULL) {
316                 struct lu_object *l;
317
318                 l = &mo->oo_dt.do_lu;
319                 dt_object_init(&mo->oo_dt, NULL, d);
320                 mo->oo_dt.do_ops = &osd_obj_ops;
321                 l->lo_ops = &osd_lu_obj_ops;
322                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
323                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
324                 init_rwsem(&mo->oo_sem);
325                 init_rwsem(&mo->oo_guard);
326                 rwlock_init(&mo->oo_attr_lock);
327                 mo->oo_destroy = OSD_DESTROY_NONE;
328                 return l;
329         } else {
330                 return NULL;
331         }
332 }
333
334 /*
335  * Concurrency: shouldn't matter.
336  */
337 int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
338 {
339         struct osd_device       *osd = osd_obj2dev(obj);
340         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
341         int                      rc = 0;
342         ENTRY;
343
344         if (obj->oo_db == NULL)
345                 RETURN(0);
346
347         /* object exists */
348
349         rc = osd_object_sa_init(obj, osd);
350         if (rc)
351                 RETURN(rc);
352
353         /* cache attrs in object */
354         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
355         if (rc)
356                 RETURN(rc);
357
358         if (likely(!fid_is_acct(fid)))
359                 /* no body operations for accounting objects */
360                 obj->oo_dt.do_body_ops = &osd_body_ops;
361
362         /*
363          * initialize object before marking it existing
364          */
365         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
366
367         smp_mb();
368         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
369
370         RETURN(0);
371 }
372
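/*
 * Check the LMA xattr (if present) for incompatible feature flags that
 * this OSD does not understand.
 */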
373 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
374 {
375         struct osd_thread_info  *info = osd_oti_get(env);
376         struct lu_buf           buf;
377         int                     rc;
378         struct lustre_mdt_attrs *lma;
379         ENTRY;
380
381         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
382         lma = (struct lustre_mdt_attrs *)info->oti_buf;
383         buf.lb_buf = lma;
384         buf.lb_len = sizeof(info->oti_buf);
385
386         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
387         if (rc > 0) {
388                 rc = 0;
389                 lustre_lma_swab(lma);
390                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
391                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
392                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
393                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
394                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
395                               PFID(lu_object_fid(&obj->oo_dt.do_lu)));
396                         rc = -EOPNOTSUPP;
397                 }
398         } else if (rc == -ENODATA) {
399                 /* the LMA xattr hasn't been initialized yet */
400                 rc = 0;
401         }
402
403         RETURN(rc);
404 }
405
406 /*
407  * Concurrency: no concurrent access is possible that early in object
408  * life-cycle.
409  */
410 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
411                            const struct lu_object_conf *conf)
412 {
413         struct osd_object       *obj = osd_obj(l);
414         struct osd_device       *osd = osd_obj2dev(obj);
415         uint64_t                 oid;
416         int                      rc;
417         ENTRY;
418
419         LASSERT(osd_invariant(obj));
420
421         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
422                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
423                 l->lo_header->loh_attr |= LOHA_EXISTS;
424                 RETURN(0);
425         }
426
427         if (conf != NULL && conf->loc_flags & LOC_F_NEW)
428                 GOTO(out, rc = 0);
429
430         rc = osd_fid_lookup(env, osd, lu_object_fid(l), &oid);
431         if (rc == 0) {
432                 LASSERT(obj->oo_db == NULL);
433                 rc = __osd_obj2dbuf(env, osd->od_os, oid, &obj->oo_db);
434                 /* EEXIST will be returned if object is being deleted in ZFS */
435                 if (rc == -EEXIST) {
436                         rc = 0;
437                         GOTO(out, rc);
438                 }
439                 if (rc != 0) {
440                         CERROR("%s: lookup "DFID"/"LPX64" failed: rc = %d\n",
441                                osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
442                         GOTO(out, rc);
443                 }
444                 LASSERT(obj->oo_db);
445                 rc = osd_object_init0(env, obj);
446                 if (rc != 0)
447                         GOTO(out, rc);
448
449                 rc = osd_check_lma(env, obj);
450                 if (rc != 0)
451                         GOTO(out, rc);
452         } else if (rc == -ENOENT) {
453                 rc = 0;
454         }
455         LASSERT(osd_invariant(obj));
456 out:
457         RETURN(rc);
458 }
459
460 /*
461  * Concurrency: no concurrent access is possible that late in object
462  * life-cycle.
463  */
464 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
465 {
466         struct osd_object *obj = osd_obj(l);
467
468         LASSERT(osd_invariant(obj));
469
470         dt_object_fini(&obj->oo_dt);
471         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
472 }
473
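/*
 * Put the object on the transaction's list of objects scheduled for
 * asynchronous (deferred) destroy.
 */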
474 static int
475 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
476 {
477         int rc = -EBUSY;
478
479         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
480
481         /* the object is supposed to be exclusively locked by
482          * the caller (osd_object_destroy()), while the transaction
483          * (oh) is per-thread and not shared */
484         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
485                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
486                 rc = 0;
487         }
488
489         return rc;
490 }
491
492 /* Default to max data size covered by a level-1 indirect block */
493 static unsigned long osd_sync_destroy_max_size =
494         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
495 module_param(osd_sync_destroy_max_size, ulong, 0444);
496 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
497
498 static inline void
499 osd_object_set_destroy_type(struct osd_object *obj)
500 {
501         /*
502          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
503          * only once and use it consistently thereafter.
504          */
505         down_write(&obj->oo_guard);
506         if (obj->oo_destroy == OSD_DESTROY_NONE) {
507                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
508                         obj->oo_destroy = OSD_DESTROY_SYNC;
509                 else /* Larger objects are destroyed asynchronously */
510                         obj->oo_destroy = OSD_DESTROY_ASYNC;
511         }
512         up_write(&obj->oo_guard);
513 }
514
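/*
 * Declare the updates needed to destroy an object: removal from the
 * FID-to-dnode ZAP and the inode accounting ZAPs, xattr destruction,
 * quota changes, and either an immediate free or an unlinked-set insert.
 */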
515 static int osd_declare_object_destroy(const struct lu_env *env,
516                                       struct dt_object *dt,
517                                       struct thandle *th)
518 {
519         char                    *buf = osd_oti_get(env)->oti_str;
520         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
521         struct osd_object       *obj = osd_dt_obj(dt);
522         struct osd_device       *osd = osd_obj2dev(obj);
523         struct osd_thandle      *oh;
524         int                      rc;
525         uint64_t                 zapid;
526         ENTRY;
527
528         LASSERT(th != NULL);
529         LASSERT(dt_object_exists(dt));
530
531         oh = container_of0(th, struct osd_thandle, ot_super);
532         LASSERT(oh->ot_tx != NULL);
533
534         /* declare that we'll remove object from fid-dnode mapping */
535         zapid = osd_get_name_n_idx(env, osd, fid, buf);
536         dmu_tx_hold_bonus(oh->ot_tx, zapid);
537         dmu_tx_hold_zap(oh->ot_tx, zapid, FALSE, buf);
538
539         osd_declare_xattrs_destroy(env, obj, oh);
540
541         /* declare that we'll remove object from inode accounting ZAPs */
542         dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
543         dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, FALSE, buf);
544         dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
545         dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, FALSE, buf);
546
547         /* one less inode */
548         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
549                                obj->oo_attr.la_gid, -1, oh, false, NULL, false);
550         if (rc)
551                 RETURN(rc);
552
553         /* data to be truncated */
554         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
555                                obj->oo_attr.la_gid, 0, oh, true, NULL, false);
556         if (rc)
557                 RETURN(rc);
558
559         osd_object_set_destroy_type(obj);
560         if (obj->oo_destroy == OSD_DESTROY_SYNC)
561                 dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object,
562                                  0, DMU_OBJECT_END);
563         else
564                 dmu_tx_hold_zap(oh->ot_tx, osd->od_unlinkedid, TRUE, NULL);
565
566         RETURN(0);
567 }
568
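/*
 * Destroy an object: drop its name from the FID-to-dnode ZAP, update the
 * inode accounting ZAPs and either free the dnode right away (synchronous
 * destroy) or add it to the unlinked set for asynchronous destroy.
 */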
569 static int osd_object_destroy(const struct lu_env *env,
570                               struct dt_object *dt, struct thandle *th)
571 {
572         char                    *buf = osd_oti_get(env)->oti_str;
573         struct osd_object       *obj = osd_dt_obj(dt);
574         struct osd_device       *osd = osd_obj2dev(obj);
575         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
576         struct osd_thandle      *oh;
577         int                      rc;
578         uint64_t                 oid, zapid;
579         ENTRY;
580
581         down_write(&obj->oo_guard);
582
583         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
584                 GOTO(out, rc = -ENOENT);
585
586         LASSERT(obj->oo_db != NULL);
587
588         oh = container_of0(th, struct osd_thandle, ot_super);
589         LASSERT(oh != NULL);
590         LASSERT(oh->ot_tx != NULL);
591
592         /* remove the object's name from its index ZAP (which ZAP depends on the FID) */
593         zapid = osd_get_name_n_idx(env, osd, fid, buf);
594         rc = -zap_remove(osd->od_os, zapid, buf, oh->ot_tx);
595         if (rc) {
596                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
597                        osd->od_svname, buf, rc);
598                 GOTO(out, rc);
599         }
600
601         rc = osd_xattrs_destroy(env, obj, oh);
602         if (rc) {
603                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
604                        osd->od_svname, buf, rc);
605                 GOTO(out, rc);
606         }
607
608         /* Remove object from inode accounting. It is not fatal for the destroy
609          * operation if something goes wrong while updating accounting, but we
610          * still log an error message to notify the administrator */
611         rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
612                                 obj->oo_attr.la_uid, -1, oh->ot_tx);
613         if (rc)
614                 CERROR("%s: failed to remove "DFID" from accounting ZAP for usr"
615                        " %d: rc = %d\n", osd->od_svname, PFID(fid),
616                        obj->oo_attr.la_uid, rc);
617         rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
618                                 obj->oo_attr.la_gid, -1, oh->ot_tx);
619         if (rc)
620                 CERROR("%s: failed to remove "DFID" from accounting ZAP for grp"
621                        " %d: rc = %d\n", osd->od_svname, PFID(fid),
622                        obj->oo_attr.la_gid, rc);
623
624         oid = obj->oo_db->db_object;
625         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
626                 /* this may happen if the destroy wasn't declared
627                  * e.g. when the object is created and then destroyed
628                  * in the same transaction - we don't need additional
629                  * space for destroy specifically */
630                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
631                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
632                 if (rc)
633                         CERROR("%s: failed to free %s "LPU64": rc = %d\n",
634                                osd->od_svname, buf, oid, rc);
635         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
636                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
637                 if (rc)
638                         CERROR("%s: failed to free %s "LPU64": rc = %d\n",
639                                osd->od_svname, buf, oid, rc);
640         } else { /* asynchronous destroy */
641                 rc = osd_object_unlinked_add(obj, oh);
642                 if (rc)
643                         GOTO(out, rc);
644
645                 rc = -zap_add_int(osd->od_os, osd->od_unlinkedid,
646                                   oid, oh->ot_tx);
647                 if (rc)
648                         CERROR("%s: zap_add_int() failed %s "LPU64": rc = %d\n",
649                                osd->od_svname, buf, oid, rc);
650         }
651
652 out:
653         /* not needed in the cache anymore */
654         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
655         if (rc == 0)
656                 obj->oo_destroyed = 1;
657         up_write(&obj->oo_guard);
658         RETURN (0);
659 }
660
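/*
 * Release the low-level resources attached to the object: the SA handle,
 * the cached SA xattr nvlist and the dbuf hold.
 */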
661 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
662 {
663         struct osd_object *obj = osd_obj(l);
664
665         if (obj->oo_db != NULL) {
666                 osd_object_sa_fini(obj);
667                 if (obj->oo_sa_xattr) {
668                         nvlist_free(obj->oo_sa_xattr);
669                         obj->oo_sa_xattr = NULL;
670                 }
671                 sa_buf_rele(obj->oo_db, osd_obj_tag);
672                 list_del(&obj->oo_sa_linkage);
673                 obj->oo_db = NULL;
674         }
675 }
676
677 /*
678  * Concurrency: ->loo_object_release() is called under site spin-lock.
679  */
680 static void osd_object_release(const struct lu_env *env,
681                                struct lu_object *l)
682 {
683 }
684
685 /*
686  * Concurrency: shouldn't matter.
687  */
688 static int osd_object_print(const struct lu_env *env, void *cookie,
689                             lu_printer_t p, const struct lu_object *l)
690 {
691         struct osd_object *o = osd_obj(l);
692
693         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
694 }
695
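/*
 * dt_object lock primitives implemented on top of the oo_sem rw-semaphore.
 */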
696 static void osd_object_read_lock(const struct lu_env *env,
697                                  struct dt_object *dt, unsigned role)
698 {
699         struct osd_object *obj = osd_dt_obj(dt);
700
701         LASSERT(osd_invariant(obj));
702
703         down_read_nested(&obj->oo_sem, role);
704 }
705
706 static void osd_object_write_lock(const struct lu_env *env,
707                                   struct dt_object *dt, unsigned role)
708 {
709         struct osd_object *obj = osd_dt_obj(dt);
710
711         LASSERT(osd_invariant(obj));
712
713         down_write_nested(&obj->oo_sem, role);
714 }
715
716 static void osd_object_read_unlock(const struct lu_env *env,
717                                    struct dt_object *dt)
718 {
719         struct osd_object *obj = osd_dt_obj(dt);
720
721         LASSERT(osd_invariant(obj));
722         up_read(&obj->oo_sem);
723 }
724
725 static void osd_object_write_unlock(const struct lu_env *env,
726                                     struct dt_object *dt)
727 {
728         struct osd_object *obj = osd_dt_obj(dt);
729
730         LASSERT(osd_invariant(obj));
731         up_write(&obj->oo_sem);
732 }
733
734 static int osd_object_write_locked(const struct lu_env *env,
735                                    struct dt_object *dt)
736 {
737         struct osd_object *obj = osd_dt_obj(dt);
738         int rc = 1;
739
740         LASSERT(osd_invariant(obj));
741
742         if (down_write_trylock(&obj->oo_sem)) {
743                 rc = 0;
744                 up_write(&obj->oo_sem);
745         }
746         return rc;
747 }
748
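/*
 * Return the cached attributes of the object; block size and block count
 * are refreshed from the DMU on every call.
 */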
749 static int osd_attr_get(const struct lu_env *env,
750                         struct dt_object *dt,
751                         struct lu_attr *attr)
752 {
753         struct osd_object       *obj = osd_dt_obj(dt);
754         uint64_t                 blocks;
755         uint32_t                 blksize;
756         int                      rc = 0;
757
758         down_read(&obj->oo_guard);
759
760         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
761                 GOTO(out, rc = -ENOENT);
762
763         LASSERT(osd_invariant(obj));
764         LASSERT(obj->oo_db);
765
766         read_lock(&obj->oo_attr_lock);
767         *attr = obj->oo_attr;
768         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
769                 attr->la_flags |= LUSTRE_ORPHAN_FL;
770         read_unlock(&obj->oo_attr_lock);
771
772         /* with ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
773          * from within sa_object_size() can block on a mutex, so
774          * we can't call sa_object_size() while holding the rwlock */
775         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
776         /* we do not control the size of indices, so always calculate
777          * it from the number of blocks reported by the DMU */
778         if (S_ISDIR(attr->la_mode))
779                 attr->la_size = 512 * blocks;
780         /* Block size may not be set; suggest maximal I/O transfers. */
781         if (blksize == 0)
782                 blksize = osd_spa_maxblocksize(
783                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
784
785         attr->la_blksize = blksize;
786         attr->la_blocks = blocks;
787         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
788
789 out:
790         up_read(&obj->oo_guard);
791         return rc;
792 }
793
794 /* Simple wrapper on top of the qsd API which implements quota transfer for
795  * osd setattr needs. As a reminder, only the root user can change the
796  * ownership of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
797 static inline int qsd_transfer(const struct lu_env *env,
798                                struct qsd_instance *qsd,
799                                struct lquota_trans *trans, int qtype,
800                                __u64 orig_id, __u64 new_id, __u64 bspace,
801                                struct lquota_id_info *qi)
802 {
803         int     rc;
804
805         if (unlikely(qsd == NULL))
806                 return 0;
807
808         LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
809         qi->lqi_type = qtype;
810
811         /* inode accounting */
812         qi->lqi_is_blk = false;
813
814         /* one more inode for the new owner ... */
815         qi->lqi_id.qid_uid = new_id;
816         qi->lqi_space      = 1;
817         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
818         if (rc == -EDQUOT || rc == -EINPROGRESS)
819                 rc = 0;
820         if (rc)
821                 return rc;
822
823         /* and one less inode for the current id */
824         qi->lqi_id.qid_uid = orig_id;
825         qi->lqi_space      = -1;
826         /* can't get EDQUOT when reducing usage */
827         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
828         if (rc == -EINPROGRESS)
829                 rc = 0;
830         if (rc)
831                 return rc;
832
833         /* block accounting */
834         qi->lqi_is_blk = true;
835
836         /* more blocks for the new owner ... */
837         qi->lqi_id.qid_uid = new_id;
838         qi->lqi_space      = bspace;
839         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
840         if (rc == -EDQUOT || rc == -EINPROGRESS)
841                 rc = 0;
842         if (rc)
843                 return rc;
844
845         /* and finally less blocks for the current owner */
846         qi->lqi_id.qid_uid = orig_id;
847         qi->lqi_space      = -bspace;
848         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
849         /* can't get EDQUOT when reducing usage */
850         if (rc == -EINPROGRESS)
851                 rc = 0;
852         return rc;
853 }
854
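/*
 * Declare an attribute update: reserve the SA update, a possible LMA xattr
 * update and, on ownership change, the inode accounting ZAP updates and the
 * corresponding quota transfer.
 */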
855 static int osd_declare_attr_set(const struct lu_env *env,
856                                 struct dt_object *dt,
857                                 const struct lu_attr *attr,
858                                 struct thandle *handle)
859 {
860         struct osd_thread_info  *info = osd_oti_get(env);
861         char                    *buf = osd_oti_get(env)->oti_str;
862         struct osd_object       *obj = osd_dt_obj(dt);
863         struct osd_device       *osd = osd_obj2dev(obj);
864         struct osd_thandle      *oh;
865         uint64_t                 bspace;
866         uint32_t                 blksize;
867         int                      rc = 0;
868         ENTRY;
869
870
871         LASSERT(handle != NULL);
872         LASSERT(osd_invariant(obj));
873
874         oh = container_of0(handle, struct osd_thandle, ot_super);
875
876         down_read(&obj->oo_guard);
877         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
878                 GOTO(out, rc = 0);
879
880         LASSERT(obj->oo_sa_hdl != NULL);
881         LASSERT(oh->ot_tx != NULL);
882         dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
883         if (oh->ot_tx->tx_err != 0)
884                 GOTO(out, rc = -oh->ot_tx->tx_err);
885
886         sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
887         bspace = toqb(bspace * blksize);
888
889         __osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
890                                 XATTR_NAME_LMA, oh);
891
892         if (attr && attr->la_valid & LA_UID) {
893                 /* account for user inode tracking ZAP update */
894                 dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
895                 dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);
896
897                 /* quota enforcement for user */
898                 if (attr->la_uid != obj->oo_attr.la_uid) {
899                         rc = qsd_transfer(env, osd->od_quota_slave,
900                                           &oh->ot_quota_trans, USRQUOTA,
901                                           obj->oo_attr.la_uid, attr->la_uid,
902                                           bspace, &info->oti_qi);
903                         if (rc)
904                                 GOTO(out, rc);
905                 }
906         }
907         if (attr && attr->la_valid & LA_GID) {
908                 /* account for group inode tracking ZAP update */
909                 dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
910                 dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);
911
912                 /* quota enforcement for group */
913                 if (attr->la_gid != obj->oo_attr.la_gid) {
914                         rc = qsd_transfer(env, osd->od_quota_slave,
915                                           &oh->ot_quota_trans, GRPQUOTA,
916                                           obj->oo_attr.la_gid, attr->la_gid,
917                                           bspace, &info->oti_qi);
918                         if (rc)
919                                 GOTO(out, rc);
920                 }
921         }
922
923 out:
924         up_read(&obj->oo_guard);
925         RETURN(rc);
926 }
927
928 /*
929  * Set the attributes of an object
930  *
931  * The transaction passed to this routine must have
932  * dmu_tx_hold_bonus(tx, oid) called and then assigned
933  * to a transaction group.
934  */
935 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
936                         const struct lu_attr *la, struct thandle *handle)
937 {
938         struct osd_thread_info  *info = osd_oti_get(env);
939         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
940         struct osd_object       *obj = osd_dt_obj(dt);
941         struct osd_device       *osd = osd_obj2dev(obj);
942         struct osd_thandle      *oh;
943         struct osa_attr         *osa = &info->oti_osa;
944         __u64                    valid = la->la_valid;
945         int                      cnt;
946         int                      rc = 0;
947
948         ENTRY;
949
950         down_read(&obj->oo_guard);
951         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
952                 GOTO(out, rc = -ENOENT);
953
954         LASSERT(handle != NULL);
955         LASSERT(osd_invariant(obj));
956         LASSERT(obj->oo_sa_hdl);
957
958         oh = container_of0(handle, struct osd_thandle, ot_super);
959         /* Assert that the transaction has been assigned to a
960            transaction group. */
961         LASSERT(oh->ot_tx->tx_txg != 0);
962
963         /* Only allow set size for regular file */
964         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
965                 valid &= ~(LA_SIZE | LA_BLOCKS);
966
967         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
968                 valid &= ~LA_CTIME;
969
970         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
971                 valid &= ~LA_MTIME;
972
973         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
974                 valid &= ~LA_ATIME;
975
976         if (valid == 0)
977                 GOTO(out, rc = 0);
978
979         if (valid & LA_FLAGS) {
980                 struct lustre_mdt_attrs *lma;
981                 struct lu_buf buf;
982
983                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
984                         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
985                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
986                         buf.lb_buf = lma;
987                         buf.lb_len = sizeof(info->oti_buf);
988                         rc = osd_xattr_get(env, &obj->oo_dt, &buf,
989                                            XATTR_NAME_LMA);
990                         if (rc > 0) {
991                                 lma->lma_incompat =
992                                         le32_to_cpu(lma->lma_incompat);
993                                 lma->lma_incompat |=
994                                         lustre_to_lma_flags(la->la_flags);
995                                 lma->lma_incompat =
996                                         cpu_to_le32(lma->lma_incompat);
997                                 buf.lb_buf = lma;
998                                 buf.lb_len = sizeof(*lma);
999                                 rc = osd_xattr_set_internal(env, obj, &buf,
1000                                                             XATTR_NAME_LMA,
1001                                                             LU_XATTR_REPLACE,
1002                                                             oh);
1003                         }
1004                         if (rc < 0) {
1005                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
1006                                        osd->od_svname, rc);
1007                                 RETURN(rc);
1008                         }
1009                 }
1010         }
1011
1012         /* do both accounting updates outside oo_attr_lock below */
1013         if ((valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
1014                 /* Update user accounting. Failure isn't fatal, but we still
1015                  * log an error message */
1016                 rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
1017                                         la->la_uid, 1, oh->ot_tx);
1018                 if (rc)
1019                         CERROR("%s: failed to update accounting ZAP for user "
1020                                 "%d (%d)\n", osd->od_svname, la->la_uid, rc);
1021                 rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
1022                                         obj->oo_attr.la_uid, -1, oh->ot_tx);
1023                 if (rc)
1024                         CERROR("%s: failed to update accounting ZAP for user "
1025                                 "%d (%d)\n", osd->od_svname,
1026                                 obj->oo_attr.la_uid, rc);
1027         }
1028         if ((valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
1029                 /* Update group accounting. Failure isn't fatal, but we still
1030                  * log an error message */
1031                 rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
1032                                         la->la_gid, 1, oh->ot_tx);
1033                 if (rc)
1034                         CERROR("%s: failed to update accounting ZAP for group "
1035                                 "%d (%d)\n", osd->od_svname, la->la_gid, rc);
1036                 rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
1037                                         obj->oo_attr.la_gid, -1, oh->ot_tx);
1038                 if (rc)
1039                         CERROR("%s: failed to update accounting ZAP for group "
1040                                 "%d (%d)\n", osd->od_svname,
1041                                 obj->oo_attr.la_gid, rc);
1042         }
1043
1044         write_lock(&obj->oo_attr_lock);
1045         cnt = 0;
1046         if (valid & LA_ATIME) {
1047                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1048                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1049                                  osa->atime, 16);
1050         }
1051         if (valid & LA_MTIME) {
1052                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1053                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1054                                  osa->mtime, 16);
1055         }
1056         if (valid & LA_CTIME) {
1057                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1058                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1059                                  osa->ctime, 16);
1060         }
1061         if (valid & LA_MODE) {
1062                 /* mode is stored along with type, so read it first */
1063                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1064                         (la->la_mode & ~S_IFMT);
1065                 osa->mode = obj->oo_attr.la_mode;
1066                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1067                                  &osa->mode, 8);
1068         }
1069         if (valid & LA_SIZE) {
1070                 osa->size = obj->oo_attr.la_size = la->la_size;
1071                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1072                                  &osa->size, 8);
1073         }
1074         if (valid & LA_NLINK) {
1075                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1076                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1077                                  &osa->nlink, 8);
1078         }
1079         if (valid & LA_RDEV) {
1080                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1081                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1082                                  &osa->rdev, 8);
1083         }
1084         if (valid & LA_FLAGS) {
1085                 osa->flags = attrs_fs2zfs(la->la_flags);
1086                 /* many flags are not supported by ZFS, so round-trip the
1087                  * conversion to keep the cached copy accurate */
1088                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1089                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1090                                  &osa->flags, 8);
1091         }
1092         if (valid & LA_UID) {
1093                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1094                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1095                                  &osa->uid, 8);
1096         }
1097         if (valid & LA_GID) {
1098                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1099                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1100                                  &osa->gid, 8);
1101         }
1102         obj->oo_attr.la_valid |= valid;
1103         write_unlock(&obj->oo_attr_lock);
1104
1105         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1106         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1107
1108 out:
1109         up_read(&obj->oo_guard);
1110         RETURN(rc);
1111 }
1112
1113 /*
1114  * Object creation.
1115  *
1116  * XXX temporary solution.
1117  */
1118
1119 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1120                         struct dt_object *parent, struct dt_object *child,
1121                         umode_t child_mode)
1122 {
1123         LASSERT(ah);
1124
1125         ah->dah_parent = parent;
1126         ah->dah_mode = child_mode;
1127 }
1128
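/*
 * Declare object creation: reserve the new dnode (or ZAP), the name entry
 * in the FID-to-dnode ZAP, the inode accounting ZAP updates, the SA and
 * LMA initialization, and the quota for one more inode.
 */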
1129 static int osd_declare_object_create(const struct lu_env *env,
1130                                      struct dt_object *dt,
1131                                      struct lu_attr *attr,
1132                                      struct dt_allocation_hint *hint,
1133                                      struct dt_object_format *dof,
1134                                      struct thandle *handle)
1135 {
1136         char                    *buf = osd_oti_get(env)->oti_str;
1137         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1138         struct osd_object       *obj = osd_dt_obj(dt);
1139         struct osd_device       *osd = osd_obj2dev(obj);
1140         struct osd_thandle      *oh;
1141         uint64_t                 zapid;
1142         int                      rc;
1143         ENTRY;
1144
1145         LASSERT(dof);
1146
1147         switch (dof->dof_type) {
1148                 case DFT_REGULAR:
1149                 case DFT_SYM:
1150                 case DFT_NODE:
1151                         if (obj->oo_dt.do_body_ops == NULL)
1152                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1153                         break;
1154                 default:
1155                         break;
1156         }
1157
1158         LASSERT(handle != NULL);
1159         oh = container_of0(handle, struct osd_thandle, ot_super);
1160         LASSERT(oh->ot_tx != NULL);
1161
1162         switch (dof->dof_type) {
1163                 case DFT_DIR:
1164                         dt->do_index_ops = &osd_dir_ops;
1165                 case DFT_INDEX:
1166                         /* for zap create */
1167                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, 1, NULL);
1168                         break;
1169                 case DFT_REGULAR:
1170                 case DFT_SYM:
1171                 case DFT_NODE:
1172                         /* first, we'll create new object */
1173                         dmu_tx_hold_bonus(oh->ot_tx, DMU_NEW_OBJECT);
1174                         break;
1175
1176                 default:
1177                         LBUG();
1178                         break;
1179         }
1180
1181         /* and we'll add it to some mapping */
1182         zapid = osd_get_name_n_idx(env, osd, fid, buf);
1183         dmu_tx_hold_bonus(oh->ot_tx, zapid);
1184         dmu_tx_hold_zap(oh->ot_tx, zapid, TRUE, buf);
1185
1186         /* we will also update inode accounting ZAPs */
1187         dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
1188         dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);
1189         dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
1190         dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);
1191
1192         dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
1193
1194         __osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
1195                                 XATTR_NAME_LMA, oh);
1196
1197         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
1198                                false, NULL, false);
1199         RETURN(rc);
1200 }
1201
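/*
 * Initialize the full set of SA attributes of a newly created object with
 * a single bulk operation; see the ordering note inside.
 */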
1202 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1203                     uint64_t oid, dmu_tx_t *tx, struct lu_attr *la,
1204                     uint64_t parent)
1205 {
1206         sa_handle_t     *sa_hdl;
1207         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
1208         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1209         uint64_t         gen;
1210         uint64_t         crtime[2];
1211         timestruc_t      now;
1212         int              cnt;
1213         int              rc;
1214
1215         gethrestime(&now);
1216         gen = dmu_tx_get_txg(tx);
1217
1218         ZFS_TIME_ENCODE(&now, crtime);
1219
1220         osa->atime[0] = la->la_atime;
1221         osa->ctime[0] = la->la_ctime;
1222         osa->mtime[0] = la->la_mtime;
1223         osa->mode = la->la_mode;
1224         osa->uid = la->la_uid;
1225         osa->gid = la->la_gid;
1226         osa->rdev = la->la_rdev;
1227         osa->nlink = la->la_nlink;
1228         osa->flags = attrs_fs2zfs(la->la_flags);
1229         osa->size  = la->la_size;
1230
1231         /* Now add in all of the "SA" attributes */
1232         rc = -sa_handle_get(osd->od_os, oid, NULL, SA_HDL_PRIVATE, &sa_hdl);
1233         if (rc)
1234                 return rc;
1235
1236         /*
1237          * we need to create all SA below upon object create.
1238          *
1239          * XXX The attribute order matters since the accounting callback relies
1240          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1241          * look up the UID/GID attributes. Moreover, the callback does not seem
1242          * to support the spill block.
1243          * We define attributes in the same order as SA_*_OFFSET in order to
1244          * work around the problem. See ORI-610.
1245          */
1246         cnt = 0;
1247         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1248         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1249         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1250         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1251         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1252         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1253         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1254         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1255         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1256         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1257         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
1258         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1259         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1260         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1261
1262         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1263
1264         sa_handle_destroy(sa_hdl);
1265         return rc;
1266 }
1267
1268 /*
1269  * The transaction passed to this routine must have
1270  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1271  * to a transaction group.
1272  */
1273 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
1274                         dmu_buf_t **dbp, dmu_tx_t *tx, struct lu_attr *la,
1275                         uint64_t parent)
1276 {
1277         uint64_t             oid;
1278         int                  rc;
1279         struct osd_device   *osd = osd_obj2dev(obj);
1280         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1281         dmu_object_type_t    type = DMU_OT_PLAIN_FILE_CONTENTS;
1282
1283         /* Assert that the transaction has been assigned to a
1284            transaction group. */
1285         LASSERT(tx->tx_txg != 0);
1286
1287         /* Use DMU_OTN_UINT8_METADATA for local objects so that their data
1288          * blocks get an additional ditto copy */
1289         if (unlikely(S_ISREG(la->la_mode) &&
1290                      fid_seq_is_local_file(fid_seq(fid))))
1291                 type = DMU_OTN_UINT8_METADATA;
1292
1293         /* Create a new DMU object using the default dnode size. */
1294         oid = osd_dmu_object_alloc(osd->od_os, type, 0, 0, tx);
1295         rc = -sa_buf_hold(osd->od_os, oid, osd_obj_tag, dbp);
1296         LASSERTF(rc == 0, "sa_buf_hold "LPU64" failed: %d\n", oid, rc);
1297
1298         LASSERT(la->la_valid & LA_MODE);
1299         la->la_size = 0;
1300         la->la_nlink = 1;
1301
1302         rc = __osd_attr_init(env, osd, oid, tx, la, parent);
1303         if (rc != 0) {
1304                 sa_buf_rele(*dbp, osd_obj_tag);
1305                 *dbp = NULL;
1306                 dmu_object_free(osd->od_os, oid, tx);
1307                 return rc;
1308         }
1309
1310         return 0;
1311 }
1312
1313 /*
1314  * The transaction passed to this routine must have
1315  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1316  * to a transaction group.
1317  *
1318  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1319  * This is fine for directories today, because storing the FID in the dirent
1320  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1321  * then we might need to re-evaluate the use of this flag and instead do
1322  * a conversion from the different internal ZAP hash formats being used. */
1323 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1324                      dmu_buf_t **zap_dbp, dmu_tx_t *tx,
1325                      struct lu_attr *la, uint64_t parent, zap_flags_t flags)
1326 {
1327         uint64_t oid;
1328         int      rc;
1329
1330         /* Assert that the transaction has been assigned to a
1331            transaction group. */
1332         LASSERT(tx->tx_txg != 0);
1333
1334         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1335                                    DMU_OT_DIRECTORY_CONTENTS,
1336                                    14, /* == ZFS fzap_default_blockshift */
1337                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1338                                    0, tx);
1339
1340         rc = -sa_buf_hold(osd->od_os, oid, osd_obj_tag, zap_dbp);
1341         if (rc)
1342                 return rc;
1343
1344         LASSERT(la->la_valid & LA_MODE);
1345         la->la_size = 2;
1346         la->la_nlink = 1;
1347
1348         return __osd_attr_init(env, osd, oid, tx, la, parent);
1349 }
1350
1351 static dmu_buf_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1352                             struct lu_attr *la, uint64_t parent,
1353                             struct osd_thandle *oh)
1354 {
1355         dmu_buf_t *db;
1356         int        rc;
1357
1358         /* Index files should be created as regular files so as not to confuse
1359          * the ZPL, which could interpret them as directories.
1360          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1361          * binary keys */
1362         LASSERT(S_ISREG(la->la_mode));
1363         rc = __osd_zap_create(env, osd_obj2dev(obj), &db, oh->ot_tx, la, parent,
1364                               ZAP_FLAG_UINT64_KEY);
1365         if (rc)
1366                 return ERR_PTR(rc);
1367         return db;
1368 }
1369
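/*
 * Create a directory object backed by a string-keyed ZAP.
 */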
1370 static dmu_buf_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1371                             struct lu_attr *la, uint64_t parent,
1372                             struct osd_thandle *oh)
1373 {
1374         dmu_buf_t *db;
1375         int        rc;
1376
1377         LASSERT(S_ISDIR(la->la_mode));
1378         rc = __osd_zap_create(env, osd_obj2dev(obj), &db,
1379                               oh->ot_tx, la, parent, 0);
1380         if (rc)
1381                 return ERR_PTR(rc);
1382         return db;
1383 }
1384
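/*
 * Create a regular-file object; for OST objects the maximum blocksize is
 * set right away, see the note below.
 */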
1385 static dmu_buf_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1386                             struct lu_attr *la, uint64_t parent,
1387                             struct osd_thandle *oh)
1388 {
1389         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1390         dmu_buf_t           *db;
1391         int                  rc;
1392         struct osd_device *osd = osd_obj2dev(obj);
1393
1394         LASSERT(S_ISREG(la->la_mode));
1395         rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
1396         if (rc)
1397                 return ERR_PTR(rc);
1398
1399         /*
1400          * XXX: This heuristic is non-optimal.  It would be better to
1401          * increase the blocksize up to osd->od_max_blksz during the write.
1402          * This is exactly how the ZPL behaves, and it ensures that the right
1403          * blocksize is selected based on the file size rather than on
1404          * broad assumptions about the OSD type.
1405          */
1406         if ((fid_is_idif(fid) || fid_is_norm(fid)) && osd->od_is_ost) {
1407                 rc = -dmu_object_set_blocksize(osd->od_os, db->db_object,
1408                                                osd->od_max_blksz, 0, oh->ot_tx);
1409                 if (unlikely(rc)) {
1410                         CERROR("%s: can't change blocksize: %d\n",
1411                                osd->od_svname, rc);
1412                         return ERR_PTR(rc);
1413                 }
1414         }
1415
1416         return db;
1417 }
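/*
 * Illustrative sketch only, not part of the build: the alternative the
 * XXX comment above describes, i.e. growing the blocksize together with
 * the file (as the ZPL does) instead of setting od_max_blksz up front.
 * The helper name, the "end" parameter and the grow-in-one-step policy
 * are hypothetical, and the tx is assumed to already hold this object.
 */
#if 0
static int osd_grow_blocksize_sketch(struct osd_device *osd, dmu_buf_t *db,
				     uint64_t end, dmu_tx_t *tx)
{
	uint32_t     blksz;
	u_longlong_t nblk;
	uint64_t     newsz;

	dmu_object_size_from_db(db, &blksz, &nblk);

	/* nothing to do once the cap or the write size has been reached */
	if (blksz >= osd->od_max_blksz || (uint64_t)blksz >= end)
		return 0;

	/* grow to the smaller of the write end and the cap; a real
	 * implementation must also respect the DMU restriction that the
	 * blocksize can only change while the object has a single block */
	newsz = MIN(end, (uint64_t)osd->od_max_blksz);

	return -dmu_object_set_blocksize(osd->od_os, db->db_object,
					 newsz, 0, tx);
}
#endif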
1418
1419 static dmu_buf_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1420                             struct lu_attr *la, uint64_t parent,
1421                             struct osd_thandle *oh)
1422 {
1423         dmu_buf_t *db;
1424         int        rc;
1425
1426         LASSERT(S_ISLNK(la->la_mode));
1427         rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
1428         if (rc)
1429                 return ERR_PTR(rc);
1430         return db;
1431 }
1432
1433 static dmu_buf_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1434                             struct lu_attr *la, uint64_t parent,
1435                             struct osd_thandle *oh)
1436 {
1437         dmu_buf_t *db;
1438         int        rc;
1439
1440         la->la_valid = LA_MODE;
1441         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1442                 la->la_valid |= LA_RDEV;
1443
1444         rc = __osd_object_create(env, obj, &db, oh->ot_tx, la, parent);
1445         if (rc)
1446                 return ERR_PTR(rc);
1447         return db;
1448 }
1449
1450 typedef dmu_buf_t *(*osd_obj_type_f)(const struct lu_env *env,
1451                                      struct osd_object *obj,
1452                                      struct lu_attr *la,
1453                                      uint64_t parent,
1454                                      struct osd_thandle *oh);
1455
1456 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1457 {
1458         osd_obj_type_f result;
1459
1460         switch (type) {
1461         case DFT_DIR:
1462                 result = osd_mkdir;
1463                 break;
1464         case DFT_INDEX:
1465                 result = osd_mkidx;
1466                 break;
1467         case DFT_REGULAR:
1468                 result = osd_mkreg;
1469                 break;
1470         case DFT_SYM:
1471                 result = osd_mksym;
1472                 break;
1473         case DFT_NODE:
1474                 result = osd_mknod;
1475                 break;
1476         default:
1477                 LBUG();
1478                 break;
1479         }
1480         return result;
1481 }
1482
1483 /*
1484  * Primitives for directory (i.e. ZAP) handling
1485  */
1486 static inline int osd_init_lma(const struct lu_env *env, struct osd_object *obj,
1487                                const struct lu_fid *fid, struct osd_thandle *oh)
1488 {
1489         struct osd_thread_info  *info = osd_oti_get(env);
1490         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1491         struct lu_buf            buf;
1492         int rc;
1493
1494         lustre_lma_init(lma, fid, 0, 0);
1495         lustre_lma_swab(lma);
1496         buf.lb_buf = lma;
1497         buf.lb_len = sizeof(*lma);
1498
1499         rc = osd_xattr_set_internal(env, obj, &buf, XATTR_NAME_LMA,
1500                                     LU_XATTR_CREATE, oh);
1501
1502         return rc;
1503 }
1504
1505 /*
1506  * Concurrency: @dt is write locked.
1507  */
1508 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1509                              struct lu_attr *attr,
1510                              struct dt_allocation_hint *hint,
1511                              struct dt_object_format *dof,
1512                              struct thandle *th)
1513 {
1514         struct zpl_direntry     *zde = &osd_oti_get(env)->oti_zde.lzd_reg;
1515         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1516         struct osd_object       *obj = osd_dt_obj(dt);
1517         struct osd_device       *osd = osd_obj2dev(obj);
1518         char                    *buf = osd_oti_get(env)->oti_str;
1519         struct osd_thandle      *oh;
1520         dmu_buf_t               *db;
1521         uint64_t                 zapid;
1522         int                      rc;
1523
1524         ENTRY;
1525
1526         /* Concurrent create declarations should not see the object
1527          * in an inconsistent state (db, attr, etc).
1528          * In regular cases acquisition of the lock should be cheap. */
1529         down_write(&obj->oo_guard);
1530
1531         if (unlikely(dt_object_exists(dt)))
1532                 GOTO(out, rc = -EEXIST);
1533
1534         LASSERT(osd_invariant(obj));
1535         LASSERT(dof != NULL);
1536
1537         LASSERT(th != NULL);
1538         oh = container_of0(th, struct osd_thandle, ot_super);
1539
1540         /*
1541          * XXX missing: Quota handling.
1542          */
1543
1544         LASSERT(obj->oo_db == NULL);
1545
1546         /* To follow the ZFS on-disk format we need to
1547          * initialize the parent dnode properly. */
1548         zapid = 0;
1549         if (hint != NULL && hint->dah_parent != NULL &&
1550             !dt_object_remote(hint->dah_parent))
1551                 zapid = osd_dt_obj(hint->dah_parent)->oo_db->db_object;
1552
1553         db = osd_create_type_f(dof->dof_type)(env, obj, attr, zapid, oh);
1554         if (IS_ERR(db))
1555                 GOTO(out, rc = PTR_ERR(db));
1556
1557         zde->zde_pad = 0;
1558         zde->zde_dnode = db->db_object;
1559         zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1560
1561         zapid = osd_get_name_n_idx(env, osd, fid, buf);
1562
1563         rc = -zap_add(osd->od_os, zapid, buf, 8, 1, zde, oh->ot_tx);
1564         if (rc)
1565                 GOTO(out, rc);
1566
1567         /* Add the new object to inode accounting.
1568          * Errors are not considered fatal. */
1569         rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
1570                                 (attr->la_valid & LA_UID) ? attr->la_uid : 0, 1,
1571                                 oh->ot_tx);
1572         if (rc)
1573                 CERROR("%s: failed to add "DFID" to accounting ZAP for usr %d "
1574                         "(%d)\n", osd->od_svname, PFID(fid), attr->la_uid, rc);
1575         rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
1576                                 (attr->la_valid & LA_GID) ? attr->la_gid : 0, 1,
1577                                 oh->ot_tx);
1578         if (rc)
1579                 CERROR("%s: failed to add "DFID" to accounting ZAP for grp %d "
1580                         "(%d)\n", osd->od_svname, PFID(fid), attr->la_gid, rc);
1581
1582         /* configure new osd object */
1583         obj->oo_db = db;
1584         rc = osd_object_init0(env, obj);
1585         LASSERT(ergo(rc == 0, dt_object_exists(dt)));
1586         LASSERT(osd_invariant(obj));
1587
1588         rc = osd_init_lma(env, obj, fid, oh);
1589         if (rc != 0)
1590                 CERROR("%s: can not set LMA on "DFID": rc = %d\n",
1591                        osd->od_svname, PFID(fid), rc);
1592
1593 out:
1594         up_write(&obj->oo_guard);
1595         RETURN(rc);
1596 }
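/*
 * Illustrative sketch only, not part of the build: the accounting ZAPs
 * updated above (od_iusr_oid / od_igrp_oid) map a numeric uid/gid key
 * to an object count, bumped with zap_increment_int() on create.  The
 * counterpart lookup below shows how such an entry can be read back;
 * the helper name and the "missing entry means zero" policy are
 * hypothetical.
 */
#if 0
static int osd_acct_lookup_sketch(struct osd_device *osd, uint64_t uid,
				  uint64_t *count)
{
	int rc;

	/* zap_increment_int() and zap_lookup_int_key() use the same
	 * internal key encoding, so the id can be passed directly */
	rc = -zap_lookup_int_key(osd->od_os, osd->od_iusr_oid, uid, count);
	if (rc == -ENOENT) {
		/* no objects accounted to this id yet */
		*count = 0;
		rc = 0;
	}
	return rc;
}
#endif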
1597
1598 static int osd_declare_object_ref_add(const struct lu_env *env,
1599                                       struct dt_object *dt,
1600                                       struct thandle *th)
1601 {
1602         return osd_declare_attr_set(env, dt, NULL, th);
1603 }
1604
1605 /*
1606  * Concurrency: @dt is write locked.
1607  */
1608 static int osd_object_ref_add(const struct lu_env *env,
1609                               struct dt_object *dt,
1610                               struct thandle *handle)
1611 {
1612         struct osd_object       *obj = osd_dt_obj(dt);
1613         struct osd_thandle      *oh;
1614         struct osd_device       *osd = osd_obj2dev(obj);
1615         uint64_t                 nlink;
1616         int rc;
1617
1618         ENTRY;
1619
1620         down_read(&obj->oo_guard);
1621         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1622                 GOTO(out, rc = -ENOENT);
1623
1624         LASSERT(osd_invariant(obj));
1625         LASSERT(obj->oo_sa_hdl != NULL);
1626
1627         oh = container_of0(handle, struct osd_thandle, ot_super);
1628
1629         write_lock(&obj->oo_attr_lock);
1630         nlink = ++obj->oo_attr.la_nlink;
1631         write_unlock(&obj->oo_attr_lock);
1632
1633         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1634
1635 out:
1636         up_read(&obj->oo_guard);
1637         RETURN(rc);
1638 }
1639
1640 static int osd_declare_object_ref_del(const struct lu_env *env,
1641                                       struct dt_object *dt,
1642                                       struct thandle *handle)
1643 {
1644         return osd_declare_attr_set(env, dt, NULL, handle);
1645 }
1646
1647 /*
1648  * Concurrency: @dt is write locked.
1649  */
1650 static int osd_object_ref_del(const struct lu_env *env,
1651                               struct dt_object *dt,
1652                               struct thandle *handle)
1653 {
1654         struct osd_object       *obj = osd_dt_obj(dt);
1655         struct osd_thandle      *oh;
1656         struct osd_device       *osd = osd_obj2dev(obj);
1657         uint64_t                 nlink;
1658         int                      rc;
1659
1660         ENTRY;
1661
1662         down_read(&obj->oo_guard);
1663
1664         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1665                 GOTO(out, rc = -ENOENT);
1666
1667         LASSERT(osd_invariant(obj));
1668         LASSERT(obj->oo_sa_hdl != NULL);
1669
1670         oh = container_of0(handle, struct osd_thandle, ot_super);
1671         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
1672
1673         write_lock(&obj->oo_attr_lock);
1674         nlink = --obj->oo_attr.la_nlink;
1675         write_unlock(&obj->oo_attr_lock);
1676
1677         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1678
1679 out:
1680         up_read(&obj->oo_guard);
1681         RETURN(rc);
1682 }
1683
1684 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
1685                            __u64 start, __u64 end)
1686 {
1687         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1688         ENTRY;
1689
1690         /* XXX: no other option than syncing the whole filesystem until we
1691          * support ZIL.  If the object tracked the txg that it was last
1692          * modified in, it could pass that txg here instead of "0".  Maybe
1693          * the changes are already committed, so no wait is needed at all? */
1694         txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
1695
1696         RETURN(0);
1697 }
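/*
 * Illustrative sketch only, not part of the build: the narrower sync
 * the comment above suggests.  If osd_object carried a (hypothetical)
 * field recording the txg of its last modification, that value could be
 * passed to txg_wait_synced() instead of 0, and a txg that has already
 * been synced would return immediately.
 */
#if 0
static void osd_object_sync_sketch(struct osd_device *osd, uint64_t last_txg)
{
	/* wait only until last_txg reaches stable storage */
	txg_wait_synced(dmu_objset_pool(osd->od_os), last_txg);
}
#endif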
1698
1699 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
1700 {
1701         return 0;
1702 }
1703
1704 static struct dt_object_operations osd_obj_ops = {
1705         .do_read_lock           = osd_object_read_lock,
1706         .do_write_lock          = osd_object_write_lock,
1707         .do_read_unlock         = osd_object_read_unlock,
1708         .do_write_unlock        = osd_object_write_unlock,
1709         .do_write_locked        = osd_object_write_locked,
1710         .do_attr_get            = osd_attr_get,
1711         .do_declare_attr_set    = osd_declare_attr_set,
1712         .do_attr_set            = osd_attr_set,
1713         .do_ah_init             = osd_ah_init,
1714         .do_declare_create      = osd_declare_object_create,
1715         .do_create              = osd_object_create,
1716         .do_declare_destroy     = osd_declare_object_destroy,
1717         .do_destroy             = osd_object_destroy,
1718         .do_index_try           = osd_index_try,
1719         .do_declare_ref_add     = osd_declare_object_ref_add,
1720         .do_ref_add             = osd_object_ref_add,
1721         .do_declare_ref_del     = osd_declare_object_ref_del,
1722         .do_ref_del             = osd_object_ref_del,
1723         .do_xattr_get           = osd_xattr_get,
1724         .do_declare_xattr_set   = osd_declare_xattr_set,
1725         .do_xattr_set           = osd_xattr_set,
1726         .do_declare_xattr_del   = osd_declare_xattr_del,
1727         .do_xattr_del           = osd_xattr_del,
1728         .do_xattr_list          = osd_xattr_list,
1729         .do_object_sync         = osd_object_sync,
1730         .do_invalidate          = osd_invalidate,
1731 };
1732
1733 static struct lu_object_operations osd_lu_obj_ops = {
1734         .loo_object_init        = osd_object_init,
1735         .loo_object_delete      = osd_object_delete,
1736         .loo_object_release     = osd_object_release,
1737         .loo_object_free        = osd_object_free,
1738         .loo_object_print       = osd_object_print,
1739         .loo_object_invariant   = osd_object_invariant,
1740 };
1741
1742 static int osd_otable_it_attr_get(const struct lu_env *env,
1743                                 struct dt_object *dt,
1744                                 struct lu_attr *attr)
1745 {
1746         attr->la_valid = 0;
1747         return 0;
1748 }
1749
1750 static struct dt_object_operations osd_obj_otable_it_ops = {
1751         .do_attr_get    = osd_otable_it_attr_get,
1752         .do_index_try   = osd_index_try,
1753 };