1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd-zfs/osd_object.c
33  *
34  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35  * Author: Mike Pershin <tappro@whamcloud.com>
36  * Author: Johann Lombardi <johann@whamcloud.com>
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <lustre_ver.h>
42 #include <libcfs/libcfs.h>
43 #include <obd_support.h>
44 #include <lustre_net.h>
45 #include <obd.h>
46 #include <obd_class.h>
47 #include <lustre_disk.h>
48 #include <lustre_fid.h>
49
50 #include "osd_internal.h"
51
52 #include <sys/dnode.h>
53 #include <sys/dbuf.h>
54 #include <sys/spa.h>
55 #include <sys/stat.h>
56 #include <sys/zap.h>
57 #include <sys/spa_impl.h>
58 #include <sys/zfs_znode.h>
59 #include <sys/dmu_tx.h>
60 #include <sys/dmu_objset.h>
61 #include <sys/dsl_prop.h>
62 #include <sys/sa_impl.h>
63 #include <sys/txg.h>
64
65 char *osd_obj_tag = "osd_object";
66
67 static struct dt_object_operations osd_obj_ops;
68 static struct lu_object_operations osd_lu_obj_ops;
69 extern struct dt_body_operations osd_body_ops;
70 static struct dt_object_operations osd_obj_otable_it_ops;
71
72 extern struct kmem_cache *osd_object_kmem;
73
74 static void
75 osd_object_sa_fini(struct osd_object *obj)
76 {
77         if (obj->oo_sa_hdl) {
78                 sa_handle_destroy(obj->oo_sa_hdl);
79                 obj->oo_sa_hdl = NULL;
80         }
81 }
82
83 static int
84 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
85 {
86         int rc;
87
88         LASSERT(obj->oo_sa_hdl == NULL);
89         LASSERT(obj->oo_dn != NULL);
90
91         rc = osd_sa_handle_get(obj);
92         if (rc)
93                 return rc;
94
95         /* Cache the xattr object id, valid for the life of the object */
96         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
97         if (rc == -ENOENT) {
98                 obj->oo_xattr = ZFS_NO_OBJECT;
99                 rc = 0;
100         } else if (rc) {
101                 osd_object_sa_fini(obj);
102         }
103
104         return rc;
105 }
106
107 /*
108  * Add object to list of dirty objects in tx handle.
109  */
110 static void
111 osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
112 {
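        /* lockless fast path: if the object is already on the dirty list
         * we can return without taking locks; re-checked under locks below */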
113         if (!list_empty(&obj->oo_sa_linkage))
114                 return;
115
116         down(&oh->ot_sa_lock);
117         write_lock(&obj->oo_attr_lock);
118         if (likely(list_empty(&obj->oo_sa_linkage)))
119                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
120         write_unlock(&obj->oo_attr_lock);
121         up(&oh->ot_sa_lock);
122 }
123
124 /*
125  * Release spill block dbuf hold for all dirty SAs.
126  */
127 void osd_object_sa_dirty_rele(struct osd_thandle *oh)
128 {
129         struct osd_object *obj;
130
131         down(&oh->ot_sa_lock);
132         while (!list_empty(&oh->ot_sa_list)) {
133                 obj = list_entry(oh->ot_sa_list.next,
134                                  struct osd_object, oo_sa_linkage);
135                 sa_spill_rele(obj->oo_sa_hdl);
136                 write_lock(&obj->oo_attr_lock);
137                 list_del_init(&obj->oo_sa_linkage);
138                 write_unlock(&obj->oo_attr_lock);
139         }
140         up(&oh->ot_sa_lock);
141 }
142
143 /*
144  * Update the SA and add the object to the dirty list.
145  */
146 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
147                          void *buf, uint32_t buflen, struct osd_thandle *oh)
148 {
149         int rc;
150
151         LASSERT(obj->oo_sa_hdl != NULL);
152         LASSERT(oh->ot_tx != NULL);
153
154         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
155         osd_object_sa_dirty_add(obj, oh);
156
157         return rc;
158 }
159
160 /*
161  * Bulk update the SA and add the object to the dirty list.
162  */
163 static int
164 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
165                           int count, struct osd_thandle *oh)
166 {
167         int rc;
168
169         LASSERT(obj->oo_sa_hdl != NULL);
170         LASSERT(oh->ot_tx != NULL);
171
172         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
173         osd_object_sa_dirty_add(obj, oh);
174
175         return rc;
176 }
177
178 /*
179  * Retrieve the attributes of a DMU object
180  */
181 int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
182                           struct osd_object *obj, struct lu_attr *la)
183 {
184         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
185         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
186         int              cnt = 0;
187         int              rc;
188         ENTRY;
189
190         LASSERT(obj->oo_dn != NULL);
191
192         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
193                         LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
194
195         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
196         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
197         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
198         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
199         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
200         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
201         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
202         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
203         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
204         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
205
206         rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
207         if (rc)
208                 GOTO(out_sa, rc);
209
210         la->la_atime = osa->atime[0];
211         la->la_mtime = osa->mtime[0];
212         la->la_ctime = osa->ctime[0];
213         la->la_mode = osa->mode;
214         la->la_uid = osa->uid;
215         la->la_gid = osa->gid;
216         la->la_nlink = osa->nlink;
217         la->la_flags = attrs_zfs2fs(osa->flags);
218         la->la_size = osa->size;
219
220         /* Try to get extra flags from the LMA. Right now only the LMAI_ORPHAN
221          * flag is stored in the LMA, and only for orphan directories */
222         if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
223                 struct osd_thread_info *info = osd_oti_get(env);
224                 struct lustre_mdt_attrs *lma;
225                 struct lu_buf buf;
226
227                 lma = (struct lustre_mdt_attrs *)info->oti_buf;
228                 buf.lb_buf = lma;
229                 buf.lb_len = sizeof(info->oti_buf);
230                 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
231                 if (rc > 0) {
232                         rc = 0;
233                         lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
234                         obj->oo_lma_flags =
235                                 lma_to_lustre_flags(lma->lma_incompat);
236
237                 } else if (rc == -ENODATA) {
238                         rc = 0;
239                 }
240         }
241
242         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
243                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
244                 if (rc)
245                         GOTO(out_sa, rc);
246                 la->la_rdev = osa->rdev;
247                 la->la_valid |= LA_RDEV;
248         }
249 out_sa:
250
251         RETURN(rc);
252 }
253
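/*
 * Hold the bonus buffer of the DMU object @oid and return its dnode in
 * @dnp; the caller releases the hold with osd_dnode_rele().  Except for
 * the quota accounting objects, the bonus buffer must be SA-typed.
 */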
254 int __osd_obj2dnode(const struct lu_env *env, objset_t *os,
255                    uint64_t oid, dnode_t **dnp)
256 {
257         dmu_object_info_t *doi = &osd_oti_get(env)->oti_doi;
258         dmu_buf_t *db;
259         dmu_buf_impl_t *dbi;
260         int rc;
261
262         rc = dmu_bonus_hold(os, oid, osd_obj_tag, &db);
263         if (rc)
264                 return rc;
265
266         dbi = (dmu_buf_impl_t *)db;
267         DB_DNODE_ENTER(dbi);
268         *dnp = DB_DNODE(dbi);
269
270         LASSERT(*dnp != NULL);
271         dmu_object_info_from_dnode(*dnp, doi);
272         if (unlikely (oid != DMU_USERUSED_OBJECT &&
273             oid != DMU_GROUPUSED_OBJECT && doi->doi_bonus_type != DMU_OT_SA)) {
274                 osd_dnode_rele(*dnp);
275                 *dnp = NULL;
276                 return -EINVAL;
277         }
278
279         return 0;
280 }
281
282 /*
283  * Concurrency: no concurrent access is possible that early in object
284  * life-cycle.
285  */
286 struct lu_object *osd_object_alloc(const struct lu_env *env,
287                                    const struct lu_object_header *hdr,
288                                    struct lu_device *d)
289 {
290         struct osd_object *mo;
291
292         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
293         if (mo != NULL) {
294                 struct lu_object *l;
295
296                 l = &mo->oo_dt.do_lu;
297                 dt_object_init(&mo->oo_dt, NULL, d);
298                 mo->oo_dt.do_ops = &osd_obj_ops;
299                 l->lo_ops = &osd_lu_obj_ops;
300                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
301                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
302                 init_rwsem(&mo->oo_sem);
303                 init_rwsem(&mo->oo_guard);
304                 rwlock_init(&mo->oo_attr_lock);
305                 mo->oo_destroy = OSD_DESTROY_NONE;
306                 return l;
307         } else {
308                 return NULL;
309         }
310 }
311
312 /*
313  * Concurrency: shouldn't matter.
314  */
315 int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
316 {
317         struct osd_device       *osd = osd_obj2dev(obj);
318         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
319         int                      rc = 0;
320         ENTRY;
321
322         if (obj->oo_dn == NULL)
323                 RETURN(0);
324
325         /* the object exists */
326
327         rc = osd_object_sa_init(obj, osd);
328         if (rc)
329                 RETURN(rc);
330
331         /* cache attrs in object */
332         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
333         if (rc)
334                 RETURN(rc);
335
336         if (likely(!fid_is_acct(fid)))
337                 /* no body operations for accounting objects */
338                 obj->oo_dt.do_body_ops = &osd_body_ops;
339
340         /*
341          * initialize object before marking it existing
342          */
343         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
344
345         smp_mb();
346         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
347
348         RETURN(0);
349 }
350
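/*
 * Check the object's LMA xattr for incompatible feature flags; a missing
 * LMA is tolerated, unsupported LMA_INCOMPAT flags yield -EOPNOTSUPP.
 */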
351 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
352 {
353         struct osd_thread_info  *info = osd_oti_get(env);
354         struct lu_buf           buf;
355         int                     rc;
356         struct lustre_mdt_attrs *lma;
357         ENTRY;
358
359         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
360         lma = (struct lustre_mdt_attrs *)info->oti_buf;
361         buf.lb_buf = lma;
362         buf.lb_len = sizeof(info->oti_buf);
363
364         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
365         if (rc > 0) {
366                 rc = 0;
367                 lustre_lma_swab(lma);
368                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
369                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
370                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
371                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
372                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
373                               PFID(lu_object_fid(&obj->oo_dt.do_lu)));
374                         rc = -EOPNOTSUPP;
375                 }
376         } else if (rc == -ENODATA) {
377                 /* the LMA xattr hasn't been initialized yet */
378                 rc = 0;
379         }
380
381         RETURN(rc);
382 }
383
384 /*
385  * Concurrency: no concurrent access is possible that early in object
386  * life-cycle.
387  */
388 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
389                            const struct lu_object_conf *conf)
390 {
391         struct osd_object       *obj = osd_obj(l);
392         struct osd_device       *osd = osd_obj2dev(obj);
393         uint64_t                 oid;
394         int                      rc;
395         ENTRY;
396
397         LASSERT(osd_invariant(obj));
398
399         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
400                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
401                 l->lo_header->loh_attr |= LOHA_EXISTS;
402                 RETURN(0);
403         }
404
405         if (conf != NULL && conf->loc_flags & LOC_F_NEW)
406                 GOTO(out, rc = 0);
407
408         rc = osd_fid_lookup(env, osd, lu_object_fid(l), &oid);
409         if (rc == 0) {
410                 LASSERT(obj->oo_dn == NULL);
411                 rc = __osd_obj2dnode(env, osd->od_os, oid, &obj->oo_dn);
412                 /* EEXIST will be returned if object is being deleted in ZFS */
413                 if (rc == -EEXIST) {
414                         rc = 0;
415                         GOTO(out, rc);
416                 }
417                 if (rc != 0) {
418                         CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
419                                osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
420                         GOTO(out, rc);
421                 }
422                 LASSERT(obj->oo_dn);
423                 rc = osd_object_init0(env, obj);
424                 if (rc != 0)
425                         GOTO(out, rc);
426
427                 rc = osd_check_lma(env, obj);
428                 if (rc != 0)
429                         GOTO(out, rc);
430         } else if (rc == -ENOENT) {
431                 rc = 0;
432         }
433         LASSERT(osd_invariant(obj));
434 out:
435         RETURN(rc);
436 }
437
438 /*
439  * Concurrency: no concurrent access is possible that late in object
440  * life-cycle.
441  */
442 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
443 {
444         struct osd_object *obj = osd_obj(l);
445
446         LASSERT(osd_invariant(obj));
447
448         dt_object_fini(&obj->oo_dt);
449         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
450 }
451
452 static int
453 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
454 {
455         int rc = -EBUSY;
456
457         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
458
459         /* the object is supposed to be exclusively locked by
460          * the caller (osd_object_destroy()), while the transaction
461          * (oh) is per-thread and not shared */
462         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
463                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
464                 rc = 0;
465         }
466
467         return rc;
468 }
469
470 /* Default to max data size covered by a level-1 indirect block */
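/* e.g. with a 128KB indirect block (DN_MAX_INDBLKSHIFT = 17), 128-byte
 * block pointers (SPA_BLKPTRSHIFT = 7) and 16MB maximum data blocks
 * (SPA_MAXBLOCKSHIFT = 24) this works out to 2^(17 - 7 + 24) = 16GB;
 * the exact value depends on the ZFS constants the module is built against */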
471 static unsigned long osd_sync_destroy_max_size =
472         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
473 module_param(osd_sync_destroy_max_size, ulong, 0444);
474 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
475
476 static inline void
477 osd_object_set_destroy_type(struct osd_object *obj)
478 {
479         /*
480          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
481          * only once and use it consistently thereafter.
482          */
483         down_write(&obj->oo_guard);
484         if (obj->oo_destroy == OSD_DESTROY_NONE) {
485                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
486                         obj->oo_destroy = OSD_DESTROY_SYNC;
487                 else /* Larger objects are destroyed asynchronously */
488                         obj->oo_destroy = OSD_DESTROY_ASYNC;
489         }
490         up_write(&obj->oo_guard);
491 }
492
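/*
 * Declare the updates needed to destroy an object: removal from the
 * FID->dnode ZAP, xattr destruction, quota changes, and either a
 * synchronous dnode free or an insertion into the unlinked set.
 */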
493 static int osd_declare_object_destroy(const struct lu_env *env,
494                                       struct dt_object *dt,
495                                       struct thandle *th)
496 {
497         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
498         struct osd_object       *obj = osd_dt_obj(dt);
499         struct osd_device       *osd = osd_obj2dev(obj);
500         struct osd_thandle      *oh;
501         int                      rc;
502         uint64_t                 zapid;
503         ENTRY;
504
505         LASSERT(th != NULL);
506         LASSERT(dt_object_exists(dt));
507
508         oh = container_of0(th, struct osd_thandle, ot_super);
509         LASSERT(oh->ot_tx != NULL);
510
511         /* declare that we'll remove object from fid-dnode mapping */
512         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0);
513         dmu_tx_hold_zap(oh->ot_tx, zapid, FALSE, NULL);
514
515         osd_declare_xattrs_destroy(env, obj, oh);
516
517         /* one less inode */
518         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
519                                obj->oo_attr.la_gid, -1, oh, false, NULL, false);
520         if (rc)
521                 RETURN(rc);
522
523         /* data to be truncated */
524         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
525                                obj->oo_attr.la_gid, 0, oh, true, NULL, false);
526         if (rc)
527                 RETURN(rc);
528
529         osd_object_set_destroy_type(obj);
530         if (obj->oo_destroy == OSD_DESTROY_SYNC)
531                 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
532                                  0, DMU_OBJECT_END);
533         else
534                 dmu_tx_hold_zap(oh->ot_tx, osd->od_unlinkedid, TRUE, NULL);
535
536         /* will help to find the FID->ino mapping when this object is
537          * being added to PENDING/ */
538         osd_idc_find_and_init(env, osd, obj);
539
540         RETURN(0);
541 }
542
543 static int osd_object_destroy(const struct lu_env *env,
544                               struct dt_object *dt, struct thandle *th)
545 {
546         struct osd_thread_info  *info = osd_oti_get(env);
547         char                    *buf = info->oti_str;
548         struct osd_object       *obj = osd_dt_obj(dt);
549         struct osd_device       *osd = osd_obj2dev(obj);
550         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
551         struct osd_thandle      *oh;
552         int                      rc;
553         uint64_t                 oid, zapid;
554         ENTRY;
555
556         down_write(&obj->oo_guard);
557
558         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
559                 GOTO(out, rc = -ENOENT);
560
561         LASSERT(obj->oo_dn != NULL);
562
563         oh = container_of0(th, struct osd_thandle, ot_super);
564         LASSERT(oh != NULL);
565         LASSERT(oh->ot_tx != NULL);
566
567         /* remove obj ref from index dir (it depends) */
568         zapid = osd_get_name_n_idx(env, osd, fid, buf, sizeof(info->oti_str));
569         rc = -zap_remove(osd->od_os, zapid, buf, oh->ot_tx);
570         if (rc) {
571                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
572                        osd->od_svname, buf, rc);
573                 GOTO(out, rc);
574         }
575
576         rc = osd_xattrs_destroy(env, obj, oh);
577         if (rc) {
578                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
579                        osd->od_svname, buf, rc);
580                 GOTO(out, rc);
581         }
582
583         oid = obj->oo_dn->dn_object;
584         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
585                 /* this may happen if the destroy wasn't declared,
586                  * e.g. when the object is created and then destroyed
587                  * in the same transaction; we don't need additional
588                  * space for the destroy specifically */
589                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
590                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
591                 if (rc)
592                         CERROR("%s: failed to free %s %llu: rc = %d\n",
593                                osd->od_svname, buf, oid, rc);
594         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
595                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
596                 if (rc)
597                         CERROR("%s: failed to free %s %llu: rc = %d\n",
598                                osd->od_svname, buf, oid, rc);
599         } else { /* asynchronous destroy */
600                 rc = osd_object_unlinked_add(obj, oh);
601                 if (rc)
602                         GOTO(out, rc);
603
604                 rc = -zap_add_int(osd->od_os, osd->od_unlinkedid,
605                                   oid, oh->ot_tx);
606                 if (rc)
607                         CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
608                                osd->od_svname, buf, oid, rc);
609         }
610
611 out:
612         /* not needed in the cache anymore */
613         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
614         if (rc == 0)
615                 obj->oo_destroyed = 1;
616         up_write(&obj->oo_guard);
617         RETURN(0);
618 }
619
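/*
 * Release the per-object resources: the SA handle, any cached SA xattr
 * nvlist and the dnode hold taken when the object was set up.
 */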
620 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
621 {
622         struct osd_object *obj = osd_obj(l);
623
624         if (obj->oo_dn != NULL) {
625                 osd_object_sa_fini(obj);
626                 if (obj->oo_sa_xattr) {
627                         nvlist_free(obj->oo_sa_xattr);
628                         obj->oo_sa_xattr = NULL;
629                 }
630                 osd_dnode_rele(obj->oo_dn);
631                 list_del(&obj->oo_sa_linkage);
632                 obj->oo_dn = NULL;
633         }
634 }
635
636 /*
637  * Concurrency: ->loo_object_release() is called under site spin-lock.
638  */
639 static void osd_object_release(const struct lu_env *env,
640                                struct lu_object *l)
641 {
642 }
643
644 /*
645  * Concurrency: shouldn't matter.
646  */
647 static int osd_object_print(const struct lu_env *env, void *cookie,
648                             lu_printer_t p, const struct lu_object *l)
649 {
650         struct osd_object *o = osd_obj(l);
651
652         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
653 }
654
655 static void osd_object_read_lock(const struct lu_env *env,
656                                  struct dt_object *dt, unsigned role)
657 {
658         struct osd_object *obj = osd_dt_obj(dt);
659
660         LASSERT(osd_invariant(obj));
661
662         down_read_nested(&obj->oo_sem, role);
663 }
664
665 static void osd_object_write_lock(const struct lu_env *env,
666                                   struct dt_object *dt, unsigned role)
667 {
668         struct osd_object *obj = osd_dt_obj(dt);
669
670         LASSERT(osd_invariant(obj));
671
672         down_write_nested(&obj->oo_sem, role);
673 }
674
675 static void osd_object_read_unlock(const struct lu_env *env,
676                                    struct dt_object *dt)
677 {
678         struct osd_object *obj = osd_dt_obj(dt);
679
680         LASSERT(osd_invariant(obj));
681         up_read(&obj->oo_sem);
682 }
683
684 static void osd_object_write_unlock(const struct lu_env *env,
685                                     struct dt_object *dt)
686 {
687         struct osd_object *obj = osd_dt_obj(dt);
688
689         LASSERT(osd_invariant(obj));
690         up_write(&obj->oo_sem);
691 }
692
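/*
 * Return 1 if oo_sem cannot be write-trylocked (i.e. it is currently
 * held by someone), 0 if the trylock succeeded and the lock was free.
 */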
693 static int osd_object_write_locked(const struct lu_env *env,
694                                    struct dt_object *dt)
695 {
696         struct osd_object *obj = osd_dt_obj(dt);
697         int rc = 1;
698
699         LASSERT(osd_invariant(obj));
700
701         if (down_write_trylock(&obj->oo_sem)) {
702                 rc = 0;
703                 up_write(&obj->oo_sem);
704         }
705         return rc;
706 }
707
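/*
 * Return the attributes cached in oo_attr; block count and block size
 * are refreshed from the DMU via sa_object_size() on every call.
 */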
708 static int osd_attr_get(const struct lu_env *env,
709                         struct dt_object *dt,
710                         struct lu_attr *attr)
711 {
712         struct osd_object       *obj = osd_dt_obj(dt);
713         uint64_t                 blocks;
714         uint32_t                 blksize;
715         int                      rc = 0;
716
717         down_read(&obj->oo_guard);
718
719         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
720                 GOTO(out, rc = -ENOENT);
721
722         LASSERT(osd_invariant(obj));
723         LASSERT(obj->oo_dn);
724
725         read_lock(&obj->oo_attr_lock);
726         *attr = obj->oo_attr;
727         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
728                 attr->la_flags |= LUSTRE_ORPHAN_FL;
729         read_unlock(&obj->oo_attr_lock);
730
731         /* with ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
732          * from within sa_object_size() can block on a mutex, so
733          * we can't call sa_object_size() while holding the rwlock */
734         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
735         /* we do not control the size of indices, so always calculate
736          * it from the number of blocks reported by the DMU */
737         if (S_ISDIR(attr->la_mode))
738                 attr->la_size = 512 * blocks;
739         /* Block size may not be set; suggest maximal I/O transfers. */
740         if (blksize == 0)
741                 blksize = osd_spa_maxblocksize(
742                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
743
744         attr->la_blksize = blksize;
745         attr->la_blocks = blocks;
746         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
747
748 out:
749         up_read(&obj->oo_guard);
750         return rc;
751 }
752
753 /* Simple wrapper on top of the qsd API which implements quota transfer for
754  * osd setattr needs. As a reminder, only the root user can change the
755  * ownership of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
756 static inline int qsd_transfer(const struct lu_env *env,
757                                struct qsd_instance *qsd,
758                                struct lquota_trans *trans, int qtype,
759                                __u64 orig_id, __u64 new_id, __u64 bspace,
760                                struct lquota_id_info *qi)
761 {
762         int     rc;
763
764         if (unlikely(qsd == NULL))
765                 return 0;
766
767         LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
768         qi->lqi_type = qtype;
769
770         /* inode accounting */
771         qi->lqi_is_blk = false;
772
773         /* one more inode for the new owner ... */
774         qi->lqi_id.qid_uid = new_id;
775         qi->lqi_space      = 1;
776         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
777         if (rc == -EDQUOT || rc == -EINPROGRESS)
778                 rc = 0;
779         if (rc)
780                 return rc;
781
782         /* and one less inode for the current id */
783         qi->lqi_id.qid_uid = orig_id;
784         qi->lqi_space      = -1;
785         /* can't get EDQUOT when reducing usage */
786         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
787         if (rc == -EINPROGRESS)
788                 rc = 0;
789         if (rc)
790                 return rc;
791
792         /* block accounting */
793         qi->lqi_is_blk = true;
794
795         /* more blocks for the new owner ... */
796         qi->lqi_id.qid_uid = new_id;
797         qi->lqi_space      = bspace;
798         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
799         if (rc == -EDQUOT || rc == -EINPROGRESS)
800                 rc = 0;
801         if (rc)
802                 return rc;
803
804         /* and finally less blocks for the current owner */
805         qi->lqi_id.qid_uid = orig_id;
806         qi->lqi_space      = -bspace;
807         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
808         /* can't get EDQUOT when reducing usage */
809         if (rc == -EINPROGRESS)
810                 rc = 0;
811         return rc;
812 }
813
814 static int osd_declare_attr_set(const struct lu_env *env,
815                                 struct dt_object *dt,
816                                 const struct lu_attr *attr,
817                                 struct thandle *handle)
818 {
819         struct osd_thread_info  *info = osd_oti_get(env);
820         struct osd_object       *obj = osd_dt_obj(dt);
821         struct osd_device       *osd = osd_obj2dev(obj);
822         dmu_tx_hold_t           *txh;
823         struct osd_thandle      *oh;
824         uint64_t                 bspace;
825         uint32_t                 blksize;
826         int                      rc = 0;
827         bool                     found;
828         ENTRY;
829
830
831         LASSERT(handle != NULL);
832         LASSERT(osd_invariant(obj));
833
834         oh = container_of0(handle, struct osd_thandle, ot_super);
835
836         down_read(&obj->oo_guard);
837         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
838                 GOTO(out, rc = 0);
839
840         LASSERT(obj->oo_sa_hdl != NULL);
841         LASSERT(oh->ot_tx != NULL);
842         /* regular attributes are part of the bonus buffer */
843         /* let's check whether this object is already part of the
844          * transaction */
845         found = false;
846         for (txh = list_head(&oh->ot_tx->tx_holds); txh;
847              txh = list_next(&oh->ot_tx->tx_holds, txh)) {
848                 if (txh->txh_dnode == NULL)
849                         continue;
850                 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
851                         continue;
852                 /* this object is already part of the transaction;
853                  * we don't need to declare the bonus again */
854                 found = true;
855                 break;
856         }
857         if (!found)
858                 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
859         if (oh->ot_tx->tx_err != 0)
860                 GOTO(out, rc = -oh->ot_tx->tx_err);
861
862         if (attr && attr->la_valid & LA_FLAGS) {
863                 /* LMA is usually a part of the bonus buffer, no need to
864                  * declare anything else */
865         }
866
867         if (attr && (attr->la_valid & (LA_UID | LA_GID))) {
868                 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
869                 bspace = toqb(bspace * blksize);
870         }
871
872         if (attr && attr->la_valid & LA_UID) {
873                 /* quota enforcement for user */
874                 if (attr->la_uid != obj->oo_attr.la_uid) {
875                         rc = qsd_transfer(env, osd->od_quota_slave,
876                                           &oh->ot_quota_trans, USRQUOTA,
877                                           obj->oo_attr.la_uid, attr->la_uid,
878                                           bspace, &info->oti_qi);
879                         if (rc)
880                                 GOTO(out, rc);
881                 }
882         }
883         if (attr && attr->la_valid & LA_GID) {
884                 /* quota enforcement for group */
885                 if (attr->la_gid != obj->oo_attr.la_gid) {
886                         rc = qsd_transfer(env, osd->od_quota_slave,
887                                           &oh->ot_quota_trans, GRPQUOTA,
888                                           obj->oo_attr.la_gid, attr->la_gid,
889                                           bspace, &info->oti_qi);
890                         if (rc)
891                                 GOTO(out, rc);
892                 }
893         }
894
895 out:
896         up_read(&obj->oo_guard);
897         RETURN(rc);
898 }
899
900 /*
901  * Set the attributes of an object
902  *
903  * The transaction passed to this routine must have
904  * dmu_tx_hold_bonus(tx, oid) called and then assigned
905  * to a transaction group.
906  */
907 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
908                         const struct lu_attr *la, struct thandle *handle)
909 {
910         struct osd_thread_info  *info = osd_oti_get(env);
911         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
912         struct osd_object       *obj = osd_dt_obj(dt);
913         struct osd_device       *osd = osd_obj2dev(obj);
914         struct osd_thandle      *oh;
915         struct osa_attr         *osa = &info->oti_osa;
916         __u64                    valid = la->la_valid;
917         int                      cnt;
918         int                      rc = 0;
919
920         ENTRY;
921
922         down_read(&obj->oo_guard);
923         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
924                 GOTO(out, rc = -ENOENT);
925
926         LASSERT(handle != NULL);
927         LASSERT(osd_invariant(obj));
928         LASSERT(obj->oo_sa_hdl);
929
930         oh = container_of0(handle, struct osd_thandle, ot_super);
931         /* Assert that the transaction has been assigned to a
932            transaction group. */
933         LASSERT(oh->ot_tx->tx_txg != 0);
934
935         /* Only allow setting the size for regular files */
936         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
937                 valid &= ~(LA_SIZE | LA_BLOCKS);
938
939         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
940                 valid &= ~LA_CTIME;
941
942         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
943                 valid &= ~LA_MTIME;
944
945         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
946                 valid &= ~LA_ATIME;
947
948         if (valid == 0)
949                 GOTO(out, rc = 0);
950
951         if (valid & LA_FLAGS) {
952                 struct lustre_mdt_attrs *lma;
953                 struct lu_buf buf;
954
955                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
956                         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
957                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
958                         buf.lb_buf = lma;
959                         buf.lb_len = sizeof(info->oti_buf);
960                         rc = osd_xattr_get(env, &obj->oo_dt, &buf,
961                                            XATTR_NAME_LMA);
962                         if (rc > 0) {
963                                 lma->lma_incompat =
964                                         le32_to_cpu(lma->lma_incompat);
965                                 lma->lma_incompat |=
966                                         lustre_to_lma_flags(la->la_flags);
967                                 lma->lma_incompat =
968                                         cpu_to_le32(lma->lma_incompat);
969                                 buf.lb_buf = lma;
970                                 buf.lb_len = sizeof(*lma);
971                                 rc = osd_xattr_set_internal(env, obj, &buf,
972                                                             XATTR_NAME_LMA,
973                                                             LU_XATTR_REPLACE,
974                                                             oh);
975                         }
976                         if (rc < 0) {
977                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
978                                        osd->od_svname, rc);
979                                 GOTO(out, rc);
980                         }
981                 }
982         }
983
984         write_lock(&obj->oo_attr_lock);
985         cnt = 0;
986         if (valid & LA_ATIME) {
987                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
988                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
989                                  osa->atime, 16);
990         }
991         if (valid & LA_MTIME) {
992                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
993                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
994                                  osa->mtime, 16);
995         }
996         if (valid & LA_CTIME) {
997                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
998                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
999                                  osa->ctime, 16);
1000         }
1001         if (valid & LA_MODE) {
1002                 /* mode is stored along with type, so read it first */
1003                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1004                         (la->la_mode & ~S_IFMT);
1005                 osa->mode = obj->oo_attr.la_mode;
1006                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1007                                  &osa->mode, 8);
1008         }
1009         if (valid & LA_SIZE) {
1010                 osa->size = obj->oo_attr.la_size = la->la_size;
1011                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1012                                  &osa->size, 8);
1013         }
1014         if (valid & LA_NLINK) {
1015                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1016                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1017                                  &osa->nlink, 8);
1018         }
1019         if (valid & LA_RDEV) {
1020                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1021                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1022                                  &osa->rdev, 8);
1023         }
1024         if (valid & LA_FLAGS) {
1025                 osa->flags = attrs_fs2zfs(la->la_flags);
1026                 /* many flags are not supported by ZFS, so keep the cached copy
1027                  * consistent with what was actually stored */
1028                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1029                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1030                                  &osa->flags, 8);
1031         }
1032         if (valid & LA_UID) {
1033                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1034                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1035                                  &osa->uid, 8);
1036         }
1037         if (valid & LA_GID) {
1038                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1039                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1040                                  &osa->gid, 8);
1041         }
1042         obj->oo_attr.la_valid |= valid;
1043         write_unlock(&obj->oo_attr_lock);
1044
1045         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1046         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1047
1048 out:
1049         up_read(&obj->oo_guard);
1050         RETURN(rc);
1051 }
1052
1053 /*
1054  * Object creation.
1055  *
1056  * XXX temporary solution.
1057  */
1058
1059 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1060                         struct dt_object *parent, struct dt_object *child,
1061                         umode_t child_mode)
1062 {
1063         LASSERT(ah);
1064
1065         ah->dah_parent = parent;
1066         ah->dah_mode = child_mode;
1067
1068         if (parent != NULL && !dt_object_remote(parent)) {
1069                 /* will help to find FID->ino at dt_insert("..") */
1070                 struct osd_object *pobj = osd_dt_obj(parent);
1071
1072                 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1073         }
1074 }
1075
1076 static int osd_declare_object_create(const struct lu_env *env,
1077                                      struct dt_object *dt,
1078                                      struct lu_attr *attr,
1079                                      struct dt_allocation_hint *hint,
1080                                      struct dt_object_format *dof,
1081                                      struct thandle *handle)
1082 {
1083         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1084         struct osd_object       *obj = osd_dt_obj(dt);
1085         struct osd_device       *osd = osd_obj2dev(obj);
1086         struct osd_thandle      *oh;
1087         uint64_t                 zapid;
1088         int                      rc, dnode_size;
1089         ENTRY;
1090
1091         LASSERT(dof);
1092
1093         switch (dof->dof_type) {
1094                 case DFT_REGULAR:
1095                 case DFT_SYM:
1096                 case DFT_NODE:
1097                         if (obj->oo_dt.do_body_ops == NULL)
1098                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1099                         break;
1100                 default:
1101                         break;
1102         }
1103
1104         LASSERT(handle != NULL);
1105         oh = container_of0(handle, struct osd_thandle, ot_super);
1106         LASSERT(oh->ot_tx != NULL);
1107
1108         /* this is the minimum set of EAs on every Lustre object */
1109         obj->oo_ea_in_bonus = ZFS_SA_BASE_ATTR_SIZE +
1110                                 sizeof(__u64) + /* VBR VERSION */
1111                                 sizeof(struct lustre_mdt_attrs); /* LMA */
1112         /* reserve 32 bytes for extra stuff like ACLs */
1113         dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1114
1115         switch (dof->dof_type) {
1116                 case DFT_DIR:
1117                         dt->do_index_ops = &osd_dir_ops;
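                        /* fall through: directories need the ZAP declarations too */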
1118                 case DFT_INDEX:
1119                         /* for zap create */
1120                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1121                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1122                         break;
1123                 case DFT_REGULAR:
1124                 case DFT_SYM:
1125                 case DFT_NODE:
1126                         /* first, we'll create new object */
1127                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1128                         break;
1129
1130                 default:
1131                         LBUG();
1132                         break;
1133         }
1134
1135         /* and we'll add it to the FID->dnode mapping */
1136         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0);
1137         dmu_tx_hold_zap(oh->ot_tx, zapid, TRUE, NULL);
1138
1139         /* will help to find FID->ino mapping at dt_insert() */
1140         osd_idc_find_and_init(env, osd, obj);
1141
1142         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
1143                                false, NULL, false);
1144
1145         RETURN(rc);
1146 }
1147
1148 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1149                     sa_handle_t *sa_hdl, dmu_tx_t *tx,
1150                     struct lu_attr *la, uint64_t parent)
1151 {
1152         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
1153         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1154         uint64_t         gen;
1155         uint64_t         crtime[2];
1156         timestruc_t      now;
1157         int              cnt;
1158         int              rc;
1159
1160         LASSERT(sa_hdl);
1161
1162         gen = dmu_tx_get_txg(tx);
1163         gethrestime(&now);
1164         ZFS_TIME_ENCODE(&now, crtime);
1165
1166         osa->atime[0] = la->la_atime;
1167         osa->ctime[0] = la->la_ctime;
1168         osa->mtime[0] = la->la_mtime;
1169         osa->mode = la->la_mode;
1170         osa->uid = la->la_uid;
1171         osa->gid = la->la_gid;
1172         osa->rdev = la->la_rdev;
1173         osa->nlink = la->la_nlink;
1174         osa->flags = attrs_fs2zfs(la->la_flags);
1175         osa->size  = la->la_size;
1176
1177         /*
1178          * we need to create all of the SAs below upon object creation.
1179          *
1180          * XXX The attribute order matters since the accounting callback relies
1181          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1182          * look up the UID/GID attributes. Moreover, the callback does not seem
1183          * to support the spill block.
1184          * We define attributes in the same order as SA_*_OFFSET in order to
1185          * work around the problem. See ORI-610.
1186          */
1187         cnt = 0;
1188         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1189         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1190         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1191         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1192         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1193         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1194         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1195         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1196         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1197         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1198         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
1199         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1200         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1201         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1202
1203         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1204
1205         return rc;
1206 }
1207
1208 static int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1209                               uint64_t oid, dnode_t **dnp)
1210 {
1211         dmu_tx_hold_t *txh;
1212         int rc = 0;
1213
1214         /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1215         for (txh = list_tail(&tx->tx_holds); txh;
1216              txh = list_prev(&tx->tx_holds, txh)) {
1217                 dnode_t *dn = txh->txh_dnode;
1218                 dmu_buf_impl_t *db;
1219
1220                 if (dn == NULL)
1221                         continue;
1222                 if (dn->dn_object != oid)
1223                         continue;
1224                 db = dn->dn_bonus;
1225                 if (db == NULL) {
1226                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1227                         if (dn->dn_bonus == NULL)
1228                                 dbuf_create_bonus(dn);
1229                         rw_exit(&dn->dn_struct_rwlock);
1230                 }
1231                 db = dn->dn_bonus;
1232                 LASSERT(db);
1233                 LASSERT(dn->dn_handle);
1234                 DB_DNODE_ENTER(db);
1235                 if (refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1236                         refcount_add(&dn->dn_holds, osd_obj_tag);
1237                         atomic_inc_32(&dn->dn_dbufs_count);
1238                 }
1239                 *dnp = dn;
1240                 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1241                 break;
1242         }
1243
1244         if (unlikely(*dnp == NULL))
1245                 rc = __osd_obj2dnode(env, tx->tx_objset, oid, dnp);
1246
1247         return rc;
1248 }
1249
1250 /*
1251  * The transaction passed to this routine must have
1252  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1253  * to a transaction group.
1254  */
1255 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
1256                         dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1257 {
1258         struct osd_device   *osd = osd_obj2dev(obj);
1259         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1260         dmu_object_type_t    type = DMU_OT_PLAIN_FILE_CONTENTS;
1261         uint64_t oid;
1262
1263         /* Use DMU_OTN_UINT8_METADATA for local objects so that their data
1264          * blocks get an additional ditto copy */
1265         if (unlikely(S_ISREG(la->la_mode) &&
1266                      fid_seq_is_local_file(fid_seq(fid))))
1267                 type = DMU_OTN_UINT8_METADATA;
1268
1269         /* Create a new DMU object using the default dnode size. */
1270         oid = osd_dmu_object_alloc(osd->od_os, type, 0, 0, tx);
1271
1272         LASSERT(la->la_valid & LA_MODE);
1273         la->la_size = 0;
1274         la->la_nlink = 1;
1275
1276         return osd_find_new_dnode(env, tx, oid, dnp);
1277 }
1278
1279 /*
1280  * The transaction passed to this routine must have
1281  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1282  * to a transaction group.
1283  *
1284  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1285  * This is fine for directories today, because storing the FID in the dirent
1286  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1287  * then we might need to re-evaluate the use of this flag and instead do
1288  * a conversion from the different internal ZAP hash formats being used. */
1289 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1290                      dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1291                      zap_flags_t flags)
1292 {
1293         uint64_t oid;
1294
1295         /* Assert that the transaction has been assigned to a
1296            transaction group. */
1297         LASSERT(tx->tx_txg != 0);
1298         *dnp = NULL;
1299
1300         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1301                                    DMU_OT_DIRECTORY_CONTENTS,
1302                                    14, /* == ZFS fzap_default_blockshift */
1303                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1304                                    0, tx);
1305
1306         la->la_size = 2;
1307         la->la_nlink = 1;
1308
1309         return osd_find_new_dnode(env, tx, oid, dnp);
1310 }
1311
1312 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1313                           struct lu_attr *la, struct osd_thandle *oh)
1314 {
1315         dnode_t *dn;
1316         int rc;
1317
1318         /* Index files should be created as regular files in order not to
1319          * confuse ZPL, which could interpret them as directories.
1320          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1321          * binary keys */
1322         LASSERT(S_ISREG(la->la_mode));
1323         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la,
1324                               ZAP_FLAG_UINT64_KEY);
1325         if (rc)
1326                 return ERR_PTR(rc);
1327         return dn;
1328 }
1329
1330 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1331                           struct lu_attr *la, struct osd_thandle *oh)
1332 {
1333         dnode_t *dn;
1334         int rc;
1335
1336         LASSERT(S_ISDIR(la->la_mode));
1337         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la, 0);
1338         if (rc)
1339                 return ERR_PTR(rc);
1340         return dn;
1341 }
1342
1343 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1344                           struct lu_attr *la, struct osd_thandle *oh)
1345 {
1346         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1347         struct osd_device *osd = osd_obj2dev(obj);
1348         dnode_t *dn;
1349         int rc;
1350
1351         LASSERT(S_ISREG(la->la_mode));
1352         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1353         if (rc)
1354                 return ERR_PTR(rc);
1355
1356         if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid)) &&
1357             osd->od_is_ost) {
1358                 /* The minimum block size must be at least the page size,
1359                  * otherwise it will break the assumption in tgt_thread_big_cache
1360                  * where the array size is PTLRPC_MAX_BRW_PAGES. It will also
1361                  * affect RDMA due to the subpage transfer size */
1362                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1363                                                PAGE_SIZE, 0, oh->ot_tx);
1364                 if (unlikely(rc)) {
1365                         CERROR("%s: can't change blocksize: %d\n",
1366                                osd->od_svname, rc);
1367                         return ERR_PTR(rc);
1368                 }
1369         }
1370
1371         return dn;
1372 }
1373
1374 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1375                           struct lu_attr *la, struct osd_thandle *oh)
1376 {
1377         dnode_t *dn;
1378         int rc;
1379
1380         LASSERT(S_ISLNK(la->la_mode));
1381         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1382         if (rc)
1383                 return ERR_PTR(rc);
1384         return dn;
1385 }
1386
1387 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1388                           struct lu_attr *la, struct osd_thandle *oh)
1389 {
1390         dnode_t *dn;
1391         int rc;
1392
1393         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1394                 la->la_valid |= LA_RDEV;
1395
1396         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1397         if (rc)
1398                 return ERR_PTR(rc);
1399         return dn;
1400 }
1401
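/* creation helpers, selected by dt_format_type in osd_create_type_f() below */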
1402 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1403                                    struct osd_object *obj,
1404                                    struct lu_attr *la,
1405                                    struct osd_thandle *oh);
1406
1407 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1408 {
1409         osd_obj_type_f result;
1410
1411         switch (type) {
1412         case DFT_DIR:
1413                 result = osd_mkdir;
1414                 break;
1415         case DFT_INDEX:
1416                 result = osd_mkidx;
1417                 break;
1418         case DFT_REGULAR:
1419                 result = osd_mkreg;
1420                 break;
1421         case DFT_SYM:
1422                 result = osd_mksym;
1423                 break;
1424         case DFT_NODE:
1425                 result = osd_mknod;
1426                 break;
1427         default:
1428                 LBUG();
1429                 break;
1430         }
1431         return result;
1432 }
1433
1434 /*
1435  * Concurrency: @dt is write locked.
1436  */
1437 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1438                              struct lu_attr *attr,
1439                              struct dt_allocation_hint *hint,
1440                              struct dt_object_format *dof,
1441                              struct thandle *th)
1442 {
1443         struct osd_thread_info  *info = osd_oti_get(env);
1444         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1445         struct zpl_direntry     *zde = &info->oti_zde.lzd_reg;
1446         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1447         struct osd_object       *obj = osd_dt_obj(dt);
1448         struct osd_device       *osd = osd_obj2dev(obj);
1449         char                    *buf = info->oti_str;
1450         struct osd_thandle      *oh;
1451         dnode_t *dn = NULL;
1452         uint64_t                 zapid, parent = 0;
1453         int                      rc;
1454
1455         ENTRY;
1456
1457         /* Concurrent create declarations must not see the object in an
1458          * inconsistent state (db, attr, etc).  In the regular case
1459          * acquiring the lock is cheap. */
1460         down_write(&obj->oo_guard);
1461
1462         if (unlikely(dt_object_exists(dt)))
1463                 GOTO(out, rc = -EEXIST);
1464
1465         LASSERT(osd_invariant(obj));
1466         LASSERT(dof != NULL);
1467
1468         LASSERT(th != NULL);
1469         oh = container_of0(th, struct osd_thandle, ot_super);
1470
1471         LASSERT(obj->oo_dn == NULL);
1472
1473         /* to follow the ZFS on-disk format the parent dnode
1474          * needs to be initialized properly */
1475         if (hint != NULL && hint->dah_parent != NULL &&
1476             !dt_object_remote(hint->dah_parent))
1477                 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1478
1479         /* we may adjust some attributes, so do not modify the caller's copy */
1480         obj->oo_attr = *attr;
1481         obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1482
1483         dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1484         if (IS_ERR(dn)) {
1485                 rc = PTR_ERR(dn);
1486                 dn = NULL;
1487                 GOTO(out, rc);
1488         }
1489
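        /* build the ZAP directory entry (target dnode and file type) and
         * insert it under the FID-derived name into the index ZAP chosen
         * by osd_get_name_n_idx() */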
1490         zde->zde_pad = 0;
1491         zde->zde_dnode = dn->dn_object;
1492         zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1493
1494         zapid = osd_get_name_n_idx(env, osd, fid, buf, sizeof(info->oti_str));
1495
1496         rc = -zap_add(osd->od_os, zapid, buf, 8, 1, zde, oh->ot_tx);
1497         if (rc)
1498                 GOTO(out, rc);
1499         obj->oo_dn = dn;
1500         /* get an SA handle so all of the "SA" attributes can be set up below */
1501         rc = osd_sa_handle_get(obj);
1502         if (rc)
1503                 GOTO(out, rc);
1504
1505         /* configure new osd object */
1506         parent = parent != 0 ? parent : zapid;
1507         rc = __osd_attr_init(env, osd, obj->oo_sa_hdl, oh->ot_tx,
1508                              &obj->oo_attr, parent);
1509         if (rc)
1510                 GOTO(out, rc);
1511
1512         /* XXX: oo_lma_flags */
1513         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
1514         if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
1515                 /* no body operations for accounting objects */
1516                 obj->oo_dt.do_body_ops = &osd_body_ops;
1517
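        /* allocate the in-memory nvlist accumulating SA-based xattrs; the
         * LMA entry added below is flushed to disk by __osd_sa_xattr_update() */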
1518         rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
1519         if (rc)
1520                 GOTO(out, rc);
1521
1522         /* initialize LMA */
1523         lustre_lma_init(lma, lu_object_fid(&obj->oo_dt.do_lu), 0, 0);
1524         lustre_lma_swab(lma);
1525         rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
1526                                     (uchar_t *)lma, sizeof(*lma));
1527         if (rc)
1528                 GOTO(out, rc);
1529         rc = __osd_sa_xattr_update(env, obj, oh);
1530         if (rc)
1531                 GOTO(out, rc);
1532         osd_idc_find_and_init(env, osd, obj);
1533
1534 out:
1535         if (unlikely(rc && dn)) {
1536                 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
1537                 osd_dnode_rele(dn);
1538                 obj->oo_dn = NULL;
1539         } else if (!rc) {
1540                 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
1541         }
1542         up_write(&obj->oo_guard);
1543         RETURN(rc);
1544 }
1545
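/*
 * A link count change is a plain SA attribute update, so declaring a
 * ref add (or del, below) amounts to declaring an attribute set.
 */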
1546 static int osd_declare_object_ref_add(const struct lu_env *env,
1547                                       struct dt_object *dt,
1548                                       struct thandle *th)
1549 {
1550         return osd_declare_attr_set(env, dt, NULL, th);
1551 }
1552
1553 /*
1554  * Concurrency: @dt is write locked.
1555  */
1556 static int osd_object_ref_add(const struct lu_env *env,
1557                               struct dt_object *dt,
1558                               struct thandle *handle)
1559 {
1560         struct osd_object       *obj = osd_dt_obj(dt);
1561         struct osd_thandle      *oh;
1562         struct osd_device       *osd = osd_obj2dev(obj);
1563         uint64_t                 nlink;
1564         int rc;
1565
1566         ENTRY;
1567
1568         down_read(&obj->oo_guard);
1569         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1570                 GOTO(out, rc = -ENOENT);
1571
1572         LASSERT(osd_invariant(obj));
1573         LASSERT(obj->oo_sa_hdl != NULL);
1574
1575         oh = container_of0(handle, struct osd_thandle, ot_super);
1576
1577         write_lock(&obj->oo_attr_lock);
1578         nlink = ++obj->oo_attr.la_nlink;
1579         write_unlock(&obj->oo_attr_lock);
1580
1581         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1582
1583 out:
1584         up_read(&obj->oo_guard);
1585         RETURN(rc);
1586 }
1587
1588 static int osd_declare_object_ref_del(const struct lu_env *env,
1589                                       struct dt_object *dt,
1590                                       struct thandle *handle)
1591 {
1592         return osd_declare_attr_set(env, dt, NULL, handle);
1593 }
1594
1595 /*
1596  * Concurrency: @dt is write locked.
1597  */
1598 static int osd_object_ref_del(const struct lu_env *env,
1599                               struct dt_object *dt,
1600                               struct thandle *handle)
1601 {
1602         struct osd_object       *obj = osd_dt_obj(dt);
1603         struct osd_thandle      *oh;
1604         struct osd_device       *osd = osd_obj2dev(obj);
1605         uint64_t                 nlink;
1606         int                      rc;
1607
1608         ENTRY;
1609
1610         down_read(&obj->oo_guard);
1611
1612         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1613                 GOTO(out, rc = -ENOENT);
1614
1615         LASSERT(osd_invariant(obj));
1616         LASSERT(obj->oo_sa_hdl != NULL);
1617
1618         oh = container_of0(handle, struct osd_thandle, ot_super);
1619         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
1620
1621         write_lock(&obj->oo_attr_lock);
1622         nlink = --obj->oo_attr.la_nlink;
1623         write_unlock(&obj->oo_attr_lock);
1624
1625         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1626
1627 out:
1628         up_read(&obj->oo_guard);
1629         RETURN(rc);
1630 }
1631
1632 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
1633                            __u64 start, __u64 end)
1634 {
1635         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1636         ENTRY;
1637
1638         /* XXX: there is no option other than syncing the whole filesystem
1639          * until ZIL is supported.  If the object tracked the txg it was
1640          * last modified in, it could pass that txg here instead of "0".
1641          * Maybe the changes are already committed, so no wait is needed? */
1642         if (!osd->od_dt_dev.dd_rdonly)
1643                 txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
1644
1645         RETURN(0);
1646 }
1647
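/* object invalidation is a no-op for osd-zfs */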
1648 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
1649 {
1650         return 0;
1651 }
1652
1653 static struct dt_object_operations osd_obj_ops = {
1654         .do_read_lock           = osd_object_read_lock,
1655         .do_write_lock          = osd_object_write_lock,
1656         .do_read_unlock         = osd_object_read_unlock,
1657         .do_write_unlock        = osd_object_write_unlock,
1658         .do_write_locked        = osd_object_write_locked,
1659         .do_attr_get            = osd_attr_get,
1660         .do_declare_attr_set    = osd_declare_attr_set,
1661         .do_attr_set            = osd_attr_set,
1662         .do_ah_init             = osd_ah_init,
1663         .do_declare_create      = osd_declare_object_create,
1664         .do_create              = osd_object_create,
1665         .do_declare_destroy     = osd_declare_object_destroy,
1666         .do_destroy             = osd_object_destroy,
1667         .do_index_try           = osd_index_try,
1668         .do_declare_ref_add     = osd_declare_object_ref_add,
1669         .do_ref_add             = osd_object_ref_add,
1670         .do_declare_ref_del     = osd_declare_object_ref_del,
1671         .do_ref_del             = osd_object_ref_del,
1672         .do_xattr_get           = osd_xattr_get,
1673         .do_declare_xattr_set   = osd_declare_xattr_set,
1674         .do_xattr_set           = osd_xattr_set,
1675         .do_declare_xattr_del   = osd_declare_xattr_del,
1676         .do_xattr_del           = osd_xattr_del,
1677         .do_xattr_list          = osd_xattr_list,
1678         .do_object_sync         = osd_object_sync,
1679         .do_invalidate          = osd_invalidate,
1680 };
1681
1682 static struct lu_object_operations osd_lu_obj_ops = {
1683         .loo_object_init        = osd_object_init,
1684         .loo_object_delete      = osd_object_delete,
1685         .loo_object_release     = osd_object_release,
1686         .loo_object_free        = osd_object_free,
1687         .loo_object_print       = osd_object_print,
1688         .loo_object_invariant   = osd_object_invariant,
1689 };
1690
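/*
 * The otable iterator is a pseudo object: it reports no valid attributes
 * and supports only the operations in osd_obj_otable_it_ops below.
 */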
1691 static int osd_otable_it_attr_get(const struct lu_env *env,
1692                                 struct dt_object *dt,
1693                                 struct lu_attr *attr)
1694 {
1695         attr->la_valid = 0;
1696         return 0;
1697 }
1698
1699 static struct dt_object_operations osd_obj_otable_it_ops = {
1700         .do_attr_get    = osd_otable_it_attr_get,
1701         .do_index_try   = osd_index_try,
1702 };