/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/osd-zfs/osd_object.c
 *
 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
 * Author: Mike Pershin <tappro@whamcloud.com>
 * Author: Johann Lombardi <johann@whamcloud.com>
 */
#define DEBUG_SUBSYSTEM S_OSD

#include <libcfs/libcfs.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <obd_class.h>
#include <lustre_disk.h>
#include <lustre_fid.h>

#include "osd_internal.h"

#include <sys/dnode.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_prop.h>
#include <sys/sa_impl.h>
char *osd_obj_tag = "osd_object";
static int osd_object_sync_delay_us = -1;

static const struct dt_object_operations osd_obj_ops;
static const struct lu_object_operations osd_lu_obj_ops;
static const struct dt_object_operations osd_obj_otable_it_ops;
static void
osd_object_sa_fini(struct osd_object *obj)
{
	if (obj->oo_sa_hdl) {
		sa_handle_destroy(obj->oo_sa_hdl);
		obj->oo_sa_hdl = NULL;
	}
}
static int
osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
{
	int rc;

	LASSERT(obj->oo_sa_hdl == NULL);
	LASSERT(obj->oo_dn != NULL);

	rc = osd_sa_handle_get(obj);
	if (rc)
		return rc;

	/* Cache the xattr object id, valid for the life of the object */
	rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
	if (rc == -ENOENT) {
		obj->oo_xattr = ZFS_NO_OBJECT;
		rc = 0;
	} else if (rc) {
		osd_object_sa_fini(obj);
	}

	return rc;
}
/*
 * Add object to list of dirty objects in tx handle.
 */
void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
{
	if (!list_empty(&obj->oo_sa_linkage))
		return;

	write_lock(&obj->oo_attr_lock);
	if (likely(list_empty(&obj->oo_sa_linkage)))
		list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
	write_unlock(&obj->oo_attr_lock);
}
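
/*
 * A sketch of the dirty-list life cycle implied by the helpers above and
 * below: osd_object_sa_update()/osd_object_sa_bulk_update() put the object
 * on oh->ot_sa_list after each SA modification, and at transaction stop
 * osd_object_sa_dirty_rele() drains the list, applies any delayed ("late")
 * attr/xattr updates under oo_guard, and drops the spill dbuf hold.
 */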
/*
 * Release spill block dbuf hold for all dirty SAs.
 */
void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
{
	struct osd_object *obj;

	while (!list_empty(&oh->ot_sa_list)) {
		obj = list_entry(oh->ot_sa_list.next,
				 struct osd_object, oo_sa_linkage);
		write_lock(&obj->oo_attr_lock);
		list_del_init(&obj->oo_sa_linkage);
		write_unlock(&obj->oo_attr_lock);
		if (obj->oo_late_xattr && obj->oo_destroyed == 0) {
			/*
			 * take oo_guard to protect oo_sa_xattr buffer
			 * from concurrent update by osd_xattr_set()
			 */
			LASSERT(oh->ot_assigned != 0);
			down_write(&obj->oo_guard);
			if (obj->oo_destroyed == 0) {
				if (obj->oo_late_attr_set)
					__osd_sa_attr_init(env, obj, oh);
				else if (obj->oo_late_xattr)
					__osd_sa_xattr_update(env, obj, oh);
			}
			up_write(&obj->oo_guard);
		}
		sa_spill_rele(obj->oo_sa_hdl);
	}
}
/*
 * Update the SA and add the object to the dirty list.
 */
int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
			 void *buf, uint32_t buflen, struct osd_thandle *oh)
{
	int rc;

	LASSERT(obj->oo_sa_hdl != NULL);
	LASSERT(oh->ot_tx != NULL);

	rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
	osd_object_sa_dirty_add(obj, oh);

	return rc;
}
/*
 * Bulk update the SA and add the object to the dirty list.
 */
static int
osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
			  int count, struct osd_thandle *oh)
{
	int rc;

	LASSERT(obj->oo_sa_hdl != NULL);
	LASSERT(oh->ot_tx != NULL);

	rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
	osd_object_sa_dirty_add(obj, oh);

	return rc;
}
/*
 * Retrieve the attributes of a DMU object
 */
static int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
				 struct osd_object *obj, struct lu_attr *la)
{
	struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
	sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
	struct lustre_mdt_attrs *lma;
	struct lu_buf buf;
	int cnt = 0;
	int rc;

	LASSERT(obj->oo_dn != NULL);

	la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_BTIME | LA_MODE |
			LA_TYPE | LA_SIZE | LA_UID | LA_GID | LA_FLAGS |
			LA_NLINK;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(o), NULL, osa->btime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));

	rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
	if (rc)
		GOTO(out_sa, rc);

#ifdef ZFS_PROJINHERIT
	if (o->od_projectused_dn && osa->flags & ZFS_PROJID) {
		rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(o),
				&osa->projid, 8);
		if (rc)
			GOTO(out_sa, rc);

		la->la_projid = osa->projid;
		la->la_valid |= LA_PROJID;
		obj->oo_with_projid = 1;
	} else {
		la->la_projid = ZFS_DEFAULT_PROJID;
		la->la_valid &= ~LA_PROJID;
	}
#else
	la->la_projid = ZFS_DEFAULT_PROJID;
	la->la_valid &= ~LA_PROJID;
#endif

	la->la_atime = osa->atime[0];
	la->la_mtime = osa->mtime[0];
	la->la_ctime = osa->ctime[0];
	la->la_btime = osa->btime[0];
	la->la_mode = osa->mode;
	la->la_uid = osa->uid;
	la->la_gid = osa->gid;
	la->la_nlink = osa->nlink;
	la->la_flags = attrs_zfs2fs(osa->flags);
	la->la_size = osa->size;

	/* Try to get extra flags from LMA */
	lma = (struct lustre_mdt_attrs *)osd_oti_get(env)->oti_buf;
	buf.lb_buf = lma;
	buf.lb_len = sizeof(osd_oti_get(env)->oti_buf);
	down_read(&obj->oo_guard);
	rc = osd_xattr_get_lma(env, obj, &buf);
	if (!rc) {
		lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
		obj->oo_lma_flags =
			lma_to_lustre_flags(lma->lma_incompat);
	} else if (rc == -ENODATA ||
		   !(S_ISDIR(la->la_mode) &&
		     dt_object_exists(&obj->oo_dt))) {
		rc = 0;
	}
	up_read(&obj->oo_guard);

	if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
		rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
		if (!rc) {
			la->la_rdev = osa->rdev;
			la->la_valid |= LA_RDEV;
		}
	}

out_sa:
	RETURN(rc);
}
int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
{
	dmu_buf_t *db;
	dmu_buf_impl_t *dbi;
	int rc;

	rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
	if (rc)
		return rc;

	dbi = (dmu_buf_impl_t *)db;
	DB_DNODE_ENTER(dbi);
	*dnp = DB_DNODE(dbi);
	DB_DNODE_EXIT(dbi);
	LASSERT(*dnp != NULL);

	return 0;
}
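
/*
 * Note: the dnode returned above stays pinned through the bonus dbuf hold
 * taken by dmu_bonus_hold(); the hold is dropped later via osd_dnode_rele()
 * (see osd_object_delete() below).
 */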
/*
 * Concurrency: no concurrent access is possible that early in object
 * life-cycle.
 */
struct lu_object *osd_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *d)
{
	struct osd_object *mo;

	OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
	if (mo != NULL) {
		struct lu_object *l;
		struct lu_object_header *h;
		struct osd_device *o = osd_dev(d);

		l = &mo->oo_dt.do_lu;
		if (unlikely(o->od_in_init)) {
			OBD_ALLOC_PTR(h);
			if (!h) {
				OBD_SLAB_FREE_PTR(mo, osd_object_kmem);
				return NULL;
			}

			lu_object_header_init(h);
			lu_object_init(l, h, d);
			lu_object_add_top(h, l);
			mo->oo_header = h;
		} else {
			dt_object_init(&mo->oo_dt, NULL, d);
			mo->oo_header = NULL;
		}

		mo->oo_dt.do_ops = &osd_obj_ops;
		l->lo_ops = &osd_lu_obj_ops;
		INIT_LIST_HEAD(&mo->oo_sa_linkage);
		INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
		init_rwsem(&mo->oo_sem);
		init_rwsem(&mo->oo_guard);
		rwlock_init(&mo->oo_attr_lock);
		mo->oo_destroy = OSD_DESTROY_NONE;
		return l;
	}

	return NULL;
}
static void osd_obj_set_blksize(const struct lu_env *env,
				struct osd_device *osd, struct osd_object *obj)
{
	const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
	dmu_tx_t *tx;
	dnode_t *dn = obj->oo_dn;
	uint32_t blksz;
	int rc = 0;

	LASSERT(!osd_oti_get(env)->oti_in_trans);

	tx = dmu_tx_create(osd->od_os);
	if (!tx) {
		CERROR("%s: fail to create tx to set blksize for "DFID"\n",
		       osd->od_svname, PFID(fid));
		return;
	}

	dmu_tx_hold_bonus(tx, dn->dn_object);
	rc = -dmu_tx_assign(tx, TXG_WAIT);
	if (rc) {
		dmu_tx_abort(tx);
		CERROR("%s: fail to assign tx to set blksize for "DFID
		       ": rc = %d\n", osd->od_svname, PFID(fid), rc);
		return;
	}

	down_write(&obj->oo_guard);
	if (unlikely((1 << dn->dn_datablkshift) >= PAGE_SIZE))
		GOTO(out, rc = 1);

	blksz = dn->dn_datablksz;
	if (!is_power_of_2(blksz))
		blksz = size_roundup_power2(blksz);

	if (blksz > osd->od_max_blksz)
		blksz = osd->od_max_blksz;
	else if (blksz < PAGE_SIZE)
		blksz = PAGE_SIZE;
	rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object, blksz, 0, tx);

out:
	up_write(&obj->oo_guard);
	dmu_tx_commit(tx);
	if (rc) {
		if (unlikely(obj->oo_dn->dn_maxblkid > 0))
			rc = 1;
		if (rc < 0)
			CERROR("%s: fail to set blksize for "DFID": rc = %d\n",
			       osd->od_svname, PFID(fid), rc);
	} else {
		CDEBUG(D_INODE, "%s: set blksize as %u for "DFID"\n",
		       osd->od_svname, blksz, PFID(fid));
	}
}
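
/*
 * A worked example of the clamping policy above (assuming a 4KB PAGE_SIZE):
 * dn_datablksz = 6000 is first rounded up to the power of two 8192; a
 * result above od_max_blksz is clamped down to od_max_blksz, while a
 * result below PAGE_SIZE is raised to PAGE_SIZE, so the dnode never ends
 * up with a sub-page block size.
 */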
/*
 * Concurrency: shouldn't matter.
 */
static int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
{
	struct osd_device *osd = osd_obj2dev(obj);
	const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
	int rc = 0;

	ENTRY;

	rc = osd_object_sa_init(obj, osd);
	if (rc)
		RETURN(rc);

	/* cache attrs in object */
	rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
	if (rc)
		RETURN(rc);

	if (likely(!fid_is_acct(fid))) {
		/* no body operations for accounting objects */
		obj->oo_dt.do_body_ops = &osd_body_ops;

		if (S_ISREG(obj->oo_attr.la_mode) &&
		    obj->oo_dn->dn_maxblkid == 0 &&
		    (1 << obj->oo_dn->dn_datablkshift) < PAGE_SIZE &&
		    (fid_is_idif(fid) || fid_is_norm(fid) ||
		     fid_is_echo(fid)) &&
		    osd->od_is_ost && !osd->od_dt_dev.dd_rdonly)
			osd_obj_set_blksize(env, osd, obj);
	}

	/*
	 * initialize object before marking it existing
	 */
	obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;

	obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;

	RETURN(0);
}
static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct lu_buf buf;
	struct lustre_mdt_attrs *lma;
	const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
	int rc;

	ENTRY;

	BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
	lma = (struct lustre_mdt_attrs *)info->oti_buf;
	buf.lb_buf = lma;
	buf.lb_len = sizeof(info->oti_buf);

	rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
	if (rc > 0) {
		rc = 0;
		lustre_lma_swab(lma);
		if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
			     CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
			CWARN("%s: unsupported incompat LMA feature(s) %#x for fid = "DFID"\n",
			      osd_obj2dev(obj)->od_svname,
			      lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
			      PFID(rfid));
			rc = -EOPNOTSUPP;
		} else if (unlikely(!lu_fid_eq(rfid, &lma->lma_self_fid))) {
			CERROR("%s: FID-in-LMA "DFID" does not match the object self-fid "DFID"\n",
			       osd_obj2dev(obj)->od_svname,
			       PFID(&lma->lma_self_fid), PFID(rfid));
			rc = -EREMCHG;
		} else {
			struct osd_device *osd = osd_obj2dev(obj);

			if (lma->lma_compat & LMAC_STRIPE_INFO &&
			    osd->od_is_ost)
				obj->oo_pfid_in_lma = 1;
			if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
			    osd->od_remote_parent_dir != ZFS_NO_OBJECT)
				lu_object_set_agent_entry(&obj->oo_dt.do_lu);
		}
	} else if (rc == -ENODATA) {
		/* the LMA xattr hasn't been initialized yet */
		rc = 0;
	}

	RETURN(rc);
}
/*
 * Helper function to retrieve DMU object id from fid for accounting object
 */
static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
				  const struct lu_fid *fid)
{
	dnode_t *dn = NULL;

	LASSERT(fid_is_acct(fid));

	switch (fid_oid(fid)) {
	case ACCT_USER_OID:
		dn = osd->od_userused_dn;
		break;
	case ACCT_GROUP_OID:
		dn = osd->od_groupused_dn;
		break;
#ifdef ZFS_PROJINHERIT
	case ACCT_PROJECT_OID:
		dn = osd->od_projectused_dn;
		break;
#endif
	default:
		break;
	}

	return dn;
}
/*
 * Concurrency: no concurrent access is possible that early in object
 * life-cycle.
 */
static int osd_object_init(const struct lu_env *env, struct lu_object *l,
			   const struct lu_object_conf *conf)
{
	struct osd_object *obj = osd_obj(l);
	struct osd_device *osd = osd_obj2dev(obj);
	const struct lu_fid *fid = lu_object_fid(l);
	struct lustre_scrub *scrub = &osd->od_scrub;
	struct osd_thread_info *info = osd_oti_get(env);
	struct luz_direntry *zde = &info->oti_zde;
	struct osd_idmap_cache *idc;
	char *name = info->oti_str;
	uint64_t oid;
	int rc = 0;
	int rc1 = 0;
	bool remote = false;

	ENTRY;

	LASSERT(osd_invariant(obj));

	if (fid_is_otable_it(&l->lo_header->loh_fid)) {
		obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
		l->lo_header->loh_attr |= LOHA_EXISTS;
		RETURN(0);
	}

	if (conf && conf->loc_flags & LOC_F_NEW)
		GOTO(out, rc = 0);

	if (unlikely(fid_is_acct(fid))) {
		obj->oo_dn = osd_quota_fid2dmu(osd, fid);
		if (obj->oo_dn) {
			obj->oo_dt.do_index_ops = &osd_acct_index_ops;
			l->lo_header->loh_attr |= LOHA_EXISTS;
		}

		GOTO(out, rc = 0);
	}

	idc = osd_idc_find(env, osd, fid);
	if (idc && !idc->oic_remote && idc->oic_dnode != ZFS_NO_OBJECT) {
		oid = idc->oic_dnode;
		goto zget;
	}

	rc = -ENOENT;
	if (!list_empty(&osd->od_scrub.os_inconsistent_items))
		rc = osd_oii_lookup(osd, fid, &oid);

	if (rc)
		rc = osd_fid_lookup(env, osd, fid, &oid);

	if (rc == -ENOENT) {
		if (likely(!(fid_is_norm(fid) || fid_is_igif(fid)) ||
			   fid_is_on_ost(env, osd, fid) ||
			   !zfs_test_bit(osd_oi_fid2idx(osd, fid),
					 scrub->os_file.sf_oi_bitmap)))
			GOTO(out, rc = 0);

		goto trigger;
	}

	if (rc)
		GOTO(out, rc);

zget:
	LASSERT(obj->oo_dn == NULL);

	rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
	/* EEXIST will be returned if object is being deleted in ZFS */
	if (rc == -EEXIST)
		GOTO(out, rc = 0);
	if (rc) {
		CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
		       osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
		GOTO(out, rc);
	}

	rc = osd_object_init0(env, obj);
	if (rc)
		GOTO(out, rc);

	if (unlikely(obj->oo_header))
		GOTO(out, rc = 0);

	rc = osd_check_lma(env, obj);
	if (rc != -EREMCHG)
		GOTO(out, rc);

	osd_scrub_refresh_mapping(env, osd, fid, oid, DTO_INDEX_DELETE, true,
				  NULL);

	/*
	 * We still have a chance to get the valid dnode: for the object that
	 * is referenced by a remote name entry, the object on the local MDT
	 * will be linked under the dir /REMOTE_PARENT_DIR with its FID string
	 * as name.
	 *
	 * During the OI scrub, if we cannot find the OI mapping, we may still
	 * have a chance to map the FID to a local OID via a lookup in the dir
	 * /REMOTE_PARENT_DIR.
	 */
	if (!remote && !fid_is_on_ost(env, osd, fid)) {
		osd_fid2str(name, fid, sizeof(info->oti_str));
		rc = osd_zap_lookup(osd, osd->od_remote_parent_dir,
				    NULL, name, 8, 3, (void *)zde);
		if (!rc) {
			oid = zde->lzd_reg.zde_dnode;
			osd_dnode_rele(obj->oo_dn);
			obj->oo_dn = NULL;
			remote = true;
			goto zget;
		}
	}

trigger:
	/* The case someone triggered the OI scrub already. */
	if (scrub->os_running) {
		if (remote)
			lu_object_set_agent_entry(l);
		osd_oii_insert(env, osd, fid, oid, false);
		GOTO(out, rc = 0);
	}

	/* The case NOT allowed to trigger OI scrub automatically. */
	if (osd->od_auto_scrub_interval == AS_NEVER)
		GOTO(out, rc);

	/* It is up to us to trigger the OI scrub. */
	rc1 = osd_scrub_start(env, osd, SS_CLEAR_DRYRUN |
			      SS_CLEAR_FAILOUT | SS_AUTO_FULL);
	CDEBUG_LIMIT(D_LFSCK | D_CONSOLE | D_WARNING,
		     "%s: trigger OI scrub by RPC for "DFID"/%#llx: rc = %d\n",
		     osd_name(osd), PFID(fid), oid, rc1);
	if (rc1 == 0 && remote) {
		lu_object_set_agent_entry(l);
		osd_oii_insert(env, osd, fid, oid, false);
	}

out:
	RETURN(rc);
}
/*
 * Concurrency: no concurrent access is possible that late in object
 * life-cycle.
 */
static void osd_object_free(const struct lu_env *env, struct lu_object *l)
{
	struct osd_object *obj = osd_obj(l);
	struct lu_object_header *h = obj->oo_header;

	LASSERT(osd_invariant(obj));

	dt_object_fini(&obj->oo_dt);
	/* obj doesn't contain an lu_object_header, so we don't need call_rcu */
	OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
	if (unlikely(h))
		lu_object_header_free(h);
}
static int
osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
{
	int rc = -EBUSY;

	LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);

	/* the object is supposed to be exclusively locked by
	 * the caller (osd_destroy()), while the transaction
	 * (oh) is per-thread and not shared */
	if (likely(list_empty(&obj->oo_unlinked_linkage))) {
		list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
		rc = 0;
	}

	return rc;
}
/* Default to max data size covered by a level-1 indirect block */
static unsigned long osd_sync_destroy_max_size =
	1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
module_param(osd_sync_destroy_max_size, ulong, 0444);
MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
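
/*
 * A worked example of the default above, assuming the common ZFS constants
 * DN_MAX_INDBLKSHIFT = 17, SPA_BLKPTRSHIFT = 7 and SPA_MAXBLOCKSHIFT = 24:
 * a 128KB level-1 indirect block holds 2^(17 - 7) = 1024 block pointers,
 * each covering up to a 16MB data block, so osd_sync_destroy_max_size =
 * 1UL << 34 = 16GB; larger objects are queued for asynchronous destroy
 * (see osd_object_set_destroy_type() below).
 */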
static void
osd_object_set_destroy_type(struct osd_object *obj)
{
	/*
	 * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
	 * only once and use it consistently thereafter.
	 */
	down_write(&obj->oo_guard);
	if (obj->oo_destroy == OSD_DESTROY_NONE) {
		if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
			obj->oo_destroy = OSD_DESTROY_SYNC;
		else /* Larger objects are destroyed asynchronously */
			obj->oo_destroy = OSD_DESTROY_ASYNC;
	}
	up_write(&obj->oo_guard);
}
static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
			       struct thandle *th)
{
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	dnode_t *dn;
	uint64_t zapid;
	int rc;

	ENTRY;

	LASSERT(th != NULL);
	LASSERT(dt_object_exists(dt));

	oh = container_of(th, struct osd_thandle, ot_super);
	LASSERT(oh->ot_tx != NULL);

	dmu_tx_mark_netfree(oh->ot_tx);

	/* declare that we'll remove object from fid-dnode mapping */
	zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
	osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);

	osd_declare_xattrs_destroy(env, obj, oh);

	rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
			       obj->oo_attr.la_gid, obj->oo_attr.la_projid,
			       -1, oh, NULL, OSD_QID_INODE);
	if (rc)
		RETURN(rc);

	/* data to be truncated */
	rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
			       obj->oo_attr.la_gid, obj->oo_attr.la_projid,
			       0, oh, NULL, OSD_QID_BLK);
	if (rc)
		RETURN(rc);

	osd_object_set_destroy_type(obj);
	if (obj->oo_destroy == OSD_DESTROY_SYNC)
		dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
				 0, DMU_OBJECT_END);
	else
		osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
				osd->od_unlinked, TRUE, NULL);

	/* remove agent entry (if any) from remote parent */
	if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
		osd_tx_hold_zap(oh->ot_tx, osd->od_remote_parent_dir,
				NULL, FALSE, NULL);

	/* will help to find the FID->ino mapping when this object is
	 * being added to PENDING/ */
	osd_idc_find_and_init(env, osd, obj);

	RETURN(0);
}
static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
		       struct thandle *th)
{
	struct osd_thread_info *info = osd_oti_get(env);
	char *buf = info->oti_str;
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_thandle *oh;
	dnode_t *zdn;
	uint64_t oid, zapid;
	int rc;

	ENTRY;

	down_write(&obj->oo_guard);

	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(obj->oo_dn != NULL);

	oh = container_of(th, struct osd_thandle, ot_super);
	LASSERT(oh != NULL);
	LASSERT(oh->ot_tx != NULL);

	/* remove obj ref from index dir (it depends) */
	zapid = osd_get_name_n_idx(env, osd, fid, buf,
				   sizeof(info->oti_str), &zdn);
	rc = osd_xattrs_destroy(env, obj, oh);
	if (rc) {
		CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
		       osd->od_svname, buf, rc);
		GOTO(out, rc);
	}

	if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
		rc = osd_delete_from_remote_parent(env, osd, obj, oh, true);
		if (rc)
			GOTO(out, rc);
	}

	oid = obj->oo_dn->dn_object;
	if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
		/* this may happen if the destroy wasn't declared,
		 * e.g. when the object is created and then destroyed
		 * in the same transaction - we don't need additional
		 * space for destroy specifically */
		LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
		rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to free %s/%#llx: rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	} else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
		rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to free %s/%#llx: rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	} else { /* asynchronous destroy */
		char *key = info->oti_key;

		rc = osd_object_unlinked_add(obj, oh);
		if (rc)
			GOTO(out, rc);

		snprintf(key, sizeof(info->oti_key), "%llx", oid);
		rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
				 osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
		if (rc)
			CERROR("%s: zap_add_int() failed %s/%#llx: rc = %d\n",
			       osd->od_svname, buf, oid, rc);
	}

	/* Remove the OI mapping after the destroy to handle the race with
	 * OI scrub that may insert missed OI mapping during the interval. */
	rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
	if (unlikely(rc == -ENOENT))
		rc = 0;
	if (rc)
		CERROR("%s: zap_remove(%s) failed: rc = %d\n",
		       osd->od_svname, buf, rc);

out:
	/* not needed in the cache anymore */
	set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
	if (rc == 0)
		obj->oo_destroyed = 1;
	up_write(&obj->oo_guard);

	RETURN(rc);
}
static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
{
	struct osd_object *obj = osd_obj(l);
	const struct lu_fid *fid = lu_object_fid(l);

	if (obj->oo_dn) {
		if (likely(!fid_is_acct(fid))) {
			osd_object_sa_fini(obj);
			if (obj->oo_sa_xattr) {
				nvlist_free(obj->oo_sa_xattr);
				obj->oo_sa_xattr = NULL;
			}
			osd_dnode_rele(obj->oo_dn);
			list_del(&obj->oo_sa_linkage);
		}
		obj->oo_dn = NULL;
	}
}
/*
 * Concurrency: ->loo_object_release() is called under site spin-lock.
 */
static void osd_object_release(const struct lu_env *env,
			       struct lu_object *l)
{
}

/*
 * Concurrency: shouldn't matter.
 */
static int osd_object_print(const struct lu_env *env, void *cookie,
			    lu_printer_t p, const struct lu_object *l)
{
	struct osd_object *o = osd_obj(l);

	return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
}
static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
			  unsigned role)
{
	struct osd_object *obj = osd_dt_obj(dt);

	LASSERT(osd_invariant(obj));

	down_read_nested(&obj->oo_sem, role);
}

static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
			   unsigned role)
{
	struct osd_object *obj = osd_dt_obj(dt);

	LASSERT(osd_invariant(obj));

	down_write_nested(&obj->oo_sem, role);
}

static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
{
	struct osd_object *obj = osd_dt_obj(dt);

	LASSERT(osd_invariant(obj));
	up_read(&obj->oo_sem);
}

static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
{
	struct osd_object *obj = osd_dt_obj(dt);

	LASSERT(osd_invariant(obj));
	up_write(&obj->oo_sem);
}

static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int rc = 1;

	LASSERT(osd_invariant(obj));

	if (down_write_trylock(&obj->oo_sem)) {
		rc = 0;
		up_write(&obj->oo_sem);
	}
	return rc;
}
static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
			struct lu_attr *attr)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	uint64_t blocks;
	uint32_t blksize;
	int rc = 0;

	down_read(&obj->oo_guard);

	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
		GOTO(out, rc = 0);

	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_dn);

	read_lock(&obj->oo_attr_lock);
	*attr = obj->oo_attr;
	if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
		attr->la_valid |= LA_FLAGS;
		attr->la_flags |= LUSTRE_ORPHAN_FL;
	}
	if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
		attr->la_valid |= LA_FLAGS;
		attr->la_flags |= LUSTRE_ENCRYPT_FL;
	}
	read_unlock(&obj->oo_attr_lock);
	if (attr->la_valid & LA_FLAGS && attr->la_flags & LUSTRE_ORPHAN_FL)
		CDEBUG(D_INFO, "%s: set orphan flag on "DFID" (%#llx/%#x)\n",
		       osd_obj2dev(obj)->od_svname,
		       PFID(lu_object_fid(&dt->do_lu)),
		       attr->la_valid, obj->oo_lma_flags);

	/* with ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
	 * from within sa_object_size() can block on a mutex, so
	 * we can't call sa_object_size() while holding the rwlock */
	sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
	/* we do not control the size of indices, so always calculate
	 * it from the number of blocks reported by DMU */
	if (S_ISDIR(attr->la_mode)) {
		attr->la_size = 512 * blocks;
		rc = -zap_count(osd->od_os, obj->oo_dn->dn_object,
				&attr->la_dirent_count);
		attr->la_valid |= LA_DIRENT_CNT;
	}
	/* Block size may not be set; suggest maximal I/O transfers. */
	if (blksize == 0)
		blksize = osd_spa_maxblocksize(
			dmu_objset_spa(osd_obj2dev(obj)->od_os));

	attr->la_blksize = blksize;
	attr->la_blocks = blocks;
	attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
/* Simple wrapper on top of the qsd API which implements quota transfer for
 * osd setattr needs. As a reminder, only the root user can change ownership
 * of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
static inline int qsd_transfer(const struct lu_env *env,
			       struct qsd_instance *qsd,
			       struct lquota_trans *trans, int qtype,
			       __u64 orig_id, __u64 new_id, __u64 bspace,
			       struct lquota_id_info *qi)
{
	int rc;

	if (unlikely(qsd == NULL))
		return 0;

	LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
	qi->lqi_type = qtype;

	/* inode accounting */
	qi->lqi_is_blk = false;

	/* one more inode for the new owner ... */
	qi->lqi_id.qid_uid = new_id;
	qi->lqi_space = 1;
	rc = qsd_op_begin(env, qsd, trans, qi, NULL);
	if (rc == -EDQUOT || rc == -EINPROGRESS)
		rc = 0;
	if (rc)
		return rc;

	/* and one less inode for the current id */
	qi->lqi_id.qid_uid = orig_id;
	qi->lqi_space = -1;
	/* can't get EDQUOT when reducing usage */
	rc = qsd_op_begin(env, qsd, trans, qi, NULL);
	if (rc == -EINPROGRESS)
		rc = 0;
	if (rc)
		return rc;

	/* block accounting */
	qi->lqi_is_blk = true;

	/* more blocks for the new owner ... */
	qi->lqi_id.qid_uid = new_id;
	qi->lqi_space = bspace;
	rc = qsd_op_begin(env, qsd, trans, qi, NULL);
	if (rc == -EDQUOT || rc == -EINPROGRESS)
		rc = 0;
	if (rc)
		return rc;

	/* and finally less blocks for the current owner */
	qi->lqi_id.qid_uid = orig_id;
	qi->lqi_space = -bspace;
	rc = qsd_op_begin(env, qsd, trans, qi, NULL);
	/* can't get EDQUOT when reducing usage */
	if (rc == -EINPROGRESS)
		rc = 0;
	return rc;
}
static int osd_declare_attr_set(const struct lu_env *env,
				struct dt_object *dt,
				const struct lu_attr *attr,
				struct thandle *handle)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_tx_hold_t *txh;
	struct osd_thandle *oh;
	uint64_t bspace;
	uint32_t blksize;
	int rc = 0;

	ENTRY;

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));

	oh = container_of(handle, struct osd_thandle, ot_super);

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out_sem, rc = 0);

	LASSERT(obj->oo_sa_hdl != NULL);
	LASSERT(oh->ot_tx != NULL);
	/* regular attributes are part of the bonus buffer */
	/* let's check whether this object is already part of
	 * the transaction */
	for (txh = list_head(&oh->ot_tx->tx_holds); txh;
	     txh = list_next(&oh->ot_tx->tx_holds, txh)) {
		if (txh->txh_dnode == NULL)
			continue;
		if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
			continue;
		/* this object is part of the transaction already,
		 * we don't need to declare bonus again */
		break;
	}
	if (txh == NULL)
		dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
	if (oh->ot_tx->tx_err != 0)
		GOTO(out_sem, rc = -oh->ot_tx->tx_err);

	if (attr && attr->la_valid & LA_FLAGS) {
		/* LMA is usually a part of bonus, no need to declare
		 * anything else */
	}

	if (attr && (attr->la_valid & (LA_UID | LA_GID | LA_PROJID))) {
		sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
		bspace = toqb(bspace * 512);

		CDEBUG(D_QUOTA,
		       "%s: enforce quota on UID %u, GID %u, the quota space is %lld (%u)\n",
		       osd->od_svname,
		       attr->la_uid, attr->la_gid, bspace, blksize);
	}

	/* to preserve locking order - qsd_transfer() may need to flush
	 * the currently running transaction when we're out of quota */
	up_read(&obj->oo_guard);

	if (attr && attr->la_valid & LA_UID) {
		/* quota enforcement for user */
		if (attr->la_uid != obj->oo_attr.la_uid) {
			rc = qsd_transfer(env, osd_def_qsd(osd),
					  &oh->ot_quota_trans, USRQUOTA,
					  obj->oo_attr.la_uid, attr->la_uid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}
	if (attr && attr->la_valid & LA_GID) {
		/* quota enforcement for group */
		if (attr->la_gid != obj->oo_attr.la_gid) {
			rc = qsd_transfer(env, osd_def_qsd(osd),
					  &oh->ot_quota_trans, GRPQUOTA,
					  obj->oo_attr.la_gid, attr->la_gid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}
#ifdef ZFS_PROJINHERIT
	if (attr && attr->la_valid & LA_PROJID) {
		/* quota enforcement for project */
		if (attr->la_projid != obj->oo_attr.la_projid) {
			if (!osd->od_projectused_dn)
				GOTO(out, rc = -EOPNOTSUPP);

			if (!projid_valid(make_kprojid(&init_user_ns, attr->la_projid)))
				GOTO(out, rc = -EINVAL);

			/* Usually, if project quota is upgradable for the
			 * device, then the upgrade will be done before or
			 * when the device is mounted. So when we come here,
			 * this project should have the project ID attribute
			 * already (that is zero by default). Otherwise,
			 * something went wrong during the former upgrade,
			 * let's return failure to the caller.
			 *
			 * Please note that, different from other attributes,
			 * you can NOT simply set the project ID attribute in
			 * such a case, because adding (NOT changing) the
			 * project ID attribute needs to change the object's
			 * attribute layout to match the zfs backend quota
			 * accounting requirement. */
			if (unlikely(!obj->oo_with_projid))
				GOTO(out, rc = -ENXIO);

			rc = qsd_transfer(env, osd_def_qsd(osd),
					  &oh->ot_quota_trans, PRJQUOTA,
					  obj->oo_attr.la_projid,
					  attr->la_projid, bspace,
					  &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}
#endif
out:
	RETURN(rc);
out_sem:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
/*
 * Set the attributes of an object
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, oid) called and then assigned
 * to a transaction group.
 */
static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
			const struct lu_attr *la, struct thandle *handle)
{
	struct osd_thread_info *info = osd_oti_get(env);
	sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	struct osa_attr *osa = &info->oti_osa;
	__u64 valid = la->la_valid;
	int cnt = 0;
	int rc = 0;

	ENTRY;

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl);

	oh = container_of(handle, struct osd_thandle, ot_super);
	/* Assert that the transaction has been assigned to a
	 * transaction group. */
	LASSERT(oh->ot_tx->tx_txg != 0);

	if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) && !osd->od_is_ost) {
		struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
		char *buf = info->oti_str;
		dnode_t *zdn = NULL;
		uint64_t zapid;

		zapid = osd_get_name_n_idx(env, osd, lu_object_fid(&dt->do_lu),
					   buf, sizeof(info->oti_str), &zdn);
		rc = osd_zap_lookup(osd, zapid, zdn, buf, 8,
				    sizeof(*zde) / 8, zde);
		if (!rc) {
			zde->zde_dnode -= 1;
			rc = -zap_update(osd->od_os, zapid, buf, 8,
					 sizeof(*zde) / 8, zde, oh->ot_tx);
		}
		if (rc)
			GOTO(out, rc);
	}

	/* Only allow setting size on regular files */
	if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
		valid &= ~(LA_SIZE | LA_BLOCKS);

	if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
		valid &= ~LA_CTIME;

	if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
		valid &= ~LA_MTIME;

	if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
		valid &= ~LA_ATIME;

	if (valid == 0)
		GOTO(out, rc = 0);

	if (valid & LA_FLAGS) {
		struct lustre_mdt_attrs *lma;
		struct lu_buf buf;
		int size = 0;

		if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
			LASSERT(!obj->oo_pfid_in_lma);
			BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
			lma = (struct lustre_mdt_attrs *)&info->oti_buf;
			buf.lb_buf = lma;
			buf.lb_len = sizeof(info->oti_buf);

			/* Please do NOT call osd_xattr_get() directly, that
			 * will cause recursive down_read() on oo_guard. */
			rc = osd_xattr_get_internal(env, obj, &buf,
						    XATTR_NAME_LMA, &size);
			if (!rc && unlikely(size < sizeof(*lma))) {
				rc = -EINVAL;
			} else if (!rc) {
				lma->lma_incompat =
					le32_to_cpu(lma->lma_incompat);
				lma->lma_incompat |=
					lustre_to_lma_flags(la->la_flags);
				lma->lma_incompat =
					cpu_to_le32(lma->lma_incompat);
				buf.lb_buf = lma;
				buf.lb_len = sizeof(*lma);
				rc = osd_xattr_set_internal(env, obj, &buf,
							    XATTR_NAME_LMA,
							    LU_XATTR_REPLACE,
							    oh);
			}
			if (rc < 0) {
				CWARN("%s: failed to set LMA flags: rc = %d\n",
				      osd->od_svname, rc);
				GOTO(out, rc);
			} else {
				obj->oo_lma_flags =
					la->la_flags & LUSTRE_LMA_FL_MASKS;
			}
		}
	}

	write_lock(&obj->oo_attr_lock);
	cnt = 0;

	if (valid & LA_PROJID) {
#ifdef ZFS_PROJINHERIT
		if (osd->od_projectused_dn) {
			LASSERT(obj->oo_with_projid);

			osa->projid = obj->oo_attr.la_projid = la->la_projid;
			SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
					 &osa->projid, 8);
		} else
#endif
			valid &= ~LA_PROJID;
	}

	if (valid & LA_ATIME) {
		osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
				 osa->atime, 16);
	}
	if (valid & LA_MTIME) {
		osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
				 osa->mtime, 16);
	}
	if (valid & LA_CTIME) {
		osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
				 osa->ctime, 16);
	}
	if (valid & LA_MODE) {
		/* mode is stored along with type, so read it first */
		obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
				       (la->la_mode & ~S_IFMT);
		osa->mode = obj->oo_attr.la_mode;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
				 &osa->mode, 8);
	}
	if (valid & LA_SIZE) {
		osa->size = obj->oo_attr.la_size = la->la_size;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
				 &osa->size, 8);
	}
	if (valid & LA_NLINK) {
		osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
				 &osa->nlink, 8);
	}
	if (valid & LA_RDEV) {
		osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
				 &osa->rdev, 8);
	}
	if (valid & LA_FLAGS) {
		osa->flags = attrs_fs2zfs(la->la_flags);
		/* many flags are not supported by zfs, so ensure a good cached
		 * copy */
		obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
#ifdef ZFS_PROJINHERIT
		if (obj->oo_with_projid && osd->od_projectused_dn)
			osa->flags |= ZFS_PROJID;
#endif
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
				 &osa->flags, 8);
	}
	if (valid & LA_UID) {
		osa->uid = obj->oo_attr.la_uid = la->la_uid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
				 &osa->uid, 8);
	}
	if (valid & LA_GID) {
		osa->gid = obj->oo_attr.la_gid = la->la_gid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
				 &osa->gid, 8);
	}
	obj->oo_attr.la_valid |= valid;
	write_unlock(&obj->oo_attr_lock);

	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
	rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
/*
 * XXX temporary solution.
 */
static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
			struct dt_object *parent, struct dt_object *child,
			umode_t child_mode)
{
	LASSERT(ah);

	ah->dah_parent = parent;
	ah->dah_mode = child_mode;

	if (parent != NULL && !dt_object_remote(parent)) {
		/* will help to find FID->ino at dt_insert("..") */
		struct osd_object *pobj = osd_dt_obj(parent);

		osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
	}
}
static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
			      struct lu_attr *attr,
			      struct dt_allocation_hint *hint,
			      struct dt_object_format *dof,
			      struct thandle *handle)
{
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t zapid;
	dnode_t *dn;
	int rc, dnode_size;

	ENTRY;

	switch (dof->dof_type) {
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		if (obj->oo_dt.do_body_ops == NULL)
			obj->oo_dt.do_body_ops = &osd_body_ops;
		break;
	default:
		break;
	}

	LASSERT(handle != NULL);
	oh = container_of(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_tx != NULL);

	/* this is the minimum set of EAs on every Lustre object */
	obj->oo_ea_in_bonus = OSD_BASE_EA_IN_BONUS;
	/* reserve 32 bytes for extra stuff like ACLs */
	dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);

	switch (dof->dof_type) {
	case DFT_DIR:
		dt->do_index_ops = &osd_dir_ops;
	case DFT_INDEX:
		/* for zap create */
		dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
		dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
		break;
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		/* first, we'll create new object */
		dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
		break;
	default:
		LBUG();
		break;
	}

	/* and we'll add it to some mapping */
	zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
	osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);

	/* will help to find FID->ino mapping at dt_insert() */
	osd_idc_find_and_init(env, osd, obj);

	rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid,
			       attr->la_projid, 1, oh, NULL, OSD_QID_INODE);

	RETURN(rc);
}
int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
		    struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
		    struct lu_attr *la, uint64_t parent,
		    nvlist_t *xattr)
{
	sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
	struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
	uint64_t gen;
	inode_timespec_t now;
	char *dxattr = NULL;
	size_t sa_size;
	int cnt = 0;
	int rc;

	LASSERT(sa_hdl);

	gen = dmu_tx_get_txg(tx);
	gethrestime(&now);
	ZFS_TIME_ENCODE(&now, osa->btime);

	osa->atime[0] = la->la_atime;
	osa->ctime[0] = la->la_ctime;
	osa->mtime[0] = la->la_mtime;
	osa->mode = la->la_mode;
	osa->uid = la->la_uid;
	osa->gid = la->la_gid;
	osa->rdev = la->la_rdev;
	osa->nlink = la->la_nlink;
	if (la->la_valid & LA_FLAGS)
		osa->flags = attrs_fs2zfs(la->la_flags);
	else
		osa->flags = 0;
	osa->size = la->la_size;
#ifdef ZFS_PROJINHERIT
	if (osd->od_projectused_dn) {
		if (la->la_valid & LA_PROJID)
			osa->projid = la->la_projid;
		else
			osa->projid = ZFS_DEFAULT_PROJID;
		osa->flags |= ZFS_PROJID;

		obj->oo_with_projid = 1;
	} else {
		osa->flags &= ~ZFS_PROJID;
	}
#endif

	/*
	 * we need to create all SA below upon object create.
	 *
	 * XXX The attribute order matters since the accounting callback relies
	 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
	 * look up the UID/GID/PROJID attributes. Moreover, the callback does
	 * not seem to support the spill block.
	 * We define attributes in the same order as SA_*_OFFSET in order to
	 * work around the problem. See ORI-610.
	 */
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, osa->btime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
#ifdef ZFS_PROJINHERIT
	if (osd->od_projectused_dn)
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
				 &osa->projid, 8);
#endif
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));

	if (xattr) {
		rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
		if (rc)
			return rc;

		dxattr = osd_zio_buf_alloc(sa_size);
		if (dxattr == NULL)
			return -ENOMEM;

		rc = -nvlist_pack(xattr, &dxattr, &sa_size,
				  NV_ENCODE_XDR, KM_SLEEP);
		if (rc) {
			osd_zio_buf_free(dxattr, sa_size);
			return rc;
		}

		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
				 NULL, dxattr, sa_size);
	}

	rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
	if (dxattr)
		osd_zio_buf_free(dxattr, sa_size);

	return rc;
}
int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
		       uint64_t oid, dnode_t **dnp)
{
	dmu_tx_hold_t *txh;
	int rc = 0;

	/* take dnode_t from tx to save on dnode#->dnode_t lookup */
	for (txh = list_tail(&tx->tx_holds); txh;
	     txh = list_prev(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		dmu_buf_impl_t *db;

		if (dn == NULL)
			continue;
		if (dn->dn_object != oid)
			continue;

		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
		rw_exit(&dn->dn_struct_rwlock);

		db = dn->dn_bonus;
		LASSERT(dn->dn_handle);
		DB_DNODE_ENTER(db);
		if (zfs_refcount_add(&db->db_holds, osd_obj_tag) == 1) {
			zfs_refcount_add(&dn->dn_holds, osd_obj_tag);
			atomic_inc_32(&dn->dn_dbufs_count);
		}
		*dnp = DB_DNODE(db);
		DB_DNODE_EXIT(db);

		dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
		break;
	}

	if (unlikely(*dnp == NULL))
		rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);

	return rc;
}
#ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
{
	int dnsize;

	if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
		dnsize = DNODE_MIN_SIZE;
		do {
			if (DN_BONUS_SIZE(dnsize) >= ea_in_bonus + 32)
				break;
			dnsize <<= 1;
		} while (dnsize < DNODE_MAX_SIZE);
		if (dnsize > DNODE_MAX_SIZE)
			dnsize = DNODE_MAX_SIZE;
	} else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
		dnsize = 1024;
	} else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
		dnsize = 2048;
	} else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
		dnsize = 4096;
	} else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
		dnsize = 8192;
	} else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
		dnsize = 16384;
	} else {
		dnsize = DNODE_MIN_SIZE;
	}

	return dnsize;
}
#endif
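
/*
 * A worked example for ZFS_DNSIZE_AUTO, assuming the usual ZFS layout where
 * DN_BONUS_SIZE(dnsize) = dnsize - DNODE_CORE_SIZE - sizeof(blkptr_t)
 * (i.e. 320 bonus bytes in a 512-byte dnode, 832 in a 1K dnode): with
 * ea_in_bonus = 500 the 512-byte dnode is too small (320 < 500 + 32), so
 * the loop doubles once and settles on a 1K dnode (832 >= 532).
 */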
/*
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
 * to a transaction group.
 */
int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
			struct osd_object *obj, const struct lu_fid *fid,
			dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
{
	dmu_object_type_t type = DMU_OT_PLAIN_FILE_CONTENTS;
	uint64_t oid;
	int size;

	/* Use DMU_OTN_UINT8_METADATA for local objects so their data blocks
	 * will get an additional ditto copy */
	if (unlikely(S_ISREG(la->la_mode) &&
		     fid_seq_is_local_file(fid_seq(fid))))
		type = DMU_OTN_UINT8_METADATA;

	/* Create a new DMU object using a dnode size that fits the EAs. */
	if (obj->oo_ea_in_bonus)
		size = obj->oo_ea_in_bonus;
	else
		size = OSD_BASE_EA_IN_BONUS;
	oid = osd_dmu_object_alloc(osd->od_os, type, 0,
				   osd_find_dnsize(osd, size), tx);

	LASSERT(la->la_valid & LA_MODE);
	la->la_size = 0;
	la->la_nlink = 1;

	return osd_find_new_dnode(env, tx, oid, dnp);
}
/*
 * The transaction passed to this routine must have
 * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
 * to a transaction group.
 *
 * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
 * This is fine for directories today, because storing the FID in the dirent
 * will also require a FAT ZAP. If there is a new type of micro ZAP created
 * then we might need to re-evaluate the use of this flag and instead do
 * a conversion from the different internal ZAP hash formats being used.
 */
int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
		     dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
		     unsigned dnsize, zap_flags_t flags)
{
	uint64_t oid;

	/* Assert that the transaction has been assigned to a
	 * transaction group. */
	LASSERT(tx->tx_txg != 0);
	*dnp = NULL;

	oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
				   DMU_OT_DIRECTORY_CONTENTS,
				   14, /* == ZFS fzap_default_blockshift */
				   DN_MAX_INDBLKSHIFT, /* indirect blockshift */
				   dnsize, tx);

	la->la_size = 0;
	la->la_nlink = 1;

	return osd_find_new_dnode(env, tx, oid, dnp);
}
static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
			  struct lu_attr *la, struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dnode_t *dn;
	int rc;

	/* Index file should be created as a regular file in order not to
	 * confuse ZPL which could interpret them as directory.
	 * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to
	 * use 64-bit keys. */
	LASSERT(S_ISREG(la->la_mode));
	rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
		osd_find_dnsize(osd, obj->oo_ea_in_bonus), ZAP_FLAG_UINT64_KEY);
	if (rc)
		return ERR_PTR(rc);

	return dn;
}
static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
			  struct lu_attr *la, struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dnode_t *dn;
	int rc;

	LASSERT(S_ISDIR(la->la_mode));
	rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
			      osd_find_dnsize(osd, obj->oo_ea_in_bonus), 0);
	if (rc)
		return ERR_PTR(rc);

	return dn;
}
static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
			  struct lu_attr *la, struct osd_thandle *oh)
{
	const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
	struct osd_device *osd = osd_obj2dev(obj);
	dnode_t *dn;
	int rc;

	LASSERT(S_ISREG(la->la_mode));
	rc = __osd_object_create(env, osd, obj, fid, &dn, oh->ot_tx, la);
	if (rc)
		return ERR_PTR(rc);

	if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid))) {
		/* The minimum block size must be at least page size, otherwise
		 * it will break the assumption in tgt_thread_big_cache where
		 * the array size is PTLRPC_MAX_BRW_PAGES. It will also affect
		 * RDMA due to the subpage transfer size. */
		rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
					       PAGE_SIZE, 0, oh->ot_tx);
		if (unlikely(rc)) {
			CERROR("%s: can't change blocksize: %d\n",
			       osd->od_svname, rc);
			return ERR_PTR(rc);
		}
	} else if ((fid_is_llog(fid))) {
		rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
					       LLOG_MIN_CHUNK_SIZE, 0,
					       oh->ot_tx);
		if (unlikely(rc)) {
			CERROR("%s: can't change blocksize: %d\n",
			       osd->od_svname, rc);
			return ERR_PTR(rc);
		}
	}

	return dn;
}
static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
			  struct lu_attr *la, struct osd_thandle *oh)
{
	dnode_t *dn;
	int rc;

	LASSERT(S_ISLNK(la->la_mode));
	rc = __osd_object_create(env, osd_obj2dev(obj), obj,
				 lu_object_fid(&obj->oo_dt.do_lu),
				 &dn, oh->ot_tx, la);
	if (rc)
		return ERR_PTR(rc);

	return dn;
}
static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
			  struct lu_attr *la, struct osd_thandle *oh)
{
	dnode_t *dn;
	int rc;

	if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
		la->la_valid |= LA_RDEV;

	rc = __osd_object_create(env, osd_obj2dev(obj), obj,
				 lu_object_fid(&obj->oo_dt.do_lu),
				 &dn, oh->ot_tx, la);
	if (rc)
		return ERR_PTR(rc);

	return dn;
}

typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
				   struct osd_object *obj,
				   struct lu_attr *la,
				   struct osd_thandle *oh);
static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
{
	osd_obj_type_f result;

	switch (type) {
	case DFT_DIR:
		result = osd_mkdir;
		break;
	case DFT_INDEX:
		result = osd_mkidx;
		break;
	case DFT_REGULAR:
		result = osd_mkreg;
		break;
	case DFT_SYM:
		result = osd_mksym;
		break;
	case DFT_NODE:
		result = osd_mknod;
		break;
	default:
		LBUG();
		break;
	}
	return result;
}
/*
 * Concurrency: @dt is write locked.
 */
static int osd_create(const struct lu_env *env, struct dt_object *dt,
		      struct lu_attr *attr, struct dt_allocation_hint *hint,
		      struct dt_object_format *dof, struct thandle *th)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
	struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	char *buf = info->oti_str;
	struct osd_thandle *oh;
	dnode_t *dn = NULL, *zdn = NULL;
	uint64_t zapid, parent = 0;
	int rc;
	__u32 compat = 0;

	ENTRY;

	LASSERT(!fid_is_acct(fid));

	/* concurrent create declarations should not see
	 * the object inconsistent (db, attr, etc).
	 * in regular cases acquisition should be cheap */
	down_write(&obj->oo_guard);

	if (unlikely(dt_object_exists(dt)))
		GOTO(out, rc = -EEXIST);

	LASSERT(osd_invariant(obj));
	LASSERT(dof != NULL);

	LASSERT(th != NULL);
	oh = container_of(th, struct osd_thandle, ot_super);

	LASSERT(obj->oo_dn == NULL);

	/* to follow ZFS on-disk format we need
	 * to initialize parent dnode properly */
	if (hint != NULL && hint->dah_parent != NULL &&
	    !dt_object_remote(hint->dah_parent))
		parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;

	/* we may fix some attributes, better do not change the source */
	obj->oo_attr = *attr;
	obj->oo_attr.la_size = 0;
	obj->oo_attr.la_nlink = 0;
	obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;

#ifdef ZFS_PROJINHERIT
	if (osd->od_projectused_dn) {
		if (!(obj->oo_attr.la_valid & LA_PROJID))
			obj->oo_attr.la_projid = ZFS_DEFAULT_PROJID;
		obj->oo_with_projid = 1;
	}
#endif

	dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
	if (IS_ERR(dn)) {
		rc = PTR_ERR(dn);
		dn = NULL;
		GOTO(out, rc);
	}

	zde->zde_pad = 0;
	zde->zde_dnode = dn->dn_object;
	zde->zde_type = S_DT(attr->la_mode & S_IFMT);

	zapid = osd_get_name_n_idx(env, osd, fid, buf,
				   sizeof(info->oti_str), &zdn);
	if (CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY) ||
	    (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_NO_ENTRY)))
		goto skip_add;

	if (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
		zde->zde_dnode++;

	rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
	if (rc)
		GOTO(out, rc);

skip_add:
	obj->oo_dn = dn;
	/* Now add in all of the "SA" attributes */
	rc = osd_sa_handle_get(obj);
	if (rc)
		GOTO(out, rc);

	rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
	if (rc)
		GOTO(out, rc);

	/* initialize LMA */
	if (fid_is_idif(fid) || (fid_is_norm(fid) && osd->od_is_ost))
		compat |= LMAC_FID_ON_OST;
	lustre_lma_init(lma, fid, compat, 0);
	lustre_lma_swab(lma);
	rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
				    (uchar_t *)lma, sizeof(*lma));
	if (rc)
		GOTO(out, rc);

	/* configure new osd object */
	obj->oo_parent = parent != 0 ? parent : zapid;
	obj->oo_late_attr_set = 1;
	rc = __osd_sa_xattr_schedule_update(env, obj, oh);
	if (rc)
		GOTO(out, rc);

	/* XXX: oo_lma_flags */
	obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
	if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
		/* no body operations for accounting objects */
		obj->oo_dt.do_body_ops = &osd_body_ops;

	osd_idc_find_and_init(env, osd, obj);

out:
	if (unlikely(rc && dn)) {
		dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
		obj->oo_dn = NULL;
	} else if (!rc) {
		obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
	}
	up_write(&obj->oo_guard);
	RETURN(rc);
}
static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
			       struct thandle *th)
{
	osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
	return osd_declare_attr_set(env, dt, NULL, th);
}
/*
 * Concurrency: @dt is write locked.
 */
static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
		       struct thandle *handle)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;
	struct osd_device *osd = osd_obj2dev(obj);
	uint64_t nlink;
	int rc;

	ENTRY;

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl != NULL);

	oh = container_of(handle, struct osd_thandle, ot_super);

	write_lock(&obj->oo_attr_lock);
	nlink = ++obj->oo_attr.la_nlink;
	write_unlock(&obj->oo_attr_lock);

	rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
			       struct thandle *handle)
{
	osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
	return osd_declare_attr_set(env, dt, NULL, handle);
}
/*
 * Concurrency: @dt is write locked.
 */
static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
		       struct thandle *handle)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;
	struct osd_device *osd = osd_obj2dev(obj);
	uint64_t nlink;
	int rc;

	ENTRY;

	down_read(&obj->oo_guard);

	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl != NULL);

	oh = container_of(handle, struct osd_thandle, ot_super);
	LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));

	write_lock(&obj->oo_attr_lock);
	nlink = --obj->oo_attr.la_nlink;
	write_unlock(&obj->oo_attr_lock);

	rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
			   __u64 start, __u64 end)
{
	struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
	uint64_t txg;

	ENTRY;

	if (osd->od_dt_dev.dd_rdonly)
		RETURN(0);

	txg = osd_db_dirty_txg(osd_dt_obj(dt)->oo_dn->dn_dbuf);
	if (txg) {
		/* the object is dirty or being synced */
		if (osd_object_sync_delay_us < 0)
			txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
		else
			udelay(osd_object_sync_delay_us);
	}

	RETURN(0);
}
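
/*
 * Note on osd_object_sync_delay_us (module parameter declared at the end of
 * this file): with the default of -1 every object sync waits for the dirty
 * txg to commit; a value >= 0 replaces that wait with a bare udelay(), which
 * gives no persistence guarantee and is presumably only meant for
 * benchmarking the sync path.
 */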
static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
{
	return 0;
}

static bool osd_check_stale(struct dt_object *dt)
{
	return false;
}
static const struct dt_object_operations osd_obj_ops = {
	.do_read_lock		= osd_read_lock,
	.do_write_lock		= osd_write_lock,
	.do_read_unlock		= osd_read_unlock,
	.do_write_unlock	= osd_write_unlock,
	.do_write_locked	= osd_write_locked,
	.do_attr_get		= osd_attr_get,
	.do_declare_attr_set	= osd_declare_attr_set,
	.do_attr_set		= osd_attr_set,
	.do_ah_init		= osd_ah_init,
	.do_declare_create	= osd_declare_create,
	.do_create		= osd_create,
	.do_declare_destroy	= osd_declare_destroy,
	.do_destroy		= osd_destroy,
	.do_index_try		= osd_index_try,
	.do_declare_ref_add	= osd_declare_ref_add,
	.do_ref_add		= osd_ref_add,
	.do_declare_ref_del	= osd_declare_ref_del,
	.do_ref_del		= osd_ref_del,
	.do_xattr_get		= osd_xattr_get,
	.do_declare_xattr_set	= osd_declare_xattr_set,
	.do_xattr_set		= osd_xattr_set,
	.do_declare_xattr_del	= osd_declare_xattr_del,
	.do_xattr_del		= osd_xattr_del,
	.do_xattr_list		= osd_xattr_list,
	.do_object_sync		= osd_object_sync,
	.do_invalidate		= osd_invalidate,
	.do_check_stale		= osd_check_stale,
};
static const struct lu_object_operations osd_lu_obj_ops = {
	.loo_object_init	= osd_object_init,
	.loo_object_delete	= osd_object_delete,
	.loo_object_release	= osd_object_release,
	.loo_object_free	= osd_object_free,
	.loo_object_print	= osd_object_print,
	.loo_object_invariant	= osd_object_invariant,
};
static int osd_otable_it_attr_get(const struct lu_env *env,
				  struct dt_object *dt,
				  struct lu_attr *attr)
{
	attr->la_valid = 0;
	return 0;
}

static const struct dt_object_operations osd_obj_otable_it_ops = {
	.do_attr_get	= osd_otable_it_attr_get,
	.do_index_try	= osd_index_try,
};
module_param(osd_object_sync_delay_us, int, 0644);
MODULE_PARM_DESC(osd_object_sync_delay_us,
		 "If zero or larger, delay N usec instead of doing object sync");