4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd-zfs/osd_object.c
34 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35 * Author: Mike Pershin <tappro@whamcloud.com>
36 * Author: Johann Lombardi <johann@whamcloud.com>
39 #define DEBUG_SUBSYSTEM S_OSD
41 #include <libcfs/libcfs.h>
42 #include <obd_support.h>
43 #include <lustre_net.h>
45 #include <obd_class.h>
46 #include <lustre_disk.h>
47 #include <lustre_fid.h>
49 #include "osd_internal.h"
51 #include <sys/dnode.h>
56 #include <sys/spa_impl.h>
57 #include <sys/zfs_znode.h>
58 #include <sys/dmu_tx.h>
59 #include <sys/dmu_objset.h>
60 #include <sys/dsl_prop.h>
61 #include <sys/sa_impl.h>
64 char *osd_obj_tag = "osd_object";
65 static int osd_object_sync_delay_us = -1;
67 static const struct dt_object_operations osd_obj_ops;
68 static const struct lu_object_operations osd_lu_obj_ops;
69 static const struct dt_object_operations osd_obj_otable_it_ops;
72 osd_object_sa_fini(struct osd_object *obj)
75 sa_handle_destroy(obj->oo_sa_hdl);
76 obj->oo_sa_hdl = NULL;
81 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
85 LASSERT(obj->oo_sa_hdl == NULL);
86 LASSERT(obj->oo_dn != NULL);
88 rc = osd_sa_handle_get(obj);
92 /* Cache the xattr object id, valid for the life of the object */
93 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
95 obj->oo_xattr = ZFS_NO_OBJECT;
98 osd_object_sa_fini(obj);
105 * Add object to list of dirty objects in tx handle.
107 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
109 if (!list_empty(&obj->oo_sa_linkage))
112 write_lock(&obj->oo_attr_lock);
113 if (likely(list_empty(&obj->oo_sa_linkage)))
114 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
115 write_unlock(&obj->oo_attr_lock);
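/*
 * Note: the unlocked list_empty() check above is only an optimistic fast
 * path; the decisive check is repeated under oo_attr_lock, so an object
 * appears on the per-transaction ot_sa_list at most once even when
 * several SA updates race on the same object (an observation from the
 * code above, not a guarantee documented elsewhere).
 */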
119 * Release spill block dbuf hold for all dirty SAs.
121 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
123 struct osd_object *obj;
125 while (!list_empty(&oh->ot_sa_list)) {
126 obj = list_entry(oh->ot_sa_list.next,
127 struct osd_object, oo_sa_linkage);
128 write_lock(&obj->oo_attr_lock);
129 list_del_init(&obj->oo_sa_linkage);
130 write_unlock(&obj->oo_attr_lock);
131 if (obj->oo_late_xattr) {
133 * take oo_guard to protect oo_sa_xattr buffer
134 * from concurrent update by osd_xattr_set()
136 LASSERT(oh->ot_assigned != 0);
137 down_write(&obj->oo_guard);
138 if (obj->oo_late_attr_set)
139 __osd_sa_attr_init(env, obj, oh);
140 else if (obj->oo_late_xattr)
141 __osd_sa_xattr_update(env, obj, oh);
142 up_write(&obj->oo_guard);
144 sa_spill_rele(obj->oo_sa_hdl);
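/*
 * Note on the loop above: attribute/xattr updates that were deferred via
 * oo_late_attr_set / oo_late_xattr are applied here under oo_guard, right
 * before the spill-block hold is released, so the deferred updates land
 * in the same transaction (oh->ot_tx) that dirtied the object.
 */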
149 * Update the SA and add the object to the dirty list.
151 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
152 void *buf, uint32_t buflen, struct osd_thandle *oh)
156 LASSERT(obj->oo_sa_hdl != NULL);
157 LASSERT(oh->ot_tx != NULL);
159 rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
160 osd_object_sa_dirty_add(obj, oh);
166 * Bulk update the SA and add the object to the dirty list.
169 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
170 int count, struct osd_thandle *oh)
174 LASSERT(obj->oo_sa_hdl != NULL);
175 LASSERT(oh->ot_tx != NULL);
177 rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
178 osd_object_sa_dirty_add(obj, oh);
184 * Retrieve the attributes of a DMU object
186 static int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
187 struct osd_object *obj, struct lu_attr *la)
189 struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
190 sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
191 struct lustre_mdt_attrs *lma;
197 LASSERT(obj->oo_dn != NULL);
199 la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_BTIME | LA_MODE |
200 LA_TYPE | LA_SIZE | LA_UID | LA_GID | LA_FLAGS |
203 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
204 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
205 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
206 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(o), NULL, osa->btime, 16);
207 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
208 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
209 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
210 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
211 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
212 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
213 LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
215 rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
219 #ifdef ZFS_PROJINHERIT
220 if (o->od_projectused_dn && osa->flags & ZFS_PROJID) {
221 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(o),
226 la->la_projid = osa->projid;
227 la->la_valid |= LA_PROJID;
228 obj->oo_with_projid = 1;
230 la->la_projid = ZFS_DEFAULT_PROJID;
231 la->la_valid &= ~LA_PROJID;
235 la->la_valid &= ~LA_PROJID;
238 la->la_atime = osa->atime[0];
239 la->la_mtime = osa->mtime[0];
240 la->la_ctime = osa->ctime[0];
241 la->la_btime = osa->btime[0];
242 la->la_mode = osa->mode;
243 la->la_uid = osa->uid;
244 la->la_gid = osa->gid;
245 la->la_nlink = osa->nlink;
246 la->la_flags = attrs_zfs2fs(osa->flags);
247 la->la_size = osa->size;
249 /* Try to get extra flags from LMA */
250 lma = (struct lustre_mdt_attrs *)osd_oti_get(env)->oti_buf;
252 buf.lb_len = sizeof(osd_oti_get(env)->oti_buf);
253 down_read(&obj->oo_guard);
254 rc = osd_xattr_get_lma(env, obj, &buf);
256 lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
258 lma_to_lustre_flags(lma->lma_incompat);
259 } else if (rc == -ENODATA ||
260 !(S_ISDIR(la->la_mode) &&
261 dt_object_exists(&obj->oo_dt))) {
264 up_read(&obj->oo_guard);
266 if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
267 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
270 la->la_rdev = osa->rdev;
271 la->la_valid |= LA_RDEV;
278 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
284 rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
288 dbi = (dmu_buf_impl_t *)db;
290 *dnp = DB_DNODE(dbi);
292 LASSERT(*dnp != NULL);
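/*
 * Illustrative note (an assumption based on the calls visible in this
 * file, not an original comment): the dnode returned here is kept valid
 * by the bonus dbuf hold taken via dmu_bonus_hold(), and callers such as
 * osd_object_init()/osd_object_delete() are expected to drop that hold
 * later through osd_dnode_rele().
 */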
298 * Concurrency: no concurrent access is possible that early in the object life-cycle.
301 struct lu_object *osd_object_alloc(const struct lu_env *env,
302 const struct lu_object_header *hdr,
305 struct osd_object *mo;
307 OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
310 struct lu_object_header *h;
311 struct osd_device *o = osd_dev(d);
313 l = &mo->oo_dt.do_lu;
314 if (unlikely(o->od_in_init)) {
321 lu_object_header_init(h);
322 lu_object_init(l, h, d);
323 lu_object_add_top(h, l);
326 dt_object_init(&mo->oo_dt, NULL, d);
327 mo->oo_header = NULL;
330 mo->oo_dt.do_ops = &osd_obj_ops;
331 l->lo_ops = &osd_lu_obj_ops;
332 INIT_LIST_HEAD(&mo->oo_sa_linkage);
333 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
334 init_rwsem(&mo->oo_sem);
335 init_rwsem(&mo->oo_guard);
336 rwlock_init(&mo->oo_attr_lock);
337 mo->oo_destroy = OSD_DESTROY_NONE;
344 static void osd_obj_set_blksize(const struct lu_env *env,
345 struct osd_device *osd, struct osd_object *obj)
347 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
349 dnode_t *dn = obj->oo_dn;
354 LASSERT(!osd_oti_get(env)->oti_in_trans);
356 tx = dmu_tx_create(osd->od_os);
358 CERROR("%s: fail to create tx to set blksize for "DFID"\n",
359 osd->od_svname, PFID(fid));
363 dmu_tx_hold_bonus(tx, dn->dn_object);
364 rc = -dmu_tx_assign(tx, TXG_WAIT);
367 CERROR("%s: fail to assign tx to set blksize for "DFID
368 ": rc = %d\n", osd->od_svname, PFID(fid), rc);
372 down_write(&obj->oo_guard);
373 if (unlikely((1 << dn->dn_datablkshift) >= PAGE_SIZE))
376 blksz = dn->dn_datablksz;
377 if (!is_power_of_2(blksz))
378 blksz = size_roundup_power2(blksz);
380 if (blksz > osd->od_max_blksz)
381 blksz = osd->od_max_blksz;
382 else if (blksz < PAGE_SIZE)
384 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object, blksz, 0, tx);
389 up_write(&obj->oo_guard);
392 if (unlikely(obj->oo_dn->dn_maxblkid > 0))
395 CERROR("%s: fail to set blksize for "DFID": rc = %d\n",
396 osd->od_svname, PFID(fid), rc);
399 CDEBUG(D_INODE, "%s: set blksize as %u for "DFID"\n",
400 osd->od_svname, blksz, PFID(fid));
405 * Concurrency: shouldn't matter.
407 static int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
409 struct osd_device *osd = osd_obj2dev(obj);
410 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
416 rc = osd_object_sa_init(obj, osd);
420 /* cache attrs in object */
421 rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
425 if (likely(!fid_is_acct(fid))) {
426 /* no body operations for accounting objects */
427 obj->oo_dt.do_body_ops = &osd_body_ops;
429 if (S_ISREG(obj->oo_attr.la_mode) &&
430 obj->oo_dn->dn_maxblkid == 0 &&
431 (1 << obj->oo_dn->dn_datablkshift) < PAGE_SIZE &&
432 (fid_is_idif(fid) || fid_is_norm(fid) ||
434 osd->od_is_ost && !osd->od_dt_dev.dd_rdonly)
435 osd_obj_set_blksize(env, osd, obj);
439 * initialize object before marking it existing
441 obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
444 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
449 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
451 struct osd_thread_info *info = osd_oti_get(env);
454 struct lustre_mdt_attrs *lma;
455 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
458 BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
459 lma = (struct lustre_mdt_attrs *)info->oti_buf;
461 buf.lb_len = sizeof(info->oti_buf);
463 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
466 lustre_lma_swab(lma);
467 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
468 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
469 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
470 "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
471 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
474 } else if (unlikely(!lu_fid_eq(rfid, &lma->lma_self_fid))) {
475 CERROR("%s: FID-in-LMA "DFID" does not match the "
476 "object self-fid "DFID"\n",
477 osd_obj2dev(obj)->od_svname,
478 PFID(&lma->lma_self_fid), PFID(rfid));
481 struct osd_device *osd = osd_obj2dev(obj);
483 if (lma->lma_compat & LMAC_STRIPE_INFO &&
485 obj->oo_pfid_in_lma = 1;
486 if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
487 osd->od_remote_parent_dir != ZFS_NO_OBJECT)
488 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
490 } else if (rc == -ENODATA) {
491 /* haven't initialized the LMA xattr yet */
499 * Helper function to retrieve DMU object id from fid for accounting object
501 static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
502 const struct lu_fid *fid)
506 LASSERT(fid_is_acct(fid));
508 switch (fid_oid(fid)) {
510 dn = osd->od_userused_dn;
513 dn = osd->od_groupused_dn;
515 #ifdef ZFS_PROJINHERIT
516 case ACCT_PROJECT_OID:
517 dn = osd->od_projectused_dn;
528 * Concurrency: no concurrent access is possible that early in the object life-cycle.
531 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
532 const struct lu_object_conf *conf)
534 struct osd_object *obj = osd_obj(l);
535 struct osd_device *osd = osd_obj2dev(obj);
536 const struct lu_fid *fid = lu_object_fid(l);
537 struct lustre_scrub *scrub = &osd->od_scrub;
538 struct osd_thread_info *info = osd_oti_get(env);
539 struct luz_direntry *zde = &info->oti_zde;
540 struct osd_idmap_cache *idc;
541 char *name = info->oti_str;
548 LASSERT(osd_invariant(obj));
550 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
551 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
552 l->lo_header->loh_attr |= LOHA_EXISTS;
557 if (conf && conf->loc_flags & LOC_F_NEW)
560 if (unlikely(fid_is_acct(fid))) {
561 obj->oo_dn = osd_quota_fid2dmu(osd, fid);
563 obj->oo_dt.do_index_ops = &osd_acct_index_ops;
564 l->lo_header->loh_attr |= LOHA_EXISTS;
570 idc = osd_idc_find(env, osd, fid);
571 if (idc && !idc->oic_remote && idc->oic_dnode != ZFS_NO_OBJECT) {
572 oid = idc->oic_dnode;
577 if (!list_empty(&osd->od_scrub.os_inconsistent_items))
578 rc = osd_oii_lookup(osd, fid, &oid);
581 rc = osd_fid_lookup(env, osd, fid, &oid);
584 if (likely(!(fid_is_norm(fid) || fid_is_igif(fid)) ||
585 fid_is_on_ost(env, osd, fid) ||
586 !zfs_test_bit(osd_oi_fid2idx(osd, fid),
587 scrub->os_file.sf_oi_bitmap)))
598 LASSERT(obj->oo_dn == NULL);
600 rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
601 /* EEXIST will be returned if object is being deleted in ZFS */
606 CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
607 osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
611 rc = osd_object_init0(env, obj);
615 if (unlikely(obj->oo_header))
618 rc = osd_check_lma(env, obj);
622 osd_scrub_refresh_mapping(env, osd, fid, oid, DTO_INDEX_DELETE, true,
626 /* We still have a chance to get a valid dnode: for an object that is
627 * referenced by a remote name entry, the object on the local MDT will be
628 * linked under the dir /REMOTE_PARENT_DIR with its FID string as the name.
630 * During the OI scrub, if we cannot find the OI mapping, we may still
631 * have a chance to map the FID to the local OID via a lookup in the dir
632 * /REMOTE_PARENT_DIR. */
633 if (!remote && !fid_is_on_ost(env, osd, fid)) {
634 osd_fid2str(name, fid, sizeof(info->oti_str));
635 rc = osd_zap_lookup(osd, osd->od_remote_parent_dir,
636 NULL, name, 8, 3, (void *)zde);
638 oid = zde->lzd_reg.zde_dnode;
639 osd_dnode_rele(obj->oo_dn);
646 /* The case where someone has already triggered the OI scrub. */
647 if (scrub->os_running) {
651 lu_object_set_agent_entry(l);
652 osd_oii_insert(env, osd, fid, oid, false);
660 /* The case where triggering the OI scrub automatically is NOT allowed. */
661 if (osd->od_auto_scrub_interval == AS_NEVER)
664 /* It is up to us to trigger the OI scrub. */
665 rc1 = osd_scrub_start(env, osd, SS_CLEAR_DRYRUN |
666 SS_CLEAR_FAILOUT | SS_AUTO_FULL);
667 CDEBUG_LIMIT(D_LFSCK | D_CONSOLE | D_WARNING,
668 "%s: trigger OI scrub by RPC for "DFID"/%#llx: rc = %d\n",
669 osd_name(osd), PFID(fid), oid, rc1);
673 lu_object_set_agent_entry(l);
675 osd_oii_insert(env, osd, fid, oid, false);
690 * Concurrency: no concurrent access is possible that late in the object life-cycle.
693 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
695 struct osd_object *obj = osd_obj(l);
696 struct lu_object_header *h = obj->oo_header;
698 LASSERT(osd_invariant(obj));
700 dt_object_fini(&obj->oo_dt);
701 /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
702 OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
704 lu_object_header_free(h);
708 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
712 LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
714 /* the object is supposed to be exclusively locked by
715 * the caller (osd_destroy()), while the transaction
716 * (oh) is per-thread and not shared */
717 if (likely(list_empty(&obj->oo_unlinked_linkage))) {
718 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
725 /* Default to max data size covered by a level-1 indirect block */
726 static unsigned long osd_sync_destroy_max_size =
727 1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
728 module_param(osd_sync_destroy_max_size, ulong, 0444);
729 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
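/*
 * Rough arithmetic behind the default above, assuming the usual ZFS
 * constants (DN_MAX_INDBLKSHIFT = 17, SPA_BLKPTRSHIFT = 7,
 * SPA_MAXBLOCKSHIFT = 24): a level-1 indirect block holds
 * 1 << (17 - 7) = 1024 block pointers, each covering up to 16 MiB of
 * data, so the default threshold works out to about 16 GiB; larger
 * objects fall back to asynchronous destroy.
 */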
732 osd_object_set_destroy_type(struct osd_object *obj)
735 * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
736 * only once and use it consistently thereafter.
738 down_write(&obj->oo_guard);
739 if (obj->oo_destroy == OSD_DESTROY_NONE) {
740 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
741 obj->oo_destroy = OSD_DESTROY_SYNC;
742 else /* Larger objects are destroyed asynchronously */
743 obj->oo_destroy = OSD_DESTROY_ASYNC;
745 up_write(&obj->oo_guard);
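/*
 * As osd_destroy() below shows, OSD_DESTROY_SYNC frees the dnode within
 * the current transaction, while OSD_DESTROY_ASYNC only records the
 * object in the od_unlinked ZAP so it can be freed later, presumably to
 * bound the amount of free work done inside a single TX for big objects.
 */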
748 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
751 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
752 struct osd_object *obj = osd_dt_obj(dt);
753 struct osd_device *osd = osd_obj2dev(obj);
754 struct osd_thandle *oh;
761 LASSERT(dt_object_exists(dt));
763 oh = container_of(th, struct osd_thandle, ot_super);
764 LASSERT(oh->ot_tx != NULL);
766 dmu_tx_mark_netfree(oh->ot_tx);
768 /* declare that we'll remove object from fid-dnode mapping */
769 zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
770 osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
772 osd_declare_xattrs_destroy(env, obj, oh);
775 rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
776 obj->oo_attr.la_gid, obj->oo_attr.la_projid,
777 -1, oh, NULL, OSD_QID_INODE);
781 /* data to be truncated */
782 rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
783 obj->oo_attr.la_gid, obj->oo_attr.la_projid,
784 0, oh, NULL, OSD_QID_BLK);
788 osd_object_set_destroy_type(obj);
789 if (obj->oo_destroy == OSD_DESTROY_SYNC)
790 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
793 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
794 osd->od_unlinked, TRUE, NULL);
796 /* remove agent entry (if any) from remote parent */
797 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
798 osd_tx_hold_zap(oh->ot_tx, osd->od_remote_parent_dir,
801 /* will help to find FID->ino when this object is being
802 * added to PENDING/ */
803 osd_idc_find_and_init(env, osd, obj);
808 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
811 struct osd_thread_info *info = osd_oti_get(env);
812 char *buf = info->oti_str;
813 struct osd_object *obj = osd_dt_obj(dt);
814 struct osd_device *osd = osd_obj2dev(obj);
815 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
816 struct osd_thandle *oh;
822 down_write(&obj->oo_guard);
824 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
825 GOTO(out, rc = -ENOENT);
827 LASSERT(obj->oo_dn != NULL);
829 oh = container_of(th, struct osd_thandle, ot_super);
831 LASSERT(oh->ot_tx != NULL);
833 /* remove obj ref from index dir (it depends) */
834 zapid = osd_get_name_n_idx(env, osd, fid, buf,
835 sizeof(info->oti_str), &zdn);
836 rc = osd_xattrs_destroy(env, obj, oh);
838 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
839 osd->od_svname, buf, rc);
843 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
844 rc = osd_delete_from_remote_parent(env, osd, obj, oh, true);
849 oid = obj->oo_dn->dn_object;
850 if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
851 /* this may happen if the destroy wasn't declared
852 * e.g. when the object is created and then destroyed
853 * in the same transaction - we don't need additional
854 * space for destroy specifically */
855 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
856 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
858 CERROR("%s: failed to free %s/%#llx: rc = %d\n",
859 osd->od_svname, buf, oid, rc);
860 } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
861 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
863 CERROR("%s: failed to free %s/%#llx: rc = %d\n",
864 osd->od_svname, buf, oid, rc);
865 } else { /* asynchronous destroy */
866 char *key = info->oti_key;
868 rc = osd_object_unlinked_add(obj, oh);
872 snprintf(key, sizeof(info->oti_key), "%llx", oid);
873 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
874 osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
876 CERROR("%s: zap_add_int() failed %s/%#llx: rc = %d\n",
877 osd->od_svname, buf, oid, rc);
880 /* Remove the OI mapping after the destroy to handle the race with
881 * OI scrub that may insert missed OI mapping during the interval. */
882 rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
883 if (unlikely(rc == -ENOENT))
886 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
887 osd->od_svname, buf, rc);
892 /* not needed in the cache anymore */
893 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
895 obj->oo_destroyed = 1;
896 up_write(&obj->oo_guard);
900 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
902 struct osd_object *obj = osd_obj(l);
903 const struct lu_fid *fid = lu_object_fid(l);
906 if (likely(!fid_is_acct(fid))) {
907 osd_object_sa_fini(obj);
908 if (obj->oo_sa_xattr) {
909 nvlist_free(obj->oo_sa_xattr);
910 obj->oo_sa_xattr = NULL;
912 osd_dnode_rele(obj->oo_dn);
913 list_del(&obj->oo_sa_linkage);
920 * Concurrency: ->loo_object_release() is called under site spin-lock.
922 static void osd_object_release(const struct lu_env *env,
928 * Concurrency: shouldn't matter.
930 static int osd_object_print(const struct lu_env *env, void *cookie,
931 lu_printer_t p, const struct lu_object *l)
933 struct osd_object *o = osd_obj(l);
935 return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
938 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
941 struct osd_object *obj = osd_dt_obj(dt);
943 LASSERT(osd_invariant(obj));
945 down_read_nested(&obj->oo_sem, role);
948 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
951 struct osd_object *obj = osd_dt_obj(dt);
953 LASSERT(osd_invariant(obj));
955 down_write_nested(&obj->oo_sem, role);
958 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
960 struct osd_object *obj = osd_dt_obj(dt);
962 LASSERT(osd_invariant(obj));
963 up_read(&obj->oo_sem);
966 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
968 struct osd_object *obj = osd_dt_obj(dt);
970 LASSERT(osd_invariant(obj));
971 up_write(&obj->oo_sem);
974 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
976 struct osd_object *obj = osd_dt_obj(dt);
979 LASSERT(osd_invariant(obj));
981 if (down_write_trylock(&obj->oo_sem)) {
983 up_write(&obj->oo_sem);
988 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
989 struct lu_attr *attr)
991 struct osd_object *obj = osd_dt_obj(dt);
992 struct osd_device *osd = osd_obj2dev(obj);
997 down_read(&obj->oo_guard);
999 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1000 GOTO(out, rc = -ENOENT);
1002 if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
1005 LASSERT(osd_invariant(obj));
1006 LASSERT(obj->oo_dn);
1008 read_lock(&obj->oo_attr_lock);
1009 *attr = obj->oo_attr;
1010 if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
1011 attr->la_valid |= LA_FLAGS;
1012 attr->la_flags |= LUSTRE_ORPHAN_FL;
1014 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
1015 attr->la_valid |= LA_FLAGS;
1016 attr->la_flags |= LUSTRE_ENCRYPT_FL;
1018 read_unlock(&obj->oo_attr_lock);
1019 if (attr->la_valid & LA_FLAGS && attr->la_flags & LUSTRE_ORPHAN_FL)
1020 CDEBUG(D_INFO, "%s: set orphan flag on "DFID" (%#llx/%#x)\n",
1021 osd_obj2dev(obj)->od_svname,
1022 PFID(lu_object_fid(&dt->do_lu)),
1023 attr->la_valid, obj->oo_lma_flags);
1025 /* with ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
1026 * from within sa_object_size() can block on a mutex, so
1027 * we can't call sa_object_size() while holding the rwlock */
1028 sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
1029 /* we do not control the size of indices, so always calculate
1030 * it from the number of blocks reported by the DMU */
1031 if (S_ISDIR(attr->la_mode)) {
1032 attr->la_size = 512 * blocks;
1033 rc = -zap_count(osd->od_os, obj->oo_dn->dn_object,
1034 &attr->la_dirent_count);
1036 /* Block size may not be set; suggest maximal I/O transfers. */
1038 blksize = osd_spa_maxblocksize(
1039 dmu_objset_spa(osd_obj2dev(obj)->od_os));
1041 attr->la_blksize = blksize;
1042 attr->la_blocks = blocks;
1043 attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
1046 up_read(&obj->oo_guard);
1050 /* Simple wrapper on top of the qsd API which implements quota transfer for
1051 * osd setattr needs. As a reminder, only the root user can change the
1052 * ownership of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
1053 static inline int qsd_transfer(const struct lu_env *env,
1054 struct qsd_instance *qsd,
1055 struct lquota_trans *trans, int qtype,
1056 __u64 orig_id, __u64 new_id, __u64 bspace,
1057 struct lquota_id_info *qi, bool ignore_edquot)
1061 if (unlikely(qsd == NULL))
1064 LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
1065 qi->lqi_type = qtype;
1067 /* inode accounting */
1068 qi->lqi_is_blk = false;
1070 /* one more inode for the new owner ... */
1071 qi->lqi_id.qid_uid = new_id;
1073 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1074 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1079 /* and one less inode for the current id */
1080 qi->lqi_id.qid_uid = orig_id;
1082 /* can't get EDQUOT when reducing usage */
1083 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1084 if (rc == -EINPROGRESS)
1089 /* block accounting */
1090 qi->lqi_is_blk = true;
1092 /* more blocks for the new owner ... */
1093 qi->lqi_id.qid_uid = new_id;
1094 qi->lqi_space = bspace;
1095 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1096 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1101 /* and finally less blocks for the current owner */
1102 qi->lqi_id.qid_uid = orig_id;
1103 qi->lqi_space = -bspace;
1104 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1105 /* can't get EDQUOT when reducing usage */
1106 if (rc == -EINPROGRESS)
1111 static int osd_declare_attr_set(const struct lu_env *env,
1112 struct dt_object *dt,
1113 const struct lu_attr *attr,
1114 struct thandle *handle)
1116 struct osd_thread_info *info = osd_oti_get(env);
1117 struct osd_object *obj = osd_dt_obj(dt);
1118 struct osd_device *osd = osd_obj2dev(obj);
1120 struct osd_thandle *oh;
1128 LASSERT(handle != NULL);
1129 LASSERT(osd_invariant(obj));
1131 oh = container_of(handle, struct osd_thandle, ot_super);
1133 down_read(&obj->oo_guard);
1134 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1137 LASSERT(obj->oo_sa_hdl != NULL);
1138 LASSERT(oh->ot_tx != NULL);
1139 /* regular attributes are part of the bonus buffer */
1140 /* let's check whether this object is already part of the transaction */
1143 for (txh = list_head(&oh->ot_tx->tx_holds); txh;
1144 txh = list_next(&oh->ot_tx->tx_holds, txh)) {
1145 if (txh->txh_dnode == NULL)
1147 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
1149 /* this object is already part of the transaction;
1150 * we don't need to declare the bonus again */
1155 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
1156 if (oh->ot_tx->tx_err != 0)
1157 GOTO(out, rc = -oh->ot_tx->tx_err);
1159 if (attr && attr->la_valid & LA_FLAGS) {
1160 /* LMA is usually a part of bonus, no need to declare
1164 if (attr && (attr->la_valid & (LA_UID | LA_GID | LA_PROJID))) {
1165 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
1166 bspace = toqb(bspace * 512);
1169 "%s: enforce quota on UID %u, GID %u, the quota space is %lld (%u)\n",
1171 attr->la_uid, attr->la_gid, bspace, blksize);
1174 if (attr && attr->la_valid & LA_UID) {
1175 /* quota enforcement for user */
1176 if (attr->la_uid != obj->oo_attr.la_uid) {
1177 rc = qsd_transfer(env, osd_def_qsd(osd),
1178 &oh->ot_quota_trans, USRQUOTA,
1179 obj->oo_attr.la_uid, attr->la_uid,
1180 bspace, &info->oti_qi, true);
1185 if (attr && attr->la_valid & LA_GID) {
1186 /* quota enforcement for group */
1187 if (attr->la_gid != obj->oo_attr.la_gid) {
1188 rc = qsd_transfer(env, osd_def_qsd(osd),
1189 &oh->ot_quota_trans, GRPQUOTA,
1190 obj->oo_attr.la_gid, attr->la_gid,
1191 bspace, &info->oti_qi,
1193 LUSTRE_SET_SYNC_FL));
1198 #ifdef ZFS_PROJINHERIT
1199 if (attr && attr->la_valid & LA_PROJID) {
1200 /* quota enforcement for project */
1201 if (attr->la_projid != obj->oo_attr.la_projid) {
1202 if (!osd->od_projectused_dn)
1203 GOTO(out, rc = -EOPNOTSUPP);
1205 /* Usually, if project quota is upgradable for the
1206 * device, then the upgrade will be done before or at
1207 * mount time. So when we get here, this object should
1208 * already have the project ID attribute (which is
1209 * zero by default). Otherwise, something went wrong
1210 * during the former upgrade; return a failure.
1213 * Please note that, unlike other attributes, you
1214 * can NOT simply set the project ID attribute in
1215 * such a case, because adding (NOT changing) the
1216 * project ID attribute requires changing the object's
1217 * attribute layout to match the zfs backend quota accounting
1219 if (unlikely(!obj->oo_with_projid))
1220 GOTO(out, rc = -ENXIO);
1222 rc = qsd_transfer(env, osd_def_qsd(osd),
1223 &oh->ot_quota_trans, PRJQUOTA,
1224 obj->oo_attr.la_projid,
1225 attr->la_projid, bspace,
1226 &info->oti_qi, true);
1233 up_read(&obj->oo_guard);
1238 * Set the attributes of an object
1240 * The transaction passed to this routine must have
1241 * dmu_tx_hold_bonus(tx, oid) called and then assigned
1242 * to a transaction group.
1244 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
1245 const struct lu_attr *la, struct thandle *handle)
1247 struct osd_thread_info *info = osd_oti_get(env);
1248 sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1249 struct osd_object *obj = osd_dt_obj(dt);
1250 struct osd_device *osd = osd_obj2dev(obj);
1251 struct osd_thandle *oh;
1252 struct osa_attr *osa = &info->oti_osa;
1253 __u64 valid = la->la_valid;
1259 down_read(&obj->oo_guard);
1260 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1261 GOTO(out, rc = -ENOENT);
1263 LASSERT(handle != NULL);
1264 LASSERT(osd_invariant(obj));
1265 LASSERT(obj->oo_sa_hdl);
1267 oh = container_of(handle, struct osd_thandle, ot_super);
1268 /* Assert that the transaction has been assigned to a
1269 transaction group. */
1270 LASSERT(oh->ot_tx->tx_txg != 0);
1272 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) && !osd->od_is_ost) {
1273 struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
1274 char *buf = info->oti_str;
1275 dnode_t *zdn = NULL;
1278 zapid = osd_get_name_n_idx(env, osd, lu_object_fid(&dt->do_lu),
1279 buf, sizeof(info->oti_str), &zdn);
1280 rc = osd_zap_lookup(osd, zapid, zdn, buf, 8,
1281 sizeof(*zde) / 8, zde);
1283 zde->zde_dnode -= 1;
1284 rc = -zap_update(osd->od_os, zapid, buf, 8,
1285 sizeof(*zde) / 8, zde, oh->ot_tx);
1292 /* Only allow setting the size for regular files */
1293 if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
1294 valid &= ~(LA_SIZE | LA_BLOCKS);
1296 if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
1299 if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
1302 if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
1308 if (valid & LA_FLAGS) {
1309 struct lustre_mdt_attrs *lma;
1313 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
1314 LASSERT(!obj->oo_pfid_in_lma);
1315 BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
1316 lma = (struct lustre_mdt_attrs *)&info->oti_buf;
1318 buf.lb_len = sizeof(info->oti_buf);
1320 /* Please do NOT call osd_xattr_get() directly, that
1321 * will cause recursive down_read() on oo_guard. */
1322 rc = osd_xattr_get_internal(env, obj, &buf,
1323 XATTR_NAME_LMA, &size);
1324 if (!rc && unlikely(size < sizeof(*lma))) {
1328 le32_to_cpu(lma->lma_incompat);
1329 lma->lma_incompat |=
1330 lustre_to_lma_flags(la->la_flags);
1332 cpu_to_le32(lma->lma_incompat);
1334 buf.lb_len = sizeof(*lma);
1335 rc = osd_xattr_set_internal(env, obj, &buf,
1341 CWARN("%s: failed to set LMA flags: rc = %d\n",
1342 osd->od_svname, rc);
1346 la->la_flags & LUSTRE_LMA_FL_MASKS;
1351 write_lock(&obj->oo_attr_lock);
1354 if (valid & LA_PROJID) {
1355 #ifdef ZFS_PROJINHERIT
1356 if (osd->od_projectused_dn) {
1357 LASSERT(obj->oo_with_projid);
1359 osa->projid = obj->oo_attr.la_projid = la->la_projid;
1360 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1364 valid &= ~LA_PROJID;
1367 if (valid & LA_ATIME) {
1368 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1369 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1372 if (valid & LA_MTIME) {
1373 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1374 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1377 if (valid & LA_CTIME) {
1378 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1379 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1382 if (valid & LA_MODE) {
1383 /* mode is stored along with type, so read it first */
1384 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1385 (la->la_mode & ~S_IFMT);
1386 osa->mode = obj->oo_attr.la_mode;
1387 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1390 if (valid & LA_SIZE) {
1391 osa->size = obj->oo_attr.la_size = la->la_size;
1392 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1395 if (valid & LA_NLINK) {
1396 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1397 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1400 if (valid & LA_RDEV) {
1401 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1402 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1405 if (valid & LA_FLAGS) {
1406 osa->flags = attrs_fs2zfs(la->la_flags);
1407 /* many flags are not supported by zfs, so ensure a good cached copy of the effective flags */
1409 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1410 #ifdef ZFS_PROJINHERIT
1411 if (obj->oo_with_projid && osd->od_projectused_dn)
1412 osa->flags |= ZFS_PROJID;
1414 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1417 if (valid & LA_UID) {
1418 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1419 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1422 if (valid & LA_GID) {
1423 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1424 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1427 obj->oo_attr.la_valid |= valid;
1428 write_unlock(&obj->oo_attr_lock);
1430 LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1431 rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1434 up_read(&obj->oo_guard);
1441 * XXX temporary solution.
1444 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1445 struct dt_object *parent, struct dt_object *child,
1450 ah->dah_parent = parent;
1451 ah->dah_mode = child_mode;
1453 if (parent != NULL && !dt_object_remote(parent)) {
1454 /* will help to find FID->ino at dt_insert("..") */
1455 struct osd_object *pobj = osd_dt_obj(parent);
1457 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1461 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1462 struct lu_attr *attr,
1463 struct dt_allocation_hint *hint,
1464 struct dt_object_format *dof,
1465 struct thandle *handle)
1467 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1468 struct osd_object *obj = osd_dt_obj(dt);
1469 struct osd_device *osd = osd_obj2dev(obj);
1470 struct osd_thandle *oh;
1478 switch (dof->dof_type) {
1482 if (obj->oo_dt.do_body_ops == NULL)
1483 obj->oo_dt.do_body_ops = &osd_body_ops;
1489 LASSERT(handle != NULL);
1490 oh = container_of(handle, struct osd_thandle, ot_super);
1491 LASSERT(oh->ot_tx != NULL);
1493 /* this is the minimum set of EAs on every Lustre object */
1494 obj->oo_ea_in_bonus = OSD_BASE_EA_IN_BONUS;
1495 /* reserve 32 bytes for extra stuff like ACLs */
1496 dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1498 switch (dof->dof_type) {
1500 dt->do_index_ops = &osd_dir_ops;
1503 /* for zap create */
1504 dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1505 dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1510 /* first, we'll create a new object */
1511 dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1519 /* and we'll add it to some mapping */
1520 zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1521 osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1523 /* will help to find FID->ino mapping at dt_insert() */
1524 osd_idc_find_and_init(env, osd, obj);
1526 rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid,
1527 attr->la_projid, 1, oh, NULL, OSD_QID_INODE);
1532 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1533 struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
1534 struct lu_attr *la, uint64_t parent,
1537 sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1538 struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1540 inode_timespec_t now;
1543 char *dxattr = NULL;
1549 gen = dmu_tx_get_txg(tx);
1551 ZFS_TIME_ENCODE(&now, osa->btime);
1553 osa->atime[0] = la->la_atime;
1554 osa->ctime[0] = la->la_ctime;
1555 osa->mtime[0] = la->la_mtime;
1556 osa->mode = la->la_mode;
1557 osa->uid = la->la_uid;
1558 osa->gid = la->la_gid;
1559 osa->rdev = la->la_rdev;
1560 osa->nlink = la->la_nlink;
1561 if (la->la_valid & LA_FLAGS)
1562 osa->flags = attrs_fs2zfs(la->la_flags);
1565 osa->size = la->la_size;
1566 #ifdef ZFS_PROJINHERIT
1567 if (osd->od_projectused_dn) {
1568 if (la->la_valid & LA_PROJID)
1569 osa->projid = la->la_projid;
1571 osa->projid = ZFS_DEFAULT_PROJID;
1572 osa->flags |= ZFS_PROJID;
1574 obj->oo_with_projid = 1;
1576 osa->flags &= ~ZFS_PROJID;
1581 * we need to create all SA below upon object create.
1583 * XXX The attribute order matters since the accounting callback relies
1584 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1585 * look up the UID/GID/PROJID attributes. Moreover, the callback does
1586 * not seem to support the spill block.
1587 * We define attributes in the same order as SA_*_OFFSET in order to
1588 * work around the problem. See ORI-610.
1591 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1592 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1593 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1594 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1595 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1596 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1597 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1598 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1599 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1600 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1601 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, osa->btime, 16);
1602 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1603 #ifdef ZFS_PROJINHERIT
1604 if (osd->od_projectused_dn)
1605 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1608 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1609 LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1612 rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
1615 dxattr = osd_zio_buf_alloc(sa_size);
1618 rc = -nvlist_pack(xattr, &dxattr, &sa_size,
1619 NV_ENCODE_XDR, KM_SLEEP);
1622 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1623 NULL, dxattr, sa_size);
1626 rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1628 osd_zio_buf_free(dxattr, sa_size);
1633 int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1634 uint64_t oid, dnode_t **dnp)
1639 /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1640 for (txh = list_tail(&tx->tx_holds); txh;
1641 txh = list_prev(&tx->tx_holds, txh)) {
1642 dnode_t *dn = txh->txh_dnode;
1647 if (dn->dn_object != oid)
1651 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1652 if (dn->dn_bonus == NULL)
1653 dbuf_create_bonus(dn);
1654 rw_exit(&dn->dn_struct_rwlock);
1658 LASSERT(dn->dn_handle);
1660 if (zfs_refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1661 zfs_refcount_add(&dn->dn_holds, osd_obj_tag);
1662 atomic_inc_32(&dn->dn_dbufs_count);
1666 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1670 if (unlikely(*dnp == NULL))
1671 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1676 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
1677 int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
1681 if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
1682 dnsize = DNODE_MIN_SIZE;
1684 if (DN_BONUS_SIZE(dnsize) >= ea_in_bonus + 32)
1687 } while (dnsize < DNODE_MAX_SIZE);
1688 if (dnsize > DNODE_MAX_SIZE)
1689 dnsize = DNODE_MAX_SIZE;
1690 } else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
1692 } else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
1694 } else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
1696 } else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
1698 } else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
1701 dnsize = DNODE_MIN_SIZE;
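/*
 * Sketch of the ZFS_DNSIZE_AUTO loop above, assuming the stock ZFS
 * dnode layout where DN_BONUS_SIZE(dnsize) is the dnode size minus the
 * 64-byte dnode core and one 128-byte spill block pointer: a 512-byte
 * dnode leaves 320 bytes of bonus, a 1 KiB dnode 832 bytes, and so on;
 * the size is doubled until the bonus area can hold ea_in_bonus plus
 * the 32-byte reserve, so e.g. ~500 bytes of EAs would select 1 KiB.
 */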
1708 * The transaction passed to this routine must have
1709 * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1710 * to a transaction group.
1712 int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
1713 struct osd_object *obj, const struct lu_fid *fid,
1714 dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1716 dmu_object_type_t type = DMU_OT_PLAIN_FILE_CONTENTS;
1720 /* Use DMU_OTN_UINT8_METADATA for local objects so their data blocks
1721 * would get an additional ditto copy */
1722 if (unlikely(S_ISREG(la->la_mode) &&
1723 fid_seq_is_local_file(fid_seq(fid))))
1724 type = DMU_OTN_UINT8_METADATA;
1726 /* Create a new DMU object using the default dnode size. */
1728 size = obj->oo_ea_in_bonus;
1730 size = OSD_BASE_EA_IN_BONUS;
1731 oid = osd_dmu_object_alloc(osd->od_os, type, 0,
1732 osd_find_dnsize(osd, size), tx);
1734 LASSERT(la->la_valid & LA_MODE);
1738 return osd_find_new_dnode(env, tx, oid, dnp);
1742 * The transaction passed to this routine must have
1743 * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1744 * to a transaction group.
1746 * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1747 * This is fine for directories today, because storing the FID in the dirent
1748 * will also require a FAT ZAP. If there is a new type of micro ZAP created
1749 * then we might need to re-evaluate the use of this flag and instead do
1750 * a conversion from the different internal ZAP hash formats being used. */
1751 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1752 dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1753 unsigned dnsize, zap_flags_t flags)
1757 /* Assert that the transaction has been assigned to a
1758 transaction group. */
1759 LASSERT(tx->tx_txg != 0);
1762 oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1763 DMU_OT_DIRECTORY_CONTENTS,
1764 14, /* == ZFS fzap_default_blockshift */
1765 DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1771 return osd_find_new_dnode(env, tx, oid, dnp);
1774 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1775 struct lu_attr *la, struct osd_thandle *oh)
1777 struct osd_device *osd = osd_obj2dev(obj);
1781 /* Index files should be created as regular files in order not to confuse
1782 * ZPL, which could interpret them as directories.
1783 * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1785 LASSERT(S_ISREG(la->la_mode));
1786 rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1787 osd_find_dnsize(osd, obj->oo_ea_in_bonus), ZAP_FLAG_UINT64_KEY);
1793 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1794 struct lu_attr *la, struct osd_thandle *oh)
1796 struct osd_device *osd = osd_obj2dev(obj);
1800 LASSERT(S_ISDIR(la->la_mode));
1801 rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1802 osd_find_dnsize(osd, obj->oo_ea_in_bonus), 0);
1808 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1809 struct lu_attr *la, struct osd_thandle *oh)
1811 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1812 struct osd_device *osd = osd_obj2dev(obj);
1816 LASSERT(S_ISREG(la->la_mode));
1817 rc = __osd_object_create(env, osd, obj, fid, &dn, oh->ot_tx, la);
1821 if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid))) {
1822 /* The minimum block size must be at least page size otherwise
1823 * it will break the assumption in tgt_thread_big_cache where
1824 * the array size is PTLRPC_MAX_BRW_PAGES. It will also affect
1825 * RDMA due to subpage transfer size */
1826 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1827 PAGE_SIZE, 0, oh->ot_tx);
1829 CERROR("%s: can't change blocksize: %d\n",
1830 osd->od_svname, rc);
1833 } else if ((fid_is_llog(fid))) {
1834 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1835 LLOG_MIN_CHUNK_SIZE, 0, oh->ot_tx);
1837 CERROR("%s: can't change blocksize: %d\n",
1838 osd->od_svname, rc);
1846 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1847 struct lu_attr *la, struct osd_thandle *oh)
1852 LASSERT(S_ISLNK(la->la_mode));
1853 rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1854 lu_object_fid(&obj->oo_dt.do_lu),
1855 &dn, oh->ot_tx, la);
1861 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1862 struct lu_attr *la, struct osd_thandle *oh)
1867 if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1868 la->la_valid |= LA_RDEV;
1870 rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1871 lu_object_fid(&obj->oo_dt.do_lu),
1872 &dn, oh->ot_tx, la);
1878 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1879 struct osd_object *obj,
1881 struct osd_thandle *oh);
1883 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1885 osd_obj_type_f result;
1911 * Concurrency: @dt is write locked.
1913 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1914 struct lu_attr *attr, struct dt_allocation_hint *hint,
1915 struct dt_object_format *dof, struct thandle *th)
1917 struct osd_thread_info *info = osd_oti_get(env);
1918 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1919 struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
1920 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1921 struct osd_object *obj = osd_dt_obj(dt);
1922 struct osd_device *osd = osd_obj2dev(obj);
1923 char *buf = info->oti_str;
1924 struct osd_thandle *oh;
1925 dnode_t *dn = NULL, *zdn = NULL;
1926 uint64_t zapid, parent = 0;
1932 LASSERT(!fid_is_acct(fid));
1934 /* concurrent create declarations should not see
1935 * the object in an inconsistent state (db, attr, etc.);
1936 * in regular cases acquisition should be cheap */
1937 down_write(&obj->oo_guard);
1939 if (unlikely(dt_object_exists(dt)))
1940 GOTO(out, rc = -EEXIST);
1942 LASSERT(osd_invariant(obj));
1943 LASSERT(dof != NULL);
1945 LASSERT(th != NULL);
1946 oh = container_of(th, struct osd_thandle, ot_super);
1948 LASSERT(obj->oo_dn == NULL);
1950 /* to follow ZFS on-disk format we need
1951 * to initialize parent dnode properly */
1952 if (hint != NULL && hint->dah_parent != NULL &&
1953 !dt_object_remote(hint->dah_parent))
1954 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1956 /* we may fix some attributes; better not to modify the source attr */
1957 obj->oo_attr = *attr;
1958 obj->oo_attr.la_size = 0;
1959 obj->oo_attr.la_nlink = 0;
1960 obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1962 #ifdef ZFS_PROJINHERIT
1963 if (osd->od_projectused_dn) {
1964 if (!(obj->oo_attr.la_valid & LA_PROJID))
1965 obj->oo_attr.la_projid = ZFS_DEFAULT_PROJID;
1966 obj->oo_with_projid = 1;
1970 dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1978 zde->zde_dnode = dn->dn_object;
1979 zde->zde_type = S_DT(attr->la_mode & S_IFMT);
1981 zapid = osd_get_name_n_idx(env, osd, fid, buf,
1982 sizeof(info->oti_str), &zdn);
1983 if (CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY) ||
1984 (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_NO_ENTRY)))
1987 if (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
1990 rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
1996 /* Now add in all of the "SA" attributes */
1997 rc = osd_sa_handle_get(obj);
2001 rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
2005 /* initialize LMA */
2006 if (fid_is_idif(fid) || (fid_is_norm(fid) && osd->od_is_ost))
2007 compat |= LMAC_FID_ON_OST;
2008 lustre_lma_init(lma, fid, compat, 0);
2009 lustre_lma_swab(lma);
2010 rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
2011 (uchar_t *)lma, sizeof(*lma));
2015 /* configure new osd object */
2016 obj->oo_parent = parent != 0 ? parent : zapid;
2017 obj->oo_late_attr_set = 1;
2018 rc = __osd_sa_xattr_schedule_update(env, obj, oh);
2022 /* XXX: oo_lma_flags */
2023 obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
2024 if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
2025 /* no body operations for accounting objects */
2026 obj->oo_dt.do_body_ops = &osd_body_ops;
2028 osd_idc_find_and_init(env, osd, obj);
2031 if (unlikely(rc && dn)) {
2032 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
2036 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
2038 up_write(&obj->oo_guard);
2042 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
2045 osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2046 return osd_declare_attr_set(env, dt, NULL, th);
2050 * Concurrency: @dt is write locked.
2052 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
2053 struct thandle *handle)
2055 struct osd_object *obj = osd_dt_obj(dt);
2056 struct osd_thandle *oh;
2057 struct osd_device *osd = osd_obj2dev(obj);
2063 down_read(&obj->oo_guard);
2064 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2065 GOTO(out, rc = -ENOENT);
2067 LASSERT(osd_invariant(obj));
2068 LASSERT(obj->oo_sa_hdl != NULL);
2070 oh = container_of(handle, struct osd_thandle, ot_super);
2072 write_lock(&obj->oo_attr_lock);
2073 nlink = ++obj->oo_attr.la_nlink;
2074 write_unlock(&obj->oo_attr_lock);
2076 rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2079 up_read(&obj->oo_guard);
2083 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
2084 struct thandle *handle)
2086 osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2087 return osd_declare_attr_set(env, dt, NULL, handle);
2091 * Concurrency: @dt is write locked.
2093 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
2094 struct thandle *handle)
2096 struct osd_object *obj = osd_dt_obj(dt);
2097 struct osd_thandle *oh;
2098 struct osd_device *osd = osd_obj2dev(obj);
2104 down_read(&obj->oo_guard);
2106 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2107 GOTO(out, rc = -ENOENT);
2109 LASSERT(osd_invariant(obj));
2110 LASSERT(obj->oo_sa_hdl != NULL);
2112 oh = container_of(handle, struct osd_thandle, ot_super);
2113 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2115 write_lock(&obj->oo_attr_lock);
2116 nlink = --obj->oo_attr.la_nlink;
2117 write_unlock(&obj->oo_attr_lock);
2119 rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2122 up_read(&obj->oo_guard);
2126 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
2127 __u64 start, __u64 end)
2129 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2133 if (osd->od_dt_dev.dd_rdonly)
2136 txg = osd_db_dirty_txg(osd_dt_obj(dt)->oo_dn->dn_dbuf);
2138 /* the object is dirty or being synced */
2139 if (osd_object_sync_delay_us < 0)
2140 txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
2142 udelay(osd_object_sync_delay_us);
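/*
 * Note on the tunable used above (see the module parameter at the end of
 * this file): a negative osd_object_sync_delay_us (the default) waits for
 * the dirty TXG to commit via txg_wait_synced(), while a value >= 0 only
 * udelay()s the caller for that many microseconds instead of forcing a
 * real sync.
 */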
2148 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
2153 static bool osd_check_stale(struct dt_object *dt)
2158 static const struct dt_object_operations osd_obj_ops = {
2159 .do_read_lock = osd_read_lock,
2160 .do_write_lock = osd_write_lock,
2161 .do_read_unlock = osd_read_unlock,
2162 .do_write_unlock = osd_write_unlock,
2163 .do_write_locked = osd_write_locked,
2164 .do_attr_get = osd_attr_get,
2165 .do_declare_attr_set = osd_declare_attr_set,
2166 .do_attr_set = osd_attr_set,
2167 .do_ah_init = osd_ah_init,
2168 .do_declare_create = osd_declare_create,
2169 .do_create = osd_create,
2170 .do_declare_destroy = osd_declare_destroy,
2171 .do_destroy = osd_destroy,
2172 .do_index_try = osd_index_try,
2173 .do_declare_ref_add = osd_declare_ref_add,
2174 .do_ref_add = osd_ref_add,
2175 .do_declare_ref_del = osd_declare_ref_del,
2176 .do_ref_del = osd_ref_del,
2177 .do_xattr_get = osd_xattr_get,
2178 .do_declare_xattr_set = osd_declare_xattr_set,
2179 .do_xattr_set = osd_xattr_set,
2180 .do_declare_xattr_del = osd_declare_xattr_del,
2181 .do_xattr_del = osd_xattr_del,
2182 .do_xattr_list = osd_xattr_list,
2183 .do_object_sync = osd_object_sync,
2184 .do_invalidate = osd_invalidate,
2185 .do_check_stale = osd_check_stale,
2188 static const struct lu_object_operations osd_lu_obj_ops = {
2189 .loo_object_init = osd_object_init,
2190 .loo_object_delete = osd_object_delete,
2191 .loo_object_release = osd_object_release,
2192 .loo_object_free = osd_object_free,
2193 .loo_object_print = osd_object_print,
2194 .loo_object_invariant = osd_object_invariant,
2197 static int osd_otable_it_attr_get(const struct lu_env *env,
2198 struct dt_object *dt,
2199 struct lu_attr *attr)
2205 static const struct dt_object_operations osd_obj_otable_it_ops = {
2206 .do_attr_get = osd_otable_it_attr_get,
2207 .do_index_try = osd_index_try,
2210 module_param(osd_object_sync_delay_us, int, 0644);
2211 MODULE_PARM_DESC(osd_object_sync_delay_us,
2212 "If zero or larger delay N usec instead of doing object sync");