4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osd-zfs/osd_object.c
34 * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35 * Author: Mike Pershin <tappro@whamcloud.com>
36 * Author: Johann Lombardi <johann@whamcloud.com>
39 #define DEBUG_SUBSYSTEM S_OSD
41 #include <libcfs/libcfs.h>
42 #include <obd_support.h>
43 #include <lustre_net.h>
45 #include <obd_class.h>
46 #include <lustre_disk.h>
47 #include <lustre_fid.h>
49 #include "osd_internal.h"
51 #include <sys/dnode.h>
56 #include <sys/spa_impl.h>
57 #include <sys/zfs_znode.h>
58 #include <sys/dmu_tx.h>
59 #include <sys/dmu_objset.h>
60 #include <sys/dsl_prop.h>
61 #include <sys/sa_impl.h>
64 char *osd_obj_tag = "osd_object";
65 static int osd_object_sync_delay_us = -1;
67 static struct dt_object_operations osd_obj_ops;
68 static struct lu_object_operations osd_lu_obj_ops;
69 static struct dt_object_operations osd_obj_otable_it_ops;
72 osd_object_sa_fini(struct osd_object *obj)
75 sa_handle_destroy(obj->oo_sa_hdl);
76 obj->oo_sa_hdl = NULL;
81 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
85 LASSERT(obj->oo_sa_hdl == NULL);
86 LASSERT(obj->oo_dn != NULL);
88 rc = osd_sa_handle_get(obj);
92 /* Cache the xattr object id, valid for the life of the object */
93 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
95 obj->oo_xattr = ZFS_NO_OBJECT;
98 osd_object_sa_fini(obj);
105 * Add object to list of dirty objects in tx handle.
107 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
109 if (!list_empty(&obj->oo_sa_linkage))
112 write_lock(&obj->oo_attr_lock);
113 if (likely(list_empty(&obj->oo_sa_linkage)))
114 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
115 write_unlock(&obj->oo_attr_lock);
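/*
 * Note that the lockless list_empty() check above is only an optimistic
 * fast path: the state is re-checked under oo_attr_lock before the
 * list_add(), so a racing caller cannot put the object on oh->ot_sa_list
 * twice.
 */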
119 * Release spill block dbuf hold for all dirty SAs.
121 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
123 struct osd_object *obj;
125 while (!list_empty(&oh->ot_sa_list)) {
126 obj = list_entry(oh->ot_sa_list.next,
127 struct osd_object, oo_sa_linkage);
128 write_lock(&obj->oo_attr_lock);
129 list_del_init(&obj->oo_sa_linkage);
130 write_unlock(&obj->oo_attr_lock);
131 if (obj->oo_late_xattr) {
133 * take oo_guard to protect oo_sa_xattr buffer
134 * from concurrent update by osd_xattr_set()
136 LASSERT(oh->ot_assigned != 0);
137 down_write(&obj->oo_guard);
138 if (obj->oo_late_attr_set)
139 __osd_sa_attr_init(env, obj, oh);
140 else if (obj->oo_late_xattr)
141 __osd_sa_xattr_update(env, obj, oh);
142 up_write(&obj->oo_guard);
144 sa_spill_rele(obj->oo_sa_hdl);
149 * Update the SA and add the object to the dirty list.
151 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
152 void *buf, uint32_t buflen, struct osd_thandle *oh)
156 LASSERT(obj->oo_sa_hdl != NULL);
157 LASSERT(oh->ot_tx != NULL);
159 rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
160 osd_object_sa_dirty_add(obj, oh);
166 * Bulk update the SA and add the object to the dirty list.
169 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
170 int count, struct osd_thandle *oh)
174 LASSERT(obj->oo_sa_hdl != NULL);
175 LASSERT(oh->ot_tx != NULL);
177 rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
178 osd_object_sa_dirty_add(obj, oh);
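/*
 * Illustrative use of osd_object_sa_update() (a simplified sketch of what
 * osd_ref_add() below does; locking and error handling trimmed):
 *
 *	uint64_t nlink;
 *
 *	write_lock(&obj->oo_attr_lock);
 *	nlink = ++obj->oo_attr.la_nlink;
 *	write_unlock(&obj->oo_attr_lock);
 *	rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
 *
 * Besides updating the SA, this queues the object on oh->ot_sa_list so the
 * spill dbuf hold is dropped later by osd_object_sa_dirty_rele().
 */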
184 * Retrieve the attributes of a DMU object
186 static int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
187 struct osd_object *obj, struct lu_attr *la)
189 struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
190 sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
191 struct lustre_mdt_attrs *lma;
197 LASSERT(obj->oo_dn != NULL);
199 la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_BTIME | LA_MODE |
200 LA_TYPE | LA_SIZE | LA_UID | LA_GID | LA_FLAGS |
203 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
204 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
205 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
206 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(o), NULL, osa->btime, 16);
207 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
208 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
209 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
210 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
211 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
212 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
213 LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
215 rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
219 #ifdef ZFS_PROJINHERIT
220 if (o->od_projectused_dn && osa->flags & ZFS_PROJID) {
221 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(o),
226 la->la_projid = osa->projid;
227 la->la_valid |= LA_PROJID;
228 obj->oo_with_projid = 1;
230 la->la_projid = ZFS_DEFAULT_PROJID;
231 la->la_valid &= ~LA_PROJID;
235 la->la_valid &= ~LA_PROJID;
238 la->la_atime = osa->atime[0];
239 la->la_mtime = osa->mtime[0];
240 la->la_ctime = osa->ctime[0];
241 la->la_btime = osa->btime[0];
242 la->la_mode = osa->mode;
243 la->la_uid = osa->uid;
244 la->la_gid = osa->gid;
245 la->la_nlink = osa->nlink;
246 la->la_flags = attrs_zfs2fs(osa->flags);
247 la->la_size = osa->size;
249 /* Try to get extra flags from LMA */
250 lma = (struct lustre_mdt_attrs *)osd_oti_get(env)->oti_buf;
252 buf.lb_len = sizeof(osd_oti_get(env)->oti_buf);
253 down_read(&obj->oo_guard);
254 rc = osd_xattr_get_lma(env, obj, &buf);
256 lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
258 lma_to_lustre_flags(lma->lma_incompat);
259 } else if (rc == -ENODATA ||
260 !(S_ISDIR(la->la_mode) &&
261 dt_object_exists(&obj->oo_dt))) {
264 up_read(&obj->oo_guard);
266 if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
267 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
270 la->la_rdev = osa->rdev;
271 la->la_valid |= LA_RDEV;
278 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
284 rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
288 dbi = (dmu_buf_impl_t *)db;
290 *dnp = DB_DNODE(dbi);
292 LASSERT(*dnp != NULL);
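/*
 * Typical use of __osd_obj2dnode(): once a FID has been resolved to a
 * dnode number, the dnode is pinned into the object, e.g. (a simplified
 * sketch of the lookup path in osd_object_init() below):
 *
 *	uint64_t oid;
 *
 *	rc = osd_fid_lookup(env, osd, fid, &oid);
 *	if (rc == 0)
 *		rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
 */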
298 * Concurrency: no concurrent access is possible that early in object
301 struct lu_object *osd_object_alloc(const struct lu_env *env,
302 const struct lu_object_header *hdr,
305 struct osd_object *mo;
307 OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
310 struct lu_object_header *h;
311 struct osd_device *o = osd_dev(d);
313 l = &mo->oo_dt.do_lu;
314 if (unlikely(o->od_in_init)) {
321 lu_object_header_init(h);
322 lu_object_init(l, h, d);
323 lu_object_add_top(h, l);
326 dt_object_init(&mo->oo_dt, NULL, d);
327 mo->oo_header = NULL;
330 mo->oo_dt.do_ops = &osd_obj_ops;
331 l->lo_ops = &osd_lu_obj_ops;
332 INIT_LIST_HEAD(&mo->oo_sa_linkage);
333 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
334 init_rwsem(&mo->oo_sem);
335 init_rwsem(&mo->oo_guard);
336 rwlock_init(&mo->oo_attr_lock);
337 mo->oo_destroy = OSD_DESTROY_NONE;
344 static void osd_obj_set_blksize(const struct lu_env *env,
345 struct osd_device *osd, struct osd_object *obj)
347 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
349 dnode_t *dn = obj->oo_dn;
354 LASSERT(!osd_oti_get(env)->oti_in_trans);
356 tx = dmu_tx_create(osd->od_os);
358 CERROR("%s: fail to create tx to set blksize for "DFID"\n",
359 osd->od_svname, PFID(fid));
363 dmu_tx_hold_bonus(tx, dn->dn_object);
364 rc = -dmu_tx_assign(tx, TXG_WAIT);
367 CERROR("%s: fail to assign tx to set blksize for "DFID
368 ": rc = %d\n", osd->od_svname, PFID(fid), rc);
372 down_write(&obj->oo_guard);
373 if (unlikely((1 << dn->dn_datablkshift) >= PAGE_SIZE))
376 blksz = dn->dn_datablksz;
377 if (!is_power_of_2(blksz))
378 blksz = size_roundup_power2(blksz);
380 if (blksz > osd->od_max_blksz)
381 blksz = osd->od_max_blksz;
382 else if (blksz < PAGE_SIZE)
384 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object, blksz, 0, tx);
389 up_write(&obj->oo_guard);
392 if (unlikely(obj->oo_dn->dn_maxblkid > 0))
395 CERROR("%s: fail to set blksize for "DFID": rc = %d\n",
396 osd->od_svname, PFID(fid), rc);
399 CDEBUG(D_INODE, "%s: set blksize as %u for "DFID"\n",
400 osd->od_svname, blksz, PFID(fid));
405 * Concurrency: shouldn't matter.
407 static int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
409 struct osd_device *osd = osd_obj2dev(obj);
410 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
416 rc = osd_object_sa_init(obj, osd);
420 /* cache attrs in object */
421 rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
425 if (likely(!fid_is_acct(fid))) {
426 /* no body operations for accounting objects */
427 obj->oo_dt.do_body_ops = &osd_body_ops;
429 if (S_ISREG(obj->oo_attr.la_mode) &&
430 obj->oo_dn->dn_maxblkid == 0 &&
431 (1 << obj->oo_dn->dn_datablkshift) < PAGE_SIZE &&
432 (fid_is_idif(fid) || fid_is_norm(fid) ||
434 osd->od_is_ost && !osd->od_dt_dev.dd_rdonly)
435 osd_obj_set_blksize(env, osd, obj);
439 * initialize object before marking it existing
441 obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
444 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
449 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
451 struct osd_thread_info *info = osd_oti_get(env);
454 struct lustre_mdt_attrs *lma;
455 const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
458 BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
459 lma = (struct lustre_mdt_attrs *)info->oti_buf;
461 buf.lb_len = sizeof(info->oti_buf);
463 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
466 lustre_lma_swab(lma);
467 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
468 CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
469 CWARN("%s: unsupported incompat LMA feature(s) %#x for "
470 "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
471 lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
474 } else if (unlikely(!lu_fid_eq(rfid, &lma->lma_self_fid))) {
475 CERROR("%s: FID-in-LMA "DFID" does not match the "
476 "object self-fid "DFID"\n",
477 osd_obj2dev(obj)->od_svname,
478 PFID(&lma->lma_self_fid), PFID(rfid));
481 struct osd_device *osd = osd_obj2dev(obj);
483 if (lma->lma_compat & LMAC_STRIPE_INFO &&
485 obj->oo_pfid_in_lma = 1;
486 if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
487 osd->od_remote_parent_dir != ZFS_NO_OBJECT)
488 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
490 } else if (rc == -ENODATA) {
491 /* LMA xattr hasn't been initialized yet */
499 * Helper function to retrieve DMU object id from fid for accounting object
501 static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
502 const struct lu_fid *fid)
506 LASSERT(fid_is_acct(fid));
508 switch (fid_oid(fid)) {
510 dn = osd->od_userused_dn;
513 dn = osd->od_groupused_dn;
515 #ifdef ZFS_PROJINHERIT
516 case ACCT_PROJECT_OID:
517 dn = osd->od_projectused_dn;
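/*
 * Accounting objects are special: their dnodes are owned by the osd device
 * (od_userused_dn and friends), so osd_object_init() below simply does
 *
 *	obj->oo_dn = osd_quota_fid2dmu(osd, fid);
 *
 * instead of going through the OI lookup and __osd_obj2dnode().
 */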
528 * Concurrency: no concurrent access is possible that early in object
531 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
532 const struct lu_object_conf *conf)
534 struct osd_object *obj = osd_obj(l);
535 struct osd_device *osd = osd_obj2dev(obj);
536 const struct lu_fid *fid = lu_object_fid(l);
537 struct lustre_scrub *scrub = &osd->od_scrub;
538 struct osd_thread_info *info = osd_oti_get(env);
539 struct luz_direntry *zde = &info->oti_zde;
540 struct osd_idmap_cache *idc;
541 char *name = info->oti_str;
548 LASSERT(osd_invariant(obj));
550 if (fid_is_otable_it(&l->lo_header->loh_fid)) {
551 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
552 l->lo_header->loh_attr |= LOHA_EXISTS;
557 if (conf && conf->loc_flags & LOC_F_NEW)
560 if (unlikely(fid_is_acct(fid))) {
561 obj->oo_dn = osd_quota_fid2dmu(osd, fid);
563 obj->oo_dt.do_index_ops = &osd_acct_index_ops;
564 l->lo_header->loh_attr |= LOHA_EXISTS;
570 idc = osd_idc_find(env, osd, fid);
571 if (idc && !idc->oic_remote && idc->oic_dnode != ZFS_NO_OBJECT) {
572 oid = idc->oic_dnode;
577 if (!list_empty(&osd->od_scrub.os_inconsistent_items))
578 rc = osd_oii_lookup(osd, fid, &oid);
581 rc = osd_fid_lookup(env, osd, fid, &oid);
584 if (likely(!(fid_is_norm(fid) || fid_is_igif(fid)) ||
585 fid_is_on_ost(env, osd, fid) ||
586 !zfs_test_bit(osd_oi_fid2idx(osd, fid),
587 scrub->os_file.sf_oi_bitmap)))
598 LASSERT(obj->oo_dn == NULL);
600 rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
601 /* EEXIST will be returned if object is being deleted in ZFS */
606 CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
607 osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
611 rc = osd_object_init0(env, obj);
615 if (unlikely(obj->oo_header))
618 rc = osd_check_lma(env, obj);
619 if ((!rc && !remote) || (rc != -EREMCHG))
623 /* We still have a chance to get the valid dnode: for the object that is
624 * referenced by remote name entry, the object on the local MDT will be
625 * linked under the dir /REMOTE_PARENT_DIR with its FID string as name.
627 * During the OI scrub, if we cannot find the OI mapping, we may still
628 * have a chance to map the FID to a local OID via a lookup in the dir
629 * /REMOTE_PARENT_DIR. */
630 if (!remote && !fid_is_on_ost(env, osd, fid)) {
631 osd_fid2str(name, fid, sizeof(info->oti_str));
632 rc = osd_zap_lookup(osd, osd->od_remote_parent_dir,
633 NULL, name, 8, 3, (void *)zde);
635 oid = zde->lzd_reg.zde_dnode;
636 osd_dnode_rele(obj->oo_dn);
643 /* The case where the OI scrub has already been triggered. */
644 if (scrub->os_running) {
648 lu_object_set_agent_entry(l);
649 osd_oii_insert(env, osd, fid, oid, false);
657 /* The case where triggering the OI scrub automatically is NOT allowed. */
658 if (osd->od_auto_scrub_interval == AS_NEVER)
661 /* It is up to us to trigger the OI scrub. */
662 rc1 = osd_scrub_start(env, osd, SS_CLEAR_DRYRUN |
663 SS_CLEAR_FAILOUT | SS_AUTO_FULL);
664 LCONSOLE_WARN("%s: trigger OI scrub by RPC for the "DFID": rc = %d\n",
665 osd_name(osd), PFID(fid), rc1);
669 lu_object_set_agent_entry(l);
671 osd_oii_insert(env, osd, fid, oid, false);
686 * Concurrency: no concurrent access is possible that late in object
689 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
691 struct osd_object *obj = osd_obj(l);
692 struct lu_object_header *h = obj->oo_header;
694 LASSERT(osd_invariant(obj));
696 dt_object_fini(&obj->oo_dt);
697 /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
698 OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
700 lu_object_header_free(h);
704 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
708 LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
710 /* the object is supposed to be exclusively locked by
711 * the caller (osd_destroy()), while the transaction
712 * (oh) is per-thread and not shared */
713 if (likely(list_empty(&obj->oo_unlinked_linkage))) {
714 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
721 /* Default to max data size covered by a level-1 indirect block */
722 static unsigned long osd_sync_destroy_max_size =
723 1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
724 module_param(osd_sync_destroy_max_size, ulong, 0444);
725 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
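/*
 * For reference, with the usual ZFS constants (an assumption, they are not
 * defined in this file: DN_MAX_INDBLKSHIFT = 17, SPA_BLKPTRSHIFT = 7,
 * SPA_MAXBLOCKSHIFT = 24) the default evaluates to
 * 1UL << (17 - 7 + 24) = 16 GiB, i.e. the amount of data addressable by a
 * single level-1 indirect block full of maximum-size block pointers.
 */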
728 osd_object_set_destroy_type(struct osd_object *obj)
731 * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
732 * only once and use it consistently thereafter.
734 down_write(&obj->oo_guard);
735 if (obj->oo_destroy == OSD_DESTROY_NONE) {
736 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
737 obj->oo_destroy = OSD_DESTROY_SYNC;
738 else /* Larger objects are destroyed asynchronously */
739 obj->oo_destroy = OSD_DESTROY_ASYNC;
741 up_write(&obj->oo_guard);
744 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
747 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
748 struct osd_object *obj = osd_dt_obj(dt);
749 struct osd_device *osd = osd_obj2dev(obj);
750 struct osd_thandle *oh;
757 LASSERT(dt_object_exists(dt));
759 oh = container_of(th, struct osd_thandle, ot_super);
760 LASSERT(oh->ot_tx != NULL);
762 dmu_tx_mark_netfree(oh->ot_tx);
764 /* declare that we'll remove object from fid-dnode mapping */
765 zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
766 osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
768 osd_declare_xattrs_destroy(env, obj, oh);
771 rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
772 obj->oo_attr.la_gid, obj->oo_attr.la_projid,
773 -1, oh, NULL, OSD_QID_INODE);
777 /* data to be truncated */
778 rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
779 obj->oo_attr.la_gid, obj->oo_attr.la_projid,
780 0, oh, NULL, OSD_QID_BLK);
784 osd_object_set_destroy_type(obj);
785 if (obj->oo_destroy == OSD_DESTROY_SYNC)
786 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
789 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
790 osd->od_unlinked, TRUE, NULL);
792 /* remove the agent entry (if any) from the remote parent */
793 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
794 osd_tx_hold_zap(oh->ot_tx, osd->od_remote_parent_dir,
797 /* will help to find FID->ino when this object is being
798 * added to PENDING/ */
799 osd_idc_find_and_init(env, osd, obj);
804 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
807 struct osd_thread_info *info = osd_oti_get(env);
808 char *buf = info->oti_str;
809 struct osd_object *obj = osd_dt_obj(dt);
810 struct osd_device *osd = osd_obj2dev(obj);
811 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
812 struct osd_thandle *oh;
818 down_write(&obj->oo_guard);
820 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
821 GOTO(out, rc = -ENOENT);
823 LASSERT(obj->oo_dn != NULL);
825 oh = container_of(th, struct osd_thandle, ot_super);
827 LASSERT(oh->ot_tx != NULL);
829 /* remove obj ref from index dir (it depends) */
830 zapid = osd_get_name_n_idx(env, osd, fid, buf,
831 sizeof(info->oti_str), &zdn);
832 rc = osd_xattrs_destroy(env, obj, oh);
834 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
835 osd->od_svname, buf, rc);
839 if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
840 rc = osd_delete_from_remote_parent(env, osd, obj, oh, true);
845 oid = obj->oo_dn->dn_object;
846 if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
847 /* this may happen if the destroy wasn't declared
848 * e.g. when the object is created and then destroyed
849 * in the same transaction - we don't need additional
850 * space for destroy specifically */
851 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
852 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
854 CERROR("%s: failed to free %s %llu: rc = %d\n",
855 osd->od_svname, buf, oid, rc);
856 } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
857 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
859 CERROR("%s: failed to free %s %llu: rc = %d\n",
860 osd->od_svname, buf, oid, rc);
861 } else { /* asynchronous destroy */
862 char *key = info->oti_key;
864 rc = osd_object_unlinked_add(obj, oh);
868 snprintf(key, sizeof(info->oti_key), "%llx", oid);
869 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
870 osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
872 CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
873 osd->od_svname, buf, oid, rc);
876 /* Remove the OI mapping after the destroy to handle the race with
877 * OI scrub that may insert missed OI mapping during the interval. */
878 rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
879 if (unlikely(rc == -ENOENT))
882 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
883 osd->od_svname, buf, rc);
888 /* not needed in the cache anymore */
889 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
891 obj->oo_destroyed = 1;
892 up_write(&obj->oo_guard);
896 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
898 struct osd_object *obj = osd_obj(l);
899 const struct lu_fid *fid = lu_object_fid(l);
902 if (likely(!fid_is_acct(fid))) {
903 osd_object_sa_fini(obj);
904 if (obj->oo_sa_xattr) {
905 nvlist_free(obj->oo_sa_xattr);
906 obj->oo_sa_xattr = NULL;
908 osd_dnode_rele(obj->oo_dn);
909 list_del(&obj->oo_sa_linkage);
916 * Concurrency: ->loo_object_release() is called under site spin-lock.
918 static void osd_object_release(const struct lu_env *env,
924 * Concurrency: shouldn't matter.
926 static int osd_object_print(const struct lu_env *env, void *cookie,
927 lu_printer_t p, const struct lu_object *l)
929 struct osd_object *o = osd_obj(l);
931 return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
934 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
937 struct osd_object *obj = osd_dt_obj(dt);
939 LASSERT(osd_invariant(obj));
941 down_read_nested(&obj->oo_sem, role);
944 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
947 struct osd_object *obj = osd_dt_obj(dt);
949 LASSERT(osd_invariant(obj));
951 down_write_nested(&obj->oo_sem, role);
954 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
956 struct osd_object *obj = osd_dt_obj(dt);
958 LASSERT(osd_invariant(obj));
959 up_read(&obj->oo_sem);
962 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
964 struct osd_object *obj = osd_dt_obj(dt);
966 LASSERT(osd_invariant(obj));
967 up_write(&obj->oo_sem);
970 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
972 struct osd_object *obj = osd_dt_obj(dt);
975 LASSERT(osd_invariant(obj));
977 if (down_write_trylock(&obj->oo_sem)) {
979 up_write(&obj->oo_sem);
984 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
985 struct lu_attr *attr)
987 struct osd_object *obj = osd_dt_obj(dt);
988 struct osd_device *osd = osd_obj2dev(obj);
993 down_read(&obj->oo_guard);
995 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
996 GOTO(out, rc = -ENOENT);
998 if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
1001 LASSERT(osd_invariant(obj));
1002 LASSERT(obj->oo_dn);
1004 read_lock(&obj->oo_attr_lock);
1005 *attr = obj->oo_attr;
1006 if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
1007 attr->la_valid |= LA_FLAGS;
1008 attr->la_flags |= LUSTRE_ORPHAN_FL;
1010 if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
1011 attr->la_valid |= LA_FLAGS;
1012 attr->la_flags |= LUSTRE_ENCRYPT_FL;
1014 read_unlock(&obj->oo_attr_lock);
1015 if (attr->la_valid & LA_FLAGS && attr->la_flags & LUSTRE_ORPHAN_FL)
1016 CDEBUG(D_INFO, "%s: set orphan flag on "DFID" (%llx/%x)\n",
1017 osd_obj2dev(obj)->od_svname,
1018 PFID(lu_object_fid(&dt->do_lu)),
1019 attr->la_valid, obj->oo_lma_flags);
1021 /* with ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
1022 * from within sa_object_size() can block on a mutex, so
1023 * we can't call sa_object_size() while holding the rwlock */
1024 sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
1025 /* we do not control the size of indices, so always calculate
1026 * it from the number of blocks reported by the DMU */
1027 if (S_ISDIR(attr->la_mode)) {
1028 attr->la_size = 512 * blocks;
1029 rc = -zap_count(osd->od_os, obj->oo_dn->dn_object,
1030 &attr->la_dirent_count);
1032 /* Block size may not be set; suggest maximal I/O transfers. */
1034 blksize = osd_spa_maxblocksize(
1035 dmu_objset_spa(osd_obj2dev(obj)->od_os));
1037 attr->la_blksize = blksize;
1038 attr->la_blocks = blocks;
1039 attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
1042 up_read(&obj->oo_guard);
1046 /* Simple wrapper on top of the qsd API which implements quota transfer for osd
1047 * setattr needs. As a reminder, only the root user can change the ownership of
1048 * a file; that's why EDQUOT & EINPROGRESS errors are discarded */
1049 static inline int qsd_transfer(const struct lu_env *env,
1050 struct qsd_instance *qsd,
1051 struct lquota_trans *trans, int qtype,
1052 __u64 orig_id, __u64 new_id, __u64 bspace,
1053 struct lquota_id_info *qi, bool ignore_edquot)
1057 if (unlikely(qsd == NULL))
1060 LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
1061 qi->lqi_type = qtype;
1063 /* inode accounting */
1064 qi->lqi_is_blk = false;
1066 /* one more inode for the new owner ... */
1067 qi->lqi_id.qid_uid = new_id;
1069 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1070 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1075 /* and one less inode for the current id */
1076 qi->lqi_id.qid_uid = orig_id;
1078 /* can't get EDQUOT when reducing usage */
1079 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1080 if (rc == -EINPROGRESS)
1085 /* block accounting */
1086 qi->lqi_is_blk = true;
1088 /* more blocks for the new owner ... */
1089 qi->lqi_id.qid_uid = new_id;
1090 qi->lqi_space = bspace;
1091 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1092 if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1097 /* and finally less blocks for the current owner */
1098 qi->lqi_id.qid_uid = orig_id;
1099 qi->lqi_space = -bspace;
1100 rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1101 /* can't get EDQUOT when reducing usage */
1102 if (rc == -EINPROGRESS)
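/*
 * Illustrative caller of qsd_transfer() (a simplified sketch of the UID
 * case in osd_declare_attr_set() below; error handling omitted):
 *
 *	if (attr->la_valid & LA_UID && attr->la_uid != obj->oo_attr.la_uid)
 *		rc = qsd_transfer(env, osd_def_qsd(osd),
 *				  &oh->ot_quota_trans, USRQUOTA,
 *				  obj->oo_attr.la_uid, attr->la_uid,
 *				  bspace, &info->oti_qi, true);
 */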
1107 static int osd_declare_attr_set(const struct lu_env *env,
1108 struct dt_object *dt,
1109 const struct lu_attr *attr,
1110 struct thandle *handle)
1112 struct osd_thread_info *info = osd_oti_get(env);
1113 struct osd_object *obj = osd_dt_obj(dt);
1114 struct osd_device *osd = osd_obj2dev(obj);
1116 struct osd_thandle *oh;
1124 LASSERT(handle != NULL);
1125 LASSERT(osd_invariant(obj));
1127 oh = container_of(handle, struct osd_thandle, ot_super);
1129 down_read(&obj->oo_guard);
1130 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1133 LASSERT(obj->oo_sa_hdl != NULL);
1134 LASSERT(oh->ot_tx != NULL);
1135 /* regular attributes are part of the bonus buffer */
1136 /* let's check whether this object is already part of
1139 for (txh = list_head(&oh->ot_tx->tx_holds); txh;
1140 txh = list_next(&oh->ot_tx->tx_holds, txh)) {
1141 if (txh->txh_dnode == NULL)
1143 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
1145 /* this object is already part of the transaction;
1146 * we don't need to declare bonus again */
1151 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
1152 if (oh->ot_tx->tx_err != 0)
1153 GOTO(out, rc = -oh->ot_tx->tx_err);
1155 if (attr && attr->la_valid & LA_FLAGS) {
1156 /* LMA is usually a part of bonus, no need to declare
1160 if (attr && (attr->la_valid & (LA_UID | LA_GID | LA_PROJID))) {
1161 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
1162 bspace = toqb(bspace * 512);
1165 "%s: enforce quota on UID %u, GID %u, the quota space is %lld (%u)\n",
1167 attr->la_uid, attr->la_gid, bspace, blksize);
1170 if (attr && attr->la_valid & LA_UID) {
1171 /* quota enforcement for user */
1172 if (attr->la_uid != obj->oo_attr.la_uid) {
1173 rc = qsd_transfer(env, osd_def_qsd(osd),
1174 &oh->ot_quota_trans, USRQUOTA,
1175 obj->oo_attr.la_uid, attr->la_uid,
1176 bspace, &info->oti_qi, true);
1181 if (attr && attr->la_valid & LA_GID) {
1182 /* quota enforcement for group */
1183 if (attr->la_gid != obj->oo_attr.la_gid) {
1184 rc = qsd_transfer(env, osd_def_qsd(osd),
1185 &oh->ot_quota_trans, GRPQUOTA,
1186 obj->oo_attr.la_gid, attr->la_gid,
1187 bspace, &info->oti_qi,
1189 LUSTRE_SET_SYNC_FL));
1194 #ifdef ZFS_PROJINHERIT
1195 if (attr && attr->la_valid & LA_PROJID) {
1196 /* quota enforcement for project */
1197 if (attr->la_projid != obj->oo_attr.la_projid) {
1198 if (!osd->od_projectused_dn)
1199 GOTO(out, rc = -EOPNOTSUPP);
1201 /* Usually, if project quota is upgradable for the
1202 * device, then the upgrade will be done before or when
1203 * mounting the device. So when we come here, this project
1204 * should have project ID attribute already (that is
1205 * zero by default). Otherwise, there was something
1206 * wrong during the former upgrade, let's return failure
1209 * Please note that, different from other attributes,
1210 * you can NOT simply set the project ID attribute in
1211 * such a case, because adding (NOT changing) the project ID
1212 * attribute needs to change the object's attribute
1213 * layout to match zfs backend quota accounting
1215 if (unlikely(!obj->oo_with_projid))
1216 GOTO(out, rc = -ENXIO);
1218 rc = qsd_transfer(env, osd_def_qsd(osd),
1219 &oh->ot_quota_trans, PRJQUOTA,
1220 obj->oo_attr.la_projid,
1221 attr->la_projid, bspace,
1222 &info->oti_qi, true);
1229 up_read(&obj->oo_guard);
1234 * Set the attributes of an object
1236 * The transaction passed to this routine must have
1237 * dmu_tx_hold_bonus(tx, oid) called and then assigned
1238 * to a transaction group.
1240 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
1241 const struct lu_attr *la, struct thandle *handle)
1243 struct osd_thread_info *info = osd_oti_get(env);
1244 sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1245 struct osd_object *obj = osd_dt_obj(dt);
1246 struct osd_device *osd = osd_obj2dev(obj);
1247 struct osd_thandle *oh;
1248 struct osa_attr *osa = &info->oti_osa;
1249 __u64 valid = la->la_valid;
1255 down_read(&obj->oo_guard);
1256 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1257 GOTO(out, rc = -ENOENT);
1259 LASSERT(handle != NULL);
1260 LASSERT(osd_invariant(obj));
1261 LASSERT(obj->oo_sa_hdl);
1263 oh = container_of(handle, struct osd_thandle, ot_super);
1264 /* Assert that the transaction has been assigned to a
1265 transaction group. */
1266 LASSERT(oh->ot_tx->tx_txg != 0);
1268 if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) && !osd->od_is_ost) {
1269 struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
1270 char *buf = info->oti_str;
1271 dnode_t *zdn = NULL;
1274 zapid = osd_get_name_n_idx(env, osd, lu_object_fid(&dt->do_lu),
1275 buf, sizeof(info->oti_str), &zdn);
1276 rc = osd_zap_lookup(osd, zapid, zdn, buf, 8,
1277 sizeof(*zde) / 8, zde);
1279 zde->zde_dnode -= 1;
1280 rc = -zap_update(osd->od_os, zapid, buf, 8,
1281 sizeof(*zde) / 8, zde, oh->ot_tx);
1288 /* Only allow setting the size for regular files */
1289 if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
1290 valid &= ~(LA_SIZE | LA_BLOCKS);
1292 if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
1295 if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
1298 if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
1304 if (valid & LA_FLAGS) {
1305 struct lustre_mdt_attrs *lma;
1309 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
1310 LASSERT(!obj->oo_pfid_in_lma);
1311 BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
1312 lma = (struct lustre_mdt_attrs *)&info->oti_buf;
1314 buf.lb_len = sizeof(info->oti_buf);
1316 /* Please do NOT call osd_xattr_get() directly, that
1317 * will cause recursive down_read() on oo_guard. */
1318 rc = osd_xattr_get_internal(env, obj, &buf,
1319 XATTR_NAME_LMA, &size);
1320 if (!rc && unlikely(size < sizeof(*lma))) {
1324 le32_to_cpu(lma->lma_incompat);
1325 lma->lma_incompat |=
1326 lustre_to_lma_flags(la->la_flags);
1328 cpu_to_le32(lma->lma_incompat);
1330 buf.lb_len = sizeof(*lma);
1331 rc = osd_xattr_set_internal(env, obj, &buf,
1337 CWARN("%s: failed to set LMA flags: rc = %d\n",
1338 osd->od_svname, rc);
1342 la->la_flags & LUSTRE_LMA_FL_MASKS;
1347 write_lock(&obj->oo_attr_lock);
1350 if (valid & LA_PROJID) {
1351 #ifdef ZFS_PROJINHERIT
1352 if (osd->od_projectused_dn) {
1353 LASSERT(obj->oo_with_projid);
1355 osa->projid = obj->oo_attr.la_projid = la->la_projid;
1356 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1360 valid &= ~LA_PROJID;
1363 if (valid & LA_ATIME) {
1364 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1365 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1368 if (valid & LA_MTIME) {
1369 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1370 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1373 if (valid & LA_CTIME) {
1374 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1375 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1378 if (valid & LA_MODE) {
1379 /* mode is stored along with type, so read it first */
1380 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1381 (la->la_mode & ~S_IFMT);
1382 osa->mode = obj->oo_attr.la_mode;
1383 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1386 if (valid & LA_SIZE) {
1387 osa->size = obj->oo_attr.la_size = la->la_size;
1388 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1391 if (valid & LA_NLINK) {
1392 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1393 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1396 if (valid & LA_RDEV) {
1397 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1398 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1401 if (valid & LA_FLAGS) {
1402 osa->flags = attrs_fs2zfs(la->la_flags);
1403 /* many flags are not supported by zfs, so ensure a good cached
1405 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1406 #ifdef ZFS_PROJINHERIT
1407 if (obj->oo_with_projid && osd->od_projectused_dn)
1408 osa->flags |= ZFS_PROJID;
1410 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1413 if (valid & LA_UID) {
1414 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1415 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1418 if (valid & LA_GID) {
1419 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1420 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1423 obj->oo_attr.la_valid |= valid;
1424 write_unlock(&obj->oo_attr_lock);
1426 LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1427 rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1430 up_read(&obj->oo_guard);
1437 * XXX temporary solution.
1440 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1441 struct dt_object *parent, struct dt_object *child,
1446 ah->dah_parent = parent;
1447 ah->dah_mode = child_mode;
1449 if (parent != NULL && !dt_object_remote(parent)) {
1450 /* will help to find FID->ino at dt_insert("..") */
1451 struct osd_object *pobj = osd_dt_obj(parent);
1453 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1457 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1458 struct lu_attr *attr,
1459 struct dt_allocation_hint *hint,
1460 struct dt_object_format *dof,
1461 struct thandle *handle)
1463 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1464 struct osd_object *obj = osd_dt_obj(dt);
1465 struct osd_device *osd = osd_obj2dev(obj);
1466 struct osd_thandle *oh;
1474 switch (dof->dof_type) {
1478 if (obj->oo_dt.do_body_ops == NULL)
1479 obj->oo_dt.do_body_ops = &osd_body_ops;
1485 LASSERT(handle != NULL);
1486 oh = container_of(handle, struct osd_thandle, ot_super);
1487 LASSERT(oh->ot_tx != NULL);
1489 /* this is the minimum set of EAs on every Lustre object */
1490 obj->oo_ea_in_bonus = OSD_BASE_EA_IN_BONUS;
1491 /* reserve 32 bytes for extra stuff like ACLs */
1492 dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1494 switch (dof->dof_type) {
1496 dt->do_index_ops = &osd_dir_ops;
1499 /* for zap create */
1500 dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1501 dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1506 /* first, we'll create new object */
1507 dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1515 /* and we'll add it to some mapping */
1516 zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1517 osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1519 /* will help to find FID->ino mapping at dt_insert() */
1520 osd_idc_find_and_init(env, osd, obj);
1522 rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid,
1523 attr->la_projid, 1, oh, NULL, OSD_QID_INODE);
1528 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1529 struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
1530 struct lu_attr *la, uint64_t parent,
1533 sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1534 struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1536 inode_timespec_t now;
1539 char *dxattr = NULL;
1545 gen = dmu_tx_get_txg(tx);
1547 ZFS_TIME_ENCODE(&now, osa->btime);
1549 osa->atime[0] = la->la_atime;
1550 osa->ctime[0] = la->la_ctime;
1551 osa->mtime[0] = la->la_mtime;
1552 osa->mode = la->la_mode;
1553 osa->uid = la->la_uid;
1554 osa->gid = la->la_gid;
1555 osa->rdev = la->la_rdev;
1556 osa->nlink = la->la_nlink;
1557 if (la->la_valid & LA_FLAGS)
1558 osa->flags = attrs_fs2zfs(la->la_flags);
1561 osa->size = la->la_size;
1562 #ifdef ZFS_PROJINHERIT
1563 if (osd->od_projectused_dn) {
1564 if (la->la_valid & LA_PROJID)
1565 osa->projid = la->la_projid;
1567 osa->projid = ZFS_DEFAULT_PROJID;
1568 osa->flags |= ZFS_PROJID;
1570 obj->oo_with_projid = 1;
1572 osa->flags &= ~ZFS_PROJID;
1577 * we need to create all SA below upon object create.
1579 * XXX The attribute order matters since the accounting callback relies
1580 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1581 * look up the UID/GID/PROJID attributes. Moreover, the callback does
1582 * not seem to support the spill block.
1583 * We define attributes in the same order as SA_*_OFFSET in order to
1584 * work around the problem. See ORI-610.
1587 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1588 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1589 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1590 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1591 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1592 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1593 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1594 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1595 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1596 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1597 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, osa->btime, 16);
1598 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1599 #ifdef ZFS_PROJINHERIT
1600 if (osd->od_projectused_dn)
1601 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1604 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1605 LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1608 rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
1611 dxattr = osd_zio_buf_alloc(sa_size);
1614 rc = -nvlist_pack(xattr, &dxattr, &sa_size,
1615 NV_ENCODE_XDR, KM_SLEEP);
1618 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1619 NULL, dxattr, sa_size);
1622 rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1624 osd_zio_buf_free(dxattr, sa_size);
1629 int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1630 uint64_t oid, dnode_t **dnp)
1635 /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1636 for (txh = list_tail(&tx->tx_holds); txh;
1637 txh = list_prev(&tx->tx_holds, txh)) {
1638 dnode_t *dn = txh->txh_dnode;
1643 if (dn->dn_object != oid)
1647 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1648 if (dn->dn_bonus == NULL)
1649 dbuf_create_bonus(dn);
1650 rw_exit(&dn->dn_struct_rwlock);
1654 LASSERT(dn->dn_handle);
1656 if (zfs_refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1657 zfs_refcount_add(&dn->dn_holds, osd_obj_tag);
1658 atomic_inc_32(&dn->dn_dbufs_count);
1662 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1666 if (unlikely(*dnp == NULL))
1667 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1672 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
1673 int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
1677 if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
1678 dnsize = DNODE_MIN_SIZE;
1680 if (DN_BONUS_SIZE(dnsize) >= ea_in_bonus + 32)
1683 } while (dnsize < DNODE_MAX_SIZE);
1684 if (dnsize > DNODE_MAX_SIZE)
1685 dnsize = DNODE_MAX_SIZE;
1686 } else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
1688 } else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
1690 } else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
1692 } else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
1694 } else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
1697 dnsize = DNODE_MIN_SIZE;
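/*
 * A worked example of the ZFS_DNSIZE_AUTO branch above, assuming
 * DN_BONUS_SIZE(dnsize) is roughly dnsize minus the 64-byte dnode core
 * (an assumption about the ZFS headers): a request for ~700 bytes of
 * bonus space skips the 512-byte dnode (~448 bytes of bonus), doubles to
 * 1K (~960 bytes) and stops there; if even DNODE_MAX_SIZE cannot satisfy
 * the request, DNODE_MAX_SIZE is used anyway.
 */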
1704 * The transaction passed to this routine must have
1705 * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1706 * to a transaction group.
1708 int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
1709 struct osd_object *obj, const struct lu_fid *fid,
1710 dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1712 dmu_object_type_t type = DMU_OT_PLAIN_FILE_CONTENTS;
1716 /* Use DMU_OTN_UINT8_METADATA for local objects so their data blocks
1717 * would get an additional ditto copy */
1718 if (unlikely(S_ISREG(la->la_mode) &&
1719 fid_seq_is_local_file(fid_seq(fid))))
1720 type = DMU_OTN_UINT8_METADATA;
1722 /* Create a new DMU object using the default dnode size. */
1724 size = obj->oo_ea_in_bonus;
1726 size = OSD_BASE_EA_IN_BONUS;
1727 oid = osd_dmu_object_alloc(osd->od_os, type, 0,
1728 osd_find_dnsize(osd, size), tx);
1730 LASSERT(la->la_valid & LA_MODE);
1734 return osd_find_new_dnode(env, tx, oid, dnp);
1738 * The transaction passed to this routine must have
1739 * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1740 * to a transaction group.
1742 * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1743 * This is fine for directories today, because storing the FID in the dirent
1744 * will also require a FAT ZAP. If there is a new type of micro ZAP created
1745 * then we might need to re-evaluate the use of this flag and instead do
1746 * a conversion from the different internal ZAP hash formats being used. */
1747 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1748 dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1749 unsigned dnsize, zap_flags_t flags)
1753 /* Assert that the transaction has been assigned to a
1754 transaction group. */
1755 LASSERT(tx->tx_txg != 0);
1758 oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1759 DMU_OT_DIRECTORY_CONTENTS,
1760 14, /* == ZFS fzap_default_blockshift */
1761 DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1767 return osd_find_new_dnode(env, tx, oid, dnp);
1770 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1771 struct lu_attr *la, struct osd_thandle *oh)
1773 struct osd_device *osd = osd_obj2dev(obj);
1777 /* Index file should be created as a regular file in order not to confuse
1778 * ZPL, which could interpret it as a directory.
1779 * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1781 LASSERT(S_ISREG(la->la_mode));
1782 rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1783 osd_find_dnsize(osd, obj->oo_ea_in_bonus), ZAP_FLAG_UINT64_KEY);
1789 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1790 struct lu_attr *la, struct osd_thandle *oh)
1792 struct osd_device *osd = osd_obj2dev(obj);
1796 LASSERT(S_ISDIR(la->la_mode));
1797 rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1798 osd_find_dnsize(osd, obj->oo_ea_in_bonus), 0);
1804 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1805 struct lu_attr *la, struct osd_thandle *oh)
1807 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1808 struct osd_device *osd = osd_obj2dev(obj);
1812 LASSERT(S_ISREG(la->la_mode));
1813 rc = __osd_object_create(env, osd, obj, fid, &dn, oh->ot_tx, la);
1817 if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid))) {
1818 /* The minimum block size must be at least the page size, otherwise
1819 * it will break the assumption in tgt_thread_big_cache where
1820 * the array size is PTLRPC_MAX_BRW_PAGES. It will also affect
1821 * RDMA due to subpage transfer size */
1822 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1823 PAGE_SIZE, 0, oh->ot_tx);
1825 CERROR("%s: can't change blocksize: %d\n",
1826 osd->od_svname, rc);
1829 } else if ((fid_is_llog(fid))) {
1830 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1831 LLOG_MIN_CHUNK_SIZE, 0, oh->ot_tx);
1833 CERROR("%s: can't change blocksize: %d\n",
1834 osd->od_svname, rc);
1842 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1843 struct lu_attr *la, struct osd_thandle *oh)
1848 LASSERT(S_ISLNK(la->la_mode));
1849 rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1850 lu_object_fid(&obj->oo_dt.do_lu),
1851 &dn, oh->ot_tx, la);
1857 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1858 struct lu_attr *la, struct osd_thandle *oh)
1863 if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1864 la->la_valid |= LA_RDEV;
1866 rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1867 lu_object_fid(&obj->oo_dt.do_lu),
1868 &dn, oh->ot_tx, la);
1874 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1875 struct osd_object *obj,
1877 struct osd_thandle *oh);
1879 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1881 osd_obj_type_f result;
1907 * Concurrency: @dt is write locked.
1909 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1910 struct lu_attr *attr, struct dt_allocation_hint *hint,
1911 struct dt_object_format *dof, struct thandle *th)
1913 struct osd_thread_info *info = osd_oti_get(env);
1914 struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1915 struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
1916 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1917 struct osd_object *obj = osd_dt_obj(dt);
1918 struct osd_device *osd = osd_obj2dev(obj);
1919 char *buf = info->oti_str;
1920 struct osd_thandle *oh;
1921 dnode_t *dn = NULL, *zdn = NULL;
1922 uint64_t zapid, parent = 0;
1928 LASSERT(!fid_is_acct(fid));
1930 /* concurrent create declarations should not see
1931 * the object in an inconsistent state (db, attr, etc).
1932 * In regular cases the acquisition should be cheap */
1933 down_write(&obj->oo_guard);
1935 if (unlikely(dt_object_exists(dt)))
1936 GOTO(out, rc = -EEXIST);
1938 LASSERT(osd_invariant(obj));
1939 LASSERT(dof != NULL);
1941 LASSERT(th != NULL);
1942 oh = container_of(th, struct osd_thandle, ot_super);
1944 LASSERT(obj->oo_dn == NULL);
1946 /* to follow ZFS on-disk format we need
1947 * to initialize parent dnode properly */
1948 if (hint != NULL && hint->dah_parent != NULL &&
1949 !dt_object_remote(hint->dah_parent))
1950 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1952 /* we may fix some attributes, better do not change the source */
1953 obj->oo_attr = *attr;
1954 obj->oo_attr.la_size = 0;
1955 obj->oo_attr.la_nlink = 0;
1956 obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1958 #ifdef ZFS_PROJINHERIT
1959 if (osd->od_projectused_dn) {
1960 if (!(obj->oo_attr.la_valid & LA_PROJID))
1961 obj->oo_attr.la_projid = ZFS_DEFAULT_PROJID;
1962 obj->oo_with_projid = 1;
1966 dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1974 zde->zde_dnode = dn->dn_object;
1975 zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1977 zapid = osd_get_name_n_idx(env, osd, fid, buf,
1978 sizeof(info->oti_str), &zdn);
1979 if (CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY) ||
1980 (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_NO_ENTRY)))
1983 if (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
1986 rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
1992 /* Now add in all of the "SA" attributes */
1993 rc = osd_sa_handle_get(obj);
1997 rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
2001 /* initialize LMA */
2002 if (fid_is_idif(fid) || (fid_is_norm(fid) && osd->od_is_ost))
2003 compat |= LMAC_FID_ON_OST;
2004 lustre_lma_init(lma, fid, compat, 0);
2005 lustre_lma_swab(lma);
2006 rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
2007 (uchar_t *)lma, sizeof(*lma));
2011 /* configure new osd object */
2012 obj->oo_parent = parent != 0 ? parent : zapid;
2013 obj->oo_late_attr_set = 1;
2014 rc = __osd_sa_xattr_schedule_update(env, obj, oh);
2018 /* XXX: oo_lma_flags */
2019 obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
2020 if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
2021 /* no body operations for accounting objects */
2022 obj->oo_dt.do_body_ops = &osd_body_ops;
2024 osd_idc_find_and_init(env, osd, obj);
2027 if (unlikely(rc && dn)) {
2028 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
2032 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
2034 up_write(&obj->oo_guard);
2038 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
2041 osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2042 return osd_declare_attr_set(env, dt, NULL, th);
2046 * Concurrency: @dt is write locked.
2048 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
2049 struct thandle *handle)
2051 struct osd_object *obj = osd_dt_obj(dt);
2052 struct osd_thandle *oh;
2053 struct osd_device *osd = osd_obj2dev(obj);
2059 down_read(&obj->oo_guard);
2060 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2061 GOTO(out, rc = -ENOENT);
2063 LASSERT(osd_invariant(obj));
2064 LASSERT(obj->oo_sa_hdl != NULL);
2066 oh = container_of(handle, struct osd_thandle, ot_super);
2068 write_lock(&obj->oo_attr_lock);
2069 nlink = ++obj->oo_attr.la_nlink;
2070 write_unlock(&obj->oo_attr_lock);
2072 rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2075 up_read(&obj->oo_guard);
2079 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
2080 struct thandle *handle)
2082 osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2083 return osd_declare_attr_set(env, dt, NULL, handle);
2087 * Concurrency: @dt is write locked.
2089 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
2090 struct thandle *handle)
2092 struct osd_object *obj = osd_dt_obj(dt);
2093 struct osd_thandle *oh;
2094 struct osd_device *osd = osd_obj2dev(obj);
2100 down_read(&obj->oo_guard);
2102 if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2103 GOTO(out, rc = -ENOENT);
2105 LASSERT(osd_invariant(obj));
2106 LASSERT(obj->oo_sa_hdl != NULL);
2108 oh = container_of(handle, struct osd_thandle, ot_super);
2109 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2111 write_lock(&obj->oo_attr_lock);
2112 nlink = --obj->oo_attr.la_nlink;
2113 write_unlock(&obj->oo_attr_lock);
2115 rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2118 up_read(&obj->oo_guard);
2122 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
2123 __u64 start, __u64 end)
2125 struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2129 if (osd->od_dt_dev.dd_rdonly)
2132 txg = osd_db_dirty_txg(osd_dt_obj(dt)->oo_dn->dn_dbuf);
2134 /* the object is dirty or being synced */
2135 if (osd_object_sync_delay_us < 0)
2136 txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
2138 udelay(osd_object_sync_delay_us);
2144 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
2149 static struct dt_object_operations osd_obj_ops = {
2150 .do_read_lock = osd_read_lock,
2151 .do_write_lock = osd_write_lock,
2152 .do_read_unlock = osd_read_unlock,
2153 .do_write_unlock = osd_write_unlock,
2154 .do_write_locked = osd_write_locked,
2155 .do_attr_get = osd_attr_get,
2156 .do_declare_attr_set = osd_declare_attr_set,
2157 .do_attr_set = osd_attr_set,
2158 .do_ah_init = osd_ah_init,
2159 .do_declare_create = osd_declare_create,
2160 .do_create = osd_create,
2161 .do_declare_destroy = osd_declare_destroy,
2162 .do_destroy = osd_destroy,
2163 .do_index_try = osd_index_try,
2164 .do_declare_ref_add = osd_declare_ref_add,
2165 .do_ref_add = osd_ref_add,
2166 .do_declare_ref_del = osd_declare_ref_del,
2167 .do_ref_del = osd_ref_del,
2168 .do_xattr_get = osd_xattr_get,
2169 .do_declare_xattr_set = osd_declare_xattr_set,
2170 .do_xattr_set = osd_xattr_set,
2171 .do_declare_xattr_del = osd_declare_xattr_del,
2172 .do_xattr_del = osd_xattr_del,
2173 .do_xattr_list = osd_xattr_list,
2174 .do_object_sync = osd_object_sync,
2175 .do_invalidate = osd_invalidate,
2178 static struct lu_object_operations osd_lu_obj_ops = {
2179 .loo_object_init = osd_object_init,
2180 .loo_object_delete = osd_object_delete,
2181 .loo_object_release = osd_object_release,
2182 .loo_object_free = osd_object_free,
2183 .loo_object_print = osd_object_print,
2184 .loo_object_invariant = osd_object_invariant,
2187 static int osd_otable_it_attr_get(const struct lu_env *env,
2188 struct dt_object *dt,
2189 struct lu_attr *attr)
2195 static struct dt_object_operations osd_obj_otable_it_ops = {
2196 .do_attr_get = osd_otable_it_attr_get,
2197 .do_index_try = osd_index_try,
2200 module_param(osd_object_sync_delay_us, int, 0644);
2201 MODULE_PARM_DESC(osd_object_sync_delay_us,
2202 "If zero or larger, delay N usec instead of doing object sync");
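/*
 * Note on tuning: osd_object_sync_delay_us is registered with mode 0644,
 * so it can normally be changed at runtime through the module's sysfs
 * parameters directory (for the ZFS OSD that is typically
 * /sys/module/osd_zfs/parameters/osd_object_sync_delay_us, an assumption
 * about the module name), while osd_sync_destroy_max_size above is 0444
 * and can only be set at module load time.
 */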