1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd-zfs/osd_object.c
33  *
34  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35  * Author: Mike Pershin <tappro@whamcloud.com>
36  * Author: Johann Lombardi <johann@whamcloud.com>
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <libcfs/libcfs.h>
42 #include <obd_support.h>
43 #include <lustre_net.h>
44 #include <obd.h>
45 #include <obd_class.h>
46 #include <lustre_disk.h>
47 #include <lustre_fid.h>
48
49 #include "osd_internal.h"
50
51 #include <sys/dnode.h>
52 #include <sys/dbuf.h>
53 #include <sys/spa.h>
54 #include <sys/stat.h>
55 #include <sys/zap.h>
56 #include <sys/spa_impl.h>
57 #include <sys/zfs_znode.h>
58 #include <sys/dmu_tx.h>
59 #include <sys/dmu_objset.h>
60 #include <sys/dsl_prop.h>
61 #include <sys/sa_impl.h>
62 #include <sys/txg.h>
63
64 char *osd_obj_tag = "osd_object";
65 static int osd_object_sync_delay_us = -1;
66
67 static struct dt_object_operations osd_obj_ops;
68 static struct lu_object_operations osd_lu_obj_ops;
69 static struct dt_object_operations osd_obj_otable_it_ops;
70
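/*
 * Destroy the cached SA handle of the object, if one has been opened.
 */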
71 static void
72 osd_object_sa_fini(struct osd_object *obj)
73 {
74         if (obj->oo_sa_hdl) {
75                 sa_handle_destroy(obj->oo_sa_hdl);
76                 obj->oo_sa_hdl = NULL;
77         }
78 }
79
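/*
 * Open an SA handle for the object and cache the id of its xattr directory
 * object (ZFS_NO_OBJECT if the object has no directory-based xattrs).
 */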
80 static int
81 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
82 {
83         int rc;
84
85         LASSERT(obj->oo_sa_hdl == NULL);
86         LASSERT(obj->oo_dn != NULL);
87
88         rc = osd_sa_handle_get(obj);
89         if (rc)
90                 return rc;
91
92         /* Cache the xattr object id, valid for the life of the object */
93         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
94         if (rc == -ENOENT) {
95                 obj->oo_xattr = ZFS_NO_OBJECT;
96                 rc = 0;
97         } else if (rc) {
98                 osd_object_sa_fini(obj);
99         }
100
101         return rc;
102 }
103
104 /*
105  * Add object to list of dirty objects in tx handle.
106  */
107 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
108 {
109         if (!list_empty(&obj->oo_sa_linkage))
110                 return;
111
112         write_lock(&obj->oo_attr_lock);
113         if (likely(list_empty(&obj->oo_sa_linkage)))
114                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
115         write_unlock(&obj->oo_attr_lock);
116 }
117
118 /*
119  * Release spill block dbuf hold for all dirty SAs.
120  */
121 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
122 {
123         struct osd_object *obj;
124
125         while (!list_empty(&oh->ot_sa_list)) {
126                 obj = list_entry(oh->ot_sa_list.next,
127                                  struct osd_object, oo_sa_linkage);
128                 write_lock(&obj->oo_attr_lock);
129                 list_del_init(&obj->oo_sa_linkage);
130                 write_unlock(&obj->oo_attr_lock);
131                 if (obj->oo_late_xattr) {
132                         /*
133                          * take oo_guard to protect oo_sa_xattr buffer
134                          * from concurrent update by osd_xattr_set()
135                          */
136                         LASSERT(oh->ot_assigned != 0);
137                         down_write(&obj->oo_guard);
138                         if (obj->oo_late_attr_set)
139                                 __osd_sa_attr_init(env, obj, oh);
140                         else if (obj->oo_late_xattr)
141                                 __osd_sa_xattr_update(env, obj, oh);
142                         up_write(&obj->oo_guard);
143                 }
144                 sa_spill_rele(obj->oo_sa_hdl);
145         }
146 }
147
148 /*
149  * Update the SA and add the object to the dirty list.
150  */
151 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
152                          void *buf, uint32_t buflen, struct osd_thandle *oh)
153 {
154         int rc;
155
156         LASSERT(obj->oo_sa_hdl != NULL);
157         LASSERT(oh->ot_tx != NULL);
158
159         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
160         osd_object_sa_dirty_add(obj, oh);
161
162         return rc;
163 }
164
165 /*
166  * Bulk update the SA and add the object to the dirty list.
167  */
168 static int
169 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
170                           int count, struct osd_thandle *oh)
171 {
172         int rc;
173
174         LASSERT(obj->oo_sa_hdl != NULL);
175         LASSERT(oh->ot_tx != NULL);
176
177         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
178         osd_object_sa_dirty_add(obj, oh);
179
180         return rc;
181 }
182
183 /*
184  * Retrieve the attributes of a DMU object
185  */
186 static int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
187                                  struct osd_object *obj, struct lu_attr *la)
188 {
189         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
190         sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
191         struct lustre_mdt_attrs *lma;
192         struct lu_buf buf;
193         int cnt = 0;
194         int              rc;
195         ENTRY;
196
197         LASSERT(obj->oo_dn != NULL);
198
199         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_BTIME | LA_MODE |
200                         LA_TYPE | LA_SIZE | LA_UID | LA_GID | LA_FLAGS |
201                         LA_NLINK;
202
203         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
204         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
205         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
206         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(o), NULL, osa->btime, 16);
207         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
208         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
209         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
210         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
211         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
212         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
213         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
214
215         rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
216         if (rc)
217                 GOTO(out_sa, rc);
218
219 #ifdef ZFS_PROJINHERIT
220         if (o->od_projectused_dn && osa->flags & ZFS_PROJID) {
221                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(o),
222                                 &osa->projid, 8);
223                 if (rc)
224                         GOTO(out_sa, rc);
225
226                 la->la_projid = osa->projid;
227                 la->la_valid |= LA_PROJID;
228                 obj->oo_with_projid = 1;
229         } else {
230                 la->la_projid = ZFS_DEFAULT_PROJID;
231                 la->la_valid &= ~LA_PROJID;
232         }
233 #else
234         la->la_projid = 0;
235         la->la_valid &= ~LA_PROJID;
236 #endif
237
238         la->la_atime = osa->atime[0];
239         la->la_mtime = osa->mtime[0];
240         la->la_ctime = osa->ctime[0];
241         la->la_btime = osa->btime[0];
242         la->la_mode = osa->mode;
243         la->la_uid = osa->uid;
244         la->la_gid = osa->gid;
245         la->la_nlink = osa->nlink;
246         la->la_flags = attrs_zfs2fs(osa->flags);
247         la->la_size = osa->size;
248
249         /* Try to get extra flags from LMA */
250         lma = (struct lustre_mdt_attrs *)osd_oti_get(env)->oti_buf;
251         buf.lb_buf = lma;
252         buf.lb_len = sizeof(osd_oti_get(env)->oti_buf);
253         down_read(&obj->oo_guard);
254         rc = osd_xattr_get_lma(env, obj, &buf);
255         if (!rc) {
256                 lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
257                 obj->oo_lma_flags =
258                         lma_to_lustre_flags(lma->lma_incompat);
259         } else if (rc == -ENODATA ||
260                    !(S_ISDIR(la->la_mode) &&
261                      dt_object_exists(&obj->oo_dt))) {
262                 rc = 0;
263         }
264         up_read(&obj->oo_guard);
265
266         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
267                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
268                 if (rc)
269                         GOTO(out_sa, rc);
270                 la->la_rdev = osa->rdev;
271                 la->la_valid |= LA_RDEV;
272         }
273 out_sa:
274
275         RETURN(rc);
276 }
277
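/*
 * Resolve a DMU object id into a dnode pointer by taking a hold on its
 * bonus buffer; the hold is dropped later via osd_dnode_rele().
 */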
278 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
279 {
280         dmu_buf_t *db;
281         dmu_buf_impl_t *dbi;
282         int rc;
283
284         rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
285         if (rc)
286                 return rc;
287
288         dbi = (dmu_buf_impl_t *)db;
289         DB_DNODE_ENTER(dbi);
290         *dnp = DB_DNODE(dbi);
291         DB_DNODE_EXIT(dbi);
292         LASSERT(*dnp != NULL);
293
294         return 0;
295 }
296
297 /*
298  * Concurrency: no concurrent access is possible that early in object
299  * life-cycle.
300  */
301 struct lu_object *osd_object_alloc(const struct lu_env *env,
302                                    const struct lu_object_header *hdr,
303                                    struct lu_device *d)
304 {
305         struct osd_object *mo;
306
307         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
308         if (mo != NULL) {
309                 struct lu_object *l;
310                 struct lu_object_header *h;
311                 struct osd_device *o = osd_dev(d);
312
313                 l = &mo->oo_dt.do_lu;
314                 if (unlikely(o->od_in_init)) {
315                         OBD_ALLOC_PTR(h);
316                         if (!h) {
317                                 OBD_FREE_PTR(mo);
318                                 return NULL;
319                         }
320
321                         lu_object_header_init(h);
322                         lu_object_init(l, h, d);
323                         lu_object_add_top(h, l);
324                         mo->oo_header = h;
325                 } else {
326                         dt_object_init(&mo->oo_dt, NULL, d);
327                         mo->oo_header = NULL;
328                 }
329
330                 mo->oo_dt.do_ops = &osd_obj_ops;
331                 l->lo_ops = &osd_lu_obj_ops;
332                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
333                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
334                 init_rwsem(&mo->oo_sem);
335                 init_rwsem(&mo->oo_guard);
336                 rwlock_init(&mo->oo_attr_lock);
337                 mo->oo_destroy = OSD_DESTROY_NONE;
338                 return l;
339         } else {
340                 return NULL;
341         }
342 }
343
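/*
 * Grow the data block size of an OST object that has no data written yet,
 * so that it is at least PAGE_SIZE (capped by od_max_blksz), using a
 * private transaction; objects whose block size is already >= PAGE_SIZE
 * are left unchanged.
 */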
344 static void osd_obj_set_blksize(const struct lu_env *env,
345                                 struct osd_device *osd, struct osd_object *obj)
346 {
347         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
348         dmu_tx_t *tx;
349         dnode_t *dn = obj->oo_dn;
350         uint32_t blksz;
351         int rc = 0;
352         ENTRY;
353
354         LASSERT(!osd_oti_get(env)->oti_in_trans);
355
356         tx = dmu_tx_create(osd->od_os);
357         if (!tx) {
358                 CERROR("%s: fail to create tx to set blksize for "DFID"\n",
359                        osd->od_svname, PFID(fid));
360                 RETURN_EXIT;
361         }
362
363         dmu_tx_hold_bonus(tx, dn->dn_object);
364         rc = -dmu_tx_assign(tx, TXG_WAIT);
365         if (rc) {
366                 dmu_tx_abort(tx);
367                 CERROR("%s: fail to assign tx to set blksize for "DFID
368                        ": rc = %d\n", osd->od_svname, PFID(fid), rc);
369                 RETURN_EXIT;
370         }
371
372         down_write(&obj->oo_guard);
373         if (unlikely((1 << dn->dn_datablkshift) >= PAGE_SIZE))
374                 GOTO(out, rc = 1);
375
376         blksz = dn->dn_datablksz;
377         if (!is_power_of_2(blksz))
378                 blksz = size_roundup_power2(blksz);
379
380         if (blksz > osd->od_max_blksz)
381                 blksz = osd->od_max_blksz;
382         else if (blksz < PAGE_SIZE)
383                 blksz = PAGE_SIZE;
384         rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object, blksz, 0, tx);
385
386         GOTO(out, rc);
387
388 out:
389         up_write(&obj->oo_guard);
390         if (rc) {
391                 dmu_tx_abort(tx);
392                 if (unlikely(obj->oo_dn->dn_maxblkid > 0))
393                         rc = 1;
394                 if (rc < 0)
395                         CERROR("%s: fail to set blksize for "DFID": rc = %d\n",
396                                osd->od_svname, PFID(fid), rc);
397         } else {
398                 dmu_tx_commit(tx);
399                 CDEBUG(D_INODE, "%s: set blksize as %u for "DFID"\n",
400                        osd->od_svname, blksz, PFID(fid));
401         }
402 }
403
404 /*
405  * Concurrency: shouldn't matter.
406  */
407 static int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
408 {
409         struct osd_device       *osd = osd_obj2dev(obj);
410         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
411         int                      rc = 0;
412         ENTRY;
413
414         LASSERT(obj->oo_dn);
415
416         rc = osd_object_sa_init(obj, osd);
417         if (rc)
418                 RETURN(rc);
419
420         /* cache attrs in object */
421         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
422         if (rc)
423                 RETURN(rc);
424
425         if (likely(!fid_is_acct(fid))) {
426                 /* no body operations for accounting objects */
427                 obj->oo_dt.do_body_ops = &osd_body_ops;
428
429                 if (S_ISREG(obj->oo_attr.la_mode) &&
430                     obj->oo_dn->dn_maxblkid == 0 &&
431                     (1 << obj->oo_dn->dn_datablkshift) < PAGE_SIZE &&
432                     (fid_is_idif(fid) || fid_is_norm(fid) ||
433                      fid_is_echo(fid)) &&
434                     osd->od_is_ost && !osd->od_dt_dev.dd_rdonly)
435                         osd_obj_set_blksize(env, osd, obj);
436         }
437
438         /*
439          * initialize object before marking it existing
440          */
441         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
442
443         smp_mb();
444         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
445
446         RETURN(0);
447 }
448
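/*
 * Read and sanity-check the LMA xattr: fail with -EOPNOTSUPP on unsupported
 * incompat flags, with -EREMCHG if the FID stored in the LMA does not match
 * the object's own FID, and note PFID-in-LMA and remote-parent (agent)
 * objects.
 */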
449 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
450 {
451         struct osd_thread_info  *info = osd_oti_get(env);
452         struct lu_buf           buf;
453         int                     rc;
454         struct lustre_mdt_attrs *lma;
455         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
456         ENTRY;
457
458         BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
459         lma = (struct lustre_mdt_attrs *)info->oti_buf;
460         buf.lb_buf = lma;
461         buf.lb_len = sizeof(info->oti_buf);
462
463         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
464         if (rc > 0) {
465                 rc = 0;
466                 lustre_lma_swab(lma);
467                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
468                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
469                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
470                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
471                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
472                               PFID(rfid));
473                         rc = -EOPNOTSUPP;
474                 } else if (unlikely(!lu_fid_eq(rfid, &lma->lma_self_fid))) {
475                         CERROR("%s: FID-in-LMA "DFID" does not match the "
476                               "object self-fid "DFID"\n",
477                               osd_obj2dev(obj)->od_svname,
478                               PFID(&lma->lma_self_fid), PFID(rfid));
479                         rc = -EREMCHG;
480                 } else {
481                         struct osd_device *osd = osd_obj2dev(obj);
482
483                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
484                             osd->od_is_ost)
485                                 obj->oo_pfid_in_lma = 1;
486                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
487                             osd->od_remote_parent_dir != ZFS_NO_OBJECT)
488                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
489                 }
490         } else if (rc == -ENODATA) {
 491                 /* the LMA xattr hasn't been initialized yet */
492                 rc = 0;
493         }
494
495         RETURN(rc);
496 }
497
498 /**
 499  * Helper function to map the FID of an accounting object to its quota DMU dnode
500  */
501 static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
502                                   const struct lu_fid *fid)
503 {
504         dnode_t *dn = NULL;
505
506         LASSERT(fid_is_acct(fid));
507
508         switch (fid_oid(fid)) {
509         case ACCT_USER_OID:
510                 dn = osd->od_userused_dn;
511                 break;
512         case ACCT_GROUP_OID:
513                 dn = osd->od_groupused_dn;
514                 break;
515 #ifdef ZFS_PROJINHERIT
516         case ACCT_PROJECT_OID:
517                 dn = osd->od_projectused_dn;
518                 break;
519 #endif
520         default:
521                 break;
522         }
523
524         return dn;
525 }
526
527 /*
528  * Concurrency: no concurrent access is possible that early in object
529  * life-cycle.
530  */
531 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
532                            const struct lu_object_conf *conf)
533 {
534         struct osd_object *obj = osd_obj(l);
535         struct osd_device *osd = osd_obj2dev(obj);
536         const struct lu_fid *fid = lu_object_fid(l);
537         struct lustre_scrub *scrub = &osd->od_scrub;
538         struct osd_thread_info *info = osd_oti_get(env);
539         struct luz_direntry *zde = &info->oti_zde;
540         struct osd_idmap_cache *idc;
541         char *name = info->oti_str;
542         uint64_t oid;
543         int rc = 0;
544         int rc1;
545         bool remote = false;
546         ENTRY;
547
548         LASSERT(osd_invariant(obj));
549
550         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
551                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
552                 l->lo_header->loh_attr |= LOHA_EXISTS;
553
554                 GOTO(out, rc = 0);
555         }
556
557         if (conf && conf->loc_flags & LOC_F_NEW)
558                 GOTO(out, rc = 0);
559
560         if (unlikely(fid_is_acct(fid))) {
561                 obj->oo_dn = osd_quota_fid2dmu(osd, fid);
562                 if (obj->oo_dn) {
563                         obj->oo_dt.do_index_ops = &osd_acct_index_ops;
564                         l->lo_header->loh_attr |= LOHA_EXISTS;
565                 }
566
567                 GOTO(out, rc = 0);
568         }
569
570         idc = osd_idc_find(env, osd, fid);
571         if (idc && !idc->oic_remote && idc->oic_dnode != ZFS_NO_OBJECT) {
572                 oid = idc->oic_dnode;
573                 goto zget;
574         }
575
576         rc = -ENOENT;
577         if (!list_empty(&osd->od_scrub.os_inconsistent_items))
578                 rc = osd_oii_lookup(osd, fid, &oid);
579
580         if (rc)
581                 rc = osd_fid_lookup(env, osd, fid, &oid);
582
583         if (rc == -ENOENT) {
584                 if (likely(!(fid_is_norm(fid) || fid_is_igif(fid)) ||
585                            fid_is_on_ost(env, osd, fid) ||
586                            !zfs_test_bit(osd_oi_fid2idx(osd, fid),
587                                          scrub->os_file.sf_oi_bitmap)))
588                         GOTO(out, rc = 0);
589
590                 rc = -EREMCHG;
591                 goto trigger;
592         }
593
594         if (rc)
595                 GOTO(out, rc);
596
597 zget:
598         LASSERT(obj->oo_dn == NULL);
599
600         rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
601         /* EEXIST will be returned if object is being deleted in ZFS */
602         if (rc == -EEXIST)
603                 GOTO(out, rc = 0);
604
605         if (rc) {
606                 CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
607                        osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
608                 GOTO(out, rc);
609         }
610
611         rc = osd_object_init0(env, obj);
612         if (rc)
613                 GOTO(out, rc);
614
615         if (unlikely(obj->oo_header))
616                 GOTO(out, rc = 0);
617
618         rc = osd_check_lma(env, obj);
619         if ((!rc && !remote) || (rc != -EREMCHG))
620                 GOTO(out, rc);
621
622 trigger:
 623         /* We still have a chance to get a valid dnode: an object that is
 624          * referenced by a remote name entry is linked on the local MDT under
 625          * the dir /REMOTE_PARENT_DIR with its FID string as the name.
 626          *
 627          * During the OI scrub, if we cannot find the OI mapping, we may still
 628          * be able to map the FID to the local OID by looking up the dir
 629          * /REMOTE_PARENT_DIR. */
630         if (!remote && !fid_is_on_ost(env, osd, fid)) {
631                 osd_fid2str(name, fid, sizeof(info->oti_str));
632                 rc = osd_zap_lookup(osd, osd->od_remote_parent_dir,
633                                     NULL, name, 8, 3, (void *)zde);
634                 if (!rc) {
635                         oid = zde->lzd_reg.zde_dnode;
636                         osd_dnode_rele(obj->oo_dn);
637                         obj->oo_dn = NULL;
638                         remote = true;
639                         goto zget;
640                 }
641         }
642
 643         /* Handle the case where someone has already triggered the OI scrub. */
644         if (scrub->os_running) {
645                 if (!rc) {
646                         LASSERT(remote);
647
648                         lu_object_set_agent_entry(l);
649                         osd_oii_insert(env, osd, fid, oid, false);
650                 } else {
651                         rc = -EINPROGRESS;
652                 }
653
654                 GOTO(out, rc);
655         }
656
 657         /* Handle the case where triggering OI scrub automatically is not allowed. */
658         if (osd->od_auto_scrub_interval == AS_NEVER)
659                 GOTO(out, rc);
660
 661         /* It is up to us to trigger the OI scrub. */
662         rc1 = osd_scrub_start(env, osd, SS_CLEAR_DRYRUN |
663                               SS_CLEAR_FAILOUT | SS_AUTO_FULL);
664         LCONSOLE_WARN("%s: trigger OI scrub by RPC for the "DFID": rc = %d\n",
665                       osd_name(osd), PFID(fid), rc1);
666         if (!rc) {
667                 LASSERT(remote);
668
669                 lu_object_set_agent_entry(l);
670                 if (!rc1)
671                         osd_oii_insert(env, osd, fid, oid, false);
672         } else {
673                 if (!rc1)
674                         rc = -EINPROGRESS;
675                 else
676                         rc = -EREMCHG;
677         }
678
679         GOTO(out, rc);
680
681 out:
682         RETURN(rc);
683 }
684
685 /*
686  * Concurrency: no concurrent access is possible that late in object
687  * life-cycle.
688  */
689 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
690 {
691         struct osd_object *obj = osd_obj(l);
692         struct lu_object_header *h = obj->oo_header;
693
694         LASSERT(osd_invariant(obj));
695
696         dt_object_fini(&obj->oo_dt);
697         /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
698         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
699         if (unlikely(h))
700                 lu_object_header_free(h);
701 }
702
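/*
 * Add the object to the transaction's list of objects to be destroyed
 * asynchronously; returns -EBUSY if it is already on a list.
 */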
703 static int
704 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
705 {
706         int rc = -EBUSY;
707
708         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
709
710         /* the object is supposed to be exclusively locked by
711          * the caller (osd_destroy()), while the transaction
712          * (oh) is per-thread and not shared */
713         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
714                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
715                 rc = 0;
716         }
717
718         return rc;
719 }
720
721 /* Default to max data size covered by a level-1 indirect block */
722 static unsigned long osd_sync_destroy_max_size =
723         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
724 module_param(osd_sync_destroy_max_size, ulong, 0444);
725 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
726
727 static inline void
728 osd_object_set_destroy_type(struct osd_object *obj)
729 {
730         /*
731          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
732          * only once and use it consistently thereafter.
733          */
734         down_write(&obj->oo_guard);
735         if (obj->oo_destroy == OSD_DESTROY_NONE) {
736                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
737                         obj->oo_destroy = OSD_DESTROY_SYNC;
738                 else /* Larger objects are destroyed asynchronously */
739                         obj->oo_destroy = OSD_DESTROY_ASYNC;
740         }
741         up_write(&obj->oo_guard);
742 }
743
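/*
 * Declare the transaction credits needed to destroy an object: removal from
 * the OI mapping, xattr destruction, quota release, and either an immediate
 * free (sync destroy) or an insertion into the unlinked zap (async destroy).
 */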
744 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
745                                struct thandle *th)
746 {
747         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
748         struct osd_object       *obj = osd_dt_obj(dt);
749         struct osd_device       *osd = osd_obj2dev(obj);
750         struct osd_thandle      *oh;
751         dnode_t *dn;
752         int                      rc;
753         uint64_t                 zapid;
754         ENTRY;
755
756         LASSERT(th != NULL);
757         LASSERT(dt_object_exists(dt));
758
759         oh = container_of(th, struct osd_thandle, ot_super);
760         LASSERT(oh->ot_tx != NULL);
761
762         dmu_tx_mark_netfree(oh->ot_tx);
763
764         /* declare that we'll remove object from fid-dnode mapping */
765         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
766         osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
767
768         osd_declare_xattrs_destroy(env, obj, oh);
769
770         /* one less inode */
771         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
772                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
773                                -1, oh, NULL, OSD_QID_INODE);
774         if (rc)
775                 RETURN(rc);
776
777         /* data to be truncated */
778         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
779                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
780                                0, oh, NULL, OSD_QID_BLK);
781         if (rc)
782                 RETURN(rc);
783
784         osd_object_set_destroy_type(obj);
785         if (obj->oo_destroy == OSD_DESTROY_SYNC)
786                 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
787                                  0, DMU_OBJECT_END);
788         else
789                 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
790                                 osd->od_unlinked, TRUE, NULL);
791
 792         /* remove agent entry (if any) from remote parent */
793         if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
794                 osd_tx_hold_zap(oh->ot_tx, osd->od_remote_parent_dir,
795                                 NULL, FALSE, NULL);
796
797         /* will help to find FID->ino when this object is being
798          * added to PENDING/ */
799         osd_idc_find_and_init(env, osd, obj);
800
801         RETURN(0);
802 }
803
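/*
 * Destroy the object within the caller's transaction: destroy its xattrs,
 * drop any agent entry from the remote parent, free the dnode (or stash it
 * in the unlinked zap for async destroy), and finally remove the OI mapping.
 */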
804 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
805                        struct thandle *th)
806 {
807         struct osd_thread_info  *info = osd_oti_get(env);
808         char                    *buf = info->oti_str;
809         struct osd_object       *obj = osd_dt_obj(dt);
810         struct osd_device       *osd = osd_obj2dev(obj);
811         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
812         struct osd_thandle      *oh;
813         int                      rc;
814         uint64_t                 oid, zapid;
815         dnode_t *zdn;
816         ENTRY;
817
818         down_write(&obj->oo_guard);
819
820         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
821                 GOTO(out, rc = -ENOENT);
822
823         LASSERT(obj->oo_dn != NULL);
824
825         oh = container_of(th, struct osd_thandle, ot_super);
826         LASSERT(oh != NULL);
827         LASSERT(oh->ot_tx != NULL);
828
 829         /* remove obj ref from its index dir (which one depends on the FID) */
830         zapid = osd_get_name_n_idx(env, osd, fid, buf,
831                                    sizeof(info->oti_str), &zdn);
832         rc = osd_xattrs_destroy(env, obj, oh);
833         if (rc) {
834                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
835                        osd->od_svname, buf, rc);
836                 GOTO(out, rc);
837         }
838
839         if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
840                 rc = osd_delete_from_remote_parent(env, osd, obj, oh, true);
841                 if (rc)
842                         GOTO(out, rc);
843         }
844
845         oid = obj->oo_dn->dn_object;
846         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
847                 /* this may happen if the destroy wasn't declared
848                  * e.g. when the object is created and then destroyed
849                  * in the same transaction - we don't need additional
850                  * space for destroy specifically */
851                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
852                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
853                 if (rc)
854                         CERROR("%s: failed to free %s %llu: rc = %d\n",
855                                osd->od_svname, buf, oid, rc);
856         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
857                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
858                 if (rc)
859                         CERROR("%s: failed to free %s %llu: rc = %d\n",
860                                osd->od_svname, buf, oid, rc);
861         } else { /* asynchronous destroy */
862                 char *key = info->oti_key;
863
864                 rc = osd_object_unlinked_add(obj, oh);
865                 if (rc)
866                         GOTO(out, rc);
867
868                 snprintf(key, sizeof(info->oti_key), "%llx", oid);
869                 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
870                                  osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
871                 if (rc)
872                         CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
873                                osd->od_svname, buf, oid, rc);
874         }
875
 876         /* Remove the OI mapping after the destroy, to handle the race with
 877          * OI scrub, which may insert a missed OI mapping in the interval. */
878         rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
879         if (unlikely(rc == -ENOENT))
880                 rc = 0;
881         if (rc)
882                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
883                        osd->od_svname, buf, rc);
884
885         GOTO(out, rc);
886
887 out:
888         /* not needed in the cache anymore */
889         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
890         if (rc == 0)
891                 obj->oo_destroyed = 1;
892         up_write(&obj->oo_guard);
893         RETURN (0);
894 }
895
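/*
 * Called when the object leaves the cache: release the SA handle, the cached
 * SA xattr nvlist and the dnode hold (accounting objects keep their dnodes,
 * which belong to the device).
 */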
896 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
897 {
898         struct osd_object *obj = osd_obj(l);
899         const struct lu_fid *fid = lu_object_fid(l);
900
901         if (obj->oo_dn) {
902                 if (likely(!fid_is_acct(fid))) {
903                         osd_object_sa_fini(obj);
904                         if (obj->oo_sa_xattr) {
905                                 nvlist_free(obj->oo_sa_xattr);
906                                 obj->oo_sa_xattr = NULL;
907                         }
908                         osd_dnode_rele(obj->oo_dn);
909                         list_del(&obj->oo_sa_linkage);
910                 }
911                 obj->oo_dn = NULL;
912         }
913 }
914
915 /*
916  * Concurrency: ->loo_object_release() is called under site spin-lock.
917  */
918 static void osd_object_release(const struct lu_env *env,
919                                struct lu_object *l)
920 {
921 }
922
923 /*
924  * Concurrency: shouldn't matter.
925  */
926 static int osd_object_print(const struct lu_env *env, void *cookie,
927                             lu_printer_t p, const struct lu_object *l)
928 {
929         struct osd_object *o = osd_obj(l);
930
931         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
932 }
933
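/*
 * dt object lock operations: serialize access via the per-object oo_sem
 * rwsem, with the lockdep nesting level supplied by the caller as "role".
 */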
934 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
935                           unsigned role)
936 {
937         struct osd_object *obj = osd_dt_obj(dt);
938
939         LASSERT(osd_invariant(obj));
940
941         down_read_nested(&obj->oo_sem, role);
942 }
943
944 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
945                            unsigned role)
946 {
947         struct osd_object *obj = osd_dt_obj(dt);
948
949         LASSERT(osd_invariant(obj));
950
951         down_write_nested(&obj->oo_sem, role);
952 }
953
954 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
955 {
956         struct osd_object *obj = osd_dt_obj(dt);
957
958         LASSERT(osd_invariant(obj));
959         up_read(&obj->oo_sem);
960 }
961
962 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
963 {
964         struct osd_object *obj = osd_dt_obj(dt);
965
966         LASSERT(osd_invariant(obj));
967         up_write(&obj->oo_sem);
968 }
969
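/*
 * Report whether oo_sem is write-locked: return 0 if the write lock could be
 * taken here (i.e. nobody holds it), 1 otherwise.
 */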
970 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
971 {
972         struct osd_object *obj = osd_dt_obj(dt);
973         int rc = 1;
974
975         LASSERT(osd_invariant(obj));
976
977         if (down_write_trylock(&obj->oo_sem)) {
978                 rc = 0;
979                 up_write(&obj->oo_sem);
980         }
981         return rc;
982 }
983
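/*
 * Return the attributes cached in the object, adding orphan/encrypt flags
 * taken from the LMA, block/blksize accounting from the SA layer and, for
 * directories, a size and dirent count derived from the ZAP.
 */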
984 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
985                         struct lu_attr *attr)
986 {
987         struct osd_object *obj = osd_dt_obj(dt);
988         struct osd_device *osd = osd_obj2dev(obj);
989         uint64_t blocks;
990         uint32_t blksize;
991         int rc = 0;
992
993         down_read(&obj->oo_guard);
994
995         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
996                 GOTO(out, rc = -ENOENT);
997
998         if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
999                 GOTO(out, rc = 0);
1000
1001         LASSERT(osd_invariant(obj));
1002         LASSERT(obj->oo_dn);
1003
1004         read_lock(&obj->oo_attr_lock);
1005         *attr = obj->oo_attr;
1006         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
1007                 attr->la_valid |= LA_FLAGS;
1008                 attr->la_flags |= LUSTRE_ORPHAN_FL;
1009         }
1010         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
1011                 attr->la_valid |= LA_FLAGS;
1012                 attr->la_flags |= LUSTRE_ENCRYPT_FL;
1013         }
1014         read_unlock(&obj->oo_attr_lock);
1015         if (attr->la_valid & LA_FLAGS && attr->la_flags & LUSTRE_ORPHAN_FL)
1016                 CDEBUG(D_INFO, "%s: set orphan flag on "DFID" (%llx/%x)\n",
1017                        osd_obj2dev(obj)->od_svname,
1018                        PFID(lu_object_fid(&dt->do_lu)),
1019                        attr->la_valid, obj->oo_lma_flags);
1020
 1021         /* With ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
 1022          * from within sa_object_size() can block on a mutex, so
 1023          * we can't call sa_object_size() while holding the rwlock */
1024         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
1025         /* we do not control size of indices, so always calculate
1026          * it from number of blocks reported by DMU */
1027         if (S_ISDIR(attr->la_mode)) {
1028                 attr->la_size = 512 * blocks;
1029                 rc = -zap_count(osd->od_os, obj->oo_dn->dn_object,
1030                                 &attr->la_dirent_count);
1031         }
 1032         /* Block size may not be set; suggest maximal I/O transfers. */
1033         if (blksize == 0)
1034                 blksize = osd_spa_maxblocksize(
1035                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
1036
1037         attr->la_blksize = blksize;
1038         attr->la_blocks = blocks;
1039         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
1040
1041 out:
1042         up_read(&obj->oo_guard);
1043         return rc;
1044 }
1045
 1046 /* Simple wrapper on top of the qsd API which implements quota transfer for
 1047  * osd setattr needs. As a reminder, only the root user can change ownership
 1048  * of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
1049 static inline int qsd_transfer(const struct lu_env *env,
1050                                struct qsd_instance *qsd,
1051                                struct lquota_trans *trans, int qtype,
1052                                __u64 orig_id, __u64 new_id, __u64 bspace,
1053                                struct lquota_id_info *qi, bool ignore_edquot)
1054 {
1055         int     rc;
1056
1057         if (unlikely(qsd == NULL))
1058                 return 0;
1059
1060         LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
1061         qi->lqi_type = qtype;
1062
1063         /* inode accounting */
1064         qi->lqi_is_blk = false;
1065
1066         /* one more inode for the new owner ... */
1067         qi->lqi_id.qid_uid = new_id;
1068         qi->lqi_space      = 1;
1069         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1070         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1071                 rc = 0;
1072         if (rc)
1073                 return rc;
1074
1075         /* and one less inode for the current id */
 1076         qi->lqi_id.qid_uid = orig_id;
1077         qi->lqi_space      = -1;
1078         /* can't get EDQUOT when reducing usage */
1079         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1080         if (rc == -EINPROGRESS)
1081                 rc = 0;
1082         if (rc)
1083                 return rc;
1084
1085         /* block accounting */
1086         qi->lqi_is_blk = true;
1087
1088         /* more blocks for the new owner ... */
1089         qi->lqi_id.qid_uid = new_id;
1090         qi->lqi_space      = bspace;
1091         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1092         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1093                 rc = 0;
1094         if (rc)
1095                 return rc;
1096
1097         /* and finally less blocks for the current owner */
1098         qi->lqi_id.qid_uid = orig_id;
1099         qi->lqi_space      = -bspace;
1100         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1101         /* can't get EDQUOT when reducing usage */
1102         if (rc == -EINPROGRESS)
1103                 rc = 0;
1104         return rc;
1105 }
1106
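/*
 * Declare an attribute update: hold the bonus buffer unless this dnode is
 * already part of the transaction, and reserve quota for any UID, GID or
 * project ID transfer implied by the new attributes.
 */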
1107 static int osd_declare_attr_set(const struct lu_env *env,
1108                                 struct dt_object *dt,
1109                                 const struct lu_attr *attr,
1110                                 struct thandle *handle)
1111 {
1112         struct osd_thread_info  *info = osd_oti_get(env);
1113         struct osd_object       *obj = osd_dt_obj(dt);
1114         struct osd_device       *osd = osd_obj2dev(obj);
1115         dmu_tx_hold_t           *txh;
1116         struct osd_thandle      *oh;
1117         uint64_t                 bspace;
1118         uint32_t                 blksize;
1119         int                      rc = 0;
1120         bool                     found;
1121         ENTRY;
1122
1123
1124         LASSERT(handle != NULL);
1125         LASSERT(osd_invariant(obj));
1126
1127         oh = container_of(handle, struct osd_thandle, ot_super);
1128
1129         down_read(&obj->oo_guard);
1130         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1131                 GOTO(out, rc = 0);
1132
1133         LASSERT(obj->oo_sa_hdl != NULL);
1134         LASSERT(oh->ot_tx != NULL);
1135         /* regular attributes are part of the bonus buffer */
1136         /* let's check whether this object is already part of
 1137          * the transaction */
1138         found = false;
1139         for (txh = list_head(&oh->ot_tx->tx_holds); txh;
1140              txh = list_next(&oh->ot_tx->tx_holds, txh)) {
1141                 if (txh->txh_dnode == NULL)
1142                         continue;
1143                 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
1144                         continue;
 1145                 /* this object is already part of the transaction;
 1146                  * we don't need to declare the bonus again */
1147                 found = true;
1148                 break;
1149         }
1150         if (!found)
1151                 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
1152         if (oh->ot_tx->tx_err != 0)
1153                 GOTO(out, rc = -oh->ot_tx->tx_err);
1154
1155         if (attr && attr->la_valid & LA_FLAGS) {
1156                 /* LMA is usually a part of bonus, no need to declare
1157                  * anything else */
1158         }
1159
1160         if (attr && (attr->la_valid & (LA_UID | LA_GID | LA_PROJID))) {
1161                 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
1162                 bspace = toqb(bspace * 512);
1163
1164                 CDEBUG(D_QUOTA,
1165                        "%s: enforce quota on UID %u, GID %u, the quota space is %lld (%u)\n",
1166                        osd->od_svname,
1167                        attr->la_uid, attr->la_gid, bspace, blksize);
1168         }
1169
1170         if (attr && attr->la_valid & LA_UID) {
1171                 /* quota enforcement for user */
1172                 if (attr->la_uid != obj->oo_attr.la_uid) {
1173                         rc = qsd_transfer(env, osd_def_qsd(osd),
1174                                           &oh->ot_quota_trans, USRQUOTA,
1175                                           obj->oo_attr.la_uid, attr->la_uid,
1176                                           bspace, &info->oti_qi, true);
1177                         if (rc)
1178                                 GOTO(out, rc);
1179                 }
1180         }
1181         if (attr && attr->la_valid & LA_GID) {
1182                 /* quota enforcement for group */
1183                 if (attr->la_gid != obj->oo_attr.la_gid) {
1184                         rc = qsd_transfer(env, osd_def_qsd(osd),
1185                                           &oh->ot_quota_trans, GRPQUOTA,
1186                                           obj->oo_attr.la_gid, attr->la_gid,
1187                                           bspace, &info->oti_qi,
1188                                           !(attr->la_flags &
1189                                                         LUSTRE_SET_SYNC_FL));
1190                         if (rc)
1191                                 GOTO(out, rc);
1192                 }
1193         }
1194 #ifdef ZFS_PROJINHERIT
1195         if (attr && attr->la_valid & LA_PROJID) {
1196                 /* quota enforcement for project */
1197                 if (attr->la_projid != obj->oo_attr.la_projid) {
1198                         if (!osd->od_projectused_dn)
1199                                 GOTO(out, rc = -EOPNOTSUPP);
1200
 1201                         /* Usually, if project quota is upgradable for the
 1202                          * device, then the upgrade will be done before or when
 1203                          * mounting the device. So by the time we get here, this
 1204                          * object should already have the project ID attribute
 1205                          * (which is zero by default).  Otherwise, something went
 1206                          * wrong during the former upgrade, so let's return a
 1207                          * failure to report that.
 1208                          *
 1209                          * Please note that, unlike other attributes, you can
 1210                          * NOT simply set the project ID attribute in such a
 1211                          * case, because adding (NOT changing) the project ID
 1212                          * attribute requires changing the object's attribute
 1213                          * layout to match the ZFS backend quota accounting
 1214                          * requirement. */
1215                         if (unlikely(!obj->oo_with_projid))
1216                                 GOTO(out, rc = -ENXIO);
1217
1218                         rc = qsd_transfer(env, osd_def_qsd(osd),
1219                                           &oh->ot_quota_trans, PRJQUOTA,
1220                                           obj->oo_attr.la_projid,
1221                                           attr->la_projid, bspace,
1222                                           &info->oti_qi, true);
1223                         if (rc)
1224                                 GOTO(out, rc);
1225                 }
1226         }
1227 #endif
1228 out:
1229         up_read(&obj->oo_guard);
1230         RETURN(rc);
1231 }
1232
1233 /*
1234  * Set the attributes of an object
1235  *
1236  * The transaction passed to this routine must have
1237  * dmu_tx_hold_bonus(tx, oid) called and then assigned
1238  * to a transaction group.
1239  */
1240 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
1241                         const struct lu_attr *la, struct thandle *handle)
1242 {
1243         struct osd_thread_info  *info = osd_oti_get(env);
1244         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
1245         struct osd_object       *obj = osd_dt_obj(dt);
1246         struct osd_device       *osd = osd_obj2dev(obj);
1247         struct osd_thandle      *oh;
1248         struct osa_attr         *osa = &info->oti_osa;
1249         __u64                    valid = la->la_valid;
1250         int                      cnt;
1251         int                      rc = 0;
1252
1253         ENTRY;
1254
1255         down_read(&obj->oo_guard);
1256         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1257                 GOTO(out, rc = -ENOENT);
1258
1259         LASSERT(handle != NULL);
1260         LASSERT(osd_invariant(obj));
1261         LASSERT(obj->oo_sa_hdl);
1262
1263         oh = container_of(handle, struct osd_thandle, ot_super);
1264         /* Assert that the transaction has been assigned to a
1265            transaction group. */
1266         LASSERT(oh->ot_tx->tx_txg != 0);
1267
1268         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) && !osd->od_is_ost) {
1269                 struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
1270                 char *buf = info->oti_str;
1271                 dnode_t *zdn = NULL;
1272                 uint64_t zapid;
1273
1274                 zapid = osd_get_name_n_idx(env, osd, lu_object_fid(&dt->do_lu),
1275                                            buf, sizeof(info->oti_str), &zdn);
1276                 rc = osd_zap_lookup(osd, zapid, zdn, buf, 8,
1277                                     sizeof(*zde) / 8, zde);
1278                 if (!rc) {
1279                         zde->zde_dnode -= 1;
1280                         rc = -zap_update(osd->od_os, zapid, buf, 8,
1281                                          sizeof(*zde) / 8, zde, oh->ot_tx);
1282                 }
1283                 if (rc > 0)
1284                         rc = 0;
1285                 GOTO(out, rc);
1286         }
1287
 1288         /* Only allow setting the size for regular files */
1289         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
1290                 valid &= ~(LA_SIZE | LA_BLOCKS);
1291
1292         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
1293                 valid &= ~LA_CTIME;
1294
1295         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
1296                 valid &= ~LA_MTIME;
1297
1298         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
1299                 valid &= ~LA_ATIME;
1300
1301         if (valid == 0)
1302                 GOTO(out, rc = 0);
1303
1304         if (valid & LA_FLAGS) {
1305                 struct lustre_mdt_attrs *lma;
1306                 struct lu_buf buf;
1307                 int size = 0;
1308
1309                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
1310                         LASSERT(!obj->oo_pfid_in_lma);
1311                         BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
1312                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
1313                         buf.lb_buf = lma;
1314                         buf.lb_len = sizeof(info->oti_buf);
1315
 1316                         /* Please do NOT call osd_xattr_get() directly; that
 1317                          * would cause a recursive down_read() on oo_guard. */
1318                         rc = osd_xattr_get_internal(env, obj, &buf,
1319                                                     XATTR_NAME_LMA, &size);
1320                         if (!rc && unlikely(size < sizeof(*lma))) {
1321                                 rc = -EINVAL;
1322                         } else if (!rc) {
1323                                 lma->lma_incompat =
1324                                         le32_to_cpu(lma->lma_incompat);
1325                                 lma->lma_incompat |=
1326                                         lustre_to_lma_flags(la->la_flags);
1327                                 lma->lma_incompat =
1328                                         cpu_to_le32(lma->lma_incompat);
1329                                 buf.lb_buf = lma;
1330                                 buf.lb_len = sizeof(*lma);
1331                                 rc = osd_xattr_set_internal(env, obj, &buf,
1332                                                             XATTR_NAME_LMA,
1333                                                             LU_XATTR_REPLACE,
1334                                                             oh);
1335                         }
1336                         if (rc < 0) {
1337                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
1338                                        osd->od_svname, rc);
1339                                 GOTO(out, rc);
1340                         } else {
1341                                 obj->oo_lma_flags =
1342                                         la->la_flags & LUSTRE_LMA_FL_MASKS;
1343                         }
1344                 }
1345         }
1346
1347         write_lock(&obj->oo_attr_lock);
1348         cnt = 0;
1349
1350         if (valid & LA_PROJID) {
1351 #ifdef ZFS_PROJINHERIT
1352                 if (osd->od_projectused_dn) {
1353                         LASSERT(obj->oo_with_projid);
1354
1355                         osa->projid = obj->oo_attr.la_projid = la->la_projid;
1356                         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1357                                          &osa->projid, 8);
1358                 } else
1359 #endif
1360                         valid &= ~LA_PROJID;
1361         }
1362
1363         if (valid & LA_ATIME) {
1364                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1365                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1366                                  osa->atime, 16);
1367         }
1368         if (valid & LA_MTIME) {
1369                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1370                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1371                                  osa->mtime, 16);
1372         }
1373         if (valid & LA_CTIME) {
1374                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1375                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1376                                  osa->ctime, 16);
1377         }
1378         if (valid & LA_MODE) {
1379                 /* mode is stored along with type, so read it first */
1380                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1381                         (la->la_mode & ~S_IFMT);
1382                 osa->mode = obj->oo_attr.la_mode;
1383                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1384                                  &osa->mode, 8);
1385         }
1386         if (valid & LA_SIZE) {
1387                 osa->size = obj->oo_attr.la_size = la->la_size;
1388                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1389                                  &osa->size, 8);
1390         }
1391         if (valid & LA_NLINK) {
1392                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1393                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1394                                  &osa->nlink, 8);
1395         }
1396         if (valid & LA_RDEV) {
1397                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1398                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1399                                  &osa->rdev, 8);
1400         }
1401         if (valid & LA_FLAGS) {
1402                 osa->flags = attrs_fs2zfs(la->la_flags);
1403                 /* many flags are not supported by zfs, so ensure a good cached
1404                  * copy */
1405                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1406 #ifdef ZFS_PROJINHERIT
1407                 if (obj->oo_with_projid && osd->od_projectused_dn)
1408                         osa->flags |= ZFS_PROJID;
1409 #endif
1410                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1411                                  &osa->flags, 8);
1412         }
1413         if (valid & LA_UID) {
1414                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1415                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1416                                  &osa->uid, 8);
1417         }
1418         if (valid & LA_GID) {
1419                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1420                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1421                                  &osa->gid, 8);
1422         }
1423         obj->oo_attr.la_valid |= valid;
1424         write_unlock(&obj->oo_attr_lock);
1425
1426         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
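        /* push all of the updated attributes to disk in a single SA bulk update */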
1427         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1428
1429 out:
1430         up_read(&obj->oo_guard);
1431         RETURN(rc);
1432 }
1433
1434 /*
1435  * Object creation.
1436  *
1437  * XXX temporary solution.
1438  */
1439
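/*
 * Initialize the allocation hint used at object creation: remember the
 * parent and the child mode, and warm up the FID->dnode cache for a local
 * parent so that the subsequent dt_insert("..") can resolve it cheaply.
 */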
1440 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1441                         struct dt_object *parent, struct dt_object *child,
1442                         umode_t child_mode)
1443 {
1444         LASSERT(ah);
1445
1446         ah->dah_parent = parent;
1447         ah->dah_mode = child_mode;
1448
1449         if (parent != NULL && !dt_object_remote(parent)) {
1450                 /* will help to find FID->ino at dt_insert("..") */
1451                 struct osd_object *pobj = osd_dt_obj(parent);
1452
1453                 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1454         }
1455 }
1456
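/*
 * Declare the credits needed to create an object: a new dnode with its
 * system attributes, a new ZAP for directories/indices, an insertion into
 * the FID mapping ZAP, and one inode worth of quota for the new owner.
 */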
1457 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1458                               struct lu_attr *attr,
1459                               struct dt_allocation_hint *hint,
1460                               struct dt_object_format *dof,
1461                               struct thandle *handle)
1462 {
1463         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1464         struct osd_object       *obj = osd_dt_obj(dt);
1465         struct osd_device       *osd = osd_obj2dev(obj);
1466         struct osd_thandle      *oh;
1467         uint64_t                 zapid;
1468         dnode_t                 *dn;
1469         int                      rc, dnode_size;
1470         ENTRY;
1471
1472         LASSERT(dof);
1473
1474         switch (dof->dof_type) {
1475                 case DFT_REGULAR:
1476                 case DFT_SYM:
1477                 case DFT_NODE:
1478                         if (obj->oo_dt.do_body_ops == NULL)
1479                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1480                         break;
1481                 default:
1482                         break;
1483         }
1484
1485         LASSERT(handle != NULL);
1486         oh = container_of(handle, struct osd_thandle, ot_super);
1487         LASSERT(oh->ot_tx != NULL);
1488
1489         /* this is the minimum set of EAs on every Lustre object */
1490         obj->oo_ea_in_bonus = OSD_BASE_EA_IN_BONUS;
1491         /* reserve 32 bytes for extra stuff like ACLs */
1492         dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1493
1494         switch (dof->dof_type) {
1495                 case DFT_DIR:
1496                         dt->do_index_ops = &osd_dir_ops;
1497                         /* fallthrough */
1498                 case DFT_INDEX:
1499                         /* for zap create */
1500                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1501                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1502                         break;
1503                 case DFT_REGULAR:
1504                 case DFT_SYM:
1505                 case DFT_NODE:
1506                         /* first, we'll create the new object */
1507                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1508                         break;
1509
1510                 default:
1511                         LBUG();
1512                         break;
1513         }
1514
1515         /* and we'll add it to the appropriate FID mapping index */
1516         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1517         osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1518
1519         /* will help to find FID->ino mapping at dt_insert() */
1520         osd_idc_find_and_init(env, osd, obj);
1521
1522         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid,
1523                                attr->la_projid, 1, oh, NULL, OSD_QID_INODE);
1524
1525         RETURN(rc);
1526 }
1527
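/*
 * Write the initial set of ZPL system attributes for a new dnode and,
 * if xattrs were supplied, pack them into the DXATTR SA as well.
 */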
1528 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1529                     struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
1530                     struct lu_attr *la, uint64_t parent,
1531                     nvlist_t *xattr)
1532 {
1533         sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1534         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1535         uint64_t gen;
1536         inode_timespec_t now;
1537         int cnt;
1538         int rc;
1539         char *dxattr = NULL;
1540         size_t sa_size;
1541
1542
1543         LASSERT(sa_hdl);
1544
1545         gen = dmu_tx_get_txg(tx);
1546         gethrestime(&now);
1547         ZFS_TIME_ENCODE(&now, osa->btime);
1548
1549         osa->atime[0] = la->la_atime;
1550         osa->ctime[0] = la->la_ctime;
1551         osa->mtime[0] = la->la_mtime;
1552         osa->mode = la->la_mode;
1553         osa->uid = la->la_uid;
1554         osa->gid = la->la_gid;
1555         osa->rdev = la->la_rdev;
1556         osa->nlink = la->la_nlink;
1557         if (la->la_valid & LA_FLAGS)
1558                 osa->flags = attrs_fs2zfs(la->la_flags);
1559         else
1560                 osa->flags = 0;
1561         osa->size  = la->la_size;
1562 #ifdef ZFS_PROJINHERIT
1563         if (osd->od_projectused_dn) {
1564                 if (la->la_valid & LA_PROJID)
1565                         osa->projid = la->la_projid;
1566                 else
1567                         osa->projid = ZFS_DEFAULT_PROJID;
1568                 osa->flags |= ZFS_PROJID;
1569                 if (obj)
1570                         obj->oo_with_projid = 1;
1571         } else {
1572                 osa->flags &= ~ZFS_PROJID;
1573         }
1574 #endif
1575
1576         /*
1577          * we need to create all of the SAs below upon object creation.
1578          *
1579          * XXX The attribute order matters since the accounting callback relies
1580          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1581          * look up the UID/GID/PROJID attributes. Moreover, the callback does
1582          * not seem to support the spill block.
1583          * We define attributes in the same order as SA_*_OFFSET in order to
1584          * work around the problem. See ORI-610.
1585          */
1586         cnt = 0;
1587         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1588         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1589         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1590         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1591         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1592         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1593         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1594         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1595         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1596         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1597         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, osa->btime, 16);
1598         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1599 #ifdef ZFS_PROJINHERIT
1600         if (osd->od_projectused_dn)
1601                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1602                                  &osa->projid, 8);
1603 #endif
1604         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1605         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1606
1607         if (xattr) {
1608                 rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
1609                 LASSERT(rc == 0);
1610
1611                 dxattr = osd_zio_buf_alloc(sa_size);
1612                 LASSERT(dxattr);
1613
1614                 rc = -nvlist_pack(xattr, &dxattr, &sa_size,
1615                                 NV_ENCODE_XDR, KM_SLEEP);
1616                 LASSERT(rc == 0);
1617
1618                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1619                                 NULL, dxattr, sa_size);
1620         }
1621
1622         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1623         if (dxattr)
1624                 osd_zio_buf_free(dxattr, sa_size);
1625
1626         return rc;
1627 }
1628
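/*
 * Find the dnode_t of a freshly allocated object.  The dnode is normally
 * referenced by one of the tx holds already, so walk the hold list and
 * take an extra hold on it (and its bonus buffer); fall back to a regular
 * objset lookup if it is not found there.
 */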
1629 int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1630                        uint64_t oid, dnode_t **dnp)
1631 {
1632         dmu_tx_hold_t *txh;
1633         int rc = 0;
1634
1635         /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1636         for (txh = list_tail(&tx->tx_holds); txh;
1637              txh = list_prev(&tx->tx_holds, txh)) {
1638                 dnode_t *dn = txh->txh_dnode;
1639                 dmu_buf_impl_t *db;
1640
1641                 if (dn == NULL)
1642                         continue;
1643                 if (dn->dn_object != oid)
1644                         continue;
1645                 db = dn->dn_bonus;
1646                 if (db == NULL) {
1647                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1648                         if (dn->dn_bonus == NULL)
1649                                 dbuf_create_bonus(dn);
1650                         rw_exit(&dn->dn_struct_rwlock);
1651                 }
1652                 db = dn->dn_bonus;
1653                 LASSERT(db);
1654                 LASSERT(dn->dn_handle);
1655                 DB_DNODE_ENTER(db);
1656                 if (zfs_refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1657                         zfs_refcount_add(&dn->dn_holds, osd_obj_tag);
1658                         atomic_inc_32(&dn->dn_dbufs_count);
1659                 }
1660                 *dnp = dn;
1661                 DB_DNODE_EXIT(db);
1662                 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1663                 break;
1664         }
1665
1666         if (unlikely(*dnp == NULL))
1667                 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1668
1669         return rc;
1670 }
1671
1672 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
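/*
 * Choose the on-disk dnode size.  In AUTO mode pick the smallest
 * power-of-two dnode whose bonus area fits the expected EAs (plus some
 * slack), otherwise use the explicitly configured size.
 */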
1673 int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
1674 {
1675         int dnsize;
1676
1677         if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
1678                 dnsize = DNODE_MIN_SIZE;
1679                 do {
1680                         if (DN_BONUS_SIZE(dnsize) >= ea_in_bonus + 32)
1681                                 break;
1682                         dnsize <<= 1;
1683                 } while (dnsize < DNODE_MAX_SIZE);
1684                 if (dnsize > DNODE_MAX_SIZE)
1685                         dnsize = DNODE_MAX_SIZE;
1686         } else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
1687                 dnsize = 1024;
1688         } else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
1689                 dnsize = 2048;
1690         } else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
1691                 dnsize = 4096;
1692         } else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
1693                 dnsize = 8192;
1694         } else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
1695                 dnsize = 16384;
1696         } else {
1697                 dnsize = DNODE_MIN_SIZE;
1698         }
1699         return dnsize;
1700 }
1701 #endif
1702
1703 /*
1704  * The transaction passed to this routine must have
1705  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1706  * to a transaction group.
1707  */
1708 int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
1709                         struct osd_object *obj, const struct lu_fid *fid,
1710                         dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1711 {
1712         dmu_object_type_t type = DMU_OT_PLAIN_FILE_CONTENTS;
1713         uint64_t oid;
1714         int size;
1715
1716         /* Use DMU_OTN_UINT8_METADATA for local objects so that their data
1717          * blocks get an additional ditto copy */
1718         if (unlikely(S_ISREG(la->la_mode) &&
1719                      fid_seq_is_local_file(fid_seq(fid))))
1720                 type = DMU_OTN_UINT8_METADATA;
1721
1722         /* Create a new DMU object sized to fit the expected EAs in the bonus area. */
1723         if (obj)
1724                 size = obj->oo_ea_in_bonus;
1725         else
1726                 size = OSD_BASE_EA_IN_BONUS;
1727         oid = osd_dmu_object_alloc(osd->od_os, type, 0,
1728                                    osd_find_dnsize(osd, size), tx);
1729
1730         LASSERT(la->la_valid & LA_MODE);
1731         la->la_size = 0;
1732         la->la_nlink = 1;
1733
1734         return osd_find_new_dnode(env, tx, oid, dnp);
1735 }
1736
1737 /*
1738  * The transaction passed to this routine must have
1739  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1740  * to a transaction group.
1741  *
1742  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1743  * This is fine for directories today, because storing the FID in the dirent
1744  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1745  * then we might need to re-evaluate the use of this flag and instead do
1746  * a conversion from the different internal ZAP hash formats being used. */
1747 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1748                      dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1749                      unsigned dnsize, zap_flags_t flags)
1750 {
1751         uint64_t oid;
1752
1753         /* Assert that the transaction has been assigned to a
1754          * transaction group. */
1755         LASSERT(tx->tx_txg != 0);
1756         *dnp = NULL;
1757
1758         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1759                                    DMU_OT_DIRECTORY_CONTENTS,
1760                                    14, /* == ZFS fzap_default_blockshift */
1761                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1762                                    dnsize, tx);
1763
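        /* a new ZAP nominally starts with two entries, matching the ZPL
         * convention for an empty directory ("." and "..") */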
1764         la->la_size = 2;
1765         la->la_nlink = 1;
1766
1767         return osd_find_new_dnode(env, tx, oid, dnp);
1768 }
1769
1770 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1771                           struct lu_attr *la, struct osd_thandle *oh)
1772 {
1773         struct osd_device *osd = osd_obj2dev(obj);
1774         dnode_t *dn;
1775         int rc;
1776
1777         /* Index files should be created as regular files in order not to
1778          * confuse ZPL, which could interpret them as directories.
1779          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1780          * binary keys */
1781         LASSERT(S_ISREG(la->la_mode));
1782         rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1783                 osd_find_dnsize(osd, obj->oo_ea_in_bonus), ZAP_FLAG_UINT64_KEY);
1784         if (rc)
1785                 return ERR_PTR(rc);
1786         return dn;
1787 }
1788
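/* Directories use string-keyed ZAPs (no ZAP_FLAG_UINT64_KEY), unlike index objects. */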
1789 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1790                           struct lu_attr *la, struct osd_thandle *oh)
1791 {
1792         struct osd_device *osd = osd_obj2dev(obj);
1793         dnode_t *dn;
1794         int rc;
1795
1796         LASSERT(S_ISDIR(la->la_mode));
1797         rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1798                               osd_find_dnsize(osd, obj->oo_ea_in_bonus), 0);
1799         if (rc)
1800                 return ERR_PTR(rc);
1801         return dn;
1802 }
1803
1804 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1805                           struct lu_attr *la, struct osd_thandle *oh)
1806 {
1807         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1808         struct osd_device *osd = osd_obj2dev(obj);
1809         dnode_t *dn;
1810         int rc;
1811
1812         LASSERT(S_ISREG(la->la_mode));
1813         rc = __osd_object_create(env, osd, obj, fid, &dn, oh->ot_tx, la);
1814         if (rc)
1815                 return ERR_PTR(rc);
1816
1817         if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid))) {
1818                 /* The minimum block size must be at least PAGE_SIZE, otherwise
1819                  * it will break the assumption in tgt_thread_big_cache where
1820                  * the array size is PTLRPC_MAX_BRW_PAGES. It will also affect
1821                  * RDMA due to sub-page transfer sizes */
1822                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1823                                                PAGE_SIZE, 0, oh->ot_tx);
1824                 if (unlikely(rc)) {
1825                         CERROR("%s: can't change blocksize: %d\n",
1826                                osd->od_svname, rc);
1827                         return ERR_PTR(rc);
1828                 }
1829         } else if ((fid_is_llog(fid))) {
1830                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1831                                                LLOG_MIN_CHUNK_SIZE, 0, oh->ot_tx);
1832                 if (unlikely(rc)) {
1833                         CERROR("%s: can't change blocksize: %d\n",
1834                                osd->od_svname, rc);
1835                         return ERR_PTR(rc);
1836                 }
1837         }
1838
1839         return dn;
1840 }
1841
1842 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1843                           struct lu_attr *la, struct osd_thandle *oh)
1844 {
1845         dnode_t *dn;
1846         int rc;
1847
1848         LASSERT(S_ISLNK(la->la_mode));
1849         rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1850                                  lu_object_fid(&obj->oo_dt.do_lu),
1851                                  &dn, oh->ot_tx, la);
1852         if (rc)
1853                 return ERR_PTR(rc);
1854         return dn;
1855 }
1856
1857 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1858                           struct lu_attr *la, struct osd_thandle *oh)
1859 {
1860         dnode_t *dn;
1861         int rc;
1862
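        /* block/character devices also need the device number stored */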
1863         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1864                 la->la_valid |= LA_RDEV;
1865
1866         rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1867                                  lu_object_fid(&obj->oo_dt.do_lu),
1868                                  &dn, oh->ot_tx, la);
1869         if (rc)
1870                 return ERR_PTR(rc);
1871         return dn;
1872 }
1873
1874 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1875                                    struct osd_object *obj,
1876                                    struct lu_attr *la,
1877                                    struct osd_thandle *oh);
1878
1879 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1880 {
1881         osd_obj_type_f result;
1882
1883         switch (type) {
1884         case DFT_DIR:
1885                 result = osd_mkdir;
1886                 break;
1887         case DFT_INDEX:
1888                 result = osd_mkidx;
1889                 break;
1890         case DFT_REGULAR:
1891                 result = osd_mkreg;
1892                 break;
1893         case DFT_SYM:
1894                 result = osd_mksym;
1895                 break;
1896         case DFT_NODE:
1897                 result = osd_mknod;
1898                 break;
1899         default:
1900                 LBUG();
1901                 break;
1902         }
1903         return result;
1904 }
1905
1906 /*
1907  * Concurrency: @dt is write locked.
1908  */
1909 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1910                       struct lu_attr *attr, struct dt_allocation_hint *hint,
1911                       struct dt_object_format *dof, struct thandle *th)
1912 {
1913         struct osd_thread_info  *info = osd_oti_get(env);
1914         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1915         struct zpl_direntry     *zde = &info->oti_zde.lzd_reg;
1916         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1917         struct osd_object       *obj = osd_dt_obj(dt);
1918         struct osd_device       *osd = osd_obj2dev(obj);
1919         char                    *buf = info->oti_str;
1920         struct osd_thandle      *oh;
1921         dnode_t *dn = NULL, *zdn = NULL;
1922         uint64_t                 zapid, parent = 0;
1923         int                      rc;
1924         __u32 compat = 0;
1925
1926         ENTRY;
1927
1928         LASSERT(!fid_is_acct(fid));
1929
1930         /* concurrent create declarations should not see the object
1931          * in an inconsistent state (dnode, attributes, etc);
1932          * in regular cases acquiring the lock should be cheap */
1933         down_write(&obj->oo_guard);
1934
1935         if (unlikely(dt_object_exists(dt)))
1936                 GOTO(out, rc = -EEXIST);
1937
1938         LASSERT(osd_invariant(obj));
1939         LASSERT(dof != NULL);
1940
1941         LASSERT(th != NULL);
1942         oh = container_of(th, struct osd_thandle, ot_super);
1943
1944         LASSERT(obj->oo_dn == NULL);
1945
1946         /* to follow the ZFS on-disk format we need
1947          * to record the parent dnode number properly */
1948         if (hint != NULL && hint->dah_parent != NULL &&
1949             !dt_object_remote(hint->dah_parent))
1950                 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1951
1952         /* we may adjust some attributes, so work on a copy rather than the caller's */
1953         obj->oo_attr = *attr;
1954         obj->oo_attr.la_size = 0;
1955         obj->oo_attr.la_nlink = 0;
1956         obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1957
1958 #ifdef ZFS_PROJINHERIT
1959         if (osd->od_projectused_dn) {
1960                 if (!(obj->oo_attr.la_valid & LA_PROJID))
1961                         obj->oo_attr.la_projid = ZFS_DEFAULT_PROJID;
1962                 obj->oo_with_projid = 1;
1963         }
1964 #endif
1965
1966         dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1967         if (IS_ERR(dn)) {
1968                 rc = PTR_ERR(dn);
1969                 dn = NULL;
1970                 GOTO(out, rc);
1971         }
1972
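        /* fill a ZPL-style directory entry pointing at the new dnode */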
1973         zde->zde_pad = 0;
1974         zde->zde_dnode = dn->dn_object;
1975         zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1976
1977         zapid = osd_get_name_n_idx(env, osd, fid, buf,
1978                                    sizeof(info->oti_str), &zdn);
1979         if (CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY) ||
1980             (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_NO_ENTRY)))
1981                 goto skip_add;
1982
1983         if (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
1984                 zde->zde_dnode++;
1985
1986         rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
1987         if (rc)
1988                 GOTO(out, rc);
1989
1990 skip_add:
1991         obj->oo_dn = dn;
1992         /* Now add in all of the "SA" attributes */
1993         rc = osd_sa_handle_get(obj);
1994         if (rc)
1995                 GOTO(out, rc);
1996
1997         rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
1998         if (rc)
1999                 GOTO(out, rc);
2000
2001         /* initialize LMA */
2002         if (fid_is_idif(fid) || (fid_is_norm(fid) && osd->od_is_ost))
2003                 compat |= LMAC_FID_ON_OST;
2004         lustre_lma_init(lma, fid, compat, 0);
2005         lustre_lma_swab(lma);
2006         rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
2007                                     (uchar_t *)lma, sizeof(*lma));
2008         if (rc)
2009                 GOTO(out, rc);
2010
2011         /* configure new osd object */
2012         obj->oo_parent = parent != 0 ? parent : zapid;
2013         obj->oo_late_attr_set = 1;
2014         rc = __osd_sa_xattr_schedule_update(env, obj, oh);
2015         if (rc)
2016                 GOTO(out, rc);
2017
2018         /* XXX: oo_lma_flags */
2019         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
2020         if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
2021                 /* no body operations for accounting objects */
2022                 obj->oo_dt.do_body_ops = &osd_body_ops;
2023
2024         osd_idc_find_and_init(env, osd, obj);
2025
2026 out:
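        /* on failure undo the allocation and drop the dnode hold,
         * otherwise mark the object as existing */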
2027         if (unlikely(rc && dn)) {
2028                 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
2029                 osd_dnode_rele(dn);
2030                 obj->oo_dn = NULL;
2031         } else if (!rc) {
2032                 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
2033         }
2034         up_write(&obj->oo_guard);
2035         RETURN(rc);
2036 }
2037
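/* nlink changes are just an attribute update, so reuse the attr_set declaration */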
2038 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
2039                                struct thandle *th)
2040 {
2041         osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2042         return osd_declare_attr_set(env, dt, NULL, th);
2043 }
2044
2045 /*
2046  * Concurrency: @dt is write locked.
2047  */
2048 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
2049                        struct thandle *handle)
2050 {
2051         struct osd_object       *obj = osd_dt_obj(dt);
2052         struct osd_thandle      *oh;
2053         struct osd_device       *osd = osd_obj2dev(obj);
2054         uint64_t                 nlink;
2055         int rc;
2056
2057         ENTRY;
2058
2059         down_read(&obj->oo_guard);
2060         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2061                 GOTO(out, rc = -ENOENT);
2062
2063         LASSERT(osd_invariant(obj));
2064         LASSERT(obj->oo_sa_hdl != NULL);
2065
2066         oh = container_of(handle, struct osd_thandle, ot_super);
2067
2068         write_lock(&obj->oo_attr_lock);
2069         nlink = ++obj->oo_attr.la_nlink;
2070         write_unlock(&obj->oo_attr_lock);
2071
2072         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2073
2074 out:
2075         up_read(&obj->oo_guard);
2076         RETURN(rc);
2077 }
2078
2079 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
2080                                struct thandle *handle)
2081 {
2082         osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2083         return osd_declare_attr_set(env, dt, NULL, handle);
2084 }
2085
2086 /*
2087  * Concurrency: @dt is write locked.
2088  */
2089 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
2090                        struct thandle *handle)
2091 {
2092         struct osd_object       *obj = osd_dt_obj(dt);
2093         struct osd_thandle      *oh;
2094         struct osd_device       *osd = osd_obj2dev(obj);
2095         uint64_t                 nlink;
2096         int                      rc;
2097
2098         ENTRY;
2099
2100         down_read(&obj->oo_guard);
2101
2102         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2103                 GOTO(out, rc = -ENOENT);
2104
2105         LASSERT(osd_invariant(obj));
2106         LASSERT(obj->oo_sa_hdl != NULL);
2107
2108         oh = container_of(handle, struct osd_thandle, ot_super);
2109         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2110
2111         write_lock(&obj->oo_attr_lock);
2112         nlink = --obj->oo_attr.la_nlink;
2113         write_unlock(&obj->oo_attr_lock);
2114
2115         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2116
2117 out:
2118         up_read(&obj->oo_guard);
2119         RETURN(rc);
2120 }
2121
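/*
 * Ensure the object's changes are on stable storage: wait for the txg that
 * holds the dirty dnode to sync, or, if osd_object_sync_delay_us is set to
 * a non-negative value, just delay that many microseconds instead.
 */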
2122 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
2123                            __u64 start, __u64 end)
2124 {
2125         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2126         uint64_t txg = 0;
2127         ENTRY;
2128
2129         if (osd->od_dt_dev.dd_rdonly)
2130                 RETURN(0);
2131
2132         txg = osd_db_dirty_txg(osd_dt_obj(dt)->oo_dn->dn_dbuf);
2133         if (txg) {
2134                 /* the object is dirty or being synced */
2135                 if (osd_object_sync_delay_us < 0)
2136                         txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
2137                 else
2138                         udelay(osd_object_sync_delay_us);
2139         }
2140
2141         RETURN(0);
2142 }
2143
2144 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
2145 {
2146         return 0;
2147 }
2148
2149 static struct dt_object_operations osd_obj_ops = {
2150         .do_read_lock           = osd_read_lock,
2151         .do_write_lock          = osd_write_lock,
2152         .do_read_unlock         = osd_read_unlock,
2153         .do_write_unlock        = osd_write_unlock,
2154         .do_write_locked        = osd_write_locked,
2155         .do_attr_get            = osd_attr_get,
2156         .do_declare_attr_set    = osd_declare_attr_set,
2157         .do_attr_set            = osd_attr_set,
2158         .do_ah_init             = osd_ah_init,
2159         .do_declare_create      = osd_declare_create,
2160         .do_create              = osd_create,
2161         .do_declare_destroy     = osd_declare_destroy,
2162         .do_destroy             = osd_destroy,
2163         .do_index_try           = osd_index_try,
2164         .do_declare_ref_add     = osd_declare_ref_add,
2165         .do_ref_add             = osd_ref_add,
2166         .do_declare_ref_del     = osd_declare_ref_del,
2167         .do_ref_del             = osd_ref_del,
2168         .do_xattr_get           = osd_xattr_get,
2169         .do_declare_xattr_set   = osd_declare_xattr_set,
2170         .do_xattr_set           = osd_xattr_set,
2171         .do_declare_xattr_del   = osd_declare_xattr_del,
2172         .do_xattr_del           = osd_xattr_del,
2173         .do_xattr_list          = osd_xattr_list,
2174         .do_object_sync         = osd_object_sync,
2175         .do_invalidate          = osd_invalidate,
2176 };
2177
2178 static struct lu_object_operations osd_lu_obj_ops = {
2179         .loo_object_init        = osd_object_init,
2180         .loo_object_delete      = osd_object_delete,
2181         .loo_object_release     = osd_object_release,
2182         .loo_object_free        = osd_object_free,
2183         .loo_object_print       = osd_object_print,
2184         .loo_object_invariant   = osd_object_invariant,
2185 };
2186
2187 static int osd_otable_it_attr_get(const struct lu_env *env,
2188                                 struct dt_object *dt,
2189                                 struct lu_attr *attr)
2190 {
2191         attr->la_valid = 0;
2192         return 0;
2193 }
2194
2195 static struct dt_object_operations osd_obj_otable_it_ops = {
2196         .do_attr_get            = osd_otable_it_attr_get,
2197         .do_index_try           = osd_index_try,
2198 };
2199
2200 module_param(osd_object_sync_delay_us, int, 0644);
2201 MODULE_PARM_DESC(osd_object_sync_delay_us,
2202                  "If zero or larger, delay N usec instead of doing an object sync");