Whamcloud - gitweb
LU-13189 osd-zfs: add project id for old objects without ZFS_PROJID
[fs/lustre-release.git] / lustre / osd-zfs / osd_object.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osd-zfs/osd_object.c
32  *
33  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
34  * Author: Mike Pershin <tappro@whamcloud.com>
35  * Author: Johann Lombardi <johann@whamcloud.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_OSD
39
40 #include <libcfs/libcfs.h>
41 #include <obd_support.h>
42 #include <lustre_net.h>
43 #include <obd.h>
44 #include <obd_class.h>
45 #include <lustre_disk.h>
46 #include <lustre_fid.h>
47
48 #include "osd_internal.h"
49
50 #include <sys/dnode.h>
51 #include <sys/dbuf.h>
52 #include <sys/spa.h>
53 #include <sys/stat.h>
54 #include <sys/zap.h>
55 #include <sys/spa_impl.h>
56 #include <sys/zfs_znode.h>
57 #include <sys/dmu_tx.h>
58 #include <sys/dmu_objset.h>
59 #include <sys/dsl_prop.h>
60 #include <sys/sa_impl.h>
61 #include <sys/txg.h>
62
63 char *osd_obj_tag = "osd_object";
64 static int osd_object_sync_delay_us = -1;
65
66 static const struct dt_object_operations osd_obj_ops;
67 static const struct lu_object_operations osd_lu_obj_ops;
68 static const struct dt_object_operations osd_obj_otable_it_ops;
69
70 static void
71 osd_object_sa_fini(struct osd_object *obj)
72 {
73         if (obj->oo_sa_hdl) {
74                 sa_handle_destroy(obj->oo_sa_hdl);
75                 obj->oo_sa_hdl = NULL;
76         }
77 }
78
79 static int
80 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
81 {
82         int rc;
83
84         LASSERT(obj->oo_sa_hdl == NULL);
85         LASSERT(obj->oo_dn != NULL);
86
87         rc = osd_sa_handle_get(obj);
88         if (rc)
89                 return rc;
90
91         /* Cache the xattr object id, valid for the life of the object */
92         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
93         if (rc == -ENOENT) {
94                 obj->oo_xattr = ZFS_NO_OBJECT;
95                 rc = 0;
96         } else if (rc) {
97                 osd_object_sa_fini(obj);
98         }
99
100         return rc;
101 }
102
103 /*
104  * Add object to list of dirty objects in tx handle.
105  */
106 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
107 {
108         if (!list_empty(&obj->oo_sa_linkage))
109                 return;
110
111         write_lock(&obj->oo_attr_lock);
112         if (likely(list_empty(&obj->oo_sa_linkage)))
113                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
114         write_unlock(&obj->oo_attr_lock);
115 }
116
117 /*
118  * Release spill block dbuf hold for all dirty SAs.
119  */
120 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
121 {
122         struct osd_object *obj;
123
124         while (!list_empty(&oh->ot_sa_list)) {
125                 obj = list_entry(oh->ot_sa_list.next,
126                                  struct osd_object, oo_sa_linkage);
127                 write_lock(&obj->oo_attr_lock);
128                 list_del_init(&obj->oo_sa_linkage);
129                 write_unlock(&obj->oo_attr_lock);
130                 if (obj->oo_late_xattr && obj->oo_destroyed == 0) {
131                         /*
132                          * take oo_guard to protect oo_sa_xattr buffer
133                          * from concurrent update by osd_xattr_set()
134                          */
135                         LASSERT(oh->ot_assigned != 0);
136                         down_write(&obj->oo_guard);
137                         if (obj->oo_destroyed == 0) {
138                                 if (obj->oo_late_attr_set)
139                                         __osd_sa_attr_init(env, obj, oh);
140                                 else if (obj->oo_late_xattr)
141                                         __osd_sa_xattr_update(env, obj, oh);
142                         }
143                         up_write(&obj->oo_guard);
144                 }
145                 sa_spill_rele(obj->oo_sa_hdl);
146         }
147 }
148
149 /*
150  * Update the SA and add the object to the dirty list.
151  */
152 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
153                          void *buf, uint32_t buflen, struct osd_thandle *oh)
154 {
155         int rc;
156
157         LASSERT(obj->oo_sa_hdl != NULL);
158         LASSERT(oh->ot_tx != NULL);
159
160         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
161         osd_object_sa_dirty_add(obj, oh);
162
163         return rc;
164 }
165
166 /*
167  * Bulk update the SA and add the object to the dirty list.
168  */
169 static int
170 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
171                           int count, struct osd_thandle *oh)
172 {
173         int rc;
174
175         LASSERT(obj->oo_sa_hdl != NULL);
176         LASSERT(oh->ot_tx != NULL);
177
178         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
179         osd_object_sa_dirty_add(obj, oh);
180
181         return rc;
182 }
183
184 /*
185  * Retrieve the attributes of a DMU object
186  */
187 static int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
188                                  struct osd_object *obj, struct lu_attr *la)
189 {
190         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
191         sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
192         struct lustre_mdt_attrs *lma;
193         struct lu_buf buf;
194         int cnt = 0;
195         int              rc;
196         ENTRY;
197
198         LASSERT(obj->oo_dn != NULL);
199
200         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_BTIME | LA_MODE |
201                         LA_TYPE | LA_SIZE | LA_UID | LA_GID | LA_FLAGS |
202                         LA_NLINK;
203
204         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
205         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
206         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
207         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(o), NULL, osa->btime, 16);
208         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
209         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
210         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
211         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
212         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
213         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
214         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
215
216         rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
217         if (rc)
218                 GOTO(out_sa, rc);
219
220 #ifdef ZFS_PROJINHERIT
221         if (o->od_projectused_dn && osa->flags & ZFS_PROJID) {
222                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(o),
223                                 &osa->projid, 8);
224                 if (rc)
225                         GOTO(out_sa, rc);
226
227                 la->la_projid = osa->projid;
228                 la->la_valid |= LA_PROJID;
229                 obj->oo_with_projid = 1;
230         } else {
231                 la->la_projid = ZFS_DEFAULT_PROJID;
232                 la->la_valid &= ~LA_PROJID;
233         }
234 #else
235         la->la_projid = 0;
236         la->la_valid &= ~LA_PROJID;
237 #endif
238
239         la->la_atime = osa->atime[0];
240         la->la_mtime = osa->mtime[0];
241         la->la_ctime = osa->ctime[0];
242         la->la_btime = osa->btime[0];
243         la->la_mode = osa->mode;
244         la->la_uid = osa->uid;
245         la->la_gid = osa->gid;
246         la->la_nlink = osa->nlink;
247         la->la_flags = attrs_zfs2fs(osa->flags);
248         la->la_size = osa->size;
249
250         /* Try to get extra flags from LMA */
251         lma = (struct lustre_mdt_attrs *)osd_oti_get(env)->oti_buf;
252         buf.lb_buf = lma;
253         buf.lb_len = sizeof(osd_oti_get(env)->oti_buf);
254         down_read(&obj->oo_guard);
255         rc = osd_xattr_get_lma(env, obj, &buf);
256         if (!rc) {
257                 lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
258                 obj->oo_lma_flags =
259                         lma_to_lustre_flags(lma->lma_incompat);
260         } else if (rc == -ENODATA ||
261                    !(S_ISDIR(la->la_mode) &&
262                      dt_object_exists(&obj->oo_dt))) {
263                 rc = 0;
264         }
265         up_read(&obj->oo_guard);
266
267         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
268                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
269                 if (rc)
270                         GOTO(out_sa, rc);
271                 la->la_rdev = osa->rdev;
272                 la->la_valid |= LA_RDEV;
273         }
274 out_sa:
275
276         RETURN(rc);
277 }
278
279 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
280 {
281         dmu_buf_t *db;
282         dmu_buf_impl_t *dbi;
283         int rc;
284
285         rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
286         if (rc)
287                 return rc;
288
289         dbi = (dmu_buf_impl_t *)db;
290         DB_DNODE_ENTER(dbi);
291         *dnp = DB_DNODE(dbi);
292         DB_DNODE_EXIT(dbi);
293         LASSERT(*dnp != NULL);
294
295         return 0;
296 }
297
298 /*
299  * Concurrency: no concurrent access is possible that early in object
300  * life-cycle.
301  */
302 struct lu_object *osd_object_alloc(const struct lu_env *env,
303                                    const struct lu_object_header *hdr,
304                                    struct lu_device *d)
305 {
306         struct osd_object *mo;
307
308         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
309         if (mo != NULL) {
310                 struct lu_object *l;
311                 struct lu_object_header *h;
312                 struct osd_device *o = osd_dev(d);
313
314                 l = &mo->oo_dt.do_lu;
315                 if (unlikely(o->od_in_init)) {
316                         OBD_ALLOC_PTR(h);
317                         if (!h) {
318                                 OBD_FREE_PTR(mo);
319                                 return NULL;
320                         }
321
322                         lu_object_header_init(h);
323                         lu_object_init(l, h, d);
324                         lu_object_add_top(h, l);
325                         mo->oo_header = h;
326                 } else {
327                         dt_object_init(&mo->oo_dt, NULL, d);
328                         mo->oo_header = NULL;
329                 }
330
331                 mo->oo_dt.do_ops = &osd_obj_ops;
332                 l->lo_ops = &osd_lu_obj_ops;
333                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
334                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
335                 init_rwsem(&mo->oo_sem);
336                 init_rwsem(&mo->oo_guard);
337                 rwlock_init(&mo->oo_attr_lock);
338                 mo->oo_destroy = OSD_DESTROY_NONE;
339                 return l;
340         } else {
341                 return NULL;
342         }
343 }
344
345 static void osd_obj_set_blksize(const struct lu_env *env,
346                                 struct osd_device *osd, struct osd_object *obj)
347 {
348         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
349         dmu_tx_t *tx;
350         dnode_t *dn = obj->oo_dn;
351         uint32_t blksz;
352         int rc = 0;
353         ENTRY;
354
355         LASSERT(!osd_oti_get(env)->oti_in_trans);
356
357         tx = dmu_tx_create(osd->od_os);
358         if (!tx) {
359                 CERROR("%s: fail to create tx to set blksize for "DFID"\n",
360                        osd->od_svname, PFID(fid));
361                 RETURN_EXIT;
362         }
363
364         dmu_tx_hold_bonus(tx, dn->dn_object);
365         rc = -dmu_tx_assign(tx, TXG_WAIT);
366         if (rc) {
367                 dmu_tx_abort(tx);
368                 CERROR("%s: fail to assign tx to set blksize for "DFID
369                        ": rc = %d\n", osd->od_svname, PFID(fid), rc);
370                 RETURN_EXIT;
371         }
372
373         down_write(&obj->oo_guard);
374         if (unlikely((1 << dn->dn_datablkshift) >= PAGE_SIZE))
375                 GOTO(out, rc = 1);
376
377         blksz = dn->dn_datablksz;
378         if (!is_power_of_2(blksz))
379                 blksz = size_roundup_power2(blksz);
380
381         if (blksz > osd->od_max_blksz)
382                 blksz = osd->od_max_blksz;
383         else if (blksz < PAGE_SIZE)
384                 blksz = PAGE_SIZE;
385         rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object, blksz, 0, tx);
386
387         GOTO(out, rc);
388
389 out:
390         up_write(&obj->oo_guard);
391         if (rc) {
392                 dmu_tx_abort(tx);
393                 if (unlikely(obj->oo_dn->dn_maxblkid > 0))
394                         rc = 1;
395                 if (rc < 0)
396                         CERROR("%s: fail to set blksize for "DFID": rc = %d\n",
397                                osd->od_svname, PFID(fid), rc);
398         } else {
399                 dmu_tx_commit(tx);
400                 CDEBUG(D_INODE, "%s: set blksize as %u for "DFID"\n",
401                        osd->od_svname, blksz, PFID(fid));
402         }
403 }
404
405 /*
406  * Concurrency: shouldn't matter.
407  */
408 static int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
409 {
410         struct osd_device       *osd = osd_obj2dev(obj);
411         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
412         int                      rc = 0;
413         ENTRY;
414
415         LASSERT(obj->oo_dn);
416
417         rc = osd_object_sa_init(obj, osd);
418         if (rc)
419                 RETURN(rc);
420
421         /* cache attrs in object */
422         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
423         if (rc)
424                 RETURN(rc);
425
426         if (likely(!fid_is_acct(fid))) {
427                 /* no body operations for accounting objects */
428                 obj->oo_dt.do_body_ops = &osd_body_ops;
429
430                 if (S_ISREG(obj->oo_attr.la_mode) &&
431                     obj->oo_dn->dn_maxblkid == 0 &&
432                     (1 << obj->oo_dn->dn_datablkshift) < PAGE_SIZE &&
433                     (fid_is_idif(fid) || fid_is_norm(fid) ||
434                      fid_is_echo(fid)) &&
435                     osd->od_is_ost && !osd->od_dt_dev.dd_rdonly)
436                         osd_obj_set_blksize(env, osd, obj);
437         }
438
439         /*
440          * initialize object before marking it existing
441          */
442         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
443
444         smp_mb();
445         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
446
447         RETURN(0);
448 }
449
450 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
451 {
452         struct osd_thread_info  *info = osd_oti_get(env);
453         struct lu_buf           buf;
454         int                     rc;
455         struct lustre_mdt_attrs *lma;
456         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
457         ENTRY;
458
459         BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
460         lma = (struct lustre_mdt_attrs *)info->oti_buf;
461         buf.lb_buf = lma;
462         buf.lb_len = sizeof(info->oti_buf);
463
464         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
465         if (rc > 0) {
466                 rc = 0;
467                 lustre_lma_swab(lma);
468                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
469                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
470                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
471                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
472                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
473                               PFID(rfid));
474                         rc = -EOPNOTSUPP;
475                 } else if (unlikely(!lu_fid_eq(rfid, &lma->lma_self_fid))) {
476                         CERROR("%s: FID-in-LMA "DFID" does not match the "
477                               "object self-fid "DFID"\n",
478                               osd_obj2dev(obj)->od_svname,
479                               PFID(&lma->lma_self_fid), PFID(rfid));
480                         rc = -EREMCHG;
481                 } else {
482                         struct osd_device *osd = osd_obj2dev(obj);
483
484                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
485                             osd->od_is_ost)
486                                 obj->oo_pfid_in_lma = 1;
487                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
488                             osd->od_remote_parent_dir != ZFS_NO_OBJECT)
489                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
490                 }
491         } else if (rc == -ENODATA) {
492                 /* haven't initialize LMA xattr */
493                 rc = 0;
494         }
495
496         RETURN(rc);
497 }
498
499 /**
500  * Helper function to retrieve DMU object id from fid for accounting object
501  */
502 static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
503                                   const struct lu_fid *fid)
504 {
505         dnode_t *dn = NULL;
506
507         LASSERT(fid_is_acct(fid));
508
509         switch (fid_oid(fid)) {
510         case ACCT_USER_OID:
511                 dn = osd->od_userused_dn;
512                 break;
513         case ACCT_GROUP_OID:
514                 dn = osd->od_groupused_dn;
515                 break;
516 #ifdef ZFS_PROJINHERIT
517         case ACCT_PROJECT_OID:
518                 dn = osd->od_projectused_dn;
519                 break;
520 #endif
521         default:
522                 break;
523         }
524
525         return dn;
526 }
527
528 /*
529  * Concurrency: no concurrent access is possible that early in object
530  * life-cycle.
531  */
532 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
533                            const struct lu_object_conf *conf)
534 {
535         struct osd_object *obj = osd_obj(l);
536         struct osd_device *osd = osd_obj2dev(obj);
537         const struct lu_fid *fid = lu_object_fid(l);
538         struct lustre_scrub *scrub = &osd->od_scrub;
539         struct osd_thread_info *info = osd_oti_get(env);
540         struct luz_direntry *zde = &info->oti_zde;
541         struct osd_idmap_cache *idc;
542         char *name = info->oti_str;
543         uint64_t oid;
544         int rc = 0;
545         int rc1;
546         bool remote = false;
547         ENTRY;
548
549         LASSERT(osd_invariant(obj));
550
551         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
552                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
553                 l->lo_header->loh_attr |= LOHA_EXISTS;
554
555                 GOTO(out, rc = 0);
556         }
557
558         if (conf && conf->loc_flags & LOC_F_NEW)
559                 GOTO(out, rc = 0);
560
561         if (unlikely(fid_is_acct(fid))) {
562                 obj->oo_dn = osd_quota_fid2dmu(osd, fid);
563                 if (obj->oo_dn) {
564                         obj->oo_dt.do_index_ops = &osd_acct_index_ops;
565                         l->lo_header->loh_attr |= LOHA_EXISTS;
566                 }
567
568                 GOTO(out, rc = 0);
569         }
570
571         idc = osd_idc_find(env, osd, fid);
572         if (idc && !idc->oic_remote && idc->oic_dnode != ZFS_NO_OBJECT) {
573                 oid = idc->oic_dnode;
574                 goto zget;
575         }
576
577         rc = -ENOENT;
578         if (!list_empty(&osd->od_scrub.os_inconsistent_items))
579                 rc = osd_oii_lookup(osd, fid, &oid);
580
581         if (rc)
582                 rc = osd_fid_lookup(env, osd, fid, &oid);
583
584         if (rc == -ENOENT) {
585                 if (likely(!(fid_is_norm(fid) || fid_is_igif(fid)) ||
586                            fid_is_on_ost(env, osd, fid) ||
587                            !zfs_test_bit(osd_oi_fid2idx(osd, fid),
588                                          scrub->os_file.sf_oi_bitmap)))
589                         GOTO(out, rc = 0);
590
591                 rc = -EREMCHG;
592                 goto trigger;
593         }
594
595         if (rc)
596                 GOTO(out, rc);
597
598 zget:
599         LASSERT(obj->oo_dn == NULL);
600
601         rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
602         /* EEXIST will be returned if object is being deleted in ZFS */
603         if (rc == -EEXIST)
604                 GOTO(out, rc = 0);
605
606         if (rc) {
607                 CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
608                        osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
609                 GOTO(out, rc);
610         }
611
612         rc = osd_object_init0(env, obj);
613         if (rc)
614                 GOTO(out, rc);
615
616         if (unlikely(obj->oo_header))
617                 GOTO(out, rc = 0);
618
619         rc = osd_check_lma(env, obj);
620         if (rc != -EREMCHG)
621                 GOTO(out, rc);
622
623         osd_scrub_refresh_mapping(env, osd, fid, oid, DTO_INDEX_DELETE, true,
624                                   NULL);
625
626 trigger:
627         /* We still have chance to get the valid dnode: for the object that is
628          * referenced by remote name entry, the object on the local MDT will be
629          * linked under the dir /REMOTE_PARENT_DIR with its FID string as name.
630          *
631          * During the OI scrub, if we cannot find the OI mapping, we may still
632          * have change to map the FID to local OID via lookup the dir
633          * /REMOTE_PARENT_DIR. */
634         if (!remote && !fid_is_on_ost(env, osd, fid)) {
635                 osd_fid2str(name, fid, sizeof(info->oti_str));
636                 rc = osd_zap_lookup(osd, osd->od_remote_parent_dir,
637                                     NULL, name, 8, 3, (void *)zde);
638                 if (!rc) {
639                         oid = zde->lzd_reg.zde_dnode;
640                         osd_dnode_rele(obj->oo_dn);
641                         obj->oo_dn = NULL;
642                         remote = true;
643                         goto zget;
644                 }
645         }
646
647         /* The case someone triggered the OI scrub already. */
648         if (scrub->os_running) {
649                 if (!rc) {
650                         LASSERT(remote);
651
652                         lu_object_set_agent_entry(l);
653                         osd_oii_insert(env, osd, fid, oid, false);
654                 } else {
655                         rc = -EINPROGRESS;
656                 }
657
658                 GOTO(out, rc);
659         }
660
661         /* The case NOT allow to trigger OI scrub automatically. */
662         if (osd->od_scrub.os_auto_scrub_interval == AS_NEVER)
663                 GOTO(out, rc);
664
665         /* It is me to trigger the OI scrub. */
666         rc1 = osd_scrub_start(env, osd, SS_CLEAR_DRYRUN |
667                               SS_CLEAR_FAILOUT | SS_AUTO_FULL);
668         CDEBUG_LIMIT(D_LFSCK | D_CONSOLE | D_WARNING,
669                      "%s: trigger OI scrub by RPC for "DFID"/%#llx: rc = %d\n",
670                      osd_name(osd), PFID(fid), oid, rc1);
671         if (!rc) {
672                 LASSERT(remote);
673
674                 lu_object_set_agent_entry(l);
675                 if (!rc1)
676                         osd_oii_insert(env, osd, fid, oid, false);
677         } else {
678                 if (!rc1)
679                         rc = -EINPROGRESS;
680                 else
681                         rc = -EREMCHG;
682         }
683
684         GOTO(out, rc);
685
686 out:
687         RETURN(rc);
688 }
689
690 /*
691  * Concurrency: no concurrent access is possible that late in object
692  * life-cycle.
693  */
694 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
695 {
696         struct osd_object *obj = osd_obj(l);
697         struct lu_object_header *h = obj->oo_header;
698
699         LASSERT(osd_invariant(obj));
700
701         dt_object_fini(&obj->oo_dt);
702         /* obj doesn't contain an lu_object_header, so we don't need call_rcu */
703         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
704         if (unlikely(h))
705                 lu_object_header_free(h);
706 }
707
708 static int
709 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
710 {
711         int rc = -EBUSY;
712
713         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
714
715         /* the object is supposed to be exclusively locked by
716          * the caller (osd_destroy()), while the transaction
717          * (oh) is per-thread and not shared */
718         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
719                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
720                 rc = 0;
721         }
722
723         return rc;
724 }
725
726 /* Default to max data size covered by a level-1 indirect block */
727 static unsigned long osd_sync_destroy_max_size =
728         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
729 module_param(osd_sync_destroy_max_size, ulong, 0444);
730 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
731
732 static inline void
733 osd_object_set_destroy_type(struct osd_object *obj)
734 {
735         /*
736          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
737          * only once and use it consistently thereafter.
738          */
739         down_write(&obj->oo_guard);
740         if (obj->oo_destroy == OSD_DESTROY_NONE) {
741                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
742                         obj->oo_destroy = OSD_DESTROY_SYNC;
743                 else /* Larger objects are destroyed asynchronously */
744                         obj->oo_destroy = OSD_DESTROY_ASYNC;
745         }
746         up_write(&obj->oo_guard);
747 }
748
749 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
750                                struct thandle *th)
751 {
752         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
753         struct osd_object       *obj = osd_dt_obj(dt);
754         struct osd_device       *osd = osd_obj2dev(obj);
755         struct osd_thandle      *oh;
756         dnode_t *dn;
757         int                      rc;
758         uint64_t                 zapid;
759         ENTRY;
760
761         LASSERT(th != NULL);
762         LASSERT(dt_object_exists(dt));
763
764         oh = container_of(th, struct osd_thandle, ot_super);
765         LASSERT(oh->ot_tx != NULL);
766
767         dmu_tx_mark_netfree(oh->ot_tx);
768
769         /* declare that we'll remove object from fid-dnode mapping */
770         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
771         osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
772
773         osd_declare_xattrs_destroy(env, obj, oh);
774
775         /* one less inode */
776         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
777                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
778                                -1, oh, NULL, OSD_QID_INODE);
779         if (rc)
780                 RETURN(rc);
781
782         /* data to be truncated */
783         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
784                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
785                                0, oh, NULL, OSD_QID_BLK);
786         if (rc)
787                 RETURN(rc);
788
789         osd_object_set_destroy_type(obj);
790         if (obj->oo_destroy == OSD_DESTROY_SYNC)
791                 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
792                                  0, DMU_OBJECT_END);
793         else
794                 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
795                                 osd->od_unlinked, TRUE, NULL);
796
797         /* remove agent entry (if have) from remote parent */
798         if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
799                 osd_tx_hold_zap(oh->ot_tx, osd->od_remote_parent_dir,
800                                 NULL, FALSE, NULL);
801
802         /* will help to find FID->ino when this object is being
803          * added to PENDING/ */
804         osd_idc_find_and_init(env, osd, obj);
805
806         RETURN(0);
807 }
808
809 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
810                        struct thandle *th)
811 {
812         struct osd_thread_info  *info = osd_oti_get(env);
813         char                    *buf = info->oti_str;
814         struct osd_object       *obj = osd_dt_obj(dt);
815         struct osd_device       *osd = osd_obj2dev(obj);
816         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
817         struct osd_thandle      *oh;
818         int                      rc;
819         uint64_t                 oid, zapid;
820         dnode_t *zdn;
821         ENTRY;
822
823         down_write(&obj->oo_guard);
824
825         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
826                 GOTO(out, rc = -ENOENT);
827
828         LASSERT(obj->oo_dn != NULL);
829
830         oh = container_of(th, struct osd_thandle, ot_super);
831         LASSERT(oh != NULL);
832         LASSERT(oh->ot_tx != NULL);
833
834         /* remove obj ref from index dir (it depends) */
835         zapid = osd_get_name_n_idx(env, osd, fid, buf,
836                                    sizeof(info->oti_str), &zdn);
837         rc = osd_xattrs_destroy(env, obj, oh);
838         if (rc) {
839                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
840                        osd->od_svname, buf, rc);
841                 GOTO(out, rc);
842         }
843
844         if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
845                 rc = osd_delete_from_remote_parent(env, osd, obj, oh, true);
846                 if (rc)
847                         GOTO(out, rc);
848         }
849
850         oid = obj->oo_dn->dn_object;
851         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
852                 /* this may happen if the destroy wasn't declared
853                  * e.g. when the object is created and then destroyed
854                  * in the same transaction - we don't need additional
855                  * space for destroy specifically */
856                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
857                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
858                 if (rc)
859                         CERROR("%s: failed to free %s/%#llx: rc = %d\n",
860                                osd->od_svname, buf, oid, rc);
861         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
862                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
863                 if (rc)
864                         CERROR("%s: failed to free %s/%#llx: rc = %d\n",
865                                osd->od_svname, buf, oid, rc);
866         } else { /* asynchronous destroy */
867                 char *key = info->oti_key;
868
869                 rc = osd_object_unlinked_add(obj, oh);
870                 if (rc)
871                         GOTO(out, rc);
872
873                 snprintf(key, sizeof(info->oti_key), "%llx", oid);
874                 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
875                                  osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
876                 if (rc)
877                         CERROR("%s: zap_add_int() failed %s/%#llx: rc = %d\n",
878                                osd->od_svname, buf, oid, rc);
879         }
880
881         /* Remove the OI mapping after the destroy to handle the race with
882          * OI scrub that may insert missed OI mapping during the interval. */
883         rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
884         if (unlikely(rc == -ENOENT))
885                 rc = 0;
886         if (rc)
887                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
888                        osd->od_svname, buf, rc);
889
890         GOTO(out, rc);
891
892 out:
893         /* not needed in the cache anymore */
894         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
895         if (rc == 0)
896                 obj->oo_destroyed = 1;
897         up_write(&obj->oo_guard);
898         RETURN (0);
899 }
900
901 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
902 {
903         struct osd_object *obj = osd_obj(l);
904         const struct lu_fid *fid = lu_object_fid(l);
905
906         if (obj->oo_dn) {
907                 if (likely(!fid_is_acct(fid))) {
908                         osd_object_sa_fini(obj);
909                         if (obj->oo_sa_xattr) {
910                                 nvlist_free(obj->oo_sa_xattr);
911                                 obj->oo_sa_xattr = NULL;
912                         }
913                         osd_dnode_rele(obj->oo_dn);
914                         list_del(&obj->oo_sa_linkage);
915                 }
916                 obj->oo_dn = NULL;
917         }
918 }
919
920 /*
921  * Concurrency: ->loo_object_release() is called under site spin-lock.
922  */
923 static void osd_object_release(const struct lu_env *env,
924                                struct lu_object *l)
925 {
926 }
927
928 /*
929  * Concurrency: shouldn't matter.
930  */
931 static int osd_object_print(const struct lu_env *env, void *cookie,
932                             lu_printer_t p, const struct lu_object *l)
933 {
934         struct osd_object *o = osd_obj(l);
935
936         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
937 }
938
939 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
940                           unsigned role)
941 {
942         struct osd_object *obj = osd_dt_obj(dt);
943
944         LASSERT(osd_invariant(obj));
945
946         down_read_nested(&obj->oo_sem, role);
947 }
948
949 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
950                            unsigned role)
951 {
952         struct osd_object *obj = osd_dt_obj(dt);
953
954         LASSERT(osd_invariant(obj));
955
956         down_write_nested(&obj->oo_sem, role);
957 }
958
959 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
960 {
961         struct osd_object *obj = osd_dt_obj(dt);
962
963         LASSERT(osd_invariant(obj));
964         up_read(&obj->oo_sem);
965 }
966
967 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
968 {
969         struct osd_object *obj = osd_dt_obj(dt);
970
971         LASSERT(osd_invariant(obj));
972         up_write(&obj->oo_sem);
973 }
974
975 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
976 {
977         struct osd_object *obj = osd_dt_obj(dt);
978         int rc = 1;
979
980         LASSERT(osd_invariant(obj));
981
982         if (down_write_trylock(&obj->oo_sem)) {
983                 rc = 0;
984                 up_write(&obj->oo_sem);
985         }
986         return rc;
987 }
988
989 static int osd_attr_get(const struct lu_env *env, struct dt_object *dt,
990                         struct lu_attr *attr)
991 {
992         struct osd_object *obj = osd_dt_obj(dt);
993         struct osd_device *osd = osd_obj2dev(obj);
994         uint64_t blocks;
995         uint32_t blksize;
996         int rc = 0;
997
998         down_read(&obj->oo_guard);
999
1000         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1001                 GOTO(out, rc = -ENOENT);
1002
1003         if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
1004                 GOTO(out, rc = 0);
1005
1006         LASSERT(osd_invariant(obj));
1007         LASSERT(obj->oo_dn);
1008
1009         read_lock(&obj->oo_attr_lock);
1010         *attr = obj->oo_attr;
1011         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL) {
1012                 attr->la_valid |= LA_FLAGS;
1013                 attr->la_flags |= LUSTRE_ORPHAN_FL;
1014         }
1015         if (obj->oo_lma_flags & LUSTRE_ENCRYPT_FL) {
1016                 attr->la_valid |= LA_FLAGS;
1017                 attr->la_flags |= LUSTRE_ENCRYPT_FL;
1018         }
1019         read_unlock(&obj->oo_attr_lock);
1020         if (attr->la_valid & LA_FLAGS && attr->la_flags & LUSTRE_ORPHAN_FL)
1021                 CDEBUG(D_INFO, "%s: set orphan flag on "DFID" (%#llx/%#x)\n",
1022                        osd_obj2dev(obj)->od_svname,
1023                        PFID(lu_object_fid(&dt->do_lu)),
1024                        attr->la_valid, obj->oo_lma_flags);
1025
1026         /* with ZFS_DEBUG zrl_add_debug() called by DB_DNODE_ENTER()
1027          * from within sa_object_size() can block on a mutex, so
1028          * we can't call sa_object_size() holding rwlock */
1029         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
1030         /* we do not control size of indices, so always calculate
1031          * it from number of blocks reported by DMU */
1032         if (S_ISDIR(attr->la_mode)) {
1033                 attr->la_size = 512 * blocks;
1034                 rc = -zap_count(osd->od_os, obj->oo_dn->dn_object,
1035                                 &attr->la_dirent_count);
1036         }
1037         /* Block size may be not set; suggest maximal I/O transfers. */
1038         if (blksize == 0)
1039                 blksize = osd_spa_maxblocksize(
1040                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
1041
1042         attr->la_blksize = blksize;
1043         attr->la_blocks = blocks;
1044         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
1045
1046 out:
1047         up_read(&obj->oo_guard);
1048         return rc;
1049 }
1050
1051 #ifdef ZFS_PROJINHERIT
1052 /*
1053  * For the existed object that is upgraded from old system, its ondisk layout
1054  * has no slot for the project ID attribute. But quota accounting logic needs
1055  * to access related slots by offset directly. So we need to adjust these old
1056  * objects' layout to make the project ID to some unified and fixed offset.
1057  */
1058 static int osd_add_projid(const struct lu_env *env, struct osd_object *obj,
1059                           struct osd_thandle *oh, uint64_t projid)
1060 {
1061         sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1062         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1063         struct osd_device *osd = osd_obj2dev(obj);
1064         uint64_t gen;
1065         size_t sa_size;
1066         char *dxattr = NULL;
1067         int rc, cnt;
1068
1069         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(osd), &osa->projid, 8);
1070         if (unlikely(rc == 0))
1071                 rc = -EEXIST;
1072         if (rc != -ENOENT)
1073                 GOTO(out, rc);
1074
1075         gen = dmu_tx_get_txg(oh->ot_tx);
1076         osa->atime[0] = obj->oo_attr.la_atime;
1077         osa->ctime[0] = obj->oo_attr.la_ctime;
1078         osa->mtime[0] = obj->oo_attr.la_mtime;
1079         osa->btime[0] = obj->oo_attr.la_btime;
1080         osa->mode = obj->oo_attr.la_mode;
1081         osa->uid = obj->oo_attr.la_uid;
1082         osa->gid = obj->oo_attr.la_gid;
1083         osa->rdev = obj->oo_attr.la_rdev;
1084         osa->nlink = obj->oo_attr.la_nlink;
1085         osa->flags = attrs_fs2zfs(obj->oo_attr.la_flags) | ZFS_PROJID;
1086         osa->size  = obj->oo_attr.la_size;
1087         osa->projid = projid;
1088
1089         cnt = 0;
1090         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1091         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1092         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1093         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1094         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1095         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL,
1096                          &obj->oo_parent, 8);
1097         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1098         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1099         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1100         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1101         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, osa->btime, 16);
1102         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1103         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL, &osa->projid, 8);
1104         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1105         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1106
1107         if (obj->oo_sa_xattr == NULL) {
1108                 rc = __osd_xattr_load(osd, obj->oo_sa_hdl, &obj->oo_sa_xattr);
1109                 if (rc)
1110                         GOTO(out, rc);
1111         }
1112
1113         if (obj->oo_sa_xattr) {
1114                 rc = -nvlist_size(obj->oo_sa_xattr, &sa_size, NV_ENCODE_XDR);
1115                 if (rc)
1116                         GOTO(out, rc);
1117
1118                 dxattr = osd_zio_buf_alloc(sa_size);
1119                 if (dxattr == NULL)
1120                         GOTO(out, rc = -ENOMEM);
1121
1122                 rc = -nvlist_pack(obj->oo_sa_xattr, &dxattr, &sa_size,
1123                                 NV_ENCODE_XDR, KM_SLEEP);
1124                 if (rc)
1125                         GOTO(out, rc);
1126
1127                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1128                                 NULL, dxattr, sa_size);
1129         }
1130
1131         rc = -sa_replace_all_by_template(obj->oo_sa_hdl, bulk, cnt, oh->ot_tx);
1132 out:
1133         if (dxattr)
1134                 osd_zio_buf_free(dxattr, sa_size);
1135         return rc;
1136 }
1137 #endif
1138
1139 static int osd_declare_attr_set(const struct lu_env *env,
1140                                 struct dt_object *dt,
1141                                 const struct lu_attr *attr,
1142                                 struct thandle *handle)
1143 {
1144         struct osd_thread_info  *info = osd_oti_get(env);
1145         struct osd_object       *obj = osd_dt_obj(dt);
1146         struct osd_device       *osd = osd_obj2dev(obj);
1147         dmu_tx_hold_t           *txh;
1148         struct osd_thandle      *oh;
1149         uint64_t                 bspace;
1150         uint32_t                 blksize;
1151         int                      rc = 0;
1152         bool                     found;
1153         ENTRY;
1154
1155
1156         LASSERT(handle != NULL);
1157         LASSERT(osd_invariant(obj));
1158
1159         oh = container_of(handle, struct osd_thandle, ot_super);
1160
1161         down_read(&obj->oo_guard);
1162         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1163                 GOTO(out_sem, rc = 0);
1164
1165         LASSERT(obj->oo_sa_hdl != NULL);
1166         LASSERT(oh->ot_tx != NULL);
1167         /* regular attributes are part of the bonus buffer */
1168         /* let's check whether this object is already part of
1169          * transaction.. */
1170         found = false;
1171         for (txh = list_head(&oh->ot_tx->tx_holds); txh;
1172              txh = list_next(&oh->ot_tx->tx_holds, txh)) {
1173                 if (txh->txh_dnode == NULL)
1174                         continue;
1175                 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
1176                         continue;
1177                 /* this object is part of the transaction already
1178                  * we don't need to declare bonus again */
1179                 found = true;
1180                 break;
1181         }
1182         if (!found)
1183                 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
1184         if (oh->ot_tx->tx_err != 0)
1185                 GOTO(out_sem, rc = -oh->ot_tx->tx_err);
1186
1187         if (attr && attr->la_valid & LA_FLAGS) {
1188                 /* LMA is usually a part of bonus, no need to declare
1189                  * anything else */
1190         }
1191
1192         if (attr && (attr->la_valid & (LA_UID | LA_GID | LA_PROJID))) {
1193                 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
1194                 bspace = toqb(bspace * 512);
1195
1196                 CDEBUG(D_QUOTA,
1197                        "%s: enforce quota on UID %u, GID %u, the quota space is %lld (%u)\n",
1198                        osd->od_svname,
1199                        attr->la_uid, attr->la_gid, bspace, blksize);
1200         }
1201         /* to preserve locking order - qsd_transfer() may need to flush
1202          * currently running transaction when we're out of quota.
1203          */
1204         up_read(&obj->oo_guard);
1205
1206         /* quota enforcement for user */
1207         if (attr && attr->la_valid & LA_UID &&
1208             attr->la_uid != obj->oo_attr.la_uid) {
1209                 rc = qsd_transfer(env, osd_def_qsd(osd),
1210                                   &oh->ot_quota_trans, USRQUOTA,
1211                                   obj->oo_attr.la_uid, attr->la_uid,
1212                                   bspace, &info->oti_qi);
1213                 if (rc)
1214                         GOTO(out, rc);
1215         }
1216
1217         /* quota enforcement for group */
1218         if (attr && attr->la_valid & LA_GID &&
1219             attr->la_gid != obj->oo_attr.la_gid) {
1220                 rc = qsd_transfer(env, osd_def_qsd(osd),
1221                                   &oh->ot_quota_trans, GRPQUOTA,
1222                                   obj->oo_attr.la_gid, attr->la_gid,
1223                                   bspace, &info->oti_qi);
1224                 if (rc)
1225                         GOTO(out, rc);
1226         }
1227 #ifdef ZFS_PROJINHERIT
1228         /* quota enforcement for project */
1229         if (attr && attr->la_valid & LA_PROJID &&
1230             attr->la_projid != obj->oo_attr.la_projid) {
1231                 if (!osd->od_projectused_dn)
1232                         GOTO(out, rc = -EOPNOTSUPP);
1233
1234                 if (!zpl_is_valid_projid(attr->la_projid))
1235                         GOTO(out, rc = -EINVAL);
1236
1237                 rc = qsd_transfer(env, osd_def_qsd(osd),
1238                                   &oh->ot_quota_trans, PRJQUOTA,
1239                                   obj->oo_attr.la_projid,
1240                                   attr->la_projid, bspace,
1241                                   &info->oti_qi);
1242                 if (rc)
1243                         GOTO(out, rc);
1244         }
1245 #endif
1246 out:
1247         RETURN(rc);
1248 out_sem:
1249         up_read(&obj->oo_guard);
1250         RETURN(rc);
1251 }
1252
1253 /*
1254  * Set the attributes of an object
1255  *
1256  * The transaction passed to this routine must have
1257  * dmu_tx_hold_bonus(tx, oid) called and then assigned
1258  * to a transaction group.
1259  */
1260 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
1261                         const struct lu_attr *la, struct thandle *handle)
1262 {
1263         struct osd_thread_info  *info = osd_oti_get(env);
1264         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
1265         struct osd_object       *obj = osd_dt_obj(dt);
1266         struct osd_device       *osd = osd_obj2dev(obj);
1267         struct osd_thandle      *oh;
1268         struct osa_attr         *osa = &info->oti_osa;
1269         __u64                    valid = la->la_valid;
1270         int                      cnt;
1271         int                      rc = 0;
1272
1273         ENTRY;
1274
1275         down_read(&obj->oo_guard);
1276         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1277                 GOTO(out, rc = -ENOENT);
1278
1279         LASSERT(handle != NULL);
1280         LASSERT(osd_invariant(obj));
1281         LASSERT(obj->oo_sa_hdl);
1282
1283         oh = container_of(handle, struct osd_thandle, ot_super);
1284         /* Assert that the transaction has been assigned to a
1285            transaction group. */
1286         LASSERT(oh->ot_tx->tx_txg != 0);
1287
1288         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) && !osd->od_is_ost) {
1289                 struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
1290                 char *buf = info->oti_str;
1291                 dnode_t *zdn = NULL;
1292                 uint64_t zapid;
1293
1294                 zapid = osd_get_name_n_idx(env, osd, lu_object_fid(&dt->do_lu),
1295                                            buf, sizeof(info->oti_str), &zdn);
1296                 rc = osd_zap_lookup(osd, zapid, zdn, buf, 8,
1297                                     sizeof(*zde) / 8, zde);
1298                 if (!rc) {
1299                         zde->zde_dnode -= 1;
1300                         rc = -zap_update(osd->od_os, zapid, buf, 8,
1301                                          sizeof(*zde) / 8, zde, oh->ot_tx);
1302                 }
1303                 if (rc > 0)
1304                         rc = 0;
1305                 GOTO(out, rc);
1306         }
1307
1308         /* Only allow set size for regular file */
1309         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
1310                 valid &= ~(LA_SIZE | LA_BLOCKS);
1311
1312         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
1313                 valid &= ~LA_CTIME;
1314
1315         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
1316                 valid &= ~LA_MTIME;
1317
1318         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
1319                 valid &= ~LA_ATIME;
1320
1321         if (valid == 0)
1322                 GOTO(out, rc = 0);
1323
1324         if (valid & LA_FLAGS) {
1325                 struct lustre_mdt_attrs *lma;
1326                 struct lu_buf buf;
1327                 int size = 0;
1328
1329                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
1330                         LASSERT(!obj->oo_pfid_in_lma);
1331                         BUILD_BUG_ON(sizeof(info->oti_buf) < sizeof(*lma));
1332                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
1333                         buf.lb_buf = lma;
1334                         buf.lb_len = sizeof(info->oti_buf);
1335
1336                         /* Please do NOT call osd_xattr_get() directly, that
1337                          * will cause recursive down_read() on oo_guard. */
1338                         rc = osd_xattr_get_internal(env, obj, &buf,
1339                                                     XATTR_NAME_LMA, &size);
1340                         if (!rc && unlikely(size < sizeof(*lma))) {
1341                                 rc = -EINVAL;
1342                         } else if (!rc) {
1343                                 lma->lma_incompat =
1344                                         le32_to_cpu(lma->lma_incompat);
1345                                 lma->lma_incompat |=
1346                                         lustre_to_lma_flags(la->la_flags);
1347                                 lma->lma_incompat =
1348                                         cpu_to_le32(lma->lma_incompat);
1349                                 buf.lb_buf = lma;
1350                                 buf.lb_len = sizeof(*lma);
1351                                 rc = osd_xattr_set_internal(env, obj, &buf,
1352                                                             XATTR_NAME_LMA,
1353                                                             LU_XATTR_REPLACE,
1354                                                             oh);
1355                         }
1356                         if (rc < 0) {
1357                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
1358                                        osd->od_svname, rc);
1359                                 GOTO(out, rc);
1360                         } else {
1361                                 obj->oo_lma_flags =
1362                                         la->la_flags & LUSTRE_LMA_FL_MASKS;
1363                         }
1364                 }
1365         }
1366
1367         write_lock(&obj->oo_attr_lock);
1368         cnt = 0;
1369
1370         if (valid & LA_PROJID) {
1371 #ifdef ZFS_PROJINHERIT
1372                 if (osd->od_projectused_dn) {
1373                         if (obj->oo_with_projid) {
1374                                 osa->projid  = la->la_projid;
1375                                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd),
1376                                                  NULL, &osa->projid, 8);
1377                         } else {
1378                                 rc = osd_add_projid(env, obj, oh,
1379                                                     la->la_projid);
1380                                 if (unlikely(rc == -EEXIST)) {
1381                                         rc = 0;
1382                                 } else if (rc != 0) {
1383                                         write_unlock(&obj->oo_attr_lock);
1384                                         GOTO(out, rc);
1385                                 }
1386                                 obj->oo_with_projid = 1;
1387                         }
1388                         obj->oo_attr.la_projid = la->la_projid;
1389                 } else
1390 #endif
1391                         valid &= ~LA_PROJID;
1392         }
1393
1394         if (valid & LA_ATIME) {
1395                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1396                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1397                                  osa->atime, 16);
1398         }
1399         if (valid & LA_MTIME) {
1400                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1401                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1402                                  osa->mtime, 16);
1403         }
1404         if (valid & LA_CTIME) {
1405                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1406                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1407                                  osa->ctime, 16);
1408         }
1409         if (valid & LA_MODE) {
1410                 /* mode is stored along with type, so read it first */
1411                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1412                         (la->la_mode & ~S_IFMT);
1413                 osa->mode = obj->oo_attr.la_mode;
1414                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1415                                  &osa->mode, 8);
1416         }
1417         if (valid & LA_SIZE) {
1418                 osa->size = obj->oo_attr.la_size = la->la_size;
1419                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1420                                  &osa->size, 8);
1421         }
1422         if (valid & LA_NLINK) {
1423                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1424                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1425                                  &osa->nlink, 8);
1426         }
1427         if (valid & LA_RDEV) {
1428                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1429                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1430                                  &osa->rdev, 8);
1431         }
1432         if (valid & LA_FLAGS) {
1433                 osa->flags = attrs_fs2zfs(la->la_flags);
1434                 /* many flags are not supported by ZFS, so keep the cached
1435                  * copy consistent with what was actually stored */
1436                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1437 #ifdef ZFS_PROJINHERIT
1438                 if (obj->oo_with_projid && osd->od_projectused_dn)
1439                         osa->flags |= ZFS_PROJID;
1440 #endif
1441                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1442                                  &osa->flags, 8);
1443         }
1444         if (valid & LA_UID) {
1445                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1446                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1447                                  &osa->uid, 8);
1448         }
1449         if (valid & LA_GID) {
1450                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1451                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1452                                  &osa->gid, 8);
1453         }
1454         obj->oo_attr.la_valid |= valid;
1455         write_unlock(&obj->oo_attr_lock);
1456
1457         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1458         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1459
1460 out:
1461         up_read(&obj->oo_guard);
1462         RETURN(rc);
1463 }
1464
1465 /*
1466  * Object creation.
1467  *
1468  * XXX temporary solution.
1469  */
1470
1471 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1472                         struct dt_object *parent, struct dt_object *child,
1473                         umode_t child_mode)
1474 {
1475         LASSERT(ah);
1476
1477         ah->dah_parent = parent;
1478         ah->dah_mode = child_mode;
1479
1480         if (parent != NULL && !dt_object_remote(parent)) {
1481                 /* will help to find FID->ino at dt_insert("..") */
1482                 struct osd_object *pobj = osd_dt_obj(parent);
1483
1484                 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1485         }
1486 }
1487
1488 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1489                               struct lu_attr *attr,
1490                               struct dt_allocation_hint *hint,
1491                               struct dt_object_format *dof,
1492                               struct thandle *handle)
1493 {
1494         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1495         struct osd_object       *obj = osd_dt_obj(dt);
1496         struct osd_device       *osd = osd_obj2dev(obj);
1497         struct osd_thandle      *oh;
1498         uint64_t                 zapid;
1499         dnode_t                 *dn;
1500         int                      rc, dnode_size;
1501         ENTRY;
1502
1503         LASSERT(dof);
1504
1505         switch (dof->dof_type) {
1506                 case DFT_REGULAR:
1507                 case DFT_SYM:
1508                 case DFT_NODE:
1509                         if (obj->oo_dt.do_body_ops == NULL)
1510                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1511                         break;
1512                 default:
1513                         break;
1514         }
1515
1516         LASSERT(handle != NULL);
1517         oh = container_of(handle, struct osd_thandle, ot_super);
1518         LASSERT(oh->ot_tx != NULL);
1519
1520         /* this is the minimum set of EAs on every Lustre object */
1521         obj->oo_ea_in_bonus = OSD_BASE_EA_IN_BONUS;
1522         /* reserve 32 bytes for extra stuff like ACLs */
1523         dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1524
1525         switch (dof->dof_type) {
1526                 case DFT_DIR:
1527                         dt->do_index_ops = &osd_dir_ops;
1528                         fallthrough;
1529                 case DFT_INDEX:
1530                         /* for zap create */
1531                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1532                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1533                         break;
1534                 case DFT_REGULAR:
1535                 case DFT_SYM:
1536                 case DFT_NODE:
1537                         /* first, we'll create new object */
1538                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1539                         break;
1540
1541                 default:
1542                         LBUG();
1543                         break;
1544         }
1545
1546         /* and we'll add it to some mapping */
1547         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1548         osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1549
1550         /* will help to find FID->ino mapping at dt_insert() */
1551         osd_idc_find_and_init(env, osd, obj);
1552
1553         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid,
1554                                attr->la_projid, 1, oh, NULL, OSD_QID_INODE);
1555
1556         RETURN(rc);
1557 }
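
/*
 * Note: the holds declared above mirror what osd_create() below actually does:
 * a ZAP create for DFT_DIR/DFT_INDEX, a plain DMU object for the other types,
 * plus the name/OI insert covered by osd_tx_hold_zap() on the mapping returned
 * by osd_get_name_n_idx().
 */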
1558
1559 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1560                     struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
1561                     struct lu_attr *la, uint64_t parent,
1562                     nvlist_t *xattr)
1563 {
1564         sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1565         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1566         uint64_t gen;
1567         inode_timespec_t now;
1568         int cnt;
1569         int rc;
1570         char *dxattr = NULL;
1571         size_t sa_size;
1572
1573
1574         LASSERT(sa_hdl);
1575
1576         gen = dmu_tx_get_txg(tx);
1577         gethrestime(&now);
1578         ZFS_TIME_ENCODE(&now, osa->btime);
1579
1580         osa->atime[0] = la->la_atime;
1581         osa->ctime[0] = la->la_ctime;
1582         osa->mtime[0] = la->la_mtime;
1583         osa->mode = la->la_mode;
1584         osa->uid = la->la_uid;
1585         osa->gid = la->la_gid;
1586         osa->rdev = la->la_rdev;
1587         osa->nlink = la->la_nlink;
1588         if (la->la_valid & LA_FLAGS)
1589                 osa->flags = attrs_fs2zfs(la->la_flags);
1590         else
1591                 osa->flags = 0;
1592         osa->size  = la->la_size;
1593 #ifdef ZFS_PROJINHERIT
1594         if (osd->od_projectused_dn) {
1595                 if (la->la_valid & LA_PROJID)
1596                         osa->projid = la->la_projid;
1597                 else
1598                         osa->projid = ZFS_DEFAULT_PROJID;
1599                 osa->flags |= ZFS_PROJID;
1600                 if (obj)
1601                         obj->oo_with_projid = 1;
1602         } else {
1603                 osa->flags &= ~ZFS_PROJID;
1604         }
1605 #endif
1606
1607         /*
1608          * we need to create all of the SAs below upon object creation.
1609          *
1610          * XXX The attribute order matters since the accounting callback relies
1611          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1612          * look up the UID/GID/PROJID attributes. Moreover, the callback does
1613          * not seem to support the spill block.
1614          * We define attributes in the same order as SA_*_OFFSET in order to
1615          * work around the problem. See ORI-610.
1616          */
1617         cnt = 0;
1618         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1619         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1620         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1621         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1622         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1623         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1624         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1625         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1626         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1627         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1628         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, osa->btime, 16);
1629         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1630 #ifdef ZFS_PROJINHERIT
1631         if (osd->od_projectused_dn)
1632                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1633                                  &osa->projid, 8);
1634 #endif
1635         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1636         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1637
1638         if (xattr) {
1639                 rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
1640                 LASSERT(rc == 0);
1641
1642                 dxattr = osd_zio_buf_alloc(sa_size);
1643                 LASSERT(dxattr);
1644
1645                 rc = -nvlist_pack(xattr, &dxattr, &sa_size,
1646                                 NV_ENCODE_XDR, KM_SLEEP);
1647                 LASSERT(rc == 0);
1648
1649                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1650                                 NULL, dxattr, sa_size);
1651         }
1652
1653         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1654         if (dxattr)
1655                 osd_zio_buf_free(dxattr, sa_size);
1656
1657         return rc;
1658 }
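
/*
 * Illustrative sketch only (not a code path in this file): how a caller could
 * build the optional @xattr nvlist that __osd_attr_init() packs into
 * SA_ZPL_DXATTR.  The "trusted.foo" name and value are hypothetical;
 * osd_create() below builds the real list with XATTR_NAME_LMA.
 *
 *	nvlist_t *xattr = NULL;
 *	uchar_t val[2] = { 0x01, 0x02 };
 *
 *	rc = -nvlist_alloc(&xattr, NV_UNIQUE_NAME, KM_SLEEP);
 *	if (rc == 0)
 *		rc = -nvlist_add_byte_array(xattr, "trusted.foo", val,
 *					    sizeof(val));
 *	if (rc == 0)
 *		rc = __osd_attr_init(env, osd, obj, sa_hdl, tx, la,
 *				     parent, xattr);
 *	nvlist_free(xattr);
 */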
1659
1660 int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1661                        uint64_t oid, dnode_t **dnp)
1662 {
1663         dmu_tx_hold_t *txh;
1664         int rc = 0;
1665
1666         /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1667         for (txh = list_tail(&tx->tx_holds); txh;
1668              txh = list_prev(&tx->tx_holds, txh)) {
1669                 dnode_t *dn = txh->txh_dnode;
1670                 dmu_buf_impl_t *db;
1671
1672                 if (dn == NULL)
1673                         continue;
1674                 if (dn->dn_object != oid)
1675                         continue;
1676                 db = dn->dn_bonus;
1677                 if (db == NULL) {
1678                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1679                         if (dn->dn_bonus == NULL)
1680                                 dbuf_create_bonus(dn);
1681                         rw_exit(&dn->dn_struct_rwlock);
1682                 }
1683                 db = dn->dn_bonus;
1684                 LASSERT(db);
1685                 LASSERT(dn->dn_handle);
1686                 DB_DNODE_ENTER(db);
1687                 if (zfs_refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1688                         zfs_refcount_add(&dn->dn_holds, osd_obj_tag);
1689                         atomic_inc_32(&dn->dn_dbufs_count);
1690                 }
1691                 *dnp = dn;
1692                 DB_DNODE_EXIT(db);
1693                 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1694                 break;
1695         }
1696
1697         if (unlikely(*dnp == NULL))
1698                 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1699
1700         return rc;
1701 }
1702
1703 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
1704 int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
1705 {
1706         int dnsize;
1707
1708         if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
1709                 dnsize = DNODE_MIN_SIZE;
1710                 do {
1711                         if (DN_BONUS_SIZE(dnsize) >= ea_in_bonus + 32)
1712                                 break;
1713                         dnsize <<= 1;
1714                 } while (dnsize < DNODE_MAX_SIZE);
1715                 if (dnsize > DNODE_MAX_SIZE)
1716                         dnsize = DNODE_MAX_SIZE;
1717         } else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
1718                 dnsize = 1024;
1719         } else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
1720                 dnsize = 2048;
1721         } else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
1722                 dnsize = 4096;
1723         } else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
1724                 dnsize = 8192;
1725         } else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
1726                 dnsize = 16384;
1727         } else {
1728                 dnsize = DNODE_MIN_SIZE;
1729         }
1730         return dnsize;
1731 }
1732 #endif
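
/*
 * Example with ZFS_DNSIZE_AUTO (hypothetical sizes): if DN_BONUS_SIZE(512) is
 * too small for ea_in_bonus + 32 but DN_BONUS_SIZE(1024) fits, 1024 is
 * returned; a fixed ZFS_DNSIZE_* setting returns that size directly, and any
 * other setting falls back to DNODE_MIN_SIZE.
 */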
1733
1734 /*
1735  * The transaction passed to this routine must have
1736  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1737  * to a transaction group.
1738  */
1739 int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
1740                         struct osd_object *obj, const struct lu_fid *fid,
1741                         dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1742 {
1743         dmu_object_type_t type = DMU_OT_PLAIN_FILE_CONTENTS;
1744         uint64_t oid;
1745         int size;
1746
1747         /* Use DMU_OTN_UINT8_METADATA for local objects so that their data
1748          * blocks get an additional ditto copy */
1749         if (unlikely(S_ISREG(la->la_mode) &&
1750                      fid_seq_is_local_file(fid_seq(fid))))
1751                 type = DMU_OTN_UINT8_METADATA;
1752
1753         /* Create a new DMU object with a dnode sized to fit the EAs. */
1754         if (obj)
1755                 size = obj->oo_ea_in_bonus;
1756         else
1757                 size = OSD_BASE_EA_IN_BONUS;
1758         oid = osd_dmu_object_alloc(osd->od_os, type, 0,
1759                                    osd_find_dnsize(osd, size), tx);
1760
1761         LASSERT(la->la_valid & LA_MODE);
1762         la->la_size = 0;
1763         la->la_nlink = 1;
1764
1765         return osd_find_new_dnode(env, tx, oid, dnp);
1766 }
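
/*
 * Minimal sketch of the transaction contract documented above, using generic
 * DMU calls rather than the OSD declare/start path and assuming the classic
 * dmu_tx_assign(tx, TXG_WAIT) interface:
 *
 *	dmu_tx_t *tx = dmu_tx_create(osd->od_os);
 *
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		rc = __osd_object_create(env, osd, obj, fid, &dn, tx, la);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 *
 * In this OSD the equivalent holds come from osd_declare_create() and the
 * assignment is performed when the transaction handle is started.
 */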
1767
1768 /*
1769  * The transaction passed to this routine must have
1770  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1771  * to a transaction group.
1772  *
1773  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1774  * This is fine for directories today, because storing the FID in the dirent
1775  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1776  * then we might need to re-evaluate the use of this flag and instead do
1777  * a conversion from the different internal ZAP hash formats being used. */
1778 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1779                      dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1780                      unsigned dnsize, zap_flags_t flags)
1781 {
1782         uint64_t oid;
1783
1784         /* Assert that the transaction has been assigned to a
1785            transaction group. */
1786         LASSERT(tx->tx_txg != 0);
1787         *dnp = NULL;
1788
1789         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1790                                    DMU_OT_DIRECTORY_CONTENTS,
1791                                    14, /* == ZFS fzap_default_blockshift */
1792                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1793                                    dnsize, tx);
1794
1795         la->la_size = 2;
1796         la->la_nlink = 1;
1797
1798         return osd_find_new_dnode(env, tx, oid, dnp);
1799 }
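
/*
 * The matching hold for this path is the dmu_tx_hold_zap(oh->ot_tx,
 * DMU_NEW_OBJECT, ...) done in osd_declare_create() for DFT_DIR/DFT_INDEX;
 * osd_mkidx() and osd_mkdir() below call this helper with and without
 * ZAP_FLAG_UINT64_KEY respectively.
 */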
1800
1801 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1802                           struct lu_attr *la, struct osd_thandle *oh)
1803 {
1804         struct osd_device *osd = osd_obj2dev(obj);
1805         dnode_t *dn;
1806         int rc;
1807
1808         /* Index files should be created as regular files so that ZPL does
1809          * not interpret them as directories.
1810          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to
1811          * use binary keys */
1812         LASSERT(S_ISREG(la->la_mode));
1813         rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1814                 osd_find_dnsize(osd, obj->oo_ea_in_bonus), ZAP_FLAG_UINT64_KEY);
1815         if (rc)
1816                 return ERR_PTR(rc);
1817         return dn;
1818 }
1819
1820 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1821                           struct lu_attr *la, struct osd_thandle *oh)
1822 {
1823         struct osd_device *osd = osd_obj2dev(obj);
1824         dnode_t *dn;
1825         int rc;
1826
1827         LASSERT(S_ISDIR(la->la_mode));
1828         rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1829                               osd_find_dnsize(osd, obj->oo_ea_in_bonus), 0);
1830         if (rc)
1831                 return ERR_PTR(rc);
1832         return dn;
1833 }
1834
1835 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1836                           struct lu_attr *la, struct osd_thandle *oh)
1837 {
1838         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1839         struct osd_device *osd = osd_obj2dev(obj);
1840         dnode_t *dn;
1841         int rc;
1842
1843         LASSERT(S_ISREG(la->la_mode));
1844         rc = __osd_object_create(env, osd, obj, fid, &dn, oh->ot_tx, la);
1845         if (rc)
1846                 return ERR_PTR(rc);
1847
1848         if (fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid)) {
1849                 /* The minimum block size must be at least page size otherwise
1850                  * it will break the assumption in tgt_thread_big_cache where
1851                  * the array size is PTLRPC_MAX_BRW_PAGES. It will also affect
1852                  * RDMA due to subpage transfer size */
1853                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1854                                                PAGE_SIZE, 0, oh->ot_tx);
1855                 if (unlikely(rc)) {
1856                         CERROR("%s: can't change blocksize: %d\n",
1857                                osd->od_svname, rc);
1858                         return ERR_PTR(rc);
1859                 }
1860         } else if (fid_is_llog(fid)) {
1861                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1862                                                LLOG_MIN_CHUNK_SIZE, 0, oh->ot_tx);
1863                 if (unlikely(rc)) {
1864                         CERROR("%s: can't change blocksize: %d\n",
1865                                osd->od_svname, rc);
1866                         return ERR_PTR(rc);
1867                 }
1868         }
1869
1870         return dn;
1871 }
1872
1873 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1874                           struct lu_attr *la, struct osd_thandle *oh)
1875 {
1876         dnode_t *dn;
1877         int rc;
1878
1879         LASSERT(S_ISLNK(la->la_mode));
1880         rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1881                                  lu_object_fid(&obj->oo_dt.do_lu),
1882                                  &dn, oh->ot_tx, la);
1883         if (rc)
1884                 return ERR_PTR(rc);
1885         return dn;
1886 }
1887
1888 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1889                           struct lu_attr *la, struct osd_thandle *oh)
1890 {
1891         dnode_t *dn;
1892         int rc;
1893
1894         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1895                 la->la_valid |= LA_RDEV;
1896
1897         rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1898                                  lu_object_fid(&obj->oo_dt.do_lu),
1899                                  &dn, oh->ot_tx, la);
1900         if (rc)
1901                 return ERR_PTR(rc);
1902         return dn;
1903 }
1904
1905 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1906                                    struct osd_object *obj,
1907                                    struct lu_attr *la,
1908                                    struct osd_thandle *oh);
1909
1910 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1911 {
1912         osd_obj_type_f result;
1913
1914         switch (type) {
1915         case DFT_DIR:
1916                 result = osd_mkdir;
1917                 break;
1918         case DFT_INDEX:
1919                 result = osd_mkidx;
1920                 break;
1921         case DFT_REGULAR:
1922                 result = osd_mkreg;
1923                 break;
1924         case DFT_SYM:
1925                 result = osd_mksym;
1926                 break;
1927         case DFT_NODE:
1928                 result = osd_mknod;
1929                 break;
1930         default:
1931                 LBUG();
1932                 break;
1933         }
1934         return result;
1935 }
1936
1937 /*
1938  * Concurrency: @dt is write locked.
1939  */
1940 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1941                       struct lu_attr *attr, struct dt_allocation_hint *hint,
1942                       struct dt_object_format *dof, struct thandle *th)
1943 {
1944         struct osd_thread_info  *info = osd_oti_get(env);
1945         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1946         struct zpl_direntry     *zde = &info->oti_zde.lzd_reg;
1947         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1948         struct osd_object       *obj = osd_dt_obj(dt);
1949         struct osd_device       *osd = osd_obj2dev(obj);
1950         char                    *buf = info->oti_str;
1951         struct osd_thandle      *oh;
1952         dnode_t *dn = NULL, *zdn = NULL;
1953         uint64_t                 zapid, parent = 0;
1954         int                      rc;
1955         __u32 compat = 0;
1956
1957         ENTRY;
1958
1959         LASSERT(!fid_is_acct(fid));
1960
1961         /* concurrent create declarations should not see the object in an
1962          * inconsistent state (db, attr, etc.); in regular cases the lock
1963          * acquisition should be cheap */
1964         down_write(&obj->oo_guard);
1965
1966         if (unlikely(dt_object_exists(dt)))
1967                 GOTO(out, rc = -EEXIST);
1968
1969         LASSERT(osd_invariant(obj));
1970         LASSERT(dof != NULL);
1971
1972         LASSERT(th != NULL);
1973         oh = container_of(th, struct osd_thandle, ot_super);
1974
1975         LASSERT(obj->oo_dn == NULL);
1976
1977         /* to follow ZFS on-disk format we need
1978          * to initialize parent dnode properly */
1979         if (hint != NULL && hint->dah_parent != NULL &&
1980             !dt_object_remote(hint->dah_parent))
1981                 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1982
1983         /* we may fix some attributes; do not modify the caller's copy */
1984         obj->oo_attr = *attr;
1985         obj->oo_attr.la_size = 0;
1986         obj->oo_attr.la_nlink = 0;
1987         obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1988
1989 #ifdef ZFS_PROJINHERIT
1990         if (osd->od_projectused_dn) {
1991                 if (!(obj->oo_attr.la_valid & LA_PROJID))
1992                         obj->oo_attr.la_projid = ZFS_DEFAULT_PROJID;
1993                 obj->oo_with_projid = 1;
1994         }
1995 #endif
1996
1997         dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1998         if (IS_ERR(dn)) {
1999                 rc = PTR_ERR(dn);
2000                 dn = NULL;
2001                 GOTO(out, rc);
2002         }
2003
2004         zde->zde_pad = 0;
2005         zde->zde_dnode = dn->dn_object;
2006         zde->zde_type = S_DT(attr->la_mode & S_IFMT);
2007
2008         zapid = osd_get_name_n_idx(env, osd, fid, buf,
2009                                    sizeof(info->oti_str), &zdn);
2010         if (CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY) ||
2011             (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_NO_ENTRY)))
2012                 goto skip_add;
2013
2014         if (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
2015                 zde->zde_dnode++;
2016
2017         rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
2018         if (rc)
2019                 GOTO(out, rc);
2020
2021 skip_add:
2022         obj->oo_dn = dn;
2023         /* Now add in all of the "SA" attributes */
2024         rc = osd_sa_handle_get(obj);
2025         if (rc)
2026                 GOTO(out, rc);
2027
2028         rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
2029         if (rc)
2030                 GOTO(out, rc);
2031
2032         /* initialize LMA */
2033         if (fid_is_idif(fid) || (fid_is_norm(fid) && osd->od_is_ost))
2034                 compat |= LMAC_FID_ON_OST;
2035         lustre_lma_init(lma, fid, compat, 0);
2036         lustre_lma_swab(lma);
2037         rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
2038                                     (uchar_t *)lma, sizeof(*lma));
2039         if (rc)
2040                 GOTO(out, rc);
2041
2042         /* configure new osd object */
2043         obj->oo_parent = parent != 0 ? parent : zapid;
2044         obj->oo_late_attr_set = 1;
2045         rc = __osd_sa_xattr_schedule_update(env, obj, oh);
2046         if (rc)
2047                 GOTO(out, rc);
2048
2049         /* XXX: oo_lma_flags */
2050         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
2051         if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
2052                 /* no body operations for accounting objects */
2053                 obj->oo_dt.do_body_ops = &osd_body_ops;
2054
2055         osd_idc_find_and_init(env, osd, obj);
2056
2057 out:
2058         if (unlikely(rc && dn)) {
2059                 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
2060                 osd_dnode_rele(dn);
2061                 obj->oo_dn = NULL;
2062         } else if (!rc) {
2063                 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
2064         }
2065         up_write(&obj->oo_guard);
2066         RETURN(rc);
2067 }
2068
2069 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
2070                                struct thandle *th)
2071 {
2072         osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2073         return osd_declare_attr_set(env, dt, NULL, th);
2074 }
2075
2076 /*
2077  * Concurrency: @dt is write locked.
2078  */
2079 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
2080                        struct thandle *handle)
2081 {
2082         struct osd_object       *obj = osd_dt_obj(dt);
2083         struct osd_thandle      *oh;
2084         struct osd_device       *osd = osd_obj2dev(obj);
2085         uint64_t                 nlink;
2086         int rc;
2087
2088         ENTRY;
2089
2090         down_read(&obj->oo_guard);
2091         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2092                 GOTO(out, rc = -ENOENT);
2093
2094         LASSERT(osd_invariant(obj));
2095         LASSERT(obj->oo_sa_hdl != NULL);
2096
2097         oh = container_of(handle, struct osd_thandle, ot_super);
2098
2099         write_lock(&obj->oo_attr_lock);
2100         nlink = ++obj->oo_attr.la_nlink;
2101         write_unlock(&obj->oo_attr_lock);
2102
2103         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2104
2105 out:
2106         up_read(&obj->oo_guard);
2107         RETURN(rc);
2108 }
2109
2110 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
2111                                struct thandle *handle)
2112 {
2113         osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2114         return osd_declare_attr_set(env, dt, NULL, handle);
2115 }
2116
2117 /*
2118  * Concurrency: @dt is write locked.
2119  */
2120 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
2121                        struct thandle *handle)
2122 {
2123         struct osd_object       *obj = osd_dt_obj(dt);
2124         struct osd_thandle      *oh;
2125         struct osd_device       *osd = osd_obj2dev(obj);
2126         uint64_t                 nlink;
2127         int                      rc;
2128
2129         ENTRY;
2130
2131         down_read(&obj->oo_guard);
2132
2133         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2134                 GOTO(out, rc = -ENOENT);
2135
2136         LASSERT(osd_invariant(obj));
2137         LASSERT(obj->oo_sa_hdl != NULL);
2138
2139         oh = container_of(handle, struct osd_thandle, ot_super);
2140         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2141
2142         write_lock(&obj->oo_attr_lock);
2143         nlink = --obj->oo_attr.la_nlink;
2144         write_unlock(&obj->oo_attr_lock);
2145
2146         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2147
2148 out:
2149         up_read(&obj->oo_guard);
2150         RETURN(rc);
2151 }
2152
2153 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
2154                            __u64 start, __u64 end)
2155 {
2156         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2157         uint64_t txg = 0;
2158         ENTRY;
2159
2160         if (osd->od_dt_dev.dd_rdonly)
2161                 RETURN(0);
2162
2163         txg = osd_db_dirty_txg(osd_dt_obj(dt)->oo_dn->dn_dbuf);
2164         if (txg) {
2165                 /* the object is dirty or being synced */
2166                 if (osd_object_sync_delay_us < 0)
2167                         txg_wait_synced(dmu_objset_pool(osd->od_os), txg);
2168                 else
2169                         udelay(osd_object_sync_delay_us);
2170         }
2171
2172         RETURN(0);
2173 }
2174
2175 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
2176 {
2177         return 0;
2178 }
2179
2180 static bool osd_check_stale(struct dt_object *dt)
2181 {
2182         return false;
2183 }
2184
2185 static const struct dt_object_operations osd_obj_ops = {
2186         .do_read_lock           = osd_read_lock,
2187         .do_write_lock          = osd_write_lock,
2188         .do_read_unlock         = osd_read_unlock,
2189         .do_write_unlock        = osd_write_unlock,
2190         .do_write_locked        = osd_write_locked,
2191         .do_attr_get            = osd_attr_get,
2192         .do_declare_attr_set    = osd_declare_attr_set,
2193         .do_attr_set            = osd_attr_set,
2194         .do_ah_init             = osd_ah_init,
2195         .do_declare_create      = osd_declare_create,
2196         .do_create              = osd_create,
2197         .do_declare_destroy     = osd_declare_destroy,
2198         .do_destroy             = osd_destroy,
2199         .do_index_try           = osd_index_try,
2200         .do_declare_ref_add     = osd_declare_ref_add,
2201         .do_ref_add             = osd_ref_add,
2202         .do_declare_ref_del     = osd_declare_ref_del,
2203         .do_ref_del             = osd_ref_del,
2204         .do_xattr_get           = osd_xattr_get,
2205         .do_declare_xattr_set   = osd_declare_xattr_set,
2206         .do_xattr_set           = osd_xattr_set,
2207         .do_declare_xattr_del   = osd_declare_xattr_del,
2208         .do_xattr_del           = osd_xattr_del,
2209         .do_xattr_list          = osd_xattr_list,
2210         .do_object_sync         = osd_object_sync,
2211         .do_invalidate          = osd_invalidate,
2212         .do_check_stale         = osd_check_stale,
2213 };
2214
2215 static const struct lu_object_operations osd_lu_obj_ops = {
2216         .loo_object_init        = osd_object_init,
2217         .loo_object_delete      = osd_object_delete,
2218         .loo_object_release     = osd_object_release,
2219         .loo_object_free        = osd_object_free,
2220         .loo_object_print       = osd_object_print,
2221         .loo_object_invariant   = osd_object_invariant,
2222 };
2223
2224 static int osd_otable_it_attr_get(const struct lu_env *env,
2225                                 struct dt_object *dt,
2226                                 struct lu_attr *attr)
2227 {
2228         attr->la_valid = 0;
2229         return 0;
2230 }
2231
2232 static const struct dt_object_operations osd_obj_otable_it_ops = {
2233         .do_attr_get            = osd_otable_it_attr_get,
2234         .do_index_try           = osd_index_try,
2235 };
2236
2237 module_param(osd_object_sync_delay_us, int, 0644);
2238 MODULE_PARM_DESC(osd_object_sync_delay_us,
2239                  "If zero or larger, delay N usec instead of doing object sync");
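
/*
 * Usage note: assuming the module is built as osd_zfs.ko, the parameter can be
 * changed at runtime (mode 0644 above) via
 * /sys/module/osd_zfs/parameters/osd_object_sync_delay_us; the default of -1
 * keeps the txg_wait_synced() behaviour in osd_object_sync().
 */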