1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd-zfs/osd_object.c
33  *
34  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35  * Author: Mike Pershin <tappro@whamcloud.com>
36  * Author: Johann Lombardi <johann@whamcloud.com>
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <libcfs/libcfs.h>
42 #include <obd_support.h>
43 #include <lustre_net.h>
44 #include <obd.h>
45 #include <obd_class.h>
46 #include <lustre_disk.h>
47 #include <lustre_fid.h>
48
49 #include "osd_internal.h"
50
51 #include <sys/dnode.h>
52 #include <sys/dbuf.h>
53 #include <sys/spa.h>
54 #include <sys/stat.h>
55 #include <sys/zap.h>
56 #include <sys/spa_impl.h>
57 #include <sys/zfs_znode.h>
58 #include <sys/dmu_tx.h>
59 #include <sys/dmu_objset.h>
60 #include <sys/dsl_prop.h>
61 #include <sys/sa_impl.h>
62 #include <sys/txg.h>
63
64 char *osd_obj_tag = "osd_object";
65 static int osd_object_sync_delay_us = -1;
66
67 static struct dt_object_operations osd_obj_ops;
68 static struct lu_object_operations osd_lu_obj_ops;
69 extern struct dt_body_operations osd_body_ops;
70 static struct dt_object_operations osd_obj_otable_it_ops;
71
72 extern struct kmem_cache *osd_object_kmem;
73
74 static void
75 osd_object_sa_fini(struct osd_object *obj)
76 {
77         if (obj->oo_sa_hdl) {
78                 sa_handle_destroy(obj->oo_sa_hdl);
79                 obj->oo_sa_hdl = NULL;
80         }
81 }
82
83 static int
84 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
85 {
86         int rc;
87
88         LASSERT(obj->oo_sa_hdl == NULL);
89         LASSERT(obj->oo_dn != NULL);
90
91         rc = osd_sa_handle_get(obj);
92         if (rc)
93                 return rc;
94
95         /* Cache the xattr object id, valid for the life of the object */
96         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
97         if (rc == -ENOENT) {
98                 obj->oo_xattr = ZFS_NO_OBJECT;
99                 rc = 0;
100         } else if (rc) {
101                 osd_object_sa_fini(obj);
102         }
103
104         return rc;
105 }
106
107 /*
108  * Add object to list of dirty objects in tx handle.
109  */
110 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
111 {
112         if (!list_empty(&obj->oo_sa_linkage))
113                 return;
114
115         write_lock(&obj->oo_attr_lock);
116         if (likely(list_empty(&obj->oo_sa_linkage)))
117                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
118         write_unlock(&obj->oo_attr_lock);
119 }
120
121 /*
122  * Release spill block dbuf hold for all dirty SAs.
123  */
124 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
125 {
126         struct osd_object *obj;
127
128         while (!list_empty(&oh->ot_sa_list)) {
129                 obj = list_entry(oh->ot_sa_list.next,
130                                  struct osd_object, oo_sa_linkage);
131                 write_lock(&obj->oo_attr_lock);
132                 list_del_init(&obj->oo_sa_linkage);
133                 write_unlock(&obj->oo_attr_lock);
134                 if (obj->oo_late_xattr) {
135                         /*
136                          * take oo_guard to protect oo_sa_xattr buffer
137                          * from concurrent update by osd_xattr_set()
138                          */
139                         LASSERT(oh->ot_assigned != 0);
140                         down_write(&obj->oo_guard);
141                         if (obj->oo_late_attr_set)
142                                 __osd_sa_attr_init(env, obj, oh);
143                         else if (obj->oo_late_xattr)
144                                 __osd_sa_xattr_update(env, obj, oh);
145                         up_write(&obj->oo_guard);
146                 }
147                 sa_spill_rele(obj->oo_sa_hdl);
148         }
149 }
150
151 /*
152  * Update the SA and add the object to the dirty list.
153  */
154 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
155                          void *buf, uint32_t buflen, struct osd_thandle *oh)
156 {
157         int rc;
158
159         LASSERT(obj->oo_sa_hdl != NULL);
160         LASSERT(oh->ot_tx != NULL);
161
162         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
163         osd_object_sa_dirty_add(obj, oh);
164
165         return rc;
166 }
167
168 /*
169  * Bulk update the SA and add the object to the dirty list.
170  */
171 static int
172 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
173                           int count, struct osd_thandle *oh)
174 {
175         int rc;
176
177         LASSERT(obj->oo_sa_hdl != NULL);
178         LASSERT(oh->ot_tx != NULL);
179
180         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
181         osd_object_sa_dirty_add(obj, oh);
182
183         return rc;
184 }
185
186 /*
187  * Retrieve the attributes of a DMU object
188  */
189 static int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
190                                  struct osd_object *obj, struct lu_attr *la)
191 {
192         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
193         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
194         int              cnt = 0;
195         int              rc;
196         ENTRY;
197
198         LASSERT(obj->oo_dn != NULL);
199
200         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
201                         LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
202
203         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
204         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
205         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
206         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
207         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
208         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
209         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
210         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
211         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
212         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
213
214         rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
215         if (rc)
216                 GOTO(out_sa, rc);
217
218 #ifdef ZFS_PROJINHERIT
219         if (o->od_projectused_dn && osa->flags & ZFS_PROJID) {
220                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_PROJID(o),
221                                 &osa->projid, 8);
222                 if (rc)
223                         GOTO(out_sa, rc);
224
225                 la->la_projid = osa->projid;
226                 la->la_valid |= LA_PROJID;
227                 obj->oo_with_projid = 1;
228         } else {
229                 la->la_projid = ZFS_DEFAULT_PROJID;
230                 la->la_valid &= ~LA_PROJID;
231         }
232 #else
233         la->la_projid = 0;
234         la->la_valid &= ~LA_PROJID;
235 #endif
236
237         la->la_atime = osa->atime[0];
238         la->la_mtime = osa->mtime[0];
239         la->la_ctime = osa->ctime[0];
240         la->la_mode = osa->mode;
241         la->la_uid = osa->uid;
242         la->la_gid = osa->gid;
243         la->la_nlink = osa->nlink;
244         la->la_flags = attrs_zfs2fs(osa->flags);
245         la->la_size = osa->size;
246
247         /* Try to get extra flags from LMA. Right now, only the LMAI_ORPHAN
248          * flag is stored in LMA, and it is only for orphan directories */
249         if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
250                 struct osd_thread_info *info = osd_oti_get(env);
251                 struct lustre_mdt_attrs *lma;
252                 struct lu_buf buf;
253
254                 lma = (struct lustre_mdt_attrs *)info->oti_buf;
255                 buf.lb_buf = lma;
256                 buf.lb_len = sizeof(info->oti_buf);
257                 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
258                 if (rc > 0) {
259                         rc = 0;
260                         lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
261                         obj->oo_lma_flags =
262                                 lma_to_lustre_flags(lma->lma_incompat);
263
264                 } else if (rc == -ENODATA) {
265                         rc = 0;
266                 }
267         }
268
269         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
270                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
271                 if (rc)
272                         GOTO(out_sa, rc);
273                 la->la_rdev = osa->rdev;
274                 la->la_valid |= LA_RDEV;
275         }
276 out_sa:
277
278         RETURN(rc);
279 }
280
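/*
 * Hold the dnode for @oid via its bonus buffer and return it in @dnp.
 * The hold taken here stays with the osd_object and is dropped later
 * via osd_dnode_rele().
 */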
281 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
282 {
283         dmu_buf_t *db;
284         dmu_buf_impl_t *dbi;
285         int rc;
286
287         rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
288         if (rc)
289                 return rc;
290
291         dbi = (dmu_buf_impl_t *)db;
292         DB_DNODE_ENTER(dbi);
293         *dnp = DB_DNODE(dbi);
294         LASSERT(*dnp != NULL);
295
296         return 0;
297 }
298
299 /*
300  * Concurrency: no concurrent access is possible that early in object
301  * life-cycle.
302  */
303 struct lu_object *osd_object_alloc(const struct lu_env *env,
304                                    const struct lu_object_header *hdr,
305                                    struct lu_device *d)
306 {
307         struct osd_object *mo;
308
309         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
310         if (mo != NULL) {
311                 struct lu_object *l;
312                 struct lu_object_header *h;
313                 struct osd_device *o = osd_dev(d);
314
315                 l = &mo->oo_dt.do_lu;
316                 if (unlikely(o->od_in_init)) {
317                         OBD_ALLOC_PTR(h);
318                         if (!h) {
319                                 OBD_FREE_PTR(mo);
320                                 return NULL;
321                         }
322
323                         lu_object_header_init(h);
324                         lu_object_init(l, h, d);
325                         lu_object_add_top(h, l);
326                         mo->oo_header = h;
327                 } else {
328                         dt_object_init(&mo->oo_dt, NULL, d);
329                         mo->oo_header = NULL;
330                 }
331
332                 mo->oo_dt.do_ops = &osd_obj_ops;
333                 l->lo_ops = &osd_lu_obj_ops;
334                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
335                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
336                 init_rwsem(&mo->oo_sem);
337                 init_rwsem(&mo->oo_guard);
338                 rwlock_init(&mo->oo_attr_lock);
339                 mo->oo_destroy = OSD_DESTROY_NONE;
340                 return l;
341         } else {
342                 return NULL;
343         }
344 }
345
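/*
 * Raise the data block size of a freshly accessed object to at least
 * PAGE_SIZE (capped by od_max_blksz) in a standalone transaction.  The
 * block size can only be changed while the object still holds at most
 * a single block, which the caller checks via dn_maxblkid.
 */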
346 static void osd_obj_set_blksize(const struct lu_env *env,
347                                 struct osd_device *osd, struct osd_object *obj)
348 {
349         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
350         dmu_tx_t *tx;
351         dnode_t *dn = obj->oo_dn;
352         uint32_t blksz;
353         int rc = 0;
354         ENTRY;
355
356         LASSERT(!osd_oti_get(env)->oti_in_trans);
357
358         tx = dmu_tx_create(osd->od_os);
359         if (!tx) {
360                 CERROR("%s: fail to create tx to set blksize for "DFID"\n",
361                        osd->od_svname, PFID(fid));
362                 RETURN_EXIT;
363         }
364
365         dmu_tx_hold_bonus(tx, dn->dn_object);
366         rc = -dmu_tx_assign(tx, TXG_WAIT);
367         if (rc) {
368                 dmu_tx_abort(tx);
369                 CERROR("%s: fail to assign tx to set blksize for "DFID
370                        ": rc = %d\n", osd->od_svname, PFID(fid), rc);
371                 RETURN_EXIT;
372         }
373
374         down_write(&obj->oo_guard);
375         if (unlikely((1 << dn->dn_datablkshift) >= PAGE_SIZE))
376                 GOTO(out, rc = 1);
377
378         blksz = dn->dn_datablksz;
379         if (!is_power_of_2(blksz))
380                 blksz = size_roundup_power2(blksz);
381
382         if (blksz > osd->od_max_blksz)
383                 blksz = osd->od_max_blksz;
384         else if (blksz < PAGE_SIZE)
385                 blksz = PAGE_SIZE;
386         rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object, blksz, 0, tx);
387
388         GOTO(out, rc);
389
390 out:
391         up_write(&obj->oo_guard);
392         if (rc) {
393                 dmu_tx_abort(tx);
394                 if (unlikely(obj->oo_dn->dn_maxblkid > 0))
395                         rc = 1;
396                 if (rc < 0)
397                         CERROR("%s: fail to set blksize for "DFID": rc = %d\n",
398                                osd->od_svname, PFID(fid), rc);
399         } else {
400                 dmu_tx_commit(tx);
401                 CDEBUG(D_INODE, "%s: set blksize as %u for "DFID"\n",
402                        osd->od_svname, blksz, PFID(fid));
403         }
404 }
405
406 /*
407  * Concurrency: shouldn't matter.
408  */
409 static int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
410 {
411         struct osd_device       *osd = osd_obj2dev(obj);
412         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
413         int                      rc = 0;
414         ENTRY;
415
416         LASSERT(obj->oo_dn);
417
418         rc = osd_object_sa_init(obj, osd);
419         if (rc)
420                 RETURN(rc);
421
422         /* cache attrs in object */
423         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
424         if (rc)
425                 RETURN(rc);
426
427         if (likely(!fid_is_acct(fid))) {
428                 /* no body operations for accounting objects */
429                 obj->oo_dt.do_body_ops = &osd_body_ops;
430
431                 if (S_ISREG(obj->oo_attr.la_mode) &&
432                     obj->oo_dn->dn_maxblkid == 0 &&
433                     (1 << obj->oo_dn->dn_datablkshift) < PAGE_SIZE &&
434                     (fid_is_idif(fid) || fid_is_norm(fid) ||
435                      fid_is_echo(fid)) &&
436                     osd->od_is_ost && !osd->od_dt_dev.dd_rdonly)
437                         osd_obj_set_blksize(env, osd, obj);
438         }
439
440         /*
441          * initialize object before marking it existing
442          */
443         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
444
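        /*
         * The barrier orders the type bits stored in loh_attr above before
         * LOHA_EXISTS is set below, so concurrent readers never observe an
         * existing object without its file type.
         */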
445         smp_mb();
446         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
447
448         RETURN(0);
449 }
450
451 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
452 {
453         struct osd_thread_info  *info = osd_oti_get(env);
454         struct lu_buf           buf;
455         int                     rc;
456         struct lustre_mdt_attrs *lma;
457         const struct lu_fid *rfid = lu_object_fid(&obj->oo_dt.do_lu);
458         ENTRY;
459
460         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
461         lma = (struct lustre_mdt_attrs *)info->oti_buf;
462         buf.lb_buf = lma;
463         buf.lb_len = sizeof(info->oti_buf);
464
465         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
466         if (rc > 0) {
467                 rc = 0;
468                 lustre_lma_swab(lma);
469                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
470                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
471                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
472                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
473                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
474                               PFID(rfid));
475                         rc = -EOPNOTSUPP;
476                 } else if (unlikely(!lu_fid_eq(rfid, &lma->lma_self_fid))) {
477                         CERROR("%s: FID-in-LMA "DFID" does not match the "
478                               "object self-fid "DFID"\n",
479                               osd_obj2dev(obj)->od_svname,
480                               PFID(&lma->lma_self_fid), PFID(rfid));
481                         rc = -EREMCHG;
482                 } else {
483                         struct osd_device *osd = osd_obj2dev(obj);
484
485                         if (lma->lma_compat & LMAC_STRIPE_INFO &&
486                             osd->od_is_ost)
487                                 obj->oo_pfid_in_lma = 1;
488                         if (unlikely(lma->lma_incompat & LMAI_REMOTE_PARENT) &&
489                             osd->od_remote_parent_dir != ZFS_NO_OBJECT)
490                                 lu_object_set_agent_entry(&obj->oo_dt.do_lu);
491                 }
492         } else if (rc == -ENODATA) {
493                 /* the LMA xattr hasn't been initialized yet */
494                 rc = 0;
495         }
496
497         RETURN(rc);
498 }
499
500 /**
501  * Helper function to retrieve the accounting object's dnode from its fid
502  */
503 static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
504                                   const struct lu_fid *fid)
505 {
506         dnode_t *dn = NULL;
507
508         LASSERT(fid_is_acct(fid));
509
510         switch (fid_oid(fid)) {
511         case ACCT_USER_OID:
512                 dn = osd->od_userused_dn;
513                 break;
514         case ACCT_GROUP_OID:
515                 dn = osd->od_groupused_dn;
516                 break;
517 #ifdef ZFS_PROJINHERIT
518         case ACCT_PROJECT_OID:
519                 dn = osd->od_projectused_dn;
520                 break;
521 #endif
522         default:
523                 break;
524         }
525
526         return dn;
527 }
528
529 /*
530  * Concurrency: no concurrent access is possible that early in object
531  * life-cycle.
532  */
533 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
534                            const struct lu_object_conf *conf)
535 {
536         struct osd_object *obj = osd_obj(l);
537         struct osd_device *osd = osd_obj2dev(obj);
538         const struct lu_fid *fid = lu_object_fid(l);
539         struct lustre_scrub *scrub = &osd->od_scrub;
540         struct osd_thread_info *info = osd_oti_get(env);
541         struct luz_direntry *zde = &info->oti_zde;
542         struct osd_idmap_cache *idc;
543         char *name = info->oti_str;
544         uint64_t oid;
545         int rc = 0;
546         int rc1;
547         bool remote = false;
548         ENTRY;
549
550         LASSERT(osd_invariant(obj));
551
552         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
553                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
554                 l->lo_header->loh_attr |= LOHA_EXISTS;
555
556                 GOTO(out, rc = 0);
557         }
558
559         if (conf && conf->loc_flags & LOC_F_NEW)
560                 GOTO(out, rc = 0);
561
562         if (unlikely(fid_is_acct(fid))) {
563                 obj->oo_dn = osd_quota_fid2dmu(osd, fid);
564                 if (obj->oo_dn) {
565                         obj->oo_dt.do_index_ops = &osd_acct_index_ops;
566                         l->lo_header->loh_attr |= LOHA_EXISTS;
567                 }
568
569                 GOTO(out, rc = 0);
570         }
571
572         idc = osd_idc_find(env, osd, fid);
573         if (idc && !idc->oic_remote && idc->oic_dnode != ZFS_NO_OBJECT) {
574                 oid = idc->oic_dnode;
575                 goto zget;
576         }
577
578         rc = -ENOENT;
579         if (!list_empty(&osd->od_scrub.os_inconsistent_items))
580                 rc = osd_oii_lookup(osd, fid, &oid);
581
582         if (rc)
583                 rc = osd_fid_lookup(env, osd, fid, &oid);
584
585         if (rc == -ENOENT) {
586                 if (likely(!(fid_is_norm(fid) || fid_is_igif(fid)) ||
587                            fid_is_on_ost(env, osd, fid) ||
588                            !zfs_test_bit(osd_oi_fid2idx(osd, fid),
589                                          scrub->os_file.sf_oi_bitmap)))
590                         GOTO(out, rc = 0);
591
592                 rc = -EREMCHG;
593                 goto trigger;
594         }
595
596         if (rc)
597                 GOTO(out, rc);
598
599 zget:
600         LASSERT(obj->oo_dn == NULL);
601
602         rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
603         /* EEXIST will be returned if object is being deleted in ZFS */
604         if (rc == -EEXIST)
605                 GOTO(out, rc = 0);
606
607         if (rc) {
608                 CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
609                        osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
610                 GOTO(out, rc);
611         }
612
613         rc = osd_object_init0(env, obj);
614         if (rc)
615                 GOTO(out, rc);
616
617         if (unlikely(obj->oo_header))
618                 GOTO(out, rc = 0);
619
620         rc = osd_check_lma(env, obj);
621         if ((!rc && !remote) || (rc != -EREMCHG))
622                 GOTO(out, rc);
623
624 trigger:
625         /* We still have a chance to get a valid dnode: an object that is
626          * referenced by a remote name entry is linked on the local MDT
627          * under the dir /REMOTE_PARENT_DIR with its FID string as the name.
628          *
629          * During the OI scrub, if we cannot find the OI mapping, we may
630          * still have a chance to map the FID to the local OID by looking
631          * up the dir /REMOTE_PARENT_DIR. */
632         if (!remote && !fid_is_on_ost(env, osd, fid)) {
633                 osd_fid2str(name, fid, sizeof(info->oti_str));
634                 rc = osd_zap_lookup(osd, osd->od_remote_parent_dir,
635                                     NULL, name, 8, 3, (void *)zde);
636                 if (!rc) {
637                         oid = zde->lzd_reg.zde_dnode;
638                         osd_dnode_rele(obj->oo_dn);
639                         obj->oo_dn = NULL;
640                         remote = true;
641                         goto zget;
642                 }
643         }
644
645         /* The case where someone has already triggered the OI scrub. */
646         if (thread_is_running(&scrub->os_thread)) {
647                 if (!rc) {
648                         LASSERT(remote);
649
650                         lu_object_set_agent_entry(l);
651                         osd_oii_insert(env, osd, fid, oid, false);
652                 } else {
653                         rc = -EINPROGRESS;
654                 }
655
656                 GOTO(out, rc);
657         }
658
659         /* The case where triggering the OI scrub automatically is NOT allowed. */
660         if (osd->od_auto_scrub_interval == AS_NEVER)
661                 GOTO(out, rc);
662
663         /* It is up to us to trigger the OI scrub. */
664         rc1 = osd_scrub_start(env, osd, SS_CLEAR_DRYRUN |
665                               SS_CLEAR_FAILOUT | SS_AUTO_FULL);
666         LCONSOLE_WARN("%s: trigger OI scrub by RPC for the "DFID": rc = %d\n",
667                       osd_name(osd), PFID(fid), rc1);
668         if (!rc) {
669                 LASSERT(remote);
670
671                 lu_object_set_agent_entry(l);
672                 if (!rc1)
673                         osd_oii_insert(env, osd, fid, oid, false);
674         } else {
675                 if (!rc1)
676                         rc = -EINPROGRESS;
677                 else
678                         rc = -EREMCHG;
679         }
680
681         GOTO(out, rc);
682
683 out:
684         RETURN(rc);
685 }
686
687 /*
688  * Concurrency: no concurrent access is possible that late in object
689  * life-cycle.
690  */
691 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
692 {
693         struct osd_object *obj = osd_obj(l);
694         struct lu_object_header *h = obj->oo_header;
695
696         LASSERT(osd_invariant(obj));
697
698         dt_object_fini(&obj->oo_dt);
699         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
700         if (unlikely(h)) {
701                 lu_object_header_fini(h);
702                 OBD_FREE_PTR(h);
703         }
704 }
705
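/*
 * Queue an object slated for asynchronous destroy on the transaction's
 * unlinked list; an object may sit on at most one such list at a time.
 */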
706 static int
707 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
708 {
709         int rc = -EBUSY;
710
711         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
712
713         /* the object is supposed to be exclusively locked by
714          * the caller (osd_destroy()), while the transaction
715          * (oh) is per-thread and not shared */
716         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
717                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
718                 rc = 0;
719         }
720
721         return rc;
722 }
723
724 /* Default to max data size covered by a level-1 indirect block */
725 static unsigned long osd_sync_destroy_max_size =
726         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
727 module_param(osd_sync_destroy_max_size, ulong, 0444);
728 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
729
730 static inline void
731 osd_object_set_destroy_type(struct osd_object *obj)
732 {
733         /*
734          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
735          * only once and use it consistently thereafter.
736          */
737         down_write(&obj->oo_guard);
738         if (obj->oo_destroy == OSD_DESTROY_NONE) {
739                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
740                         obj->oo_destroy = OSD_DESTROY_SYNC;
741                 else /* Larger objects are destroyed asynchronously */
742                         obj->oo_destroy = OSD_DESTROY_ASYNC;
743         }
744         up_write(&obj->oo_guard);
745 }
746
747 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
748                                struct thandle *th)
749 {
750         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
751         struct osd_object       *obj = osd_dt_obj(dt);
752         struct osd_device       *osd = osd_obj2dev(obj);
753         struct osd_thandle      *oh;
754         dnode_t *dn;
755         int                      rc;
756         uint64_t                 zapid;
757         ENTRY;
758
759         LASSERT(th != NULL);
760         LASSERT(dt_object_exists(dt));
761
762         oh = container_of0(th, struct osd_thandle, ot_super);
763         LASSERT(oh->ot_tx != NULL);
764
765         dmu_tx_mark_netfree(oh->ot_tx);
766
767         /* declare that we'll remove object from fid-dnode mapping */
768         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
769         osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
770
771         osd_declare_xattrs_destroy(env, obj, oh);
772
773         /* one less inode */
774         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
775                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
776                                -1, oh, NULL, OSD_QID_INODE);
777         if (rc)
778                 RETURN(rc);
779
780         /* data to be truncated */
781         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
782                                obj->oo_attr.la_gid, obj->oo_attr.la_projid,
783                                0, oh, NULL, OSD_QID_BLK);
784         if (rc)
785                 RETURN(rc);
786
787         osd_object_set_destroy_type(obj);
788         if (obj->oo_destroy == OSD_DESTROY_SYNC)
789                 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
790                                  0, DMU_OBJECT_END);
791         else
792                 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
793                                 osd->od_unlinked, TRUE, NULL);
794
795         /* remove agent entry (if any) from remote parent */
796         if (lu_object_has_agent_entry(&obj->oo_dt.do_lu))
797                 osd_tx_hold_zap(oh->ot_tx, osd->od_remote_parent_dir,
798                                 NULL, FALSE, NULL);
799
800         /* will help to find FID->ino when this object is being
801          * added to PENDING/ */
802         osd_idc_find_and_init(env, osd, obj);
803
804         RETURN(0);
805 }
806
807 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
808                        struct thandle *th)
809 {
810         struct osd_thread_info  *info = osd_oti_get(env);
811         char                    *buf = info->oti_str;
812         struct osd_object       *obj = osd_dt_obj(dt);
813         struct osd_device       *osd = osd_obj2dev(obj);
814         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
815         struct osd_thandle      *oh;
816         int                      rc;
817         uint64_t                 oid, zapid;
818         dnode_t *zdn;
819         ENTRY;
820
821         down_write(&obj->oo_guard);
822
823         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
824                 GOTO(out, rc = -ENOENT);
825
826         LASSERT(obj->oo_dn != NULL);
827
828         oh = container_of0(th, struct osd_thandle, ot_super);
829         LASSERT(oh != NULL);
830         LASSERT(oh->ot_tx != NULL);
831
832         /* remove obj ref from its index dir (which dir depends on the fid) */
833         zapid = osd_get_name_n_idx(env, osd, fid, buf,
834                                    sizeof(info->oti_str), &zdn);
835         rc = osd_xattrs_destroy(env, obj, oh);
836         if (rc) {
837                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
838                        osd->od_svname, buf, rc);
839                 GOTO(out, rc);
840         }
841
842         if (lu_object_has_agent_entry(&obj->oo_dt.do_lu)) {
843                 rc = osd_delete_from_remote_parent(env, osd, obj, oh, true);
844                 if (rc)
845                         GOTO(out, rc);
846         }
847
848         oid = obj->oo_dn->dn_object;
849         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
850                 /* this may happen if the destroy wasn't declared
851                  * e.g. when the object is created and then destroyed
852                  * in the same transaction - we don't need additional
853                  * space for destroy specifically */
854                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
855                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
856                 if (rc)
857                         CERROR("%s: failed to free %s %llu: rc = %d\n",
858                                osd->od_svname, buf, oid, rc);
859         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
860                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
861                 if (rc)
862                         CERROR("%s: failed to free %s %llu: rc = %d\n",
863                                osd->od_svname, buf, oid, rc);
864         } else { /* asynchronous destroy */
865                 char *key = info->oti_key;
866
867                 rc = osd_object_unlinked_add(obj, oh);
868                 if (rc)
869                         GOTO(out, rc);
870
871                 snprintf(key, sizeof(info->oti_key), "%llx", oid);
872                 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
873                                  osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
874                 if (rc)
875                         CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
876                                osd->od_svname, buf, oid, rc);
877         }
878
879         /* Remove the OI mapping after the destroy to handle the race with
880          * OI scrub that may insert missed OI mapping during the interval. */
881         rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
882         if (unlikely(rc == -ENOENT))
883                 rc = 0;
884         if (rc)
885                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
886                        osd->od_svname, buf, rc);
887
888         GOTO(out, rc);
889
890 out:
891         /* not needed in the cache anymore */
892         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
893         if (rc == 0)
894                 obj->oo_destroyed = 1;
895         up_write(&obj->oo_guard);
896         RETURN (0);
897 }
898
899 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
900 {
901         struct osd_object *obj = osd_obj(l);
902         const struct lu_fid *fid = lu_object_fid(l);
903
904         if (obj->oo_dn) {
905                 if (likely(!fid_is_acct(fid))) {
906                         osd_object_sa_fini(obj);
907                         if (obj->oo_sa_xattr) {
908                                 nvlist_free(obj->oo_sa_xattr);
909                                 obj->oo_sa_xattr = NULL;
910                         }
911                         osd_dnode_rele(obj->oo_dn);
912                         list_del(&obj->oo_sa_linkage);
913                 }
914                 obj->oo_dn = NULL;
915         }
916 }
917
918 /*
919  * Concurrency: ->loo_object_release() is called under site spin-lock.
920  */
921 static void osd_object_release(const struct lu_env *env,
922                                struct lu_object *l)
923 {
924 }
925
926 /*
927  * Concurrency: shouldn't matter.
928  */
929 static int osd_object_print(const struct lu_env *env, void *cookie,
930                             lu_printer_t p, const struct lu_object *l)
931 {
932         struct osd_object *o = osd_obj(l);
933
934         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
935 }
936
937 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
938                           unsigned role)
939 {
940         struct osd_object *obj = osd_dt_obj(dt);
941
942         LASSERT(osd_invariant(obj));
943
944         down_read_nested(&obj->oo_sem, role);
945 }
946
947 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
948                            unsigned role)
949 {
950         struct osd_object *obj = osd_dt_obj(dt);
951
952         LASSERT(osd_invariant(obj));
953
954         down_write_nested(&obj->oo_sem, role);
955 }
956
957 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
958 {
959         struct osd_object *obj = osd_dt_obj(dt);
960
961         LASSERT(osd_invariant(obj));
962         up_read(&obj->oo_sem);
963 }
964
965 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
966 {
967         struct osd_object *obj = osd_dt_obj(dt);
968
969         LASSERT(osd_invariant(obj));
970         up_write(&obj->oo_sem);
971 }
972
973 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
974 {
975         struct osd_object *obj = osd_dt_obj(dt);
976         int rc = 1;
977
978         LASSERT(osd_invariant(obj));
979
980         if (down_write_trylock(&obj->oo_sem)) {
981                 rc = 0;
982                 up_write(&obj->oo_sem);
983         }
984         return rc;
985 }
986
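/*
 * Return the attributes cached in the osd_object, refreshing only the
 * block count and block size from the SA layer.
 */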
987 static int osd_attr_get(const struct lu_env *env,
988                         struct dt_object *dt,
989                         struct lu_attr *attr)
990 {
991         struct osd_object       *obj = osd_dt_obj(dt);
992         uint64_t                 blocks;
993         uint32_t                 blksize;
994         int                      rc = 0;
995
996         down_read(&obj->oo_guard);
997
998         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
999                 GOTO(out, rc = -ENOENT);
1000
1001         if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
1002                 GOTO(out, rc = 0);
1003
1004         LASSERT(osd_invariant(obj));
1005         LASSERT(obj->oo_dn);
1006
1007         read_lock(&obj->oo_attr_lock);
1008         *attr = obj->oo_attr;
1009         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
1010                 attr->la_flags |= LUSTRE_ORPHAN_FL;
1011         read_unlock(&obj->oo_attr_lock);
1012
1013         /* with ZFS_DEBUG zrl_add_debug() called by DB_DNODE_ENTER()
1014          * from within sa_object_size() can block on a mutex, so
1015          * we can't call sa_object_size() holding rwlock */
1016         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
1017         /* we do not control size of indices, so always calculate
1018          * it from number of blocks reported by DMU */
1019         if (S_ISDIR(attr->la_mode))
1020                 attr->la_size = 512 * blocks;
1021         /* Block size may not be set; suggest maximal I/O transfers. */
1022         if (blksize == 0)
1023                 blksize = osd_spa_maxblocksize(
1024                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
1025
1026         attr->la_blksize = blksize;
1027         attr->la_blocks = blocks;
1028         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
1029
1030 out:
1031         up_read(&obj->oo_guard);
1032         return rc;
1033 }
1034
1035 /* Simple wrapper on top of the qsd API which implements quota transfer for osd
1036  * setattr needs. As a reminder, only the root user can change the ownership of
1037  * a file, which is why EDQUOT & EINPROGRESS errors are discarded */
1038 static inline int qsd_transfer(const struct lu_env *env,
1039                                struct qsd_instance *qsd,
1040                                struct lquota_trans *trans, int qtype,
1041                                __u64 orig_id, __u64 new_id, __u64 bspace,
1042                                struct lquota_id_info *qi, bool ignore_edquot)
1043 {
1044         int     rc;
1045
1046         if (unlikely(qsd == NULL))
1047                 return 0;
1048
1049         LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
1050         qi->lqi_type = qtype;
1051
1052         /* inode accounting */
1053         qi->lqi_is_blk = false;
1054
1055         /* one more inode for the new owner ... */
1056         qi->lqi_id.qid_uid = new_id;
1057         qi->lqi_space      = 1;
1058         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1059         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1060                 rc = 0;
1061         if (rc)
1062                 return rc;
1063
1064         /* and one less inode for the current id */
1065         qi->lqi_id.qid_uid = orig_id;
1066         qi->lqi_space      = -1;
1067         /* can't get EDQUOT when reducing usage */
1068         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1069         if (rc == -EINPROGRESS)
1070                 rc = 0;
1071         if (rc)
1072                 return rc;
1073
1074         /* block accounting */
1075         qi->lqi_is_blk = true;
1076
1077         /* more blocks for the new owner ... */
1078         qi->lqi_id.qid_uid = new_id;
1079         qi->lqi_space      = bspace;
1080         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1081         if (ignore_edquot && (rc == -EDQUOT || rc == -EINPROGRESS))
1082                 rc = 0;
1083         if (rc)
1084                 return rc;
1085
1086         /* and finally less blocks for the current owner */
1087         qi->lqi_id.qid_uid = orig_id;
1088         qi->lqi_space      = -bspace;
1089         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
1090         /* can't get EDQUOT when reducing usage */
1091         if (rc == -EINPROGRESS)
1092                 rc = 0;
1093         return rc;
1094 }
1095
1096 static int osd_declare_attr_set(const struct lu_env *env,
1097                                 struct dt_object *dt,
1098                                 const struct lu_attr *attr,
1099                                 struct thandle *handle)
1100 {
1101         struct osd_thread_info  *info = osd_oti_get(env);
1102         struct osd_object       *obj = osd_dt_obj(dt);
1103         struct osd_device       *osd = osd_obj2dev(obj);
1104         dmu_tx_hold_t           *txh;
1105         struct osd_thandle      *oh;
1106         uint64_t                 bspace;
1107         uint32_t                 blksize;
1108         int                      rc = 0;
1109         bool                     found;
1110         ENTRY;
1111
1112
1113         LASSERT(handle != NULL);
1114         LASSERT(osd_invariant(obj));
1115
1116         oh = container_of0(handle, struct osd_thandle, ot_super);
1117
1118         down_read(&obj->oo_guard);
1119         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1120                 GOTO(out, rc = 0);
1121
1122         LASSERT(obj->oo_sa_hdl != NULL);
1123         LASSERT(oh->ot_tx != NULL);
1124         /* regular attributes are part of the bonus buffer */
1125         /* let's check whether this object is already part of the
1126          * transaction */
1127         found = false;
1128         for (txh = list_head(&oh->ot_tx->tx_holds); txh;
1129              txh = list_next(&oh->ot_tx->tx_holds, txh)) {
1130                 if (txh->txh_dnode == NULL)
1131                         continue;
1132                 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
1133                         continue;
1134                 /* this object is already part of the transaction,
1135                  * so we don't need to declare the bonus again */
1136                 found = true;
1137                 break;
1138         }
1139         if (!found)
1140                 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
1141         if (oh->ot_tx->tx_err != 0)
1142                 GOTO(out, rc = -oh->ot_tx->tx_err);
1143
1144         if (attr && attr->la_valid & LA_FLAGS) {
1145                 /* LMA is usually a part of bonus, no need to declare
1146                  * anything else */
1147         }
1148
1149         if (attr && (attr->la_valid & (LA_UID | LA_GID | LA_PROJID))) {
1150                 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
1151                 bspace = toqb(bspace * 512);
1152
1153                 CDEBUG(D_QUOTA, "%s: enforce quota on UID %u, GID %u, "
1154                        "the quota space is %lld (%u)\n", osd->od_svname,
1155                        attr->la_uid, attr->la_gid, bspace, blksize);
1156         }
1157
1158         if (attr && attr->la_valid & LA_UID) {
1159                 /* quota enforcement for user */
1160                 if (attr->la_uid != obj->oo_attr.la_uid) {
1161                         rc = qsd_transfer(env, osd->od_quota_slave,
1162                                           &oh->ot_quota_trans, USRQUOTA,
1163                                           obj->oo_attr.la_uid, attr->la_uid,
1164                                           bspace, &info->oti_qi, true);
1165                         if (rc)
1166                                 GOTO(out, rc);
1167                 }
1168         }
1169         if (attr && attr->la_valid & LA_GID) {
1170                 /* quota enforcement for group */
1171                 if (attr->la_gid != obj->oo_attr.la_gid) {
1172                         rc = qsd_transfer(env, osd->od_quota_slave,
1173                                           &oh->ot_quota_trans, GRPQUOTA,
1174                                           obj->oo_attr.la_gid, attr->la_gid,
1175                                           bspace, &info->oti_qi,
1176                                           !(attr->la_flags &
1177                                                         LUSTRE_SET_SYNC_FL));
1178                         if (rc)
1179                                 GOTO(out, rc);
1180                 }
1181         }
1182 #ifdef ZFS_PROJINHERIT
1183         if (attr && attr->la_valid & LA_PROJID) {
1184                 if (!osd->od_projectused_dn)
1185                         GOTO(out, rc = -EOPNOTSUPP);
1186
1187                 /* Usually, if project quota is upgradable for the device,
1188                  * then the upgrade will be done before or when mounting
1189                  * the device. So when we get here, this object should
1190                  * already have the project ID attribute (zero by default).
1191                  * Otherwise, something went wrong during the former
1192                  * upgrade, so let's return a failure to report that.
1193                  *
1194                  * Please note that, unlike other attributes, you
1195                  * can NOT simply set the project ID attribute in such a
1196                  * case, because adding (NOT changing) the project ID
1197                  * attribute requires changing the object's attribute layout
1198                  * to match the zfs backend quota accounting requirement. */
1199                 if (unlikely(!obj->oo_with_projid))
1200                         GOTO(out, rc = -ENXIO);
1201
1202                 /* quota enforcement for project */
1203                 if (attr->la_projid != obj->oo_attr.la_projid) {
1204                         rc = qsd_transfer(env, osd->od_quota_slave,
1205                                           &oh->ot_quota_trans, PRJQUOTA,
1206                                           obj->oo_attr.la_projid,
1207                                           attr->la_projid, bspace,
1208                                           &info->oti_qi, true);
1209                         if (rc)
1210                                 GOTO(out, rc);
1211                 }
1212         }
1213 #endif
1214 out:
1215         up_read(&obj->oo_guard);
1216         RETURN(rc);
1217 }
1218
1219 /*
1220  * Set the attributes of an object
1221  *
1222  * The transaction passed to this routine must have
1223  * dmu_tx_hold_bonus(tx, oid) called and then assigned
1224  * to a transaction group.
1225  */
1226 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
1227                         const struct lu_attr *la, struct thandle *handle)
1228 {
1229         struct osd_thread_info  *info = osd_oti_get(env);
1230         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
1231         struct osd_object       *obj = osd_dt_obj(dt);
1232         struct osd_device       *osd = osd_obj2dev(obj);
1233         struct osd_thandle      *oh;
1234         struct osa_attr         *osa = &info->oti_osa;
1235         __u64                    valid = la->la_valid;
1236         int                      cnt;
1237         int                      rc = 0;
1238
1239         ENTRY;
1240
1241         down_read(&obj->oo_guard);
1242         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1243                 GOTO(out, rc = -ENOENT);
1244
1245         LASSERT(handle != NULL);
1246         LASSERT(osd_invariant(obj));
1247         LASSERT(obj->oo_sa_hdl);
1248
1249         oh = container_of0(handle, struct osd_thandle, ot_super);
1250         /* Assert that the transaction has been assigned to a
1251            transaction group. */
1252         LASSERT(oh->ot_tx->tx_txg != 0);
1253
1254         if (OBD_FAIL_CHECK(OBD_FAIL_OSD_FID_MAPPING) && !osd->od_is_ost) {
1255                 struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
1256                 char *buf = info->oti_str;
1257                 dnode_t *zdn = NULL;
1258                 uint64_t zapid;
1259
1260                 zapid = osd_get_name_n_idx(env, osd, lu_object_fid(&dt->do_lu),
1261                                            buf, sizeof(info->oti_str), &zdn);
1262                 rc = osd_zap_lookup(osd, zapid, zdn, buf, 8,
1263                                     sizeof(*zde) / 8, zde);
1264                 if (!rc) {
1265                         zde->zde_dnode -= 1;
1266                         rc = -zap_update(osd->od_os, zapid, buf, 8,
1267                                          sizeof(*zde) / 8, zde, oh->ot_tx);
1268                 }
1269                 if (rc > 0)
1270                         rc = 0;
1271                 GOTO(out, rc);
1272         }
1273
1274         /* Only allow setting the size for regular files */
1275         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
1276                 valid &= ~(LA_SIZE | LA_BLOCKS);
1277
1278         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
1279                 valid &= ~LA_CTIME;
1280
1281         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
1282                 valid &= ~LA_MTIME;
1283
1284         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
1285                 valid &= ~LA_ATIME;
1286
1287         if (valid == 0)
1288                 GOTO(out, rc = 0);
1289
1290         if (valid & LA_FLAGS) {
1291                 struct lustre_mdt_attrs *lma;
1292                 struct lu_buf buf;
1293                 int size = 0;
1294
1295                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
1296                         LASSERT(!obj->oo_pfid_in_lma);
1297                         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
1298                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
1299                         buf.lb_buf = lma;
1300                         buf.lb_len = sizeof(info->oti_buf);
1301
1302                         /* Please do NOT call osd_xattr_get() directly, that
1303                          * will cause recursive down_read() on oo_guard. */
1304                         rc = osd_xattr_get_internal(env, obj, &buf,
1305                                                     XATTR_NAME_LMA, &size);
1306                         if (!rc && unlikely(size < sizeof(*lma))) {
1307                                 rc = -EINVAL;
1308                         } else if (!rc) {
1309                                 lma->lma_incompat =
1310                                         le32_to_cpu(lma->lma_incompat);
1311                                 lma->lma_incompat |=
1312                                         lustre_to_lma_flags(la->la_flags);
1313                                 lma->lma_incompat =
1314                                         cpu_to_le32(lma->lma_incompat);
1315                                 buf.lb_buf = lma;
1316                                 buf.lb_len = sizeof(*lma);
1317                                 rc = osd_xattr_set_internal(env, obj, &buf,
1318                                                             XATTR_NAME_LMA,
1319                                                             LU_XATTR_REPLACE,
1320                                                             oh);
1321                         }
1322                         if (rc < 0) {
1323                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
1324                                        osd->od_svname, rc);
1325                                 GOTO(out, rc);
1326                         }
1327                 }
1328         }
1329
1330         write_lock(&obj->oo_attr_lock);
1331         cnt = 0;
1332
1333         if (valid & LA_PROJID) {
1334 #ifdef ZFS_PROJINHERIT
1335                 /* osd_declare_attr_set() must be called first.
1336                  * If osd::od_projectused_dn is not set, then we
1337                  * cannot get here. */
1338                 LASSERT(osd->od_projectused_dn);
1339                 LASSERT(obj->oo_with_projid);
1340
1341                 osa->projid = obj->oo_attr.la_projid = la->la_projid;
1342                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1343                                  &osa->projid, 8);
1344 #else
1345                 valid &= ~LA_PROJID;
1346 #endif
1347         }
1348
1349         if (valid & LA_ATIME) {
1350                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1351                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1352                                  osa->atime, 16);
1353         }
1354         if (valid & LA_MTIME) {
1355                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1356                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1357                                  osa->mtime, 16);
1358         }
1359         if (valid & LA_CTIME) {
1360                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1361                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1362                                  osa->ctime, 16);
1363         }
1364         if (valid & LA_MODE) {
1365                 /* mode is stored along with type, so read it first */
1366                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1367                         (la->la_mode & ~S_IFMT);
1368                 osa->mode = obj->oo_attr.la_mode;
1369                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1370                                  &osa->mode, 8);
1371         }
1372         if (valid & LA_SIZE) {
1373                 osa->size = obj->oo_attr.la_size = la->la_size;
1374                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1375                                  &osa->size, 8);
1376         }
1377         if (valid & LA_NLINK) {
1378                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1379                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1380                                  &osa->nlink, 8);
1381         }
1382         if (valid & LA_RDEV) {
1383                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1384                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1385                                  &osa->rdev, 8);
1386         }
1387         if (valid & LA_FLAGS) {
1388                 osa->flags = attrs_fs2zfs(la->la_flags);
1389                 /* many flags are not supported by zfs, so ensure a good cached
1390                  * copy */
1391                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1392 #ifdef ZFS_PROJINHERIT
1393                 if (obj->oo_with_projid)
1394                         osa->flags |= ZFS_PROJID;
1395 #endif
1396                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1397                                  &osa->flags, 8);
1398         }
1399         if (valid & LA_UID) {
1400                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1401                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1402                                  &osa->uid, 8);
1403         }
1404         if (valid & LA_GID) {
1405                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1406                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1407                                  &osa->gid, 8);
1408         }
1409         obj->oo_attr.la_valid |= valid;
1410         write_unlock(&obj->oo_attr_lock);
1411
1412         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1413         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1414
1415 out:
1416         up_read(&obj->oo_guard);
1417         RETURN(rc);
1418 }
1419
1420 /*
1421  * Object creation.
1422  *
1423  * XXX temporary solution.
1424  */
1425
1426 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1427                         struct dt_object *parent, struct dt_object *child,
1428                         umode_t child_mode)
1429 {
1430         LASSERT(ah);
1431
1432         ah->dah_parent = parent;
1433         ah->dah_mode = child_mode;
1434
1435         if (parent != NULL && !dt_object_remote(parent)) {
1436                 /* will help to find FID->ino at dt_insert("..") */
1437                 struct osd_object *pobj = osd_dt_obj(parent);
1438
1439                 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1440         }
1441 }
1442
1443 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1444                               struct lu_attr *attr,
1445                               struct dt_allocation_hint *hint,
1446                               struct dt_object_format *dof,
1447                               struct thandle *handle)
1448 {
1449         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1450         struct osd_object       *obj = osd_dt_obj(dt);
1451         struct osd_device       *osd = osd_obj2dev(obj);
1452         struct osd_thandle      *oh;
1453         uint64_t                 zapid;
1454         dnode_t                 *dn;
1455         int                      rc, dnode_size;
1456         ENTRY;
1457
1458         LASSERT(dof);
1459
1460         switch (dof->dof_type) {
1461                 case DFT_REGULAR:
1462                 case DFT_SYM:
1463                 case DFT_NODE:
1464                         if (obj->oo_dt.do_body_ops == NULL)
1465                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1466                         break;
1467                 default:
1468                         break;
1469         }
1470
1471         LASSERT(handle != NULL);
1472         oh = container_of0(handle, struct osd_thandle, ot_super);
1473         LASSERT(oh->ot_tx != NULL);
1474
1475         /* this is the minimum set of EAs on every Lustre object */
1476         obj->oo_ea_in_bonus = OSD_BASE_EA_IN_BONUS;
1477         /* reserve 32 bytes for extra stuff like ACLs */
1478         dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1479
1480         switch (dof->dof_type) {
1481                 case DFT_DIR:
1482                         dt->do_index_ops = &osd_dir_ops;
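                             /* fall through: a directory is also an index, so
                              * it shares the ZAP and SA declarations below */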
1483                 case DFT_INDEX:
1484                         /* for zap create */
1485                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1486                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1487                         break;
1488                 case DFT_REGULAR:
1489                 case DFT_SYM:
1490                 case DFT_NODE:
1491                         /* first, we'll create new object */
1492                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1493                         break;
1494
1495                 default:
1496                         LBUG();
1497                         break;
1498         }
1499
1500         /* and we'll add it to some mapping */
1501         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1502         osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1503
1504         /* will help to find FID->ino mapping at dt_insert() */
1505         osd_idc_find_and_init(env, osd, obj);
1506
1507         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid,
1508                                attr->la_projid, 1, oh, NULL, OSD_QID_INODE);
1509
1510         RETURN(rc);
1511 }
1512
1513 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1514                     struct osd_object *obj, sa_handle_t *sa_hdl, dmu_tx_t *tx,
1515                     struct lu_attr *la, uint64_t parent,
1516                     nvlist_t *xattr)
1517 {
1518         sa_bulk_attr_t *bulk = osd_oti_get(env)->oti_attr_bulk;
1519         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1520         uint64_t gen;
1521         uint64_t crtime[2];
1522         inode_timespec_t now;
1523         int cnt;
1524         int rc;
1525         char *dxattr = NULL;
1526         size_t sa_size;
1527
1528
1529         LASSERT(sa_hdl);
1530
1531         gen = dmu_tx_get_txg(tx);
1532         gethrestime(&now);
1533         ZFS_TIME_ENCODE(&now, crtime);
1534
1535         osa->atime[0] = la->la_atime;
1536         osa->ctime[0] = la->la_ctime;
1537         osa->mtime[0] = la->la_mtime;
1538         osa->mode = la->la_mode;
1539         osa->uid = la->la_uid;
1540         osa->gid = la->la_gid;
1541         osa->rdev = la->la_rdev;
1542         osa->nlink = la->la_nlink;
1543         if (la->la_valid & LA_FLAGS)
1544                 osa->flags = attrs_fs2zfs(la->la_flags);
1545         else
1546                 osa->flags = 0;
1547         osa->size  = la->la_size;
1548 #ifdef ZFS_PROJINHERIT
1549         if (osd->od_projectused_dn) {
1550                 if (la->la_valid & LA_PROJID)
1551                         osa->projid = la->la_projid;
1552                 else
1553                         osa->projid = ZFS_DEFAULT_PROJID;
1554                 osa->flags |= ZFS_PROJID;
1555                 if (obj)
1556                         obj->oo_with_projid = 1;
1557         } else {
1558                 osa->flags &= ~ZFS_PROJID;
1559         }
1560 #endif
1561
1562         /*
1563          * we need to create all SAs below upon object creation.
1564          *
1565          * XXX The attribute order matters since the accounting callback relies
1566          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1567          * look up the UID/GID/PROJID attributes. Moreover, the callback does
1568          * not seem to support the spill block.
1569          * We define attributes in the same order as SA_*_OFFSET in order to
1570          * work around the problem. See ORI-610.
1571          */
1572         cnt = 0;
1573         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1574         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1575         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1576         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1577         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1578         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1579         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1580         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1581         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1582         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1583         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
1584         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1585 #ifdef ZFS_PROJINHERIT
1586         if (osd->od_projectused_dn)
1587                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PROJID(osd), NULL,
1588                                  &osa->projid, 8);
1589 #endif
1590         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1591         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1592
1593         if (xattr) {
1594                 rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
1595                 LASSERT(rc == 0);
1596
1597                 dxattr = osd_zio_buf_alloc(sa_size);
1598                 LASSERT(dxattr);
1599
1600                 rc = -nvlist_pack(xattr, &dxattr, &sa_size,
1601                                 NV_ENCODE_XDR, KM_SLEEP);
1602                 LASSERT(rc == 0);
1603
1604                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1605                                 NULL, dxattr, sa_size);
1606         }
1607
1608         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1609         if (dxattr)
1610                 osd_zio_buf_free(dxattr, sa_size);
1611
1612         return rc;
1613 }
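
/*
 * Note on the SA_ZPL_DXATTR blob written above: it is the object's xattr
 * nvlist packed with NV_ENCODE_XDR, so a reader recovers it by unpacking
 * the raw buffer back into an nvlist (nvlist_unpack() in the nvpair API).
 */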
1614
1615 int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1616                        uint64_t oid, dnode_t **dnp)
1617 {
1618         dmu_tx_hold_t *txh;
1619         int rc = 0;
1620
1621         /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1622         for (txh = list_tail(&tx->tx_holds); txh;
1623              txh = list_prev(&tx->tx_holds, txh)) {
1624                 dnode_t *dn = txh->txh_dnode;
1625                 dmu_buf_impl_t *db;
1626
1627                 if (dn == NULL)
1628                         continue;
1629                 if (dn->dn_object != oid)
1630                         continue;
1631                 db = dn->dn_bonus;
1632                 if (db == NULL) {
1633                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1634                         if (dn->dn_bonus == NULL)
1635                                 dbuf_create_bonus(dn);
1636                         rw_exit(&dn->dn_struct_rwlock);
1637                 }
1638                 db = dn->dn_bonus;
1639                 LASSERT(db);
1640                 LASSERT(dn->dn_handle);
1641                 DB_DNODE_ENTER(db);
1642                 if (refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1643                         refcount_add(&dn->dn_holds, osd_obj_tag);
1644                         atomic_inc_32(&dn->dn_dbufs_count);
1645                 }
1646                 *dnp = dn;
1647                 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1648                 break;
1649         }
1650
1651         if (unlikely(*dnp == NULL))
1652                 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1653
1654         return rc;
1655 }
1656
1657 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
1658 int osd_find_dnsize(struct osd_device *osd, int ea_in_bonus)
1659 {
1660         int dnsize;
1661
1662         if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
1663                 dnsize = DNODE_MIN_SIZE;
1664                 do {
1665                         if (DN_BONUS_SIZE(dnsize) >= ea_in_bonus + 32)
1666                                 break;
1667                         dnsize <<= 1;
1668                 } while (dnsize < DNODE_MAX_SIZE);
1669                 if (dnsize > DNODE_MAX_SIZE)
1670                         dnsize = DNODE_MAX_SIZE;
1671         } else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
1672                 dnsize = 1024;
1673         } else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
1674                 dnsize = 2048;
1675         } else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
1676                 dnsize = 4096;
1677         } else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
1678                 dnsize = 8192;
1679         } else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
1680                 dnsize = 16384;
1681         } else {
1682                 dnsize = DNODE_MIN_SIZE;
1683         }
1684         return dnsize;
1685 }
1686 #endif
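
/*
 * Worked example for the ZFS_DNSIZE_AUTO case above (illustrative; the exact
 * bonus capacity comes from DN_BONUS_SIZE(), roughly the dnode size minus the
 * dnode core and spill blkptr overhead): with ea_in_bonus around 600 bytes, a
 * 512-byte dnode cannot fit ea_in_bonus + 32, so the loop doubles once and
 * returns a 1K dnode.
 */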
1687
1688 /*
1689  * The transaction passed to this routine must have
1690  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1691  * to a transaction group.
1692  */
1693 int __osd_object_create(const struct lu_env *env, struct osd_device *osd,
1694                         struct osd_object *obj, const struct lu_fid *fid,
1695                         dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1696 {
1697         dmu_object_type_t type = DMU_OT_PLAIN_FILE_CONTENTS;
1698         uint64_t oid;
1699         int size;
1700
1701         /* Use DMU_OTN_UINT8_METADATA for local objects so that their data
1702          * blocks get an additional ditto copy */
1703         if (unlikely(S_ISREG(la->la_mode) &&
1704                      fid_seq_is_local_file(fid_seq(fid))))
1705                 type = DMU_OTN_UINT8_METADATA;
1706
1707         /* Create a new DMU object sized to fit the expected EAs in bonus. */
1708         if (obj)
1709                 size = obj->oo_ea_in_bonus;
1710         else
1711                 size = OSD_BASE_EA_IN_BONUS;
1712         oid = osd_dmu_object_alloc(osd->od_os, type, 0,
1713                                    osd_find_dnsize(osd, size), tx);
1714
1715         LASSERT(la->la_valid & LA_MODE);
1716         la->la_size = 0;
1717         la->la_nlink = 1;
1718
1719         return osd_find_new_dnode(env, tx, oid, dnp);
1720 }
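
/*
 * For illustration only (not part of the OSD call path): a minimal sketch of
 * the DMU transaction protocol required by __osd_object_create(), using stock
 * DMU calls with error handling trimmed:
 *
 *	tx = dmu_tx_create(osd->od_os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	rc = -dmu_tx_assign(tx, TXG_WAIT);
 *	if (rc == 0) {
 *		rc = __osd_object_create(env, osd, obj, fid, &dn, tx, la);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 *
 * In the OSD proper the holds are declared in osd_declare_create() (via
 * dmu_tx_hold_sa_create() and the ZAP holds) and the tx is assigned when the
 * transaction handle is started.
 */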
1721
1722 /*
1723  * The transaction passed to this routine must have
1724  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1725  * to a transaction group.
1726  *
1727  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1728  * This is fine for directories today, because storing the FID in the dirent
1729  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1730  * then we might need to re-evaluate the use of this flag and instead do
1731  * a conversion from the different internal ZAP hash formats being used. */
1732 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1733                      dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1734                      unsigned dnsize, zap_flags_t flags)
1735 {
1736         uint64_t oid;
1737
1738         /* Assert that the transaction has been assigned to a
1739          * transaction group. */
1740         LASSERT(tx->tx_txg != 0);
1741         *dnp = NULL;
1742
1743         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1744                                    DMU_OT_DIRECTORY_CONTENTS,
1745                                    14, /* == ZFS fzap_default_blockshift */
1746                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1747                                    dnsize, tx);
1748
1749         la->la_size = 2;
1750         la->la_nlink = 1;
1751
1752         return osd_find_new_dnode(env, tx, oid, dnp);
1753 }
1754
1755 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1756                           struct lu_attr *la, struct osd_thandle *oh)
1757 {
1758         struct osd_device *osd = osd_obj2dev(obj);
1759         dnode_t *dn;
1760         int rc;
1761
1762         /* Index files should be created as regular files so as not to confuse
1763          * ZPL, which could otherwise interpret them as directories.
1764          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to use
1765          * binary keys */
1766         LASSERT(S_ISREG(la->la_mode));
1767         rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1768                 osd_find_dnsize(osd, obj->oo_ea_in_bonus), ZAP_FLAG_UINT64_KEY);
1769         if (rc)
1770                 return ERR_PTR(rc);
1771         return dn;
1772 }
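
/*
 * Note: an index created with ZAP_FLAG_UINT64_KEY holds fixed-size binary
 * keys, so its entries are accessed through the uint64 ZAP interfaces
 * (zap_add_uint64(), zap_lookup_uint64(), ...) rather than the string-keyed
 * variants used for directory entries.
 */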
1773
1774 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1775                           struct lu_attr *la, struct osd_thandle *oh)
1776 {
1777         struct osd_device *osd = osd_obj2dev(obj);
1778         dnode_t *dn;
1779         int rc;
1780
1781         LASSERT(S_ISDIR(la->la_mode));
1782         rc = __osd_zap_create(env, osd, &dn, oh->ot_tx, la,
1783                               osd_find_dnsize(osd, obj->oo_ea_in_bonus), 0);
1784         if (rc)
1785                 return ERR_PTR(rc);
1786         return dn;
1787 }
1788
1789 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1790                           struct lu_attr *la, struct osd_thandle *oh)
1791 {
1792         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1793         struct osd_device *osd = osd_obj2dev(obj);
1794         dnode_t *dn;
1795         int rc;
1796
1797         LASSERT(S_ISREG(la->la_mode));
1798         rc = __osd_object_create(env, osd, obj, fid, &dn, oh->ot_tx, la);
1799         if (rc)
1800                 return ERR_PTR(rc);
1801
1802         if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid))) {
1803                 /* The minimum block size must be at least PAGE_SIZE, otherwise
1804                  * it will break the assumption in tgt_thread_big_cache where
1805                  * the array size is PTLRPC_MAX_BRW_PAGES. It will also affect
1806                  * RDMA due to sub-page transfer sizes */
1807                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1808                                                PAGE_SIZE, 0, oh->ot_tx);
1809                 if (unlikely(rc)) {
1810                         CERROR("%s: can't change blocksize: %d\n",
1811                                osd->od_svname, rc);
1812                         return ERR_PTR(rc);
1813                 }
1814         }
1815
1816         return dn;
1817 }
1818
1819 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1820                           struct lu_attr *la, struct osd_thandle *oh)
1821 {
1822         dnode_t *dn;
1823         int rc;
1824
1825         LASSERT(S_ISLNK(la->la_mode));
1826         rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1827                                  lu_object_fid(&obj->oo_dt.do_lu),
1828                                  &dn, oh->ot_tx, la);
1829         if (rc)
1830                 return ERR_PTR(rc);
1831         return dn;
1832 }
1833
1834 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1835                           struct lu_attr *la, struct osd_thandle *oh)
1836 {
1837         dnode_t *dn;
1838         int rc;
1839
1840         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1841                 la->la_valid |= LA_RDEV;
1842
1843         rc = __osd_object_create(env, osd_obj2dev(obj), obj,
1844                                  lu_object_fid(&obj->oo_dt.do_lu),
1845                                  &dn, oh->ot_tx, la);
1846         if (rc)
1847                 return ERR_PTR(rc);
1848         return dn;
1849 }
1850
1851 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1852                                    struct osd_object *obj,
1853                                    struct lu_attr *la,
1854                                    struct osd_thandle *oh);
1855
1856 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1857 {
1858         osd_obj_type_f result;
1859
1860         switch (type) {
1861         case DFT_DIR:
1862                 result = osd_mkdir;
1863                 break;
1864         case DFT_INDEX:
1865                 result = osd_mkidx;
1866                 break;
1867         case DFT_REGULAR:
1868                 result = osd_mkreg;
1869                 break;
1870         case DFT_SYM:
1871                 result = osd_mksym;
1872                 break;
1873         case DFT_NODE:
1874                 result = osd_mknod;
1875                 break;
1876         default:
1877                 LBUG();
1878                 break;
1879         }
1880         return result;
1881 }
1882
1883 /*
1884  * Concurrency: @dt is write locked.
1885  */
1886 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1887                       struct lu_attr *attr, struct dt_allocation_hint *hint,
1888                       struct dt_object_format *dof, struct thandle *th)
1889 {
1890         struct osd_thread_info  *info = osd_oti_get(env);
1891         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1892         struct zpl_direntry     *zde = &info->oti_zde.lzd_reg;
1893         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1894         struct osd_object       *obj = osd_dt_obj(dt);
1895         struct osd_device       *osd = osd_obj2dev(obj);
1896         char                    *buf = info->oti_str;
1897         struct osd_thandle      *oh;
1898         dnode_t *dn = NULL, *zdn = NULL;
1899         uint64_t                 zapid, parent = 0;
1900         int                      rc;
1901         __u32 compat = 0;
1902
1903         ENTRY;
1904
1905         LASSERT(!fid_is_acct(fid));
1906
1907         /* concurrent create declarations should not see
1908          * the object in an inconsistent state (db, attr, etc).
1909          * in regular cases the acquisition should be cheap */
1910         down_write(&obj->oo_guard);
1911
1912         if (unlikely(dt_object_exists(dt)))
1913                 GOTO(out, rc = -EEXIST);
1914
1915         LASSERT(osd_invariant(obj));
1916         LASSERT(dof != NULL);
1917
1918         LASSERT(th != NULL);
1919         oh = container_of0(th, struct osd_thandle, ot_super);
1920
1921         LASSERT(obj->oo_dn == NULL);
1922
1923         /* to follow the ZFS on-disk format we need
1924          * to initialize the parent dnode properly */
1925         if (hint != NULL && hint->dah_parent != NULL &&
1926             !dt_object_remote(hint->dah_parent))
1927                 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1928
1929         /* we may fix up some attributes; better not to modify the source */
1930         obj->oo_attr = *attr;
1931         obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1932
1933 #ifdef ZFS_PROJINHERIT
1934         if (osd->od_projectused_dn) {
1935                 if (!(obj->oo_attr.la_valid & LA_PROJID))
1936                         obj->oo_attr.la_projid = ZFS_DEFAULT_PROJID;
1937                 obj->oo_with_projid = 1;
1938         }
1939 #endif
1940
1941         dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1942         if (IS_ERR(dn)) {
1943                 rc = PTR_ERR(dn);
1944                 dn = NULL;
1945                 GOTO(out, rc);
1946         }
1947
1948         zde->zde_pad = 0;
1949         zde->zde_dnode = dn->dn_object;
1950         zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1951
1952         zapid = osd_get_name_n_idx(env, osd, fid, buf,
1953                                    sizeof(info->oti_str), &zdn);
1954         if (CFS_FAIL_CHECK(OBD_FAIL_OSD_NO_OI_ENTRY) ||
1955             (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_NO_ENTRY)))
1956                 goto skip_add;
1957
1958         if (osd->od_is_ost && OBD_FAIL_CHECK(OBD_FAIL_OSD_COMPAT_INVALID_ENTRY))
1959                 zde->zde_dnode++;
1960
1961         rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
1962         if (rc)
1963                 GOTO(out, rc);
1964
1965 skip_add:
1966         obj->oo_dn = dn;
1967         /* Now add in all of the "SA" attributes */
1968         rc = osd_sa_handle_get(obj);
1969         if (rc)
1970                 GOTO(out, rc);
1971
1972         rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
1973         if (rc)
1974                 GOTO(out, rc);
1975
1976         /* initialize LMA */
1977         if (fid_is_idif(fid) || (fid_is_norm(fid) && osd->od_is_ost))
1978                 compat |= LMAC_FID_ON_OST;
1979         lustre_lma_init(lma, fid, compat, 0);
1980         lustre_lma_swab(lma);
1981         rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
1982                                     (uchar_t *)lma, sizeof(*lma));
1983         if (rc)
1984                 GOTO(out, rc);
1985
1986         /* configure new osd object */
1987         obj->oo_parent = parent != 0 ? parent : zapid;
1988         obj->oo_late_attr_set = 1;
1989         rc = __osd_sa_xattr_schedule_update(env, obj, oh);
1990         if (rc)
1991                 GOTO(out, rc);
1992
1993         /* XXX: oo_lma_flags */
1994         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
1995         if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
1996                 /* no body operations for accounting objects */
1997                 obj->oo_dt.do_body_ops = &osd_body_ops;
1998
1999         osd_idc_find_and_init(env, osd, obj);
2000
2001 out:
2002         if (unlikely(rc && dn)) {
2003                 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
2004                 osd_dnode_rele(dn);
2005                 obj->oo_dn = NULL;
2006         } else if (!rc) {
2007                 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
2008         }
2009         up_write(&obj->oo_guard);
2010         RETURN(rc);
2011 }
2012
2013 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
2014                                struct thandle *th)
2015 {
2016         osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2017         return osd_declare_attr_set(env, dt, NULL, th);
2018 }
2019
2020 /*
2021  * Concurrency: @dt is write locked.
2022  */
2023 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
2024                        struct thandle *handle)
2025 {
2026         struct osd_object       *obj = osd_dt_obj(dt);
2027         struct osd_thandle      *oh;
2028         struct osd_device       *osd = osd_obj2dev(obj);
2029         uint64_t                 nlink;
2030         int rc;
2031
2032         ENTRY;
2033
2034         down_read(&obj->oo_guard);
2035         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2036                 GOTO(out, rc = -ENOENT);
2037
2038         LASSERT(osd_invariant(obj));
2039         LASSERT(obj->oo_sa_hdl != NULL);
2040
2041         oh = container_of0(handle, struct osd_thandle, ot_super);
2042
2043         write_lock(&obj->oo_attr_lock);
2044         nlink = ++obj->oo_attr.la_nlink;
2045         write_unlock(&obj->oo_attr_lock);
2046
2047         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2048
2049 out:
2050         up_read(&obj->oo_guard);
2051         RETURN(rc);
2052 }
2053
2054 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
2055                                struct thandle *handle)
2056 {
2057         osd_idc_find_and_init(env, osd_dev(dt->do_lu.lo_dev), osd_dt_obj(dt));
2058         return osd_declare_attr_set(env, dt, NULL, handle);
2059 }
2060
2061 /*
2062  * Concurrency: @dt is write locked.
2063  */
2064 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
2065                        struct thandle *handle)
2066 {
2067         struct osd_object       *obj = osd_dt_obj(dt);
2068         struct osd_thandle      *oh;
2069         struct osd_device       *osd = osd_obj2dev(obj);
2070         uint64_t                 nlink;
2071         int                      rc;
2072
2073         ENTRY;
2074
2075         down_read(&obj->oo_guard);
2076
2077         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
2078                 GOTO(out, rc = -ENOENT);
2079
2080         LASSERT(osd_invariant(obj));
2081         LASSERT(obj->oo_sa_hdl != NULL);
2082
2083         oh = container_of0(handle, struct osd_thandle, ot_super);
2084         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2085
2086         write_lock(&obj->oo_attr_lock);
2087         nlink = --obj->oo_attr.la_nlink;
2088         write_unlock(&obj->oo_attr_lock);
2089
2090         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
2091
2092 out:
2093         up_read(&obj->oo_guard);
2094         RETURN(rc);
2095 }
2096
2097 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
2098                            __u64 start, __u64 end)
2099 {
2100         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
2101         ENTRY;
2102
2103         /* XXX: no other option than syncing the whole filesystem until we
2104          * support ZIL.  If the object tracked the txg that it was last
2105          * modified in, it could pass that txg here instead of "0".  Maybe
2106          * the changes are already committed, so no wait is needed at all? */
2107         if (!osd->od_dt_dev.dd_rdonly) {
2108                 if (osd_object_sync_delay_us < 0)
2109                         txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
2110                 else
2111                         udelay(osd_object_sync_delay_us);
2112         }
2113
2114         RETURN(0);
2115 }
2116
2117 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
2118 {
2119         return 0;
2120 }
2121
2122 static struct dt_object_operations osd_obj_ops = {
2123         .do_read_lock           = osd_read_lock,
2124         .do_write_lock          = osd_write_lock,
2125         .do_read_unlock         = osd_read_unlock,
2126         .do_write_unlock        = osd_write_unlock,
2127         .do_write_locked        = osd_write_locked,
2128         .do_attr_get            = osd_attr_get,
2129         .do_declare_attr_set    = osd_declare_attr_set,
2130         .do_attr_set            = osd_attr_set,
2131         .do_ah_init             = osd_ah_init,
2132         .do_declare_create      = osd_declare_create,
2133         .do_create              = osd_create,
2134         .do_declare_destroy     = osd_declare_destroy,
2135         .do_destroy             = osd_destroy,
2136         .do_index_try           = osd_index_try,
2137         .do_declare_ref_add     = osd_declare_ref_add,
2138         .do_ref_add             = osd_ref_add,
2139         .do_declare_ref_del     = osd_declare_ref_del,
2140         .do_ref_del             = osd_ref_del,
2141         .do_xattr_get           = osd_xattr_get,
2142         .do_declare_xattr_set   = osd_declare_xattr_set,
2143         .do_xattr_set           = osd_xattr_set,
2144         .do_declare_xattr_del   = osd_declare_xattr_del,
2145         .do_xattr_del           = osd_xattr_del,
2146         .do_xattr_list          = osd_xattr_list,
2147         .do_object_sync         = osd_object_sync,
2148         .do_invalidate          = osd_invalidate,
2149 };
2150
2151 static struct lu_object_operations osd_lu_obj_ops = {
2152         .loo_object_init        = osd_object_init,
2153         .loo_object_delete      = osd_object_delete,
2154         .loo_object_release     = osd_object_release,
2155         .loo_object_free        = osd_object_free,
2156         .loo_object_print       = osd_object_print,
2157         .loo_object_invariant   = osd_object_invariant,
2158 };
2159
2160 static int osd_otable_it_attr_get(const struct lu_env *env,
2161                                 struct dt_object *dt,
2162                                 struct lu_attr *attr)
2163 {
2164         attr->la_valid = 0;
2165         return 0;
2166 }
2167
2168 static struct dt_object_operations osd_obj_otable_it_ops = {
2169         .do_attr_get            = osd_otable_it_attr_get,
2170         .do_index_try           = osd_index_try,
2171 };
2172
2173 module_param(osd_object_sync_delay_us, int, 0644);
2174 MODULE_PARM_DESC(osd_object_sync_delay_us,
2175                  "If zero or larger, delay N usec instead of doing object sync");
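
/*
 * Debug/tuning knob: a non-negative value makes osd_object_sync() skip
 * txg_wait_synced() and simply udelay() for that many microseconds. It can be
 * set as a module option at load time or, assuming the usual module naming,
 * at runtime via /sys/module/osd_zfs/parameters/osd_object_sync_delay_us.
 */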