1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osd-zfs/osd_object.c
33  *
34  * Author: Alex Zhuravlev <bzzz@whamcloud.com>
35  * Author: Mike Pershin <tappro@whamcloud.com>
36  * Author: Johann Lombardi <johann@whamcloud.com>
37  */
38
39 #define DEBUG_SUBSYSTEM S_OSD
40
41 #include <libcfs/libcfs.h>
42 #include <obd_support.h>
43 #include <lustre_net.h>
44 #include <obd.h>
45 #include <obd_class.h>
46 #include <lustre_disk.h>
47 #include <lustre_fid.h>
48
49 #include "osd_internal.h"
50
51 #include <sys/dnode.h>
52 #include <sys/dbuf.h>
53 #include <sys/spa.h>
54 #include <sys/stat.h>
55 #include <sys/zap.h>
56 #include <sys/spa_impl.h>
57 #include <sys/zfs_znode.h>
58 #include <sys/dmu_tx.h>
59 #include <sys/dmu_objset.h>
60 #include <sys/dsl_prop.h>
61 #include <sys/sa_impl.h>
62 #include <sys/txg.h>
63
64 char *osd_obj_tag = "osd_object";
65
66 static struct dt_object_operations osd_obj_ops;
67 static struct lu_object_operations osd_lu_obj_ops;
68 extern struct dt_body_operations osd_body_ops;
69 static struct dt_object_operations osd_obj_otable_it_ops;
70
71 extern struct kmem_cache *osd_object_kmem;
72
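/* Release the object's SA handle, if one was initialized. */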
73 static void
74 osd_object_sa_fini(struct osd_object *obj)
75 {
76         if (obj->oo_sa_hdl) {
77                 sa_handle_destroy(obj->oo_sa_hdl);
78                 obj->oo_sa_hdl = NULL;
79         }
80 }
81
82 static int
83 osd_object_sa_init(struct osd_object *obj, struct osd_device *o)
84 {
85         int rc;
86
87         LASSERT(obj->oo_sa_hdl == NULL);
88         LASSERT(obj->oo_dn != NULL);
89
90         rc = osd_sa_handle_get(obj);
91         if (rc)
92                 return rc;
93
94         /* Cache the xattr object id, valid for the life of the object */
95         rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_XATTR(o), &obj->oo_xattr, 8);
96         if (rc == -ENOENT) {
97                 obj->oo_xattr = ZFS_NO_OBJECT;
98                 rc = 0;
99         } else if (rc) {
100                 osd_object_sa_fini(obj);
101         }
102
103         return rc;
104 }
105
106 /*
107  * Add object to list of dirty objects in tx handle.
108  */
109 void osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
110 {
111         if (!list_empty(&obj->oo_sa_linkage))
112                 return;
113
114         write_lock(&obj->oo_attr_lock);
115         if (likely(list_empty(&obj->oo_sa_linkage)))
116                 list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
117         write_unlock(&obj->oo_attr_lock);
118 }
119
120 /*
121  * Release spill block dbuf hold for all dirty SAs.
122  */
123 void osd_object_sa_dirty_rele(const struct lu_env *env, struct osd_thandle *oh)
124 {
125         struct osd_object *obj;
126
127         while (!list_empty(&oh->ot_sa_list)) {
128                 obj = list_entry(oh->ot_sa_list.next,
129                                  struct osd_object, oo_sa_linkage);
130                 write_lock(&obj->oo_attr_lock);
131                 list_del_init(&obj->oo_sa_linkage);
132                 write_unlock(&obj->oo_attr_lock);
133                 if (obj->oo_late_xattr) {
134                         /*
135                          * take oo_guard to protect oo_sa_xattr buffer
136                          * from concurrent update by osd_xattr_set()
137                          */
138                         LASSERT(oh->ot_assigned != 0);
139                         down_write(&obj->oo_guard);
140                         if (obj->oo_late_attr_set)
141                                 __osd_sa_attr_init(env, obj, oh);
142                         else if (obj->oo_late_xattr)
143                                 __osd_sa_xattr_update(env, obj, oh);
144                         up_write(&obj->oo_guard);
145                 }
146                 sa_spill_rele(obj->oo_sa_hdl);
147         }
148 }
149
150 /*
151  * Update the SA and add the object to the dirty list.
152  */
153 int osd_object_sa_update(struct osd_object *obj, sa_attr_type_t type,
154                          void *buf, uint32_t buflen, struct osd_thandle *oh)
155 {
156         int rc;
157
158         LASSERT(obj->oo_sa_hdl != NULL);
159         LASSERT(oh->ot_tx != NULL);
160
161         rc = -sa_update(obj->oo_sa_hdl, type, buf, buflen, oh->ot_tx);
162         osd_object_sa_dirty_add(obj, oh);
163
164         return rc;
165 }
166
167 /*
168  * Bulk update the SA and add the object to the dirty list.
169  */
170 static int
171 osd_object_sa_bulk_update(struct osd_object *obj, sa_bulk_attr_t *attrs,
172                           int count, struct osd_thandle *oh)
173 {
174         int rc;
175
176         LASSERT(obj->oo_sa_hdl != NULL);
177         LASSERT(oh->ot_tx != NULL);
178
179         rc = -sa_bulk_update(obj->oo_sa_hdl, attrs, count, oh->ot_tx);
180         osd_object_sa_dirty_add(obj, oh);
181
182         return rc;
183 }
184
185 /*
186  * Retrieve the attributes of a DMU object
187  */
188 int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
189                           struct osd_object *obj, struct lu_attr *la)
190 {
191         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
192         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
193         int              cnt = 0;
194         int              rc;
195         ENTRY;
196
197         LASSERT(obj->oo_dn != NULL);
198
199         la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
200                         LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;
201
202         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
203         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
204         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
205         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
206         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
207         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
208         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
209         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
210         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
211         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
212
213         rc = -sa_bulk_lookup(obj->oo_sa_hdl, bulk, cnt);
214         if (rc)
215                 GOTO(out_sa, rc);
216
217         la->la_atime = osa->atime[0];
218         la->la_mtime = osa->mtime[0];
219         la->la_ctime = osa->ctime[0];
220         la->la_mode = osa->mode;
221         la->la_uid = osa->uid;
222         la->la_gid = osa->gid;
223         la->la_nlink = osa->nlink;
224         la->la_flags = attrs_zfs2fs(osa->flags);
225         la->la_size = osa->size;
226
227         /* Try to get the extra flag from LMA. Right now, only the LMAI_ORPHAN
228          * flag is stored in LMA, and it is only for orphan directories */
229         if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
230                 struct osd_thread_info *info = osd_oti_get(env);
231                 struct lustre_mdt_attrs *lma;
232                 struct lu_buf buf;
233
234                 lma = (struct lustre_mdt_attrs *)info->oti_buf;
235                 buf.lb_buf = lma;
236                 buf.lb_len = sizeof(info->oti_buf);
237                 rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
238                 if (rc > 0) {
239                         rc = 0;
240                         lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
241                         obj->oo_lma_flags =
242                                 lma_to_lustre_flags(lma->lma_incompat);
243
244                 } else if (rc == -ENODATA) {
245                         rc = 0;
246                 }
247         }
248
249         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
250                 rc = -sa_lookup(obj->oo_sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
251                 if (rc)
252                         GOTO(out_sa, rc);
253                 la->la_rdev = osa->rdev;
254                 la->la_valid |= LA_RDEV;
255         }
256 out_sa:
257
258         RETURN(rc);
259 }
260
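/*
 * Hold the bonus buffer of DMU object @oid and return the backing dnode in
 * @dnp.  DB_DNODE_EXIT() is intentionally not called here: the dnode stays
 * referenced until the hold is dropped later via osd_dnode_rele().
 */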
261 int __osd_obj2dnode(objset_t *os, uint64_t oid, dnode_t **dnp)
262 {
263         dmu_buf_t *db;
264         dmu_buf_impl_t *dbi;
265         int rc;
266
267         rc = -dmu_bonus_hold(os, oid, osd_obj_tag, &db);
268         if (rc)
269                 return rc;
270
271         dbi = (dmu_buf_impl_t *)db;
272         DB_DNODE_ENTER(dbi);
273         *dnp = DB_DNODE(dbi);
274         LASSERT(*dnp != NULL);
275
276         return 0;
277 }
278
279 /*
280  * Concurrency: no concurrent access is possible that early in object
281  * life-cycle.
282  */
283 struct lu_object *osd_object_alloc(const struct lu_env *env,
284                                    const struct lu_object_header *hdr,
285                                    struct lu_device *d)
286 {
287         struct osd_object *mo;
288
289         OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, GFP_NOFS);
290         if (mo != NULL) {
291                 struct lu_object *l;
292
293                 l = &mo->oo_dt.do_lu;
294                 dt_object_init(&mo->oo_dt, NULL, d);
295                 mo->oo_dt.do_ops = &osd_obj_ops;
296                 l->lo_ops = &osd_lu_obj_ops;
297                 INIT_LIST_HEAD(&mo->oo_sa_linkage);
298                 INIT_LIST_HEAD(&mo->oo_unlinked_linkage);
299                 init_rwsem(&mo->oo_sem);
300                 init_rwsem(&mo->oo_guard);
301                 rwlock_init(&mo->oo_attr_lock);
302                 mo->oo_destroy = OSD_DESTROY_NONE;
303                 return l;
304         } else {
305                 return NULL;
306         }
307 }
308
309 /*
310  * Concurrency: shouldn't matter.
311  */
312 int osd_object_init0(const struct lu_env *env, struct osd_object *obj)
313 {
314         struct osd_device       *osd = osd_obj2dev(obj);
315         const struct lu_fid     *fid = lu_object_fid(&obj->oo_dt.do_lu);
316         int                      rc = 0;
317         ENTRY;
318
319         if (obj->oo_dn == NULL)
320                 RETURN(0);
321
322         /* object exists */
323
324         rc = osd_object_sa_init(obj, osd);
325         if (rc)
326                 RETURN(rc);
327
328         /* cache attrs in object */
329         rc = __osd_object_attr_get(env, osd, obj, &obj->oo_attr);
330         if (rc)
331                 RETURN(rc);
332
333         if (likely(!fid_is_acct(fid)))
334                 /* no body operations for accounting objects */
335                 obj->oo_dt.do_body_ops = &osd_body_ops;
336
337         /*
338          * initialize object before marking it existing
339          */
340         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
341
342         smp_mb();
343         obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
344
345         RETURN(0);
346 }
347
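/*
 * Sanity-check the object's LMA xattr: reject incompatible feature flags
 * this OSD does not understand; a missing LMA xattr is tolerated.
 */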
348 static int osd_check_lma(const struct lu_env *env, struct osd_object *obj)
349 {
350         struct osd_thread_info  *info = osd_oti_get(env);
351         struct lu_buf           buf;
352         int                     rc;
353         struct lustre_mdt_attrs *lma;
354         ENTRY;
355
356         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
357         lma = (struct lustre_mdt_attrs *)info->oti_buf;
358         buf.lb_buf = lma;
359         buf.lb_len = sizeof(info->oti_buf);
360
361         rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
362         if (rc > 0) {
363                 rc = 0;
364                 lustre_lma_swab(lma);
365                 if (unlikely((lma->lma_incompat & ~LMA_INCOMPAT_SUPP) ||
366                              CFS_FAIL_CHECK(OBD_FAIL_OSD_LMA_INCOMPAT))) {
367                         CWARN("%s: unsupported incompat LMA feature(s) %#x for "
368                               "fid = "DFID"\n", osd_obj2dev(obj)->od_svname,
369                               lma->lma_incompat & ~LMA_INCOMPAT_SUPP,
370                               PFID(lu_object_fid(&obj->oo_dt.do_lu)));
371                         rc = -EOPNOTSUPP;
372                 }
373         } else if (rc == -ENODATA) {
374                 /* LMA xattr hasn't been initialized yet */
375                 rc = 0;
376         }
377
378         RETURN(rc);
379 }
380
381 /**
382  * Helper function to retrieve the DMU dnode for an accounting object fid
383  */
384 static dnode_t *osd_quota_fid2dmu(const struct osd_device *osd,
385                                   const struct lu_fid *fid)
386 {
387         dnode_t *dn = NULL;
388
389         LASSERT(fid_is_acct(fid));
390
391         switch (fid_oid(fid)) {
392         case ACCT_USER_OID:
393                 dn = osd->od_userused_dn;
394                 break;
395         case ACCT_GROUP_OID:
396                 dn = osd->od_groupused_dn;
397                 break;
398         default:
399                 break;
400         }
401
402         return dn;
403 }
404
405 /*
406  * Concurrency: no concurrent access is possible that early in object
407  * life-cycle.
408  */
409 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
410                            const struct lu_object_conf *conf)
411 {
412         struct osd_object *obj = osd_obj(l);
413         struct osd_device *osd = osd_obj2dev(obj);
414         const struct lu_fid *fid = lu_object_fid(l);
415         uint64_t oid;
416         int rc = 0;
417         ENTRY;
418
419         LASSERT(osd_invariant(obj));
420
421         if (fid_is_otable_it(&l->lo_header->loh_fid)) {
422                 obj->oo_dt.do_ops = &osd_obj_otable_it_ops;
423                 l->lo_header->loh_attr |= LOHA_EXISTS;
424                 RETURN(0);
425         }
426
427         if (conf != NULL && conf->loc_flags & LOC_F_NEW)
428                 GOTO(out, rc = 0);
429
430         if (unlikely(fid_is_acct(fid))) {
431                 obj->oo_dn = osd_quota_fid2dmu(osd, fid);
432                 if (obj->oo_dn) {
433                         obj->oo_dt.do_index_ops = &osd_acct_index_ops;
434                         l->lo_header->loh_attr |= LOHA_EXISTS;
435                 }
436
437                 GOTO(out, rc = 0);
438         }
439
440         rc = osd_fid_lookup(env, osd, fid, &oid);
441         if (rc == 0) {
442                 LASSERT(obj->oo_dn == NULL);
443                 rc = __osd_obj2dnode(osd->od_os, oid, &obj->oo_dn);
444                 /* EEXIST will be returned if object is being deleted in ZFS */
445                 if (rc == -EEXIST) {
446                         rc = 0;
447                         GOTO(out, rc);
448                 }
449                 if (rc != 0) {
450                         CERROR("%s: lookup "DFID"/%#llx failed: rc = %d\n",
451                                osd->od_svname, PFID(lu_object_fid(l)), oid, rc);
452                         GOTO(out, rc);
453                 }
454                 LASSERT(obj->oo_dn);
455                 rc = osd_object_init0(env, obj);
456                 if (rc != 0)
457                         GOTO(out, rc);
458
459                 rc = osd_check_lma(env, obj);
460                 if (rc != 0)
461                         GOTO(out, rc);
462         } else if (rc == -ENOENT) {
463                 rc = 0;
464         }
465         LASSERT(osd_invariant(obj));
466 out:
467         RETURN(rc);
468 }
469
470 /*
471  * Concurrency: no concurrent access is possible that late in object
472  * life-cycle.
473  */
474 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
475 {
476         struct osd_object *obj = osd_obj(l);
477
478         LASSERT(osd_invariant(obj));
479
480         dt_object_fini(&obj->oo_dt);
481         OBD_SLAB_FREE_PTR(obj, osd_object_kmem);
482 }
483
484 static int
485 osd_object_unlinked_add(struct osd_object *obj, struct osd_thandle *oh)
486 {
487         int rc = -EBUSY;
488
489         LASSERT(obj->oo_destroy == OSD_DESTROY_ASYNC);
490
491         /* the object is supposed to be exclusively locked by
492          * the caller (osd_destroy()), while the transaction
493          * (oh) is per-thread and not shared */
494         if (likely(list_empty(&obj->oo_unlinked_linkage))) {
495                 list_add(&obj->oo_unlinked_linkage, &oh->ot_unlinked_list);
496                 rc = 0;
497         }
498
499         return rc;
500 }
501
502 /* Default to max data size covered by a level-1 indirect block */
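/* i.e. 2^(DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT) block pointers per level-1
 * indirect block, each covering up to 2^SPA_MAXBLOCKSHIFT bytes of data */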
503 static unsigned long osd_sync_destroy_max_size =
504         1UL << (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT + SPA_MAXBLOCKSHIFT);
505 module_param(osd_sync_destroy_max_size, ulong, 0444);
506 MODULE_PARM_DESC(osd_sync_destroy_max_size, "Maximum object size to use synchronous destroy.");
507
508 static inline void
509 osd_object_set_destroy_type(struct osd_object *obj)
510 {
511         /*
512          * Lock-less OST_WRITE can race with OST_DESTROY, so set destroy type
513          * only once and use it consistently thereafter.
514          */
515         down_write(&obj->oo_guard);
516         if (obj->oo_destroy == OSD_DESTROY_NONE) {
517                 if (obj->oo_attr.la_size <= osd_sync_destroy_max_size)
518                         obj->oo_destroy = OSD_DESTROY_SYNC;
519                 else /* Larger objects are destroyed asynchronously */
520                         obj->oo_destroy = OSD_DESTROY_ASYNC;
521         }
522         up_write(&obj->oo_guard);
523 }
524
525 static int osd_declare_destroy(const struct lu_env *env, struct dt_object *dt,
526                                struct thandle *th)
527 {
528         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
529         struct osd_object       *obj = osd_dt_obj(dt);
530         struct osd_device       *osd = osd_obj2dev(obj);
531         struct osd_thandle      *oh;
532         dnode_t *dn;
533         int                      rc;
534         uint64_t                 zapid;
535         ENTRY;
536
537         LASSERT(th != NULL);
538         LASSERT(dt_object_exists(dt));
539
540         oh = container_of0(th, struct osd_thandle, ot_super);
541         LASSERT(oh->ot_tx != NULL);
542
543         /* declare that we'll remove object from fid-dnode mapping */
544         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
545         osd_tx_hold_zap(oh->ot_tx, zapid, dn, FALSE, NULL);
546
547         osd_declare_xattrs_destroy(env, obj, oh);
548
549         /* one less inode */
550         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
551                                obj->oo_attr.la_gid, -1, oh, false, NULL, false);
552         if (rc)
553                 RETURN(rc);
554
555         /* data to be truncated */
556         rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
557                                obj->oo_attr.la_gid, 0, oh, true, NULL, false);
558         if (rc)
559                 RETURN(rc);
560
561         osd_object_set_destroy_type(obj);
562         if (obj->oo_destroy == OSD_DESTROY_SYNC)
563                 dmu_tx_hold_free(oh->ot_tx, obj->oo_dn->dn_object,
564                                  0, DMU_OBJECT_END);
565         else
566                 osd_tx_hold_zap(oh->ot_tx, osd->od_unlinked->dn_object,
567                                 osd->od_unlinked, TRUE, NULL);
568
569         /* will help to find FID->ino when this object is being
570          * added to PENDING/ */
571         osd_idc_find_and_init(env, osd, obj);
572
573         RETURN(0);
574 }
575
576 static int osd_destroy(const struct lu_env *env, struct dt_object *dt,
577                        struct thandle *th)
578 {
579         struct osd_thread_info  *info = osd_oti_get(env);
580         char                    *buf = info->oti_str;
581         struct osd_object       *obj = osd_dt_obj(dt);
582         struct osd_device       *osd = osd_obj2dev(obj);
583         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
584         struct osd_thandle      *oh;
585         int                      rc;
586         uint64_t                 oid, zapid;
587         dnode_t *zdn;
588         ENTRY;
589
590         down_write(&obj->oo_guard);
591
592         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
593                 GOTO(out, rc = -ENOENT);
594
595         LASSERT(obj->oo_dn != NULL);
596
597         oh = container_of0(th, struct osd_thandle, ot_super);
598         LASSERT(oh != NULL);
599         LASSERT(oh->ot_tx != NULL);
600
601         /* remove obj ref from index dir (it depends) */
602         zapid = osd_get_name_n_idx(env, osd, fid, buf,
603                                    sizeof(info->oti_str), &zdn);
604         rc = osd_zap_remove(osd, zapid, zdn, buf, oh->ot_tx);
605         if (rc) {
606                 CERROR("%s: zap_remove(%s) failed: rc = %d\n",
607                        osd->od_svname, buf, rc);
608                 GOTO(out, rc);
609         }
610
611         rc = osd_xattrs_destroy(env, obj, oh);
612         if (rc) {
613                 CERROR("%s: cannot destroy xattrs for %s: rc = %d\n",
614                        osd->od_svname, buf, rc);
615                 GOTO(out, rc);
616         }
617
618         oid = obj->oo_dn->dn_object;
619         if (unlikely(obj->oo_destroy == OSD_DESTROY_NONE)) {
620                 /* this may happen if the destroy wasn't declared,
621                  * e.g. when the object is created and then destroyed
622                  * in the same transaction; we don't need additional
623                  * space for the destroy specifically */
624                 LASSERT(obj->oo_attr.la_size <= osd_sync_destroy_max_size);
625                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
626                 if (rc)
627                         CERROR("%s: failed to free %s %llu: rc = %d\n",
628                                osd->od_svname, buf, oid, rc);
629         } else if (obj->oo_destroy == OSD_DESTROY_SYNC) {
630                 rc = -dmu_object_free(osd->od_os, oid, oh->ot_tx);
631                 if (rc)
632                         CERROR("%s: failed to free %s %llu: rc = %d\n",
633                                osd->od_svname, buf, oid, rc);
634         } else { /* asynchronous destroy */
635                 char *key = info->oti_key;
636
637                 rc = osd_object_unlinked_add(obj, oh);
638                 if (rc)
639                         GOTO(out, rc);
640
641                 snprintf(key, sizeof(info->oti_key), "%llx", oid);
642                 rc = osd_zap_add(osd, osd->od_unlinked->dn_object,
643                                  osd->od_unlinked, key, 8, 1, &oid, oh->ot_tx);
644                 if (rc)
645                         CERROR("%s: zap_add_int() failed %s %llu: rc = %d\n",
646                                osd->od_svname, buf, oid, rc);
647         }
648
649 out:
650         /* not needed in the cache anymore */
651         set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
652         if (rc == 0)
653                 obj->oo_destroyed = 1;
654         up_write(&obj->oo_guard);
655         RETURN (0);
656 }
657
658 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
659 {
660         struct osd_object *obj = osd_obj(l);
661         const struct lu_fid *fid = lu_object_fid(l);
662
663         if (obj->oo_dn) {
664                 if (likely(!fid_is_acct(fid))) {
665                         osd_object_sa_fini(obj);
666                         if (obj->oo_sa_xattr) {
667                                 nvlist_free(obj->oo_sa_xattr);
668                                 obj->oo_sa_xattr = NULL;
669                         }
670                         osd_dnode_rele(obj->oo_dn);
671                         list_del(&obj->oo_sa_linkage);
672                 }
673                 obj->oo_dn = NULL;
674         }
675 }
676
677 /*
678  * Concurrency: ->loo_object_release() is called under site spin-lock.
679  */
680 static void osd_object_release(const struct lu_env *env,
681                                struct lu_object *l)
682 {
683 }
684
685 /*
686  * Concurrency: shouldn't matter.
687  */
688 static int osd_object_print(const struct lu_env *env, void *cookie,
689                             lu_printer_t p, const struct lu_object *l)
690 {
691         struct osd_object *o = osd_obj(l);
692
693         return (*p)(env, cookie, LUSTRE_OSD_ZFS_NAME"-object@%p", o);
694 }
695
696 static void osd_read_lock(const struct lu_env *env, struct dt_object *dt,
697                           unsigned role)
698 {
699         struct osd_object *obj = osd_dt_obj(dt);
700
701         LASSERT(osd_invariant(obj));
702
703         down_read_nested(&obj->oo_sem, role);
704 }
705
706 static void osd_write_lock(const struct lu_env *env, struct dt_object *dt,
707                            unsigned role)
708 {
709         struct osd_object *obj = osd_dt_obj(dt);
710
711         LASSERT(osd_invariant(obj));
712
713         down_write_nested(&obj->oo_sem, role);
714 }
715
716 static void osd_read_unlock(const struct lu_env *env, struct dt_object *dt)
717 {
718         struct osd_object *obj = osd_dt_obj(dt);
719
720         LASSERT(osd_invariant(obj));
721         up_read(&obj->oo_sem);
722 }
723
724 static void osd_write_unlock(const struct lu_env *env, struct dt_object *dt)
725 {
726         struct osd_object *obj = osd_dt_obj(dt);
727
728         LASSERT(osd_invariant(obj));
729         up_write(&obj->oo_sem);
730 }
731
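/* Return 1 if the object is currently write-locked, 0 otherwise (probed
 * with a trylock that is released immediately when it succeeds). */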
732 static int osd_write_locked(const struct lu_env *env, struct dt_object *dt)
733 {
734         struct osd_object *obj = osd_dt_obj(dt);
735         int rc = 1;
736
737         LASSERT(osd_invariant(obj));
738
739         if (down_write_trylock(&obj->oo_sem)) {
740                 rc = 0;
741                 up_write(&obj->oo_sem);
742         }
743         return rc;
744 }
745
746 static int osd_attr_get(const struct lu_env *env,
747                         struct dt_object *dt,
748                         struct lu_attr *attr)
749 {
750         struct osd_object       *obj = osd_dt_obj(dt);
751         uint64_t                 blocks;
752         uint32_t                 blksize;
753         int                      rc = 0;
754
755         down_read(&obj->oo_guard);
756
757         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
758                 GOTO(out, rc = -ENOENT);
759
760         if (unlikely(fid_is_acct(lu_object_fid(&dt->do_lu))))
761                 GOTO(out, rc = 0);
762
763         LASSERT(osd_invariant(obj));
764         LASSERT(obj->oo_dn);
765
766         read_lock(&obj->oo_attr_lock);
767         *attr = obj->oo_attr;
768         if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
769                 attr->la_flags |= LUSTRE_ORPHAN_FL;
770         read_unlock(&obj->oo_attr_lock);
771
772         /* with ZFS_DEBUG, zrl_add_debug() called by DB_DNODE_ENTER()
773          * from within sa_object_size() can block on a mutex, so
774          * we can't call sa_object_size() while holding the rwlock */
775         sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
776         /* we do not control size of indices, so always calculate
777          * it from number of blocks reported by DMU */
778         if (S_ISDIR(attr->la_mode))
779                 attr->la_size = 512 * blocks;
780         /* Block size may not be set; suggest maximal I/O transfers. */
781         if (blksize == 0)
782                 blksize = osd_spa_maxblocksize(
783                         dmu_objset_spa(osd_obj2dev(obj)->od_os));
784
785         attr->la_blksize = blksize;
786         attr->la_blocks = blocks;
787         attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;
788
789 out:
790         up_read(&obj->oo_guard);
791         return rc;
792 }
793
794 /* Simple wrapper on top of the qsd API which implements quota transfer for
795  * osd setattr needs. As a reminder, only the root user can change ownership
796  * of a file, which is why EDQUOT & EINPROGRESS errors are discarded */
797 static inline int qsd_transfer(const struct lu_env *env,
798                                struct qsd_instance *qsd,
799                                struct lquota_trans *trans, int qtype,
800                                __u64 orig_id, __u64 new_id, __u64 bspace,
801                                struct lquota_id_info *qi)
802 {
803         int     rc;
804
805         if (unlikely(qsd == NULL))
806                 return 0;
807
808         LASSERT(qtype >= 0 && qtype < LL_MAXQUOTAS);
809         qi->lqi_type = qtype;
810
811         /* inode accounting */
812         qi->lqi_is_blk = false;
813
814         /* one more inode for the new owner ... */
815         qi->lqi_id.qid_uid = new_id;
816         qi->lqi_space      = 1;
817         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
818         if (rc == -EDQUOT || rc == -EINPROGRESS)
819                 rc = 0;
820         if (rc)
821                 return rc;
822
823         /* and one less inode for the current id */
824         qi->lqi_id.qid_uid = orig_id;
825         qi->lqi_space      = -1;
826         /* can't get EDQUOT when reducing usage */
827         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
828         if (rc == -EINPROGRESS)
829                 rc = 0;
830         if (rc)
831                 return rc;
832
833         /* block accounting */
834         qi->lqi_is_blk = true;
835
836         /* more blocks for the new owner ... */
837         qi->lqi_id.qid_uid = new_id;
838         qi->lqi_space      = bspace;
839         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
840         if (rc == -EDQUOT || rc == -EINPROGRESS)
841                 rc = 0;
842         if (rc)
843                 return rc;
844
845         /* and finally less blocks for the current owner */
846         qi->lqi_id.qid_uid = orig_id;
847         qi->lqi_space      = -bspace;
848         rc = qsd_op_begin(env, qsd, trans, qi, NULL);
849         /* can't get EDQUOT when reducing usage */
850         if (rc == -EINPROGRESS)
851                 rc = 0;
852         return rc;
853 }
854
855 static int osd_declare_attr_set(const struct lu_env *env,
856                                 struct dt_object *dt,
857                                 const struct lu_attr *attr,
858                                 struct thandle *handle)
859 {
860         struct osd_thread_info  *info = osd_oti_get(env);
861         struct osd_object       *obj = osd_dt_obj(dt);
862         struct osd_device       *osd = osd_obj2dev(obj);
863         dmu_tx_hold_t           *txh;
864         struct osd_thandle      *oh;
865         uint64_t                 bspace;
866         uint32_t                 blksize;
867         int                      rc = 0;
868         bool                     found;
869         ENTRY;
870
871
872         LASSERT(handle != NULL);
873         LASSERT(osd_invariant(obj));
874
875         oh = container_of0(handle, struct osd_thandle, ot_super);
876
877         down_read(&obj->oo_guard);
878         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
879                 GOTO(out, rc = 0);
880
881         LASSERT(obj->oo_sa_hdl != NULL);
882         LASSERT(oh->ot_tx != NULL);
883         /* regular attributes are part of the bonus buffer */
884         /* let's check whether this object is already part of
885          * the transaction */
886         found = false;
887         for (txh = list_head(&oh->ot_tx->tx_holds); txh;
888              txh = list_next(&oh->ot_tx->tx_holds, txh)) {
889                 if (txh->txh_dnode == NULL)
890                         continue;
891                 if (txh->txh_dnode->dn_object != obj->oo_dn->dn_object)
892                         continue;
893                 /* this object is part of the transaction already,
894                  * so we don't need to declare the bonus again */
895                 found = true;
896                 break;
897         }
898         if (!found)
899                 dmu_tx_hold_bonus(oh->ot_tx, obj->oo_dn->dn_object);
900         if (oh->ot_tx->tx_err != 0)
901                 GOTO(out, rc = -oh->ot_tx->tx_err);
902
903         if (attr && attr->la_valid & LA_FLAGS) {
904                 /* LMA is usually a part of bonus, no need to declare
905                  * anything else */
906         }
907
908         if (attr && (attr->la_valid & (LA_UID | LA_GID))) {
909                 sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
910                 bspace = toqb(bspace * blksize);
911         }
912
913         if (attr && attr->la_valid & LA_UID) {
914                 /* quota enforcement for user */
915                 if (attr->la_uid != obj->oo_attr.la_uid) {
916                         rc = qsd_transfer(env, osd->od_quota_slave,
917                                           &oh->ot_quota_trans, USRQUOTA,
918                                           obj->oo_attr.la_uid, attr->la_uid,
919                                           bspace, &info->oti_qi);
920                         if (rc)
921                                 GOTO(out, rc);
922                 }
923         }
924         if (attr && attr->la_valid & LA_GID) {
925                 /* quota enforcement for group */
926                 if (attr->la_gid != obj->oo_attr.la_gid) {
927                         rc = qsd_transfer(env, osd->od_quota_slave,
928                                           &oh->ot_quota_trans, GRPQUOTA,
929                                           obj->oo_attr.la_gid, attr->la_gid,
930                                           bspace, &info->oti_qi);
931                         if (rc)
932                                 GOTO(out, rc);
933                 }
934         }
935
936 out:
937         up_read(&obj->oo_guard);
938         RETURN(rc);
939 }
940
941 /*
942  * Set the attributes of an object
943  *
944  * The transaction passed to this routine must have
945  * dmu_tx_hold_bonus(tx, oid) called and then assigned
946  * to a transaction group.
947  */
948 static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
949                         const struct lu_attr *la, struct thandle *handle)
950 {
951         struct osd_thread_info  *info = osd_oti_get(env);
952         sa_bulk_attr_t          *bulk = osd_oti_get(env)->oti_attr_bulk;
953         struct osd_object       *obj = osd_dt_obj(dt);
954         struct osd_device       *osd = osd_obj2dev(obj);
955         struct osd_thandle      *oh;
956         struct osa_attr         *osa = &info->oti_osa;
957         __u64                    valid = la->la_valid;
958         int                      cnt;
959         int                      rc = 0;
960
961         ENTRY;
962
963         down_read(&obj->oo_guard);
964         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
965                 GOTO(out, rc = -ENOENT);
966
967         LASSERT(handle != NULL);
968         LASSERT(osd_invariant(obj));
969         LASSERT(obj->oo_sa_hdl);
970
971         oh = container_of0(handle, struct osd_thandle, ot_super);
972         /* Assert that the transaction has been assigned to a
973            transaction group. */
974         LASSERT(oh->ot_tx->tx_txg != 0);
975
976         /* Only allow set size for regular file */
977         if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
978                 valid &= ~(LA_SIZE | LA_BLOCKS);
979
980         if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
981                 valid &= ~LA_CTIME;
982
983         if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
984                 valid &= ~LA_MTIME;
985
986         if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
987                 valid &= ~LA_ATIME;
988
989         if (valid == 0)
990                 GOTO(out, rc = 0);
991
992         if (valid & LA_FLAGS) {
993                 struct lustre_mdt_attrs *lma;
994                 struct lu_buf buf;
995
996                 if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
997                         CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
998                         lma = (struct lustre_mdt_attrs *)&info->oti_buf;
999                         buf.lb_buf = lma;
1000                         buf.lb_len = sizeof(info->oti_buf);
1001                         rc = osd_xattr_get(env, &obj->oo_dt, &buf,
1002                                            XATTR_NAME_LMA);
1003                         if (rc > 0) {
1004                                 lma->lma_incompat =
1005                                         le32_to_cpu(lma->lma_incompat);
1006                                 lma->lma_incompat |=
1007                                         lustre_to_lma_flags(la->la_flags);
1008                                 lma->lma_incompat =
1009                                         cpu_to_le32(lma->lma_incompat);
1010                                 buf.lb_buf = lma;
1011                                 buf.lb_len = sizeof(*lma);
1012                                 rc = osd_xattr_set_internal(env, obj, &buf,
1013                                                             XATTR_NAME_LMA,
1014                                                             LU_XATTR_REPLACE,
1015                                                             oh);
1016                         }
1017                         if (rc < 0) {
1018                                 CWARN("%s: failed to set LMA flags: rc = %d\n",
1019                                        osd->od_svname, rc);
1020                                 RETURN(rc);
1021                         }
1022                 }
1023         }
1024
1025         write_lock(&obj->oo_attr_lock);
1026         cnt = 0;
1027         if (valid & LA_ATIME) {
1028                 osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
1029                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
1030                                  osa->atime, 16);
1031         }
1032         if (valid & LA_MTIME) {
1033                 osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
1034                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
1035                                  osa->mtime, 16);
1036         }
1037         if (valid & LA_CTIME) {
1038                 osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
1039                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
1040                                  osa->ctime, 16);
1041         }
1042         if (valid & LA_MODE) {
1043                 /* mode is stored along with type, so read it first */
1044                 obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
1045                         (la->la_mode & ~S_IFMT);
1046                 osa->mode = obj->oo_attr.la_mode;
1047                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
1048                                  &osa->mode, 8);
1049         }
1050         if (valid & LA_SIZE) {
1051                 osa->size = obj->oo_attr.la_size = la->la_size;
1052                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
1053                                  &osa->size, 8);
1054         }
1055         if (valid & LA_NLINK) {
1056                 osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
1057                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
1058                                  &osa->nlink, 8);
1059         }
1060         if (valid & LA_RDEV) {
1061                 osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
1062                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
1063                                  &osa->rdev, 8);
1064         }
1065         if (valid & LA_FLAGS) {
1066                 osa->flags = attrs_fs2zfs(la->la_flags);
1067                 /* many flags are not supported by zfs, so ensure a good cached
1068                  * copy */
1069                 obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
1070                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
1071                                  &osa->flags, 8);
1072         }
1073         if (valid & LA_UID) {
1074                 osa->uid = obj->oo_attr.la_uid = la->la_uid;
1075                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
1076                                  &osa->uid, 8);
1077         }
1078         if (valid & LA_GID) {
1079                 osa->gid = obj->oo_attr.la_gid = la->la_gid;
1080                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
1081                                  &osa->gid, 8);
1082         }
1083         obj->oo_attr.la_valid |= valid;
1084         write_unlock(&obj->oo_attr_lock);
1085
1086         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1087         rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
1088
1089 out:
1090         up_read(&obj->oo_guard);
1091         RETURN(rc);
1092 }
1093
1094 /*
1095  * Object creation.
1096  *
1097  * XXX temporary solution.
1098  */
1099
1100 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1101                         struct dt_object *parent, struct dt_object *child,
1102                         umode_t child_mode)
1103 {
1104         LASSERT(ah);
1105
1106         ah->dah_parent = parent;
1107         ah->dah_mode = child_mode;
1108
1109         if (parent != NULL && !dt_object_remote(parent)) {
1110                 /* will help to find FID->ino at dt_insert("..") */
1111                 struct osd_object *pobj = osd_dt_obj(parent);
1112
1113                 osd_idc_find_and_init(env, osd_obj2dev(pobj), pobj);
1114         }
1115 }
1116
1117 static int osd_declare_create(const struct lu_env *env, struct dt_object *dt,
1118                               struct lu_attr *attr,
1119                               struct dt_allocation_hint *hint,
1120                               struct dt_object_format *dof,
1121                               struct thandle *handle)
1122 {
1123         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1124         struct osd_object       *obj = osd_dt_obj(dt);
1125         struct osd_device       *osd = osd_obj2dev(obj);
1126         struct osd_thandle      *oh;
1127         uint64_t                 zapid;
1128         dnode_t                 *dn;
1129         int                      rc, dnode_size;
1130         ENTRY;
1131
1132         LASSERT(dof);
1133
1134         switch (dof->dof_type) {
1135                 case DFT_REGULAR:
1136                 case DFT_SYM:
1137                 case DFT_NODE:
1138                         if (obj->oo_dt.do_body_ops == NULL)
1139                                 obj->oo_dt.do_body_ops = &osd_body_ops;
1140                         break;
1141                 default:
1142                         break;
1143         }
1144
1145         LASSERT(handle != NULL);
1146         oh = container_of0(handle, struct osd_thandle, ot_super);
1147         LASSERT(oh->ot_tx != NULL);
1148
1149         /* this is the minimum set of EAs on every Lustre object */
1150         obj->oo_ea_in_bonus = ZFS_SA_BASE_ATTR_SIZE +
1151                                 sizeof(__u64) + /* VBR VERSION */
1152                                 sizeof(struct lustre_mdt_attrs); /* LMA */
1153         /* reserve 32 bytes for extra stuff like ACLs */
1154         dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);
1155
1156         switch (dof->dof_type) {
1157                 case DFT_DIR:
1158                         dt->do_index_ops = &osd_dir_ops;
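                        /* fall through: a directory is a ZAP object too, so
                         * it needs the same declarations as a plain index */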
1159                 case DFT_INDEX:
1160                         /* for zap create */
1161                         dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
1162                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1163                         break;
1164                 case DFT_REGULAR:
1165                 case DFT_SYM:
1166                 case DFT_NODE:
1167                         /* first, we'll create new object */
1168                         dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
1169                         break;
1170
1171                 default:
1172                         LBUG();
1173                         break;
1174         }
1175
1176         /* and we'll add it to some mapping */
1177         zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0, &dn);
1178         osd_tx_hold_zap(oh->ot_tx, zapid, dn, TRUE, NULL);
1179
1180         /* will help to find FID->ino mapping at dt_insert() */
1181         osd_idc_find_and_init(env, osd, obj);
1182
1183         rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
1184                                false, NULL, false);
1185
1186         RETURN(rc);
1187 }
1188
1189 int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
1190                     sa_handle_t *sa_hdl, dmu_tx_t *tx,
1191                     struct lu_attr *la, uint64_t parent,
1192                     nvlist_t *xattr)
1193 {
1194         sa_bulk_attr_t  *bulk = osd_oti_get(env)->oti_attr_bulk;
1195         struct osa_attr *osa = &osd_oti_get(env)->oti_osa;
1196         uint64_t         gen;
1197         uint64_t         crtime[2];
1198         timestruc_t      now;
1199         int              cnt;
1200         int              rc;
1201         char *dxattr = NULL;
1202         size_t sa_size;
1203
1204
1205         LASSERT(sa_hdl);
1206
1207         gen = dmu_tx_get_txg(tx);
1208         gethrestime(&now);
1209         ZFS_TIME_ENCODE(&now, crtime);
1210
1211         osa->atime[0] = la->la_atime;
1212         osa->ctime[0] = la->la_ctime;
1213         osa->mtime[0] = la->la_mtime;
1214         osa->mode = la->la_mode;
1215         osa->uid = la->la_uid;
1216         osa->gid = la->la_gid;
1217         osa->rdev = la->la_rdev;
1218         osa->nlink = la->la_nlink;
1219         osa->flags = attrs_fs2zfs(la->la_flags);
1220         osa->size  = la->la_size;
1221
1222         /*
1223          * we need to create all SA below upon object create.
1224          *
1225          * XXX The attribute order matters since the accounting callback relies
1226          * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
1227          * look up the UID/GID attributes. Moreover, the callback does not seem
1228          * to support the spill block.
1229          * We define attributes in the same order as SA_*_OFFSET in order to
1230          * work around the problem. See ORI-610.
1231          */
1232         cnt = 0;
1233         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
1234         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
1235         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
1236         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
1237         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
1238         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
1239         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
1240         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
1241         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
1242         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
1243         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
1244         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
1245         SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
1246         LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
1247
1248         if (xattr) {
1249                 rc = -nvlist_size(xattr, &sa_size, NV_ENCODE_XDR);
1250                 LASSERT(rc == 0);
1251
1252                 dxattr = osd_zio_buf_alloc(sa_size);
1253                 LASSERT(dxattr);
1254
1255                 rc = -nvlist_pack(xattr, &dxattr, &sa_size,
1256                                 NV_ENCODE_XDR, KM_SLEEP);
1257                 LASSERT(rc == 0);
1258
1259                 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_DXATTR(osd),
1260                                 NULL, dxattr, sa_size);
1261         }
1262
1263         rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);
1264         if (dxattr)
1265                 osd_zio_buf_free(dxattr, sa_size);
1266
1267         return rc;
1268 }
1269
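/*
 * Return the dnode for @oid in @dnp, preferring a dnode_t already referenced
 * by one of the transaction's holds; otherwise fall back to a regular bonus
 * hold via __osd_obj2dnode().
 */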
1270 static int osd_find_new_dnode(const struct lu_env *env, dmu_tx_t *tx,
1271                               uint64_t oid, dnode_t **dnp)
1272 {
1273         dmu_tx_hold_t *txh;
1274         int rc = 0;
1275
1276         /* take dnode_t from tx to save on dnode#->dnode_t lookup */
1277         for (txh = list_tail(&tx->tx_holds); txh;
1278              txh = list_prev(&tx->tx_holds, txh)) {
1279                 dnode_t *dn = txh->txh_dnode;
1280                 dmu_buf_impl_t *db;
1281
1282                 if (dn == NULL)
1283                         continue;
1284                 if (dn->dn_object != oid)
1285                         continue;
1286                 db = dn->dn_bonus;
1287                 if (db == NULL) {
1288                         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1289                         if (dn->dn_bonus == NULL)
1290                                 dbuf_create_bonus(dn);
1291                         rw_exit(&dn->dn_struct_rwlock);
1292                 }
1293                 db = dn->dn_bonus;
1294                 LASSERT(db);
1295                 LASSERT(dn->dn_handle);
1296                 DB_DNODE_ENTER(db);
1297                 if (refcount_add(&db->db_holds, osd_obj_tag) == 1) {
1298                         refcount_add(&dn->dn_holds, osd_obj_tag);
1299                         atomic_inc_32(&dn->dn_dbufs_count);
1300                 }
1301                 *dnp = dn;
1302                 dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
1303                 break;
1304         }
1305
1306         if (unlikely(*dnp == NULL))
1307                 rc = __osd_obj2dnode(tx->tx_objset, oid, dnp);
1308
1309         return rc;
1310 }
1311
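/*
 * Pick a dnode size large enough to hold the EAs expected in the bonus area
 * (oo_ea_in_bonus plus ~32 bytes of slack): with ZFS_DNSIZE_AUTO the size is
 * doubled from DNODE_MIN_SIZE until the bonus fits, capped at DNODE_MAX_SIZE;
 * otherwise the administrator-configured dnode size is used.
 */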
1312 #ifdef HAVE_DMU_OBJECT_ALLOC_DNSIZE
1313 static int osd_find_dnsize(struct osd_object *obj)
1314 {
1315         struct osd_device *osd = osd_obj2dev(obj);
1316         int dnsize;
1317
1318         if (osd->od_dnsize == ZFS_DNSIZE_AUTO) {
1319                 dnsize = DNODE_MIN_SIZE;
1320                 do {
1321                         if (DN_BONUS_SIZE(dnsize) >= obj->oo_ea_in_bonus + 32)
1322                                 break;
1323                         dnsize <<= 1;
1324                 } while (dnsize < DNODE_MAX_SIZE);
1325                 if (dnsize > DNODE_MAX_SIZE)
1326                         dnsize = DNODE_MAX_SIZE;
1327         } else if (osd->od_dnsize == ZFS_DNSIZE_1K) {
1328                 dnsize = 1024;
1329         } else if (osd->od_dnsize == ZFS_DNSIZE_2K) {
1330                 dnsize = 2048;
1331         } else if (osd->od_dnsize == ZFS_DNSIZE_4K) {
1332                 dnsize = 4096;
1333         } else if (osd->od_dnsize == ZFS_DNSIZE_8K) {
1334                 dnsize = 8192;
1335         } else if (osd->od_dnsize == ZFS_DNSIZE_16K) {
1336                 dnsize = 16384;
1337         } else {
1338                 dnsize = DNODE_MIN_SIZE;
1339         }
1340         return dnsize;
1341 }
1342 #else
1343 static int inline osd_find_dnsize(struct osd_object *obj)
1344 {
1345         return DN_MAX_BONUSLEN;
1346 }
1347 #endif
1348
1349 /*
1350  * The transaction passed to this routine must have
1351  * dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT) called and then assigned
1352  * to a transaction group.
1353  */
1354 int __osd_object_create(const struct lu_env *env, struct osd_object *obj,
1355                         dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la)
1356 {
1357         struct osd_device   *osd = osd_obj2dev(obj);
1358         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1359         dmu_object_type_t    type = DMU_OT_PLAIN_FILE_CONTENTS;
1360         uint64_t oid;
1361
1362         /* Use DMU_OTN_UINT8_METADATA for local objects so their data blocks
1363          * would get an additional ditto copy */
1364         if (unlikely(S_ISREG(la->la_mode) &&
1365                      fid_seq_is_local_file(fid_seq(fid))))
1366                 type = DMU_OTN_UINT8_METADATA;
1367
1368         /* Create a new DMU object using the dnode size chosen by osd_find_dnsize(). */
1369         oid = osd_dmu_object_alloc(osd->od_os, type, 0,
1370                                    osd_find_dnsize(obj), tx);
1371
1372         LASSERT(la->la_valid & LA_MODE);
1373         la->la_size = 0;
1374         la->la_nlink = 1;
1375
1376         return osd_find_new_dnode(env, tx, oid, dnp);
1377 }
1378
1379 /*
1380  * The transaction passed to this routine must have
1381  * dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, ...) called and then assigned
1382  * to a transaction group.
1383  *
1384  * Using ZAP_FLAG_HASH64 will force the ZAP to always be a FAT ZAP.
1385  * This is fine for directories today, because storing the FID in the dirent
1386  * will also require a FAT ZAP.  If there is a new type of micro ZAP created
1387  * then we might need to re-evaluate the use of this flag and instead do
1388  * a conversion from the different internal ZAP hash formats being used. */
1389 int __osd_zap_create(const struct lu_env *env, struct osd_device *osd,
1390                      dnode_t **dnp, dmu_tx_t *tx, struct lu_attr *la,
1391                      unsigned dnsize, zap_flags_t flags)
1392 {
1393         uint64_t oid;
1394
1395         /* Assert that the transaction has been assigned to a
1396            transaction group. */
1397         LASSERT(tx->tx_txg != 0);
1398         *dnp = NULL;
1399
1400         oid = osd_zap_create_flags(osd->od_os, 0, flags | ZAP_FLAG_HASH64,
1401                                    DMU_OT_DIRECTORY_CONTENTS,
1402                                    14, /* == ZFS fzap_default_blockshift */
1403                                    DN_MAX_INDBLKSHIFT, /* indirect blockshift */
1404                                    dnsize, tx);
1405
1406         la->la_size = 2;
1407         la->la_nlink = 1;
1408
1409         return osd_find_new_dnode(env, tx, oid, dnp);
1410 }
1411
1412 static dnode_t *osd_mkidx(const struct lu_env *env, struct osd_object *obj,
1413                           struct lu_attr *la, struct osd_thandle *oh)
1414 {
1415         dnode_t *dn;
1416         int rc;
1417
1418         /* Index files should be created as regular files in order not to
1419          * confuse ZPL, which could interpret them as directories.
1420          * We set ZAP_FLAG_UINT64_KEY to let ZFS know that we are going to
1421          * use binary keys */
1422         LASSERT(S_ISREG(la->la_mode));
1423         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la,
1424                               osd_find_dnsize(obj), ZAP_FLAG_UINT64_KEY);
1425         if (rc)
1426                 return ERR_PTR(rc);
1427         return dn;
1428 }
1429
1430 static dnode_t *osd_mkdir(const struct lu_env *env, struct osd_object *obj,
1431                           struct lu_attr *la, struct osd_thandle *oh)
1432 {
1433         dnode_t *dn;
1434         int rc;
1435
1436         LASSERT(S_ISDIR(la->la_mode));
1437         rc = __osd_zap_create(env, osd_obj2dev(obj), &dn, oh->ot_tx, la,
1438                               osd_find_dnsize(obj), 0);
1439         if (rc)
1440                 return ERR_PTR(rc);
1441         return dn;
1442 }
1443
1444 static dnode_t *osd_mkreg(const struct lu_env *env, struct osd_object *obj,
1445                           struct lu_attr *la, struct osd_thandle *oh)
1446 {
1447         const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
1448         struct osd_device *osd = osd_obj2dev(obj);
1449         dnode_t *dn;
1450         int rc;
1451
1452         LASSERT(S_ISREG(la->la_mode));
1453         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1454         if (rc)
1455                 return ERR_PTR(rc);
1456
1457         if ((fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid)) &&
1458             osd->od_is_ost) {
1459                 /* The minimum block size must be at least PAGE_SIZE, otherwise
1460                  * it breaks the assumption in tgt_thread_big_cache that the
1461                  * array size is PTLRPC_MAX_BRW_PAGES. It would also affect
1462                  * RDMA due to sub-page transfer sizes. */
1463                 rc = -dmu_object_set_blocksize(osd->od_os, dn->dn_object,
1464                                                PAGE_SIZE, 0, oh->ot_tx);
1465                 if (unlikely(rc)) {
1466                         CERROR("%s: can't change blocksize: %d\n",
1467                                osd->od_svname, rc);
1468                         return ERR_PTR(rc);
1469                 }
1470         }
1471
1472         return dn;
1473 }
1474
1475 static dnode_t *osd_mksym(const struct lu_env *env, struct osd_object *obj,
1476                           struct lu_attr *la, struct osd_thandle *oh)
1477 {
1478         dnode_t *dn;
1479         int rc;
1480
1481         LASSERT(S_ISLNK(la->la_mode));
1482         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1483         if (rc)
1484                 return ERR_PTR(rc);
1485         return dn;
1486 }
1487
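/*
 * Special files (char/block devices, FIFOs, sockets) are created as plain
 * dnodes; for device nodes LA_RDEV is added below so that the device
 * number is stored together with the other attributes.
 */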
1488 static dnode_t *osd_mknod(const struct lu_env *env, struct osd_object *obj,
1489                           struct lu_attr *la, struct osd_thandle *oh)
1490 {
1491         dnode_t *dn;
1492         int rc;
1493
1494         if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode))
1495                 la->la_valid |= LA_RDEV;
1496
1497         rc = __osd_object_create(env, obj, &dn, oh->ot_tx, la);
1498         if (rc)
1499                 return ERR_PTR(rc);
1500         return dn;
1501 }
1502
1503 typedef dnode_t *(*osd_obj_type_f)(const struct lu_env *env,
1504                                    struct osd_object *obj,
1505                                    struct lu_attr *la,
1506                                    struct osd_thandle *oh);
1507
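/*
 * Map the requested dt_object_format type to the helper that allocates
 * the matching ZFS object.  osd_create() invokes the result directly:
 *
 *	dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
 */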
1508 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1509 {
1510         osd_obj_type_f result;
1511
1512         switch (type) {
1513         case DFT_DIR:
1514                 result = osd_mkdir;
1515                 break;
1516         case DFT_INDEX:
1517                 result = osd_mkidx;
1518                 break;
1519         case DFT_REGULAR:
1520                 result = osd_mkreg;
1521                 break;
1522         case DFT_SYM:
1523                 result = osd_mksym;
1524                 break;
1525         case DFT_NODE:
1526                 result = osd_mknod;
1527                 break;
1528         default:
1529                 LBUG();
1530                 break;
1531         }
1532         return result;
1533 }
1534
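/*
 * Rough outline of osd_create() below (illustrative summary of the calls
 * made under oo_guard, not a normative specification):
 *
 *	dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
 *	zapid = osd_get_name_n_idx(env, osd, fid, buf,
 *				   sizeof(info->oti_str), &zdn);
 *	rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
 *	rc = osd_sa_handle_get(obj);
 *	... allocate oo_sa_xattr and add XATTR_NAME_LMA ...
 *	rc = __osd_sa_xattr_schedule_update(env, obj, oh);
 */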
1535 /*
1536  * Concurrency: @dt is write locked.
1537  */
1538 static int osd_create(const struct lu_env *env, struct dt_object *dt,
1539                       struct lu_attr *attr, struct dt_allocation_hint *hint,
1540                       struct dt_object_format *dof, struct thandle *th)
1541 {
1542         struct osd_thread_info  *info = osd_oti_get(env);
1543         struct lustre_mdt_attrs *lma = &info->oti_mdt_attrs;
1544         struct zpl_direntry     *zde = &info->oti_zde.lzd_reg;
1545         const struct lu_fid     *fid = lu_object_fid(&dt->do_lu);
1546         struct osd_object       *obj = osd_dt_obj(dt);
1547         struct osd_device       *osd = osd_obj2dev(obj);
1548         char                    *buf = info->oti_str;
1549         struct osd_thandle      *oh;
1550         dnode_t *dn = NULL, *zdn = NULL;
1551         uint64_t                 zapid, parent = 0;
1552         int                      rc;
1553
1554         ENTRY;
1555
1556         LASSERT(!fid_is_acct(fid));
1557
1558         /* Concurrent create declarations should not see the object in an
1559          * inconsistent state (db, attr, etc.).
1560          * In the common case acquiring the lock should be cheap. */
1561         down_write(&obj->oo_guard);
1562
1563         if (unlikely(dt_object_exists(dt)))
1564                 GOTO(out, rc = -EEXIST);
1565
1566         LASSERT(osd_invariant(obj));
1567         LASSERT(dof != NULL);
1568
1569         LASSERT(th != NULL);
1570         oh = container_of0(th, struct osd_thandle, ot_super);
1571
1572         LASSERT(obj->oo_dn == NULL);
1573
1574         /* to follow the ZFS on-disk format we need to initialize
1575          * the parent dnode properly */
1576         if (hint != NULL && hint->dah_parent != NULL &&
1577             !dt_object_remote(hint->dah_parent))
1578                 parent = osd_dt_obj(hint->dah_parent)->oo_dn->dn_object;
1579
1580         /* we may fix up some attributes, so do not modify the caller's copy */
1581         obj->oo_attr = *attr;
1582         obj->oo_attr.la_valid |= LA_SIZE | LA_NLINK | LA_TYPE;
1583
1584         dn = osd_create_type_f(dof->dof_type)(env, obj, &obj->oo_attr, oh);
1585         if (IS_ERR(dn)) {
1586                 rc = PTR_ERR(dn);
1587                 dn = NULL;
1588                 GOTO(out, rc);
1589         }
1590
1591         zde->zde_pad = 0;
1592         zde->zde_dnode = dn->dn_object;
1593         zde->zde_type = IFTODT(attr->la_mode & S_IFMT);
1594
1595         zapid = osd_get_name_n_idx(env, osd, fid, buf,
1596                                    sizeof(info->oti_str), &zdn);
1597         rc = osd_zap_add(osd, zapid, zdn, buf, 8, 1, zde, oh->ot_tx);
1598         if (rc)
1599                 GOTO(out, rc);
1600         obj->oo_dn = dn;
1601         /* Now add in all of the "SA" attributes */
1602         rc = osd_sa_handle_get(obj);
1603         if (rc)
1604                 GOTO(out, rc);
1605
1606         rc = -nvlist_alloc(&obj->oo_sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
1607         if (rc)
1608                 GOTO(out, rc);
1609
1610         /* initialize LMA */
1611         lustre_lma_init(lma, fid, 0, 0);
1612         lustre_lma_swab(lma);
1613         rc = -nvlist_add_byte_array(obj->oo_sa_xattr, XATTR_NAME_LMA,
1614                                     (uchar_t *)lma, sizeof(*lma));
1615         if (rc)
1616                 GOTO(out, rc);
1617
1618         /* configure new osd object */
1619         obj->oo_parent = parent != 0 ? parent : zapid;
1620         obj->oo_late_attr_set = 1;
1621         rc = __osd_sa_xattr_schedule_update(env, obj, oh);
1622         if (rc)
1623                 GOTO(out, rc);
1624
1625         /* XXX: oo_lma_flags */
1626         obj->oo_dt.do_lu.lo_header->loh_attr |= obj->oo_attr.la_mode & S_IFMT;
1627         if (likely(!fid_is_acct(lu_object_fid(&obj->oo_dt.do_lu))))
1628                 /* no body operations for accounting objects */
1629                 obj->oo_dt.do_body_ops = &osd_body_ops;
1630
1631         osd_idc_find_and_init(env, osd, obj);
1632
1633 out:
1634         if (unlikely(rc && dn)) {
1635                 dmu_object_free(osd->od_os, dn->dn_object, oh->ot_tx);
1636                 osd_dnode_rele(dn);
1637                 obj->oo_dn = NULL;
1638         } else if (!rc) {
1639                 obj->oo_dt.do_lu.lo_header->loh_attr |= LOHA_EXISTS;
1640         }
1641         up_write(&obj->oo_guard);
1642         RETURN(rc);
1643 }
1644
1645 static int osd_declare_ref_add(const struct lu_env *env, struct dt_object *dt,
1646                                struct thandle *th)
1647 {
1648         return osd_declare_attr_set(env, dt, NULL, th);
1649 }
1650
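/*
 * ref_add/ref_del update the cached la_nlink under oo_attr_lock and then
 * push the new value to the on-disk SA_ZPL_LINKS attribute through the
 * transaction's SA update list (osd_object_sa_update()).
 */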
1651 /*
1652  * Concurrency: @dt is write locked.
1653  */
1654 static int osd_ref_add(const struct lu_env *env, struct dt_object *dt,
1655                        struct thandle *handle)
1656 {
1657         struct osd_object       *obj = osd_dt_obj(dt);
1658         struct osd_thandle      *oh;
1659         struct osd_device       *osd = osd_obj2dev(obj);
1660         uint64_t                 nlink;
1661         int rc;
1662
1663         ENTRY;
1664
1665         down_read(&obj->oo_guard);
1666         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1667                 GOTO(out, rc = -ENOENT);
1668
1669         LASSERT(osd_invariant(obj));
1670         LASSERT(obj->oo_sa_hdl != NULL);
1671
1672         oh = container_of0(handle, struct osd_thandle, ot_super);
1673
1674         write_lock(&obj->oo_attr_lock);
1675         nlink = ++obj->oo_attr.la_nlink;
1676         write_unlock(&obj->oo_attr_lock);
1677
1678         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1679
1680 out:
1681         up_read(&obj->oo_guard);
1682         RETURN(rc);
1683 }
1684
1685 static int osd_declare_ref_del(const struct lu_env *env, struct dt_object *dt,
1686                                struct thandle *handle)
1687 {
1688         return osd_declare_attr_set(env, dt, NULL, handle);
1689 }
1690
1691 /*
1692  * Concurrency: @dt is write locked.
1693  */
1694 static int osd_ref_del(const struct lu_env *env, struct dt_object *dt,
1695                        struct thandle *handle)
1696 {
1697         struct osd_object       *obj = osd_dt_obj(dt);
1698         struct osd_thandle      *oh;
1699         struct osd_device       *osd = osd_obj2dev(obj);
1700         uint64_t                 nlink;
1701         int                      rc;
1702
1703         ENTRY;
1704
1705         down_read(&obj->oo_guard);
1706
1707         if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
1708                 GOTO(out, rc = -ENOENT);
1709
1710         LASSERT(osd_invariant(obj));
1711         LASSERT(obj->oo_sa_hdl != NULL);
1712
1713         oh = container_of0(handle, struct osd_thandle, ot_super);
1714         LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
1715
1716         write_lock(&obj->oo_attr_lock);
1717         nlink = --obj->oo_attr.la_nlink;
1718         write_unlock(&obj->oo_attr_lock);
1719
1720         rc = osd_object_sa_update(obj, SA_ZPL_LINKS(osd), &nlink, 8, oh);
1721
1722 out:
1723         up_read(&obj->oo_guard);
1724         RETURN(rc);
1725 }
1726
1727 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt,
1728                            __u64 start, __u64 end)
1729 {
1730         struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
1731         ENTRY;
1732
1733         /* XXX: no option other than syncing the whole filesystem until we
1734          * support the ZIL.  If the object tracked the txg it was last
1735          * modified in, it could pass that txg here instead of "0".  Maybe
1736          * the changes are already committed and no wait is needed at all? */
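        /*
         * A possible refinement, sketched only (it assumes a hypothetical
         * oo_last_txg field recording the last modifying txg, which
         * osd_object does not have today):
         *
         *	txg_wait_synced(dmu_objset_pool(osd->od_os),
         *			osd_dt_obj(dt)->oo_last_txg);
         */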
1737         if (!osd->od_dt_dev.dd_rdonly)
1738                 txg_wait_synced(dmu_objset_pool(osd->od_os), 0ULL);
1739
1740         RETURN(0);
1741 }
1742
1743 static int osd_invalidate(const struct lu_env *env, struct dt_object *dt)
1744 {
1745         return 0;
1746 }
1747
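/*
 * dt_object_operations exported for regular OSD objects; the otable
 * iterator below uses a reduced set (see osd_obj_otable_it_ops).
 */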
1748 static struct dt_object_operations osd_obj_ops = {
1749         .do_read_lock           = osd_read_lock,
1750         .do_write_lock          = osd_write_lock,
1751         .do_read_unlock         = osd_read_unlock,
1752         .do_write_unlock        = osd_write_unlock,
1753         .do_write_locked        = osd_write_locked,
1754         .do_attr_get            = osd_attr_get,
1755         .do_declare_attr_set    = osd_declare_attr_set,
1756         .do_attr_set            = osd_attr_set,
1757         .do_ah_init             = osd_ah_init,
1758         .do_declare_create      = osd_declare_create,
1759         .do_create              = osd_create,
1760         .do_declare_destroy     = osd_declare_destroy,
1761         .do_destroy             = osd_destroy,
1762         .do_index_try           = osd_index_try,
1763         .do_declare_ref_add     = osd_declare_ref_add,
1764         .do_ref_add             = osd_ref_add,
1765         .do_declare_ref_del     = osd_declare_ref_del,
1766         .do_ref_del             = osd_ref_del,
1767         .do_xattr_get           = osd_xattr_get,
1768         .do_declare_xattr_set   = osd_declare_xattr_set,
1769         .do_xattr_set           = osd_xattr_set,
1770         .do_declare_xattr_del   = osd_declare_xattr_del,
1771         .do_xattr_del           = osd_xattr_del,
1772         .do_xattr_list          = osd_xattr_list,
1773         .do_object_sync         = osd_object_sync,
1774         .do_invalidate          = osd_invalidate,
1775 };
1776
1777 static struct lu_object_operations osd_lu_obj_ops = {
1778         .loo_object_init        = osd_object_init,
1779         .loo_object_delete      = osd_object_delete,
1780         .loo_object_release     = osd_object_release,
1781         .loo_object_free        = osd_object_free,
1782         .loo_object_print       = osd_object_print,
1783         .loo_object_invariant   = osd_object_invariant,
1784 };
1785
1786 static int osd_otable_it_attr_get(const struct lu_env *env,
1787                                 struct dt_object *dt,
1788                                 struct lu_attr *attr)
1789 {
1790         attr->la_valid = 0;
1791         return 0;
1792 }
1793
1794 static struct dt_object_operations osd_obj_otable_it_ops = {
1795         .do_attr_get            = osd_otable_it_attr_get,
1796         .do_index_try           = osd_index_try,
1797 };