1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011 Whamcloud, Inc.
36 * Copyright (c) 2011 Whamcloud, Inc.
39 * This file is part of Lustre, http://www.lustre.org/
40 * Lustre is a trademark of Sun Microsystems, Inc.
42 * lustre/osd/osd_handler.c
44 * Top-level entry points into osd module
46 * Author: Nikita Danilov <nikita@clusterfs.com>
47 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
51 # define EXPORT_SYMTAB
53 #define DEBUG_SUBSYSTEM S_MDS
55 #include <linux/module.h>
57 /* LUSTRE_VERSION_CODE */
58 #include <lustre_ver.h>
59 /* prerequisite for linux/xattr.h */
60 #include <linux/types.h>
61 /* prerequisite for linux/xattr.h */
63 /* XATTR_{REPLACE,CREATE} */
64 #include <linux/xattr.h>
69 * struct OBD_{ALLOC,FREE}*()
72 #include <obd_support.h>
73 /* struct ptlrpc_thread */
74 #include <lustre_net.h>
77 #include <lustre_fid.h>
79 #include "osd_internal.h"
82 /* llo_* api support */
83 #include <md_object.h>
85 #ifdef HAVE_LDISKFS_PDO
87 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
88 "ldiskfs with parallel directory operations");
93 static const char dot[] = ".";
94 static const char dotdot[] = "..";
95 static const char remote_obj_dir[] = "REM_OBJ_DIR";
97 struct osd_directory {
98 struct iam_container od_container;
99 struct iam_descr od_descr;
103 struct dt_object oo_dt;
105 * Inode for file system object represented by this osd_object. This
106 * inode is pinned for the whole duration of lu_object life.
108 * Not modified concurrently (either setup early during object
109 * creation, or assigned by osd_object_create() under write lock).
111 struct inode *oo_inode;
113 * to protect index ops.
115 struct htree_lock_head *oo_hl_head;
116 cfs_rw_semaphore_t oo_ext_idx_sem;
117 cfs_rw_semaphore_t oo_sem;
118 struct osd_directory *oo_dir;
119 /** protects inode attributes. */
120 cfs_spinlock_t oo_guard;
122 * Following two members are used to indicate the presence of dot and
123 * dotdot in the given directory. This is required for interop mode
126 int oo_compat_dot_created;
127 int oo_compat_dotdot_created;
129 const struct lu_env *oo_owner;
130 #ifdef CONFIG_LOCKDEP
131 struct lockdep_map oo_dep_map;
135 static const struct lu_object_operations osd_lu_obj_ops;
136 static const struct lu_device_operations osd_lu_ops;
137 static struct lu_context_key osd_key;
138 static const struct dt_object_operations osd_obj_ops;
139 static const struct dt_object_operations osd_obj_ea_ops;
140 static const struct dt_body_operations osd_body_ops;
141 static const struct dt_body_operations osd_body_ops_new;
142 static const struct dt_index_operations osd_index_iam_ops;
143 static const struct dt_index_operations osd_index_ea_ops;
145 #define OSD_TRACK_DECLARES
146 #ifdef OSD_TRACK_DECLARES
147 #define OSD_DECLARE_OP(oh, op) { \
148 LASSERT(oh->ot_handle == NULL); \
149 ((oh)->ot_declare_ ##op)++; }
150 #define OSD_EXEC_OP(handle, op) { \
151 struct osd_thandle *oh; \
152 oh = container_of0(handle, struct osd_thandle, ot_super);\
153 if (((oh)->ot_declare_ ##op) > 0) { \
154 ((oh)->ot_declare_ ##op)--; \
158 #define OSD_DECLARE_OP(oh, op)
159 #define OSD_EXEC_OP(oh, op)
162 /* There are at most 10 uid/gids are affected in a transaction, and
163 * that's rename case:
164 * - 2 for source parent uid & gid;
165 * - 2 for source child uid & gid ('..' entry update when the child
167 * - 2 for target parent uid & gid;
168 * - 2 for target child uid & gid (if the target child exists);
169 * - 2 for root uid & gid (last_rcvd, llog, etc);
171 * The 0 to (OSD_MAX_UGID_CNT - 1) bits of ot_id_type is for indicating
172 * the id type of each id in the ot_id_array.
174 #define OSD_MAX_UGID_CNT 10
177 struct thandle ot_super;
179 struct journal_callback ot_jcb;
180 cfs_list_t ot_dcb_list;
181 /* Link to the device, for debugging. */
182 struct lu_ref_link *ot_dev_link;
183 unsigned short ot_credits;
184 unsigned short ot_id_cnt;
185 unsigned short ot_id_type;
186 uid_t ot_id_array[OSD_MAX_UGID_CNT];
188 #ifdef OSD_TRACK_DECLARES
189 unsigned char ot_declare_attr_set;
190 unsigned char ot_declare_punch;
191 unsigned char ot_declare_xattr_set;
192 unsigned char ot_declare_create;
193 unsigned char ot_declare_destroy;
194 unsigned char ot_declare_ref_add;
195 unsigned char ot_declare_ref_del;
196 unsigned char ot_declare_write;
197 unsigned char ot_declare_insert;
198 unsigned char ot_declare_delete;
201 #if OSD_THANDLE_STATS
202 /** time when this handle was allocated */
203 cfs_time_t oth_alloced;
205 /** time when this thanle was started */
206 cfs_time_t oth_started;
211 * Basic transaction credit op
221 DTO_LOG_REC, /**< XXX temporary: dt layer knows nothing about llog. */
232 static int lu_device_is_osd(const struct lu_device *d)
234 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &osd_lu_ops);
237 static struct osd_device *osd_dt_dev(const struct dt_device *d)
239 LASSERT(lu_device_is_osd(&d->dd_lu_dev));
240 return container_of0(d, struct osd_device, od_dt_dev);
243 static struct osd_device *osd_dev(const struct lu_device *d)
245 LASSERT(lu_device_is_osd(d));
246 return osd_dt_dev(container_of0(d, struct dt_device, dd_lu_dev));
249 static struct osd_device *osd_obj2dev(const struct osd_object *o)
251 return osd_dev(o->oo_dt.do_lu.lo_dev);
254 static struct super_block *osd_sb(const struct osd_device *dev)
256 return dev->od_mount->lmi_mnt->mnt_sb;
259 static int osd_object_is_root(const struct osd_object *obj)
261 return osd_sb(osd_obj2dev(obj))->s_root->d_inode == obj->oo_inode;
264 static struct osd_object *osd_obj(const struct lu_object *o)
266 LASSERT(lu_device_is_osd(o->lo_dev));
267 return container_of0(o, struct osd_object, oo_dt.do_lu);
270 static struct osd_object *osd_dt_obj(const struct dt_object *d)
272 return osd_obj(&d->do_lu);
275 static struct lu_device *osd2lu_dev(struct osd_device *osd)
277 return &osd->od_dt_dev.dd_lu_dev;
280 static journal_t *osd_journal(const struct osd_device *dev)
282 return LDISKFS_SB(osd_sb(dev))->s_journal;
285 static int osd_has_index(const struct osd_object *obj)
287 return obj->oo_dt.do_index_ops != NULL;
/* lu-level invariant hook: delegate to the osd_object invariant. */
static int osd_object_invariant(const struct lu_object *l)
{
        return osd_invariant(osd_obj(l));
}
295 #ifdef HAVE_QUOTA_SUPPORT
/*
 * Switch current fs credentials (fsuid/fsgid/capabilities) to those in
 * the env's md_ucred, saving the previous values in *save so that
 * osd_pop_ctxt() can restore them.
 * NOTE(review): listing incomplete here (return type, 'tc' declaration
 * and the commit of the prepared creds are not visible) -- confirm
 * against the full source before relying on details.
 */
297 osd_push_ctxt(const struct lu_env *env, struct osd_ctxt *save)
299 struct md_ucred *uc = md_ucred(env);
/* save current fs identity for later restore */
304 save->oc_uid = current_fsuid();
305 save->oc_gid = current_fsgid();
306 save->oc_cap = current_cap();
/* install the caller-supplied identity */
307 if ((tc = prepare_creds())) {
308 tc->fsuid = uc->mu_fsuid;
309 tc->fsgid = uc->mu_fsgid;
312 /* XXX not suboptimal */
313 cfs_curproc_cap_unpack(uc->mu_cap);
/*
 * Restore the fs credentials previously saved by osd_push_ctxt().
 * NOTE(review): listing incomplete (return type, 'tc' declaration and
 * creds commit not visible) -- confirm against the full source.
 */
317 osd_pop_ctxt(struct osd_ctxt *save)
321 if ((tc = prepare_creds())) {
322 tc->fsuid = save->oc_uid;
323 tc->fsgid = save->oc_gid;
324 tc->cap_effective = save->oc_cap;
330 static inline struct osd_thread_info *osd_oti_get(const struct lu_env *env)
332 return lu_context_key_get(&env->le_ctx, &osd_key);
336 * Concurrency: doesn't matter
338 static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
340 return osd_oti_get(env)->oti_r_locks > 0;
344 * Concurrency: doesn't matter
346 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
348 struct osd_thread_info *oti = osd_oti_get(env);
349 return oti->oti_w_locks > 0 && o->oo_owner == env;
353 * Concurrency: doesn't access mutable data
355 static int osd_root_get(const struct lu_env *env,
356 struct dt_device *dev, struct lu_fid *f)
360 inode = osd_sb(osd_dt_dev(dev))->s_root->d_inode;
361 LU_IGIF_BUILD(f, inode->i_ino, inode->i_generation);
365 static inline int osd_qid_type(struct osd_thandle *oh, int i)
367 return (oh->ot_id_type & (1 << i)) ? GRPQUOTA : USRQUOTA;
370 static inline void osd_qid_set_type(struct osd_thandle *oh, int i, int type)
372 oh->ot_id_type |= ((type == GRPQUOTA) ? (1 << i) : 0);
/*
 * Reserve journal credits for a quota update of (type, id) in this
 * transaction.  Each distinct (id, type) pair is accounted once in
 * oh->ot_id_array; an id whose quota entry is already allocated (or
 * root, id == 0) costs 1 credit, otherwise LDISKFS_QUOTA_INIT_BLOCKS.
 * NOTE(review): listing incomplete (early returns / ENTRY-EXIT and the
 * loop body braces are not visible) -- kept byte-identical.
 */
375 static void osd_declare_qid(struct dt_object *dt, struct osd_thandle *oh,
376 int type, uid_t id, struct inode *inode)
379 int i, allocated = 0;
380 struct osd_object *obj;
384 LASSERTF(oh->ot_id_cnt <= OSD_MAX_UGID_CNT, "count=%u",
387 /* id entry is allocated in the quota file */
388 if (inode && inode->i_dquot[type] && inode->i_dquot[type]->dq_off)
/* dedup: skip ids already declared in this transaction */
391 for (i = 0; i < oh->ot_id_cnt; i++) {
392 if (oh->ot_id_array[i] == id && osd_qid_type(oh, i) == type)
396 if (unlikely(i >= OSD_MAX_UGID_CNT)) {
397 CERROR("more than %d uid/gids for a transaction?\n", i);
401 oh->ot_id_array[i] = id;
402 osd_qid_set_type(oh, i, type);
404 obj = osd_dt_obj(dt);
405 oh->ot_credits += (allocated || id == 0) ?
406 1 : LDISKFS_QUOTA_INIT_BLOCKS(osd_sb(osd_obj2dev(obj)));
411 * OSD object methods.
415 * Concurrency: no concurrent access is possible that early in object
/*
 * Concurrency: no concurrent access is possible that early in object
 * life-cycle.
 *
 * Allocate and minimally initialize a new osd_object; ops vector is
 * chosen by the device's iop mode (EA vs. IAM directories).
 * NOTE(review): listing incomplete (allocation of 'mo', NULL check and
 * return statements are not visible) -- kept byte-identical.
 */
418 static struct lu_object *osd_object_alloc(const struct lu_env *env,
419 const struct lu_object_header *hdr,
422 struct osd_object *mo;
428 l = &mo->oo_dt.do_lu;
429 dt_object_init(&mo->oo_dt, NULL, d);
430 if (osd_dev(d)->od_iop_mode)
431 mo->oo_dt.do_ops = &osd_obj_ea_ops;
433 mo->oo_dt.do_ops = &osd_obj_ops;
435 l->lo_ops = &osd_lu_obj_ops;
/* locks protecting object state and the external index */
436 cfs_init_rwsem(&mo->oo_sem);
437 cfs_init_rwsem(&mo->oo_ext_idx_sem);
438 cfs_spin_lock_init(&mo->oo_guard);
446 * retrieve object from backend ext fs.
/*
 * Retrieve the backing inode for \a id from the ldiskfs filesystem.
 * Maps lookup failures to ERR_PTR: -EACCES (no inode), -ESTALE
 * (generation mismatch or nlink == 0 from racing unlink), -ENOENT
 * (bad inode).  On success sets S_NOCMTIME so ldiskfs won't update
 * c/mtime behind the OSD's back.
 * NOTE(review): listing incomplete (iput() on error paths and the
 * final return are not visible) -- kept byte-identical.
 */
448 static struct inode *osd_iget(struct osd_thread_info *info,
449 struct osd_device *dev,
450 const struct osd_inode_id *id)
452 struct inode *inode = NULL;
454 #ifdef HAVE_EXT4_LDISKFS
455 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
457 /* Newer kernels return an error instead of a NULL pointer */
460 inode = iget(osd_sb(dev), id->oii_ino);
463 CERROR("no inode\n");
464 inode = ERR_PTR(-EACCES);
465 } else if (id->oii_gen != OSD_OII_NOGEN &&
466 inode->i_generation != id->oii_gen) {
468 inode = ERR_PTR(-ESTALE);
469 } else if (inode->i_nlink == 0) {
470 /* due to parallel readdir and unlink,
471 * we can have dead inode here. */
472 CWARN("stale inode\n");
473 make_bad_inode(inode);
475 inode = ERR_PTR(-ESTALE);
476 } else if (is_bad_inode(inode)) {
477 CERROR("bad inode %lx\n",inode->i_ino);
479 inode = ERR_PTR(-ENOENT);
481 /* Do not update file c/mtime in ldiskfs.
482 * NB: we don't have any lock to protect this because we don't
483 * have reference on osd_object now, but contention with
484 * another lookup + attr_set can't happen in the tiny window
485 * between if (...) and set S_NOCMTIME. */
486 if (!(inode->i_flags & S_NOCMTIME))
487 inode->i_flags |= S_NOCMTIME;
/*
 * Resolve \a fid to a backing inode via the object index (OI) and
 * attach it to \a obj.  A fid absent from the OI leaves the object
 * inode-less (lu_object_exists() == false), which is a normal case for
 * locking anchors / placeholders.  For directories with PDO enabled a
 * htree lock head is allocated as well.
 * NOTE(review): listing incomplete (RETURN paths, iput on the htree
 * allocation failure, braces) -- kept byte-identical.
 */
492 static int osd_fid_lookup(const struct lu_env *env,
493 struct osd_object *obj, const struct lu_fid *fid)
495 struct osd_thread_info *info;
496 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
497 struct osd_device *dev;
498 struct osd_inode_id *id;
502 LINVRNT(osd_invariant(obj));
503 LASSERT(obj->oo_inode == NULL);
504 LASSERTF(fid_is_sane(fid) || osd_fid_is_root(fid), DFID, PFID(fid));
506 * This assertion checks that osd layer sees only local
507 * fids. Unfortunately it is somewhat expensive (does a
508 * cache-lookup). Disabling it for production/acceptance-testing.
510 LASSERT(1 || fid_is_local(env, ldev->ld_site, fid));
514 info = osd_oti_get(env);
518 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
521 result = osd_oi_lookup(info, osd_fid2oi(dev, fid), fid, id);
523 if (result == -ENOENT)
528 inode = osd_iget(info, dev, id);
531 * If fid wasn't found in oi, inode-less object is
532 * created, for which lu_object_exists() returns
533 * false. This is used in a (frequent) case when
534 * objects are created as locking anchors or
535 * place holders for objects yet to be created.
537 result = PTR_ERR(inode);
541 obj->oo_inode = inode;
542 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
/* in EA (interop) mode ldiskfs maintains . and .. itself */
543 if (dev->od_iop_mode) {
544 obj->oo_compat_dot_created = 1;
545 obj->oo_compat_dotdot_created = 1;
548 if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
/* parallel directory ops need a per-directory htree lock head */
551 LASSERT(obj->oo_hl_head == NULL);
552 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
553 if (obj->oo_hl_head == NULL) {
554 obj->oo_inode = NULL;
559 LINVRNT(osd_invariant(obj));
565 * Concurrency: shouldn't matter.
567 static void osd_object_init0(struct osd_object *obj)
569 LASSERT(obj->oo_inode != NULL);
570 obj->oo_dt.do_body_ops = &osd_body_ops;
571 obj->oo_dt.do_lu.lo_header->loh_attr |=
572 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
576 * Concurrency: no concurrent access is possible that early in object
579 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
580 const struct lu_object_conf *unused)
582 struct osd_object *obj = osd_obj(l);
585 LINVRNT(osd_invariant(obj));
587 result = osd_fid_lookup(env, obj, lu_object_fid(l));
588 obj->oo_dt.do_body_ops = &osd_body_ops_new;
590 if (obj->oo_inode != NULL)
591 osd_object_init0(obj);
593 LINVRNT(osd_invariant(obj));
598 * Concurrency: no concurrent access is possible that late in object
601 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
603 struct osd_object *obj = osd_obj(l);
605 LINVRNT(osd_invariant(obj));
607 dt_object_fini(&obj->oo_dt);
608 if (obj->oo_hl_head != NULL)
609 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
616 static struct iam_path_descr *osd_it_ipd_get(const struct lu_env *env,
617 const struct iam_container *bag)
619 return bag->ic_descr->id_ops->id_ipd_alloc(bag,
620 osd_oti_get(env)->oti_it_ipd);
623 static struct iam_path_descr *osd_idx_ipd_get(const struct lu_env *env,
624 const struct iam_container *bag)
626 return bag->ic_descr->id_ops->id_ipd_alloc(bag,
627 osd_oti_get(env)->oti_idx_ipd);
630 static void osd_ipd_put(const struct lu_env *env,
631 const struct iam_container *bag,
632 struct iam_path_descr *ipd)
634 bag->ic_descr->id_ops->id_ipd_free(ipd);
638 * Concurrency: no concurrent access is possible that late in object
/*
 * Concurrency: no concurrent access is possible that late in object
 * life-cycle.
 *
 * Tear down the object's IAM directory state: finalize the container
 * (if it still refers to this inode) and free the osd_directory.
 * NOTE(review): listing incomplete (closing braces and resetting of
 * o->oo_dir are not visible) -- kept byte-identical.
 */
641 static void osd_index_fini(struct osd_object *o)
643 struct iam_container *bag;
645 if (o->oo_dir != NULL) {
646 bag = &o->oo_dir->od_container;
647 if (o->oo_inode != NULL) {
648 if (bag->ic_object == o->oo_inode)
649 iam_container_fini(bag);
651 OBD_FREE_PTR(o->oo_dir);
657 * Concurrency: no concurrent access is possible that late in object
658 * life-cycle (for all existing callers, that is. New callers have to provide
659 * their own locking.)
661 static int osd_inode_unlinked(const struct inode *inode)
663 return inode->i_nlink == 0;
667 OSD_TXN_OI_DELETE_CREDITS = 20,
668 OSD_TXN_INODE_DELETE_CREDITS = 20
675 #if OSD_THANDLE_STATS
677 * Set time when the handle is allocated
679 static void osd_th_alloced(struct osd_thandle *oth)
681 oth->oth_alloced = cfs_time_current();
685 * Set time when the handle started
687 static void osd_th_started(struct osd_thandle *oth)
689 oth->oth_started = cfs_time_current();
693 * Helper function to convert time interval to microseconds packed in
694 * long int (default time units for the counter in "stats" initialized
695 * by lu_time_init() )
697 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
701 cfs_duration_usec(cfs_time_sub(end, start), &val);
702 return val.tv_sec * 1000000 + val.tv_usec;
706 * Check whether the we deal with this handle for too long.
/*
 * Feed the alloc->start->close->now intervals of a transaction handle
 * into the per-device stats, and warn (with stack dump) when the handle
 * stayed open for more than 30 seconds.
 * NOTE(review): listing incomplete (trailing braces / 'closed'
 * parameter declaration are not visible) -- kept byte-identical.
 */
708 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
709 cfs_time_t alloced, cfs_time_t started,
712 cfs_time_t now = cfs_time_current();
714 LASSERT(dev != NULL);
716 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
717 interval_to_usec(alloced, started));
718 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
719 interval_to_usec(started, closed));
720 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
721 interval_to_usec(closed, now));
/* complain when a handle stayed open for more than 30 seconds */
723 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
724 CWARN("transaction handle %p was open for too long: "
726 "alloced "CFS_TIME_T" ,"
727 "started "CFS_TIME_T" ,"
728 "closed "CFS_TIME_T"\n",
729 oth, now, alloced, started, closed);
730 libcfs_debug_dumpstack(NULL);
734 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
736 cfs_time_t __closed = cfs_time_current(); \
737 cfs_time_t __alloced = oth->oth_alloced; \
738 cfs_time_t __started = oth->oth_started; \
741 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
744 #else /* OSD_THANDLE_STATS */
746 #define osd_th_alloced(h) do {} while(0)
747 #define osd_th_started(h) do {} while(0)
748 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
750 #endif /* OSD_THANDLE_STATS */
753 * Concurrency: doesn't access mutable data.
755 static int osd_param_is_sane(const struct osd_device *dev,
756 const struct thandle *th)
758 struct osd_thandle *oh;
759 oh = container_of0(th, struct osd_thandle, ot_super);
760 return oh->ot_credits <= osd_journal(dev)->j_max_transaction_buffers;
764 * Concurrency: shouldn't matter.
/*
 * Journal commit callback: runs per-transaction dt commit hooks and any
 * registered dt_txn_commit_cb's, drops the device reference taken in
 * osd_trans_start() and finalizes the transaction's lu_context.  The
 * signature depends on HAVE_LDISKFS_JOURNAL_CALLBACK_ADD.
 * NOTE(review): listing incomplete (#else/#endif of the signature
 * ifdef, lu_device_put and OBD_FREE_PTR are not visible) -- kept
 * byte-identical.
 */
766 #ifdef HAVE_LDISKFS_JOURNAL_CALLBACK_ADD
767 static void osd_trans_commit_cb(struct super_block *sb,
768 struct journal_callback *jcb, int error)
770 static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
773 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
774 struct thandle *th = &oh->ot_super;
775 struct lu_device *lud = &th->th_dev->dd_lu_dev;
776 struct dt_txn_commit_cb *dcb, *tmp;
778 LASSERT(oh->ot_handle == NULL);
781 CERROR("transaction @0x%p commit error: %d\n", th, error);
783 dt_txn_hook_commit(th);
785 /* call per-transaction callbacks if any */
786 cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage)
787 dcb->dcb_func(NULL, th, dcb, error);
789 lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
793 lu_context_exit(&th->th_ctx);
794 lu_context_fini(&th->th_ctx);
/*
 * Allocate a new osd_thandle and initialize the embedded thandle;
 * returns ERR_PTR(-ENOMEM) when the allocation fails.
 * NOTE(review): listing incomplete (the NULL check on 'oh', the rest of
 * the thandle setup and the return are not visible) -- kept
 * byte-identical.
 */
798 static struct thandle *osd_trans_create(const struct lu_env *env,
801 struct osd_thread_info *oti = osd_oti_get(env);
802 struct osd_thandle *oh;
806 th = ERR_PTR(-ENOMEM);
807 OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
812 th->th_tags = LCT_TX_HANDLE;
814 oti->oti_dev = osd_dt_dev(d);
815 CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
822 * Concurrency: shouldn't matter.
/*
 * Concurrency: shouldn't matter.
 *
 * Start the jbd transaction for \a th: run dt start hooks, clamp the
 * declared credits to the journal maximum (dumping the per-op declare
 * counters when OSD_TRACK_DECLARES), start the journal handle, set up
 * the transaction lu_context and take a device reference that the
 * commit callback releases.
 * NOTE(review): listing heavily gutted (parameter 'th', 'rc'/'jh'
 * declarations, error paths, RETURN) -- kept byte-identical.
 */
824 int osd_trans_start(const struct lu_env *env, struct dt_device *d,
827 struct osd_thread_info *oti = osd_oti_get(env);
828 struct osd_device *dev = osd_dt_dev(d);
830 struct osd_thandle *oh;
/* nested journal handles are not allowed */
835 LASSERT(current->journal_info == NULL);
837 oh = container_of0(th, struct osd_thandle, ot_super);
839 LASSERT(oh->ot_handle == NULL);
841 rc = dt_txn_hook_start(env, d, th);
845 if (!osd_param_is_sane(dev, th)) {
846 CWARN("%s: too many transaction credits (%d > %d)\n",
847 d->dd_lu_dev.ld_obd->obd_name, oh->ot_credits,
848 osd_journal(dev)->j_max_transaction_buffers);
849 /* XXX Limit the credits to 'max_transaction_buffers', and
850 * let the underlying filesystem to catch the error if
851 * we really need so many credits.
853 * This should be removed when we can calculate the
854 * credits precisely. */
855 oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
856 #ifdef OSD_TRACK_DECLARES
857 CERROR(" attr_set: %d, punch: %d, xattr_set: %d,\n",
858 oh->ot_declare_attr_set, oh->ot_declare_punch,
859 oh->ot_declare_xattr_set);
860 CERROR(" create: %d, ref_add: %d, ref_del: %d, write: %d\n",
861 oh->ot_declare_create, oh->ot_declare_ref_add,
862 oh->ot_declare_ref_del, oh->ot_declare_write);
863 CERROR(" insert: %d, delete: %d, destroy: %d\n",
864 oh->ot_declare_insert, oh->ot_declare_delete,
865 oh->ot_declare_destroy);
870 * XXX temporary stuff. Some abstraction layer should
873 jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
877 LASSERT(oti->oti_txns == 0);
878 lu_context_init(&th->th_ctx, th->th_tags);
879 lu_context_enter(&th->th_ctx);
/* reference dropped by osd_trans_commit_cb() */
881 lu_device_get(&d->dd_lu_dev);
882 oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
886 * XXX: current rule is that we first start tx,
887 * then lock object(s), but we can't use
888 * this rule for data (due to locking specifics
889 * in ldiskfs). also in long-term we'd like to
890 * use usually-used (locks;tx) ordering. so,
891 * UGLY thing is that we'll use one ordering for
892 * data (ofd) and reverse ordering for metadata
893 * (mdd). then at some point we'll fix the latter
895 if (lu_device_is_md(&d->dd_lu_dev)) {
896 LASSERT(oti->oti_r_locks == 0);
897 LASSERT(oti->oti_w_locks == 0);
910 * Concurrency: shouldn't matter.
/*
 * Concurrency: shouldn't matter.
 *
 * Stop a started transaction: propagate th_sync, register the commit
 * callback (done here rather than in start because the underlying
 * transaction can change during truncate), run dt stop hooks and close
 * the journal handle via OSD_CHECK_SLOW_TH.
 * NOTE(review): listing incomplete ('rc' declaration, braces, RETURN)
 * -- kept byte-identical.
 */
912 static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
915 struct osd_thandle *oh;
916 struct osd_thread_info *oti = osd_oti_get(env);
920 oh = container_of0(th, struct osd_thandle, ot_super);
922 if (oh->ot_handle != NULL) {
923 handle_t *hdl = oh->ot_handle;
925 hdl->h_sync = th->th_sync;
928 * add commit callback
929 * notice we don't do this in osd_trans_start()
930 * as underlying transaction can change during truncate
932 osd_journal_callback_set(hdl, osd_trans_commit_cb,
935 LASSERT(oti->oti_txns == 1);
938 * XXX: current rule is that we first start tx,
939 * then lock object(s), but we can't use
940 * this rule for data (due to locking specifics
941 * in ldiskfs). also in long-term we'd like to
942 * use usually-used (locks;tx) ordering. so,
943 * UGLY thing is that we'll use one ordering for
944 * data (ofd) and reverse ordering for metadata
945 * (mdd). then at some point we'll fix the latter
947 if (lu_device_is_md(&th->th_dev->dd_lu_dev)) {
948 LASSERT(oti->oti_r_locks == 0);
949 LASSERT(oti->oti_w_locks == 0);
951 rc = dt_txn_hook_stop(env, th);
953 CERROR("Failure in transaction hook: %d\n", rc);
954 oh->ot_handle = NULL;
955 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
956 rc = ldiskfs_journal_stop(hdl));
958 CERROR("Failure to stop transaction: %d\n", rc);
966 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
968 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
971 cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
977 * Called just before object is freed. Releases all resources except for
978 * object itself (that is released by osd_object_free()).
980 * Concurrency: no concurrent access is possible that late in object
/*
 * Called just before object is freed.  Releases all resources except
 * for the object itself (that is released by osd_object_free()):
 * drops the inode reference and, for unlinked objects, removes the
 * fid->ino mapping from the object index.
 * NOTE(review): listing incomplete (index fini / iput calls are not
 * visible) -- kept byte-identical.
 */
983 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
985 struct osd_object *obj = osd_obj(l);
986 struct inode *inode = obj->oo_inode;
988 LINVRNT(osd_invariant(obj));
991 * If object is unlinked remove fid->ino mapping from object index.
997 obj->oo_inode = NULL;
1002 * Concurrency: ->loo_object_release() is called under site spin-lock.
/*
 * Concurrency: ->loo_object_release() is called under site spin-lock.
 *
 * Nothing to do for osd objects on release.
 */
static void osd_object_release(const struct lu_env *env,
                               struct lu_object *l)
{
}
1010 * Concurrency: shouldn't matter.
/*
 * Concurrency: shouldn't matter.
 *
 * Debug printer: emit the object address, inode pointer, ino/generation
 * and the index flavor ("plain" for non-IAM objects).
 * NOTE(review): listing incomplete (the else-branch setting 'd' for
 * non-directories and the second printf argument line are not visible)
 * -- kept byte-identical.
 */
1012 static int osd_object_print(const struct lu_env *env, void *cookie,
1013 lu_printer_t p, const struct lu_object *l)
1015 struct osd_object *o = osd_obj(l);
1016 struct iam_descr *d;
1018 if (o->oo_dir != NULL)
1019 d = o->oo_dir->od_container.ic_descr;
1022 return (*p)(env, cookie, LUSTRE_OSD_NAME"-object@%p(i:%p:%lu/%u)[%s]",
1024 o->oo_inode ? o->oo_inode->i_ino : 0UL,
1025 o->oo_inode ? o->oo_inode->i_generation : 0,
1026 d ? d->id_ops->id_name : "plain");
1030 * Concurrency: shouldn't matter.
/*
 * Concurrency: shouldn't matter.
 *
 * Return filesystem usage for the device, served from a per-device
 * cache (od_kstatfs) refreshed at most once per second under
 * od_osfs_lock.
 * NOTE(review): listing incomplete ('sfs' parameter, 'result'
 * declaration, braces, return) -- kept byte-identical.
 */
1032 int osd_statfs(const struct lu_env *env, struct dt_device *d,
1035 struct osd_device *osd = osd_dt_dev(d);
1036 struct super_block *sb = osd_sb(osd);
1039 cfs_spin_lock(&osd->od_osfs_lock);
1040 /* cache 1 second */
1041 if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
1042 result = ll_do_statfs(sb, &osd->od_kstatfs);
1043 if (likely(result == 0)) /* N.B. statfs can't really fail */
1044 osd->od_osfs_age = cfs_time_current_64();
1047 if (likely(result == 0))
1048 *sfs = osd->od_kstatfs;
1049 cfs_spin_unlock(&osd->od_osfs_lock);
1055 * Concurrency: doesn't access mutable data.
/*
 * Concurrency: doesn't access mutable data.
 *
 * Report device parameters: name-length/nlink limits, block shift,
 * mount options (user xattr, ACL) and the maximum EA size (large when
 * the EA_INODE incompat feature is present, otherwise one block).
 * NOTE(review): listing incomplete (#else/#endif of the EA_INODE ifdef
 * and closing brace are not visible) -- kept byte-identical.
 */
1057 static void osd_conf_get(const struct lu_env *env,
1058 const struct dt_device *dev,
1059 struct dt_device_param *param)
1061 struct super_block *sb = osd_sb(osd_dt_dev(dev));
1064 * XXX should be taken from not-yet-existing fs abstraction layer.
1066 param->ddp_max_name_len = LDISKFS_NAME_LEN;
1067 param->ddp_max_nlink = LDISKFS_LINK_MAX;
1068 param->ddp_block_shift = osd_sb(osd_dt_dev(dev))->s_blocksize_bits;
1069 param->ddp_mntopts = 0;
1070 if (test_opt(sb, XATTR_USER))
1071 param->ddp_mntopts |= MNTOPT_USERXATTR;
1072 if (test_opt(sb, POSIX_ACL))
1073 param->ddp_mntopts |= MNTOPT_ACL;
1075 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
1076 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
1077 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
1080 param->ddp_max_ea_size = sb->s_blocksize;
1085 * Helper function to get and fill the buffer with input values.
/*
 * Helper function to get and fill the per-thread lu_buf with the given
 * area/length.
 * NOTE(review): listing incomplete ('buf' declaration, member
 * assignments and return are not visible) -- kept byte-identical.
 */
1087 static struct lu_buf *osd_buf_get(const struct lu_env *env, void *area, ssize_t len)
1091 buf = &osd_oti_get(env)->oti_buf;
1098 * Concurrency: shouldn't matter.
1100 static int osd_sync(const struct lu_env *env, struct dt_device *d)
1102 CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_NAME);
1103 return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
1107 * Start commit for OSD device.
1109 * An implementation of dt_commit_async method for OSD device.
1110 * Asychronously starts underlayng fs sync and thereby a transaction
1113 * \param env environment
1114 * \param d dt device
1116 * \see dt_device_operations
1118 static int osd_commit_async(const struct lu_env *env,
1119 struct dt_device *d)
1121 struct super_block *s = osd_sb(osd_dt_dev(d));
1124 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_NAME);
1125 RETURN(s->s_op->sync_fs(s, 0));
1129 * Concurrency: shouldn't matter.
1132 static void osd_ro(const struct lu_env *env, struct dt_device *d)
1134 struct super_block *sb = osd_sb(osd_dt_dev(d));
1137 CERROR("*** setting device %s read-only ***\n", LUSTRE_OSD_NAME);
1139 __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
1144 * Concurrency: serialization provided by callers.
1146 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
1147 int mode, unsigned long timeout, __u32 alg,
1148 struct lustre_capa_key *keys)
1150 struct osd_device *dev = osd_dt_dev(d);
1153 dev->od_fl_capa = mode;
1154 dev->od_capa_timeout = timeout;
1155 dev->od_capa_alg = alg;
1156 dev->od_capa_keys = keys;
1161 * Concurrency: serialization provided by callers.
/*
 * Concurrency: serialization provided by callers.
 *
 * Populate a quota context: the ctxt is actually an obd_device whose
 * lvfs context is pointed at the given vfsmount/root so quota code can
 * perform path operations.
 * NOTE(review): listing incomplete (asserts / closing brace are not
 * visible) -- kept byte-identical.
 */
1163 static void osd_init_quota_ctxt(const struct lu_env *env, struct dt_device *d,
1164 struct dt_quota_ctxt *ctxt, void *data)
1166 struct obd_device *obd = (void *)ctxt;
1167 struct vfsmount *mnt = (struct vfsmount *)data;
1170 obd->u.obt.obt_sb = mnt->mnt_root->d_inode->i_sb;
1171 OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
1172 obd->obd_lvfs_ctxt.pwdmnt = mnt;
1173 obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
1174 obd->obd_lvfs_ctxt.fs = get_ds();
1180 * Note: we do not count into QUOTA here.
1181 * If we mount with --data_journal we may need more.
/*
 * Per-operation journal credit table (no quota accounting).
 * Note: we do not count into QUOTA here.
 * If we mount with --data_journal we may need more.
 * NOTE(review): listing incomplete (closing brace / some comment lines
 * missing) -- kept byte-identical.
 */
1183 static const int osd_dto_credits_noquota[DTO_NR] = {
1186 * INDEX_EXTRA_TRANS_BLOCKS(8) +
1187 * SINGLEDATA_TRANS_BLOCKS(8)
1188 * XXX Note: maybe iam need more, since iam have more level than
1191 [DTO_INDEX_INSERT] = 16,
1192 [DTO_INDEX_DELETE] = 16,
1196 [DTO_INDEX_UPDATE] = 16,
1198 * Create a object. The same as create object in EXT3.
1199 * DATA_TRANS_BLOCKS(14) +
1200 * INDEX_EXTRA_BLOCKS(8) +
1201 * 3(inode bits, groups, GDT)
1203 [DTO_OBJECT_CREATE] = 25,
1205 * XXX: real credits to be fixed
1207 [DTO_OBJECT_DELETE] = 25,
1209 * Attr set credits (inode)
1211 [DTO_ATTR_SET_BASE] = 1,
1213 * Xattr set. The same as xattr of EXT3.
1214 * DATA_TRANS_BLOCKS(14)
1215 * XXX Note: in original MDS implmentation INDEX_EXTRA_TRANS_BLOCKS
1216 * are also counted in. Do not know why?
1218 [DTO_XATTR_SET] = 14,
1221 * credits for inode change during write.
1223 [DTO_WRITE_BASE] = 3,
1225 * credits for single block write.
1227 [DTO_WRITE_BLOCK] = 14,
1229 * Attr set credits for chown.
1230 * This is extra credits for setattr, and it is null without quota
1232 [DTO_ATTR_SET_CHOWN]= 0
/*
 * dt_device operations vector for the ldiskfs OSD.
 * NOTE(review): listing incomplete (some entries, e.g. .dt_ro, and the
 * closing brace are not visible) -- kept byte-identical.
 */
1235 static const struct dt_device_operations osd_dt_ops = {
1236 .dt_root_get = osd_root_get,
1237 .dt_statfs = osd_statfs,
1238 .dt_trans_create = osd_trans_create,
1239 .dt_trans_start = osd_trans_start,
1240 .dt_trans_stop = osd_trans_stop,
1241 .dt_trans_cb_add = osd_trans_cb_add,
1242 .dt_conf_get = osd_conf_get,
1243 .dt_sync = osd_sync,
1245 .dt_commit_async = osd_commit_async,
1246 .dt_init_capa_ctxt = osd_init_capa_ctxt,
1247 .dt_init_quota_ctxt= osd_init_quota_ctxt,
/*
 * Take the object's rw-semaphore for reading with the given lockdep
 * role; a read lock implies no writer owns the object.
 * NOTE(review): listing incomplete (oti_r_locks increment and closing
 * brace are not visible) -- kept byte-identical.
 */
1250 static void osd_object_read_lock(const struct lu_env *env,
1251 struct dt_object *dt, unsigned role)
1253 struct osd_object *obj = osd_dt_obj(dt);
1254 struct osd_thread_info *oti = osd_oti_get(env);
1256 LINVRNT(osd_invariant(obj));
/* this thread must not already hold the write lock */
1258 LASSERT(obj->oo_owner != env);
1259 cfs_down_read_nested(&obj->oo_sem, role);
1261 LASSERT(obj->oo_owner == NULL);
/*
 * Take the object's rw-semaphore for writing with the given lockdep
 * role and record this env as the owner (used by osd_write_locked()).
 * NOTE(review): listing incomplete (oti_w_locks increment and closing
 * brace are not visible) -- kept byte-identical.
 */
1265 static void osd_object_write_lock(const struct lu_env *env,
1266 struct dt_object *dt, unsigned role)
1268 struct osd_object *obj = osd_dt_obj(dt);
1269 struct osd_thread_info *oti = osd_oti_get(env);
1271 LINVRNT(osd_invariant(obj));
/* recursive write locking is not allowed */
1273 LASSERT(obj->oo_owner != env);
1274 cfs_down_write_nested(&obj->oo_sem, role);
1276 LASSERT(obj->oo_owner == NULL);
1277 obj->oo_owner = env;
/*
 * Drop a read lock taken by osd_object_read_lock().
 * NOTE(review): listing incomplete (oti_r_locks decrement and closing
 * brace are not visible) -- kept byte-identical.
 */
1281 static void osd_object_read_unlock(const struct lu_env *env,
1282 struct dt_object *dt)
1284 struct osd_object *obj = osd_dt_obj(dt);
1285 struct osd_thread_info *oti = osd_oti_get(env);
1287 LINVRNT(osd_invariant(obj));
1289 LASSERT(oti->oti_r_locks > 0);
1291 cfs_up_read(&obj->oo_sem);
/*
 * Drop a write lock taken by osd_object_write_lock(); the owner is
 * cleared before releasing the semaphore.
 * NOTE(review): listing incomplete (oti_w_locks decrement and closing
 * brace are not visible) -- kept byte-identical.
 */
1294 static void osd_object_write_unlock(const struct lu_env *env,
1295 struct dt_object *dt)
1297 struct osd_object *obj = osd_dt_obj(dt);
1298 struct osd_thread_info *oti = osd_oti_get(env);
1300 LINVRNT(osd_invariant(obj));
1302 LASSERT(obj->oo_owner == env);
1303 LASSERT(oti->oti_w_locks > 0);
1305 obj->oo_owner = NULL;
1306 cfs_up_write(&obj->oo_sem);
1309 static int osd_object_write_locked(const struct lu_env *env,
1310 struct dt_object *dt)
1312 struct osd_object *obj = osd_dt_obj(dt);
1314 LINVRNT(osd_invariant(obj));
1316 return obj->oo_owner == env;
/*
 * Validate a client-supplied capability: fast path via the device's
 * capa cache (reject if expired), otherwise check the expiry, locate
 * the matching key under capa_lock, recompute the HMAC and compare;
 * a verified capa is inserted into the cache.
 * NOTE(review): listing heavily gutted ('i'/'rc' declarations, RETURN
 * paths, capa_put calls are not visible) -- kept byte-identical.
 */
1319 static int capa_is_sane(const struct lu_env *env,
1320 struct osd_device *dev,
1321 struct lustre_capa *capa,
1322 struct lustre_capa_key *keys)
1324 struct osd_thread_info *oti = osd_oti_get(env);
1325 struct lustre_capa *tcapa = &oti->oti_capa;
1326 struct obd_capa *oc;
1330 oc = capa_lookup(dev->od_capa_hash, capa, 0);
1332 if (capa_is_expired(oc)) {
1333 DEBUG_CAPA(D_ERROR, capa, "expired");
1340 if (capa_is_expired_sec(capa)) {
1341 DEBUG_CAPA(D_ERROR, capa, "expired");
1345 cfs_spin_lock(&capa_lock);
1346 for (i = 0; i < 2; i++) {
1347 if (keys[i].lk_keyid == capa->lc_keyid) {
1348 oti->oti_capa_key = keys[i];
1352 cfs_spin_unlock(&capa_lock);
1355 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
/* recompute the HMAC with the matched key and compare */
1359 rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1363 if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1364 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1368 oc = capa_add(dev->od_capa_hash, capa);
/*
 * Authorize operation \a opc on \a dt with capability \a capa:
 * short-circuits when capabilities are disabled on the device or
 * BYPASS_CAPA is passed; otherwise verifies fid match, opc support and
 * capability sanity.
 * NOTE(review): listing heavily gutted (RETURN statements, braces,
 * md_capainfo handling) -- kept byte-identical.
 */
1374 static int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1375 struct lustre_capa *capa, __u64 opc)
1377 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1378 struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1379 struct md_capainfo *ci;
1382 if (!dev->od_fl_capa)
1385 if (capa == BYPASS_CAPA)
1388 ci = md_capainfo(env);
1392 if (ci->mc_auth == LC_ID_NONE)
1396 CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
1400 if (!lu_fid_eq(fid, &capa->lc_fid)) {
1401 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1406 if (!capa_opc_supported(capa, opc)) {
1407 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1411 if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
1412 DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
/* Convert @seconds to a timespec truncated to the superblock's time
 * granularity. Returns a pointer to per-thread scratch (oti_time), so
 * the result must be consumed before the next call on this env. */
1419 static struct timespec *osd_inode_time(const struct lu_env *env,
1420 struct inode *inode, __u64 seconds)
1422 struct osd_thread_info *oti = osd_oti_get(env);
1423 struct timespec *t = &oti->oti_time;
1425 t->tv_sec = seconds;
1427 *t = timespec_trunc(*t, get_sb_time_gran(inode->i_sb));
/* Copy all standard attributes from the ldiskfs inode into @attr and
 * mark the corresponding LA_* valid bits. Caller holds oo_guard. */
1432 static void osd_inode_getattr(const struct lu_env *env,
1433 struct inode *inode, struct lu_attr *attr)
1435 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1436 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1437 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
1439 attr->la_atime = LTIME_S(inode->i_atime);
1440 attr->la_mtime = LTIME_S(inode->i_mtime);
1441 attr->la_ctime = LTIME_S(inode->i_ctime);
1442 attr->la_mode = inode->i_mode;
1443 attr->la_size = i_size_read(inode);
1444 attr->la_blocks = inode->i_blocks;
1445 attr->la_uid = inode->i_uid;
1446 attr->la_gid = inode->i_gid;
1447 attr->la_flags = LDISKFS_I(inode)->i_flags;
1448 attr->la_nlink = inode->i_nlink;
1449 attr->la_rdev = inode->i_rdev;
1450 attr->la_blksize = ll_inode_blksize(inode);
1451 attr->la_blkbits = inode->i_blkbits;
/* dt_object_operations::do_attr_get: authorize META_READ via @capa,
 * then snapshot inode attributes under the oo_guard spinlock. */
1454 static int osd_attr_get(const struct lu_env *env,
1455 struct dt_object *dt,
1456 struct lu_attr *attr,
1457 struct lustre_capa *capa)
1459 struct osd_object *obj = osd_dt_obj(dt);
1461 LASSERT(dt_object_exists(dt));
1462 LINVRNT(osd_invariant(obj));
1464 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
/* oo_guard keeps the attribute snapshot self-consistent */
1467 cfs_spin_lock(&obj->oo_guard);
1468 osd_inode_getattr(env, obj->oo_inode, attr);
1469 cfs_spin_unlock(&obj->oo_guard);
/* Declare journal credits for a future attr_set in @handle, and declare
 * quota ids for both old and new uid/gid when ownership will change.
 * Must run before the transaction is started (ot_handle still NULL). */
1473 static int osd_declare_attr_set(const struct lu_env *env,
1474 struct dt_object *dt,
1475 const struct lu_attr *attr,
1476 struct thandle *handle)
1478 struct osd_thandle *oh;
1479 struct osd_object *obj;
1481 LASSERT(dt != NULL);
1482 LASSERT(handle != NULL);
1484 obj = osd_dt_obj(dt);
1485 LASSERT(osd_invariant(obj));
1487 oh = container_of0(handle, struct osd_thandle, ot_super);
1488 LASSERT(oh->ot_handle == NULL);
1490 OSD_DECLARE_OP(oh, attr_set);
1491 oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
/* chown: charge quota for both the current and the new owner */
1493 if (attr && attr->la_valid & LA_UID) {
1495 osd_declare_qid(dt, oh, USRQUOTA, obj->oo_inode->i_uid,
1497 osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
1499 if (attr && attr->la_valid & LA_GID) {
1501 osd_declare_qid(dt, oh, GRPQUOTA, obj->oo_inode->i_gid,
1503 osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
/* Apply the valid fields of @attr to the in-core inode. With quota
 * support, a uid/gid change first transfers quota via ll_vfs_dq_transfer
 * under a pushed credential context. Caller holds oo_guard and commits
 * the inode to the journal afterwards.
 * NOTE(review): several lines (rc handling, LA_MODE/LA_UID/LA_GID bit
 * tests, returns) are elided in this listing. */
1509 static int osd_inode_setattr(const struct lu_env *env,
1510 struct inode *inode, const struct lu_attr *attr)
1514 bits = attr->la_valid;
1516 LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
1518 #ifdef HAVE_QUOTA_SUPPORT
1519 if ((bits & LA_UID && attr->la_uid != inode->i_uid) ||
1520 (bits & LA_GID && attr->la_gid != inode->i_gid)) {
1521 struct osd_ctxt *save = &osd_oti_get(env)->oti_ctxt;
1527 iattr.ia_valid |= ATTR_UID;
1529 iattr.ia_valid |= ATTR_GID;
1530 iattr.ia_uid = attr->la_uid;
1531 iattr.ia_gid = attr->la_gid;
/* quota transfer must run with server credentials pushed */
1532 osd_push_ctxt(env, save);
1533 rc = ll_vfs_dq_transfer(inode, &iattr) ? -EDQUOT : 0;
1540 if (bits & LA_ATIME)
1541 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1542 if (bits & LA_CTIME)
1543 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1544 if (bits & LA_MTIME)
1545 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1546 if (bits & LA_SIZE) {
1547 LDISKFS_I(inode)->i_disksize = attr->la_size;
1548 i_size_write(inode, attr->la_size);
1552 /* OSD should not change "i_blocks" which is used by quota.
1553 * "i_blocks" should be changed by ldiskfs only. */
1554 if (bits & LA_BLOCKS)
1555 inode->i_blocks = attr->la_blocks;
/* preserve the file-type bits; only permission bits are settable */
1558 inode->i_mode = (inode->i_mode & S_IFMT) |
1559 (attr->la_mode & ~S_IFMT);
1561 inode->i_uid = attr->la_uid;
1563 inode->i_gid = attr->la_gid;
1564 if (bits & LA_NLINK)
1565 inode->i_nlink = attr->la_nlink;
1567 inode->i_rdev = attr->la_rdev;
1569 if (bits & LA_FLAGS) {
1570 /* always keep S_NOCMTIME */
1571 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
/* dt_object_operations::do_attr_set: authorize META_WRITE, apply the
 * attributes under oo_guard, then mark the inode dirty so the change
 * reaches the journal. */
1577 static int osd_attr_set(const struct lu_env *env,
1578 struct dt_object *dt,
1579 const struct lu_attr *attr,
1580 struct thandle *handle,
1581 struct lustre_capa *capa)
1583 struct osd_object *obj = osd_dt_obj(dt);
1586 LASSERT(handle != NULL);
1587 LASSERT(dt_object_exists(dt));
1588 LASSERT(osd_invariant(obj));
1590 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1593 OSD_EXEC_OP(handle, attr_set);
1595 cfs_spin_lock(&obj->oo_guard);
1596 rc = osd_inode_setattr(env, obj->oo_inode, attr);
1597 cfs_spin_unlock(&obj->oo_guard);
/* push the modified inode into the running transaction */
1600 obj->oo_inode->i_sb->s_op->dirty_inode(obj->oo_inode);
/* Pre-create hook; currently a stub (see XXX below). */
1607 * XXX temporary solution.
1609 static int osd_create_pre(struct osd_thread_info *info, struct osd_object *obj,
1610 struct lu_attr *attr, struct thandle *th)
/* Post-create hook: finish lu-level init and unlock the freshly
 * allocated inode if it is still in the I_NEW state. */
1615 static int osd_create_post(struct osd_thread_info *info, struct osd_object *obj,
1616 struct lu_attr *attr, struct thandle *th)
1618 osd_object_init0(obj);
1619 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
1620 unlock_new_inode(obj->oo_inode);
/* Build a fake child dentry (named @name under @obj) out of per-thread
 * scratch dentries, for passing into ldiskfs directory operations.
 * The returned dentry lives in osd_thread_info, not the dcache. */
1624 static struct dentry * osd_child_dentry_get(const struct lu_env *env,
1625 struct osd_object *obj,
1629 struct osd_thread_info *info = osd_oti_get(env);
1630 struct dentry *child_dentry = &info->oti_child_dentry;
1631 struct dentry *obj_dentry = &info->oti_obj_dentry;
1633 obj_dentry->d_inode = obj->oo_inode;
1634 obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
1635 obj_dentry->d_name.hash = 0;
1637 child_dentry->d_name.hash = 0;
1638 child_dentry->d_parent = obj_dentry;
1639 child_dentry->d_name.name = name;
1640 child_dentry->d_name.len = namelen;
1641 return child_dentry;
/* Allocate a new ldiskfs inode of @mode for @obj inside transaction
 * @th. For directories with parallel directory ops (pdo) enabled, an
 * htree lock head is pre-allocated; on failure it is freed again.
 * New inodes get S_NOCMTIME so ldiskfs does not update c/mtime itself.
 * NOTE(review): error-path/return lines are elided in this listing. */
1645 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1647 struct dt_allocation_hint *hint,
1651 struct osd_device *osd = osd_obj2dev(obj);
1652 struct osd_thandle *oth;
1653 struct dt_object *parent;
1654 struct inode *inode;
1655 #ifdef HAVE_QUOTA_SUPPORT
1656 struct osd_ctxt *save = &info->oti_ctxt;
1659 LINVRNT(osd_invariant(obj));
1660 LASSERT(obj->oo_inode == NULL);
1661 LASSERT(obj->oo_hl_head == NULL);
/* parallel-dir-ops: each directory carries its own htree lock head */
1663 if (S_ISDIR(mode) && ldiskfs_pdo) {
1664 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1665 if (obj->oo_hl_head == NULL)
1669 oth = container_of(th, struct osd_thandle, ot_super);
1670 LASSERT(oth->ot_handle->h_transaction != NULL);
/* allocation hint decides which parent group the inode lands in */
1672 if (hint && hint->dah_parent)
1673 parent = hint->dah_parent;
1675 parent = osd->od_obj_area;
1677 LASSERT(parent != NULL);
1678 LASSERT(osd_dt_obj(parent)->oo_inode->i_op != NULL);
1680 #ifdef HAVE_QUOTA_SUPPORT
1681 osd_push_ctxt(info->oti_env, save);
1683 inode = ldiskfs_create_inode(oth->ot_handle,
1684 osd_dt_obj(parent)->oo_inode, mode);
1685 #ifdef HAVE_QUOTA_SUPPORT
1688 if (!IS_ERR(inode)) {
1689 /* Do not update file c/mtime in ldiskfs.
1690 * NB: don't need any lock because no contention at this
1692 inode->i_flags |= S_NOCMTIME;
1693 obj->oo_inode = inode;
/* creation failed: release the pre-allocated htree lock head */
1696 if (obj->oo_hl_head != NULL) {
1697 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1698 obj->oo_hl_head = NULL;
1700 result = PTR_ERR(inode);
1702 LINVRNT(osd_invariant(obj));
/* Create a directory object. In non-interoperability mode
 * (od_iop_mode == 0) the directory is additionally initialized as an
 * IAM variable-key container storing packed fids. */
1710 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1711 struct lu_attr *attr,
1712 struct dt_allocation_hint *hint,
1713 struct dt_object_format *dof,
1717 struct osd_thandle *oth;
1718 struct osd_device *osd = osd_obj2dev(obj);
/* keep only type, rwx and sticky bits from the requested mode */
1719 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1721 LASSERT(S_ISDIR(attr->la_mode));
1723 oth = container_of(th, struct osd_thandle, ot_super);
1724 LASSERT(oth->ot_handle->h_transaction != NULL);
1725 result = osd_mkfile(info, obj, mode, hint, th);
1726 if (result == 0 && osd->od_iop_mode == 0) {
1727 LASSERT(obj->oo_inode != NULL);
1729 * XXX uh-oh... call low-level iam function directly.
1732 result = iam_lvar_create(obj->oo_inode, OSD_NAME_LEN, 4,
1733 sizeof (struct osd_fid_pack),
/* Create an index object (a regular file backing an IAM container).
 * Depending on the requested index features the container is created
 * with variable-size keys (lvar) or fixed-size keys (lfix). */
1739 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
1740 struct lu_attr *attr,
1741 struct dt_allocation_hint *hint,
1742 struct dt_object_format *dof,
1746 struct osd_thandle *oth;
1747 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
1749 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1751 LASSERT(S_ISREG(attr->la_mode));
1753 oth = container_of(th, struct osd_thandle, ot_super);
1754 LASSERT(oth->ot_handle->h_transaction != NULL);
1756 result = osd_mkfile(info, obj, mode, hint, th);
1758 LASSERT(obj->oo_inode != NULL);
1759 if (feat->dif_flags & DT_IND_VARKEY)
1760 result = iam_lvar_create(obj->oo_inode,
1761 feat->dif_keysize_max,
1763 feat->dif_recsize_max,
1766 result = iam_lfix_create(obj->oo_inode,
1767 feat->dif_keysize_max,
1769 feat->dif_recsize_max,
/* Create a regular file: thin wrapper over osd_mkfile() with the mode
 * masked to type + rwx + sticky bits. */
1776 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1777 struct lu_attr *attr,
1778 struct dt_allocation_hint *hint,
1779 struct dt_object_format *dof,
1782 LASSERT(S_ISREG(attr->la_mode));
1783 return osd_mkfile(info, obj, (attr->la_mode &
1784 (S_IFMT | S_IRWXUGO | S_ISVTX)), hint, th);
/* Create a symlink inode: same wrapper shape as osd_mkreg(), but the
 * requested mode must be S_IFLNK. */
1787 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1788 struct lu_attr *attr,
1789 struct dt_allocation_hint *hint,
1790 struct dt_object_format *dof,
1793 LASSERT(S_ISLNK(attr->la_mode));
1794 return osd_mkfile(info, obj, (attr->la_mode &
1795 (S_IFMT | S_IRWXUGO | S_ISVTX)), hint, th);
/* Create a special file (char/block device, fifo or socket): allocate
 * the inode then initialize it with init_special_inode() using la_rdev
 * as the device number. */
1798 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1799 struct lu_attr *attr,
1800 struct dt_allocation_hint *hint,
1801 struct dt_object_format *dof,
1804 cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX);
1807 LINVRNT(osd_invariant(obj));
1808 LASSERT(obj->oo_inode == NULL);
1809 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1810 S_ISFIFO(mode) || S_ISSOCK(mode));
1812 result = osd_mkfile(info, obj, mode, hint, th);
1814 LASSERT(obj->oo_inode != NULL);
1815 init_special_inode(obj->oo_inode, mode, attr->la_rdev);
1817 LINVRNT(osd_invariant(obj));
/* Function-pointer type shared by the per-format creators above. */
1821 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1823 struct dt_allocation_hint *hint,
1824 struct dt_object_format *dof,
/* Dispatch a dt_format_type to the matching creator (mkdir/mkreg/...).
 * NOTE(review): the switch body is mostly elided in this listing; only
 * the index case is visible. */
1827 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1829 osd_obj_type_f result;
1845 result = osd_mk_index;
/* Initialize an allocation hint: zero it, then record the parent
 * object and the child's mode for later inode placement. */
1856 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1857 struct dt_object *parent, cfs_umode_t child_mode)
1861 memset(ah, 0, sizeof(*ah));
1862 ah->dah_parent = parent;
1863 ah->dah_mode = child_mode;
/* Create the on-disk object: pre hook, format-specific creator chosen
 * by dof->dof_type, then post hook (inode unlock / lu init). */
1867 * Helper function for osd_object_create()
1869 * \retval 0, on success
1871 static int __osd_object_create(struct osd_thread_info *info,
1872 struct osd_object *obj, struct lu_attr *attr,
1873 struct dt_allocation_hint *hint,
1874 struct dt_object_format *dof,
1880 result = osd_create_pre(info, obj, attr, th);
1882 result = osd_create_type_f(dof->dof_type)(info, obj,
1883 attr, hint, dof, th);
1885 result = osd_create_post(info, obj, attr, th);
/* Insert the fid -> (ino, generation) mapping of a freshly created
 * object into the object index, honouring the caller's
 * CFS_CAP_SYS_RESOURCE capability for reserved-space accounting. */
1891 * Helper function for osd_object_create()
1893 * \retval 0, on success
1895 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
1896 const struct lu_fid *fid, struct thandle *th)
1898 struct osd_thread_info *info = osd_oti_get(env);
1899 struct osd_inode_id *id = &info->oti_id;
1900 struct osd_device *osd = osd_obj2dev(obj);
1901 struct md_ucred *uc = md_ucred(env);
1903 LASSERT(obj->oo_inode != NULL);
1904 LASSERT(uc != NULL);
1906 id->oii_ino = obj->oo_inode->i_ino;
1907 id->oii_gen = obj->oo_inode->i_generation;
1909 return osd_oi_insert(info, osd_fid2oi(osd, fid), fid, id, th,
1910 uc->mu_cap & CFS_CAP_SYS_RESOURCE_MASK);
/* Declare journal credits for object creation: base create credits, an
 * OI insertion for normal fids, extra inserts plus one block write for
 * directories (".", ".." and the first dir block), and quota for the
 * new owner. */
1913 static int osd_declare_object_create(const struct lu_env *env,
1914 struct dt_object *dt,
1915 struct lu_attr *attr,
1916 struct dt_allocation_hint *hint,
1917 struct dt_object_format *dof,
1918 struct thandle *handle)
1920 struct osd_thandle *oh;
1922 LASSERT(handle != NULL);
1924 oh = container_of0(handle, struct osd_thandle, ot_super);
1925 LASSERT(oh->ot_handle == NULL);
1927 OSD_DECLARE_OP(oh, create);
1928 oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
1929 /* XXX: So far, only normal fid needs be inserted into the oi,
1930 * things could be changed later. Revise following code then. */
1931 if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
1932 OSD_DECLARE_OP(oh, insert);
1933 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
1935 /* If this is directory, then we expect . and .. to be inserted as
1936 * well. The one directory block always needs to be created for the
1937 * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
1938 * block), there is no danger of needing a tree for the first block.
1940 if (attr && S_ISDIR(attr->la_mode)) {
1941 OSD_DECLARE_OP(oh, insert);
1942 OSD_DECLARE_OP(oh, insert);
1943 oh->ot_credits += osd_dto_credits_noquota[DTO_WRITE_BASE];
1947 osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
1948 osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
/* dt_object_operations::do_create (IAM mode): create the object and,
 * on success, insert its fid into the object index. Caller must hold
 * the object write lock; @dt must not exist yet. */
1953 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1954 struct lu_attr *attr,
1955 struct dt_allocation_hint *hint,
1956 struct dt_object_format *dof,
1959 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1960 struct osd_object *obj = osd_dt_obj(dt);
1961 struct osd_thread_info *info = osd_oti_get(env);
1966 LINVRNT(osd_invariant(obj));
1967 LASSERT(!dt_object_exists(dt));
1968 LASSERT(osd_write_locked(env, obj));
1969 LASSERT(th != NULL);
1971 OSD_EXEC_OP(th, create);
1973 result = __osd_object_create(info, obj, attr, hint, dof, th);
1975 result = __osd_oi_insert(env, obj, fid, th);
1977 LASSERT(ergo(result == 0, dt_object_exists(dt)));
1978 LASSERT(osd_invariant(obj));
/* Declare journal credits for destroying the object: delete + OI-entry
 * removal credits, plus quota declarations for the current owner. */
1983 * Called to destroy on-disk representation of the object
1985 * Concurrency: must be locked
1987 static int osd_declare_object_destroy(const struct lu_env *env,
1988 struct dt_object *dt,
1991 struct osd_object *obj = osd_dt_obj(dt);
1992 struct inode *inode = obj->oo_inode;
1993 struct osd_thandle *oh;
1996 oh = container_of0(th, struct osd_thandle, ot_super);
1997 LASSERT(oh->ot_handle == NULL);
2000 OSD_DECLARE_OP(oh, destroy);
2001 OSD_DECLARE_OP(oh, delete);
2002 oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_DELETE];
2003 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
2005 osd_declare_qid(dt, oh, USRQUOTA, inode->i_uid, inode);
2006 osd_declare_qid(dt, oh, GRPQUOTA, inode->i_gid, inode);
/* Destroy the on-disk object: for directories drop the remaining link
 * under oo_guard and dirty the inode, remove the fid from the object
 * index, and mark the lu_object HEARD_BANSHEE so it leaves the cache.
 * NOTE(review): some lines (nlink manipulation inside the lock, return)
 * are elided in this listing. */
2011 static int osd_object_destroy(const struct lu_env *env,
2012 struct dt_object *dt,
2015 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2016 struct osd_object *obj = osd_dt_obj(dt);
2017 struct inode *inode = obj->oo_inode;
2018 struct osd_device *osd = osd_obj2dev(obj);
2019 struct osd_thandle *oh;
2023 oh = container_of0(th, struct osd_thandle, ot_super);
2024 LASSERT(oh->ot_handle);
2026 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
2028 if (S_ISDIR(inode->i_mode)) {
2029 LASSERT(osd_inode_unlinked(inode) ||
2030 inode->i_nlink == 1);
2031 cfs_spin_lock(&obj->oo_guard);
2033 cfs_spin_unlock(&obj->oo_guard);
2034 inode->i_sb->s_op->dirty_inode(inode);
2036 LASSERT(osd_inode_unlinked(inode));
2039 OSD_EXEC_OP(th, destroy);
2041 result = osd_oi_delete(osd_oti_get(env),
2042 osd_fid2oi(osd, fid), fid, th);
2044 /* XXX: add to ext3 orphan list */
2045 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
2047 /* not needed in the cache anymore */
2048 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
/* Set an extended attribute through the inode's setxattr method, using
 * the per-thread scratch dentry. LU_XATTR_{REPLACE,CREATE} flags are
 * translated to the kernel XATTR_{REPLACE,CREATE} flags. */
2054 * Helper function for osd_xattr_set()
2056 static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2057 const struct lu_buf *buf, const char *name, int fl)
2059 struct osd_object *obj = osd_dt_obj(dt);
2060 struct inode *inode = obj->oo_inode;
2061 struct osd_thread_info *info = osd_oti_get(env);
2062 struct dentry *dentry = &info->oti_child_dentry;
2066 LASSERT(dt_object_exists(dt));
2067 LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
2068 LASSERT(osd_write_locked(env, obj));
2070 if (fl & LU_XATTR_REPLACE)
2071 fs_flags |= XATTR_REPLACE;
2073 if (fl & LU_XATTR_CREATE)
2074 fs_flags |= XATTR_CREATE;
2076 dentry->d_inode = inode;
2077 rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
2078 buf->lb_len, fs_flags);
/* Store the object's fid in its LMA extended attribute: build a
 * lustre_mdt_attrs in thread-local scratch, swab it to disk order and
 * write it with LU_XATTR_CREATE (the fid is immutable afterwards). */
2083 * Put the fid into lustre_mdt_attrs, and then place the structure
2084 * inode's ea. This fid should not be altered during the life time
2087 * \retval +ve, on success
2088 * \retval -ve, on error
2090 * FIXME: It is good to have/use ldiskfs_xattr_set_handle() here
2092 static int osd_ea_fid_set(const struct lu_env *env, struct dt_object *dt,
2093 const struct lu_fid *fid)
2095 struct osd_thread_info *info = osd_oti_get(env);
2096 struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
2098 lustre_lma_init(mdt_attrs, fid);
2099 lustre_lma_swab(mdt_attrs);
2100 return __osd_xattr_set(env, dt,
2101 osd_buf_get(env, mdt_attrs, sizeof *mdt_attrs),
2102 XATTR_NAME_LMA, LU_XATTR_CREATE);
/* Build an IGIF fid from the inode's number and generation. */
2107 * Helper function to form igif
2109 static inline void osd_igif_get(const struct lu_env *env, struct inode *inode,
2112 LU_IGIF_BUILD(fid, inode->i_ino, inode->i_generation);
/* Pack @fid in big-endian byte order into @pack (via the @befider
 * scratch buffer); fp_len records the payload size plus one. */
2116 * Helper function to pack the fid, ldiskfs stores fid in packed format.
2118 void osd_fid_pack(struct osd_fid_pack *pack, const struct dt_rec *fid,
2119 struct lu_fid *befider)
2121 fid_cpu_to_be(befider, (struct lu_fid *)fid);
2122 memcpy(pack->fp_area, befider, sizeof(*befider));
2123 pack->fp_len = sizeof(*befider) + 1;
/* Fill a ldiskfs_dentry_param with the magic marker and the fid in
 * big-endian order, for passing via dentry->d_fsdata to ldiskfs. */
2127 * ldiskfs supports fid in dirent, it is passed in dentry->d_fsdata.
2128 * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
2129 * To have compatilibility with 1.8 ldiskfs driver we need to have
2130 * magic number at start of fid data.
2131 * \ldiskfs_dentry_param is used only to pass fid from osd to ldiskfs.
2134 void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
2135 const struct dt_rec *fid)
2137 param->edp_magic = LDISKFS_LUFID_MAGIC;
2138 param->edp_len = sizeof(struct lu_fid) + 1;
2140 fid_cpu_to_be((struct lu_fid *)param->edp_data,
2141 (struct lu_fid *)fid);
/* Unpack a fid stored by osd_fid_pack(): validate fp_len, copy out and
 * convert from big-endian to CPU order; unexpected sizes are logged. */
2144 int osd_fid_unpack(struct lu_fid *fid, const struct osd_fid_pack *pack)
2149 switch (pack->fp_len) {
2150 case sizeof *fid + 1:
2151 memcpy(fid, pack->fp_area, sizeof *fid);
2152 fid_be_to_cpu(fid, fid);
2155 CERROR("Unexpected packed fid size: %d\n", pack->fp_len);
/* Resolve the fid for inode @ino: iget the inode, read its LMA xattr,
 * verify LMA compatibility flags, swab and copy out lma_self_fid; if
 * the inode has no LMA (-ENODATA) fall back to building an IGIF fid.
 * NOTE(review): iput/return lines are elided in this listing. */
2162 * Try to read the fid from inode ea into dt_rec, if return value
2163 * i.e. rc is +ve, then we got fid, otherwise we will have to form igif
2165 * \param fid object fid.
2167 * \retval 0 on success
2169 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
2170 __u32 ino, struct lu_fid *fid)
2172 struct osd_thread_info *info = osd_oti_get(env);
2173 struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
2174 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
2175 struct dentry *dentry = &info->oti_child_dentry;
2176 struct osd_inode_id *id = &info->oti_id;
2177 struct osd_device *dev;
2178 struct inode *inode;
2182 dev = osd_dev(ldev);
/* generation unknown at this point */
2185 id->oii_gen = OSD_OII_NOGEN;
2187 inode = osd_iget(info, dev, id);
2188 if (IS_ERR(inode)) {
2189 rc = PTR_ERR(inode);
2192 dentry->d_inode = inode;
2194 LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
2195 rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA, (void *)mdt_attrs,
2198 /* Check LMA compatibility */
2200 (mdt_attrs->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP))) {
2201 CWARN("Inode %lx: Unsupported incompat LMA feature(s) %#x\n",
2202 inode->i_ino, le32_to_cpu(mdt_attrs->lma_incompat) &
2203 ~LMA_INCOMPAT_SUPP);
2208 lustre_lma_swab(mdt_attrs);
2209 memcpy(fid, &mdt_attrs->lma_self_fid, sizeof(*fid));
/* pre-LMA inode: synthesize an igif fid instead */
2211 } else if (rc == -ENODATA) {
2212 osd_igif_get(env, inode, fid);
/* Interop-mode variant of osd_object_create(): additionally stores the
 * fid in the inode's LMA EA (skipped for igif objects under the osd
 * root), then inserts the fid into the object index. */
2221 * OSD layer object create function for interoperability mode (b11826).
2222 * This is mostly similar to osd_object_create(). Only difference being, fid is
2223 * inserted into inode ea here.
2225 * \retval 0, on success
2226 * \retval -ve, on error
2228 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
2229 struct lu_attr *attr,
2230 struct dt_allocation_hint *hint,
2231 struct dt_object_format *dof,
2234 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2235 struct osd_object *obj = osd_dt_obj(dt);
2236 struct osd_thread_info *info = osd_oti_get(env);
2241 LASSERT(osd_invariant(obj));
2242 LASSERT(!dt_object_exists(dt));
2243 LASSERT(osd_write_locked(env, obj));
2244 LASSERT(th != NULL);
2246 OSD_EXEC_OP(th, create);
2248 result = __osd_object_create(info, obj, attr, hint, dof, th);
2250 /* objects under osd root shld have igif fid, so dont add fid EA */
2251 if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL)
2252 result = osd_ea_fid_set(env, dt, fid);
2255 result = __osd_oi_insert(env, obj, fid, th);
2257 LASSERT(ergo(result == 0, dt_object_exists(dt)));
2258 LINVRNT(osd_invariant(obj));
/* Declare journal credits for a link-count increment (costed as a base
 * attribute update). */
2262 static int osd_declare_object_ref_add(const struct lu_env *env,
2263 struct dt_object *dt,
2264 struct thandle *handle)
2266 struct osd_thandle *oh;
2268 /* it's possible that object doesn't exist yet */
2269 LASSERT(handle != NULL);
2271 oh = container_of0(handle, struct osd_thandle, ot_super);
2272 LASSERT(oh->ot_handle == NULL);
2274 OSD_DECLARE_OP(oh, ref_add);
2275 oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
/* Increment the inode link count under oo_guard; sets the DIR_NLINK
 * feature for directories to allow nlink > LDISKFS_LINK_MAX (see the
 * original comment below), then dirties the inode.
 * NOTE(review): the actual nlink increment and feature-set lines are
 * elided in this listing. */
2281 * Concurrency: @dt is write locked.
2283 static int osd_object_ref_add(const struct lu_env *env,
2284 struct dt_object *dt,
2287 struct osd_object *obj = osd_dt_obj(dt);
2288 struct inode *inode = obj->oo_inode;
2290 LINVRNT(osd_invariant(obj));
2291 LASSERT(dt_object_exists(dt));
2292 LASSERT(osd_write_locked(env, obj));
2293 LASSERT(th != NULL);
2295 OSD_EXEC_OP(th, ref_add);
2298 * DIR_NLINK feature is set for compatibility reasons if:
2299 * 1) nlinks > LDISKFS_LINK_MAX, or
2300 * 2) nlinks == 2, since this indicates i_nlink was previously 1.
2302 * It is easier to always set this flag (rather than check and set),
2303 * since it has less overhead, and the superblock will be dirtied
2304 * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
2305 * do not actually care whether this flag is set or not.
2307 cfs_spin_lock(&obj->oo_guard);
2309 if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
2310 if (inode->i_nlink >= LDISKFS_LINK_MAX ||
2311 inode->i_nlink == 2)
2314 LASSERT(inode->i_nlink < LDISKFS_LINK_MAX);
2315 cfs_spin_unlock(&obj->oo_guard);
2316 inode->i_sb->s_op->dirty_inode(inode);
2317 LINVRNT(osd_invariant(obj));
/* Declare journal credits for a link-count decrement (costed as a base
 * attribute update); object must already exist. */
2322 static int osd_declare_object_ref_del(const struct lu_env *env,
2323 struct dt_object *dt,
2324 struct thandle *handle)
2326 struct osd_thandle *oh;
2328 LASSERT(dt_object_exists(dt));
2329 LASSERT(handle != NULL);
2331 oh = container_of0(handle, struct osd_thandle, ot_super);
2332 LASSERT(oh->ot_handle == NULL);
2334 OSD_DECLARE_OP(oh, ref_del);
2335 oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
/* Decrement the inode link count under oo_guard, guarding against
 * driving a many-subdir directory's nlink to 0, then dirty the inode.
 * NOTE(review): the decrement itself is elided in this listing. */
2341 * Concurrency: @dt is write locked.
2343 static int osd_object_ref_del(const struct lu_env *env,
2344 struct dt_object *dt,
2347 struct osd_object *obj = osd_dt_obj(dt);
2348 struct inode *inode = obj->oo_inode;
2350 LINVRNT(osd_invariant(obj));
2351 LASSERT(dt_object_exists(dt));
2352 LASSERT(osd_write_locked(env, obj));
2353 LASSERT(th != NULL);
2355 OSD_EXEC_OP(th, ref_del);
2357 cfs_spin_lock(&obj->oo_guard);
2358 LASSERT(inode->i_nlink > 0);
2360 /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
2361 * then the nlink count is 1. Don't let it be set to 0 or the directory
2362 * inode will be deleted incorrectly. */
2363 if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
2365 cfs_spin_unlock(&obj->oo_guard);
2366 inode->i_sb->s_op->dirty_inode(inode);
2367 LINVRNT(osd_invariant(obj));
/* Read the object's 64-bit version from the ldiskfs inode's
 * i_fs_version field into *ver. */
2373 * Get the 64-bit version for an inode.
2375 static int osd_object_version_get(const struct lu_env *env,
2376 struct dt_object *dt, dt_obj_version_t *ver)
2378 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2380 CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2381 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2382 *ver = LDISKFS_I(inode)->i_fs_version;
/* dt_object_operations::do_xattr_get. The pseudo-xattr
 * XATTR_NAME_VERSION is intercepted and served from the inode version
 * (it is not a real EA); everything else is authorized via @capa and
 * forwarded to the inode's getxattr method. */
2387 * Concurrency: @dt is read locked.
2389 static int osd_xattr_get(const struct lu_env *env,
2390 struct dt_object *dt,
2393 struct lustre_capa *capa)
2395 struct osd_object *obj = osd_dt_obj(dt);
2396 struct inode *inode = obj->oo_inode;
2397 struct osd_thread_info *info = osd_oti_get(env);
2398 struct dentry *dentry = &info->oti_obj_dentry;
2400 /* version get is not real XATTR but uses xattr API */
2401 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2402 /* for version we are just using xattr API but change inode
2404 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2405 osd_object_version_get(env, dt, buf->lb_buf);
2406 return sizeof(dt_obj_version_t);
2409 LASSERT(dt_object_exists(dt));
2410 LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
2411 LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2413 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2416 dentry->d_inode = inode;
2417 return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
/* Declare journal credits for setting an xattr; the version
 * pseudo-xattr costs nothing (no EA is written for it). */
2421 static int osd_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
2422 const struct lu_buf *buf, const char *name,
2423 int fl, struct thandle *handle)
2425 struct osd_thandle *oh;
2427 LASSERT(handle != NULL);
2429 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2430 /* no credits for version */
2434 oh = container_of0(handle, struct osd_thandle, ot_super);
2435 LASSERT(oh->ot_handle == NULL);
2437 OSD_DECLARE_OP(oh, xattr_set);
2438 oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
/* Write the 64-bit object version into the inode's i_fs_version and
 * dirty the inode so the change is journalled. */
2444 * Set the 64-bit version for object
2446 static void osd_object_version_set(const struct lu_env *env,
2447 struct dt_object *dt,
2448 dt_obj_version_t *new_version)
2450 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2452 CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
2453 *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2455 LDISKFS_I(inode)->i_fs_version = *new_version;
2456 /** Version is set after all inode operations are finished,
2457 * so we should mark it dirty here */
2458 inode->i_sb->s_op->dirty_inode(inode);
/* dt_object_operations::do_xattr_set. XATTR_NAME_VERSION is handled
 * specially (updates the inode version, not a real EA); other names go
 * through capa authorization and __osd_xattr_set(). */
2462 * Concurrency: @dt is write locked.
2464 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2465 const struct lu_buf *buf, const char *name, int fl,
2466 struct thandle *handle, struct lustre_capa *capa)
2468 LASSERT(handle != NULL);
2470 /* version set is not real XATTR */
2471 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2472 /* for version we are just using xattr API but change inode
2474 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2475 osd_object_version_set(env, dt, buf->lb_buf);
2476 return sizeof(dt_obj_version_t);
2479 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2482 OSD_EXEC_OP(handle, xattr_set);
2483 return __osd_xattr_set(env, dt, buf, name, fl);
/* dt_object_operations::do_xattr_list: authorize META_READ, then list
 * EA names via the inode's listxattr using the scratch dentry. */
2487 * Concurrency: @dt is read locked.
2489 static int osd_xattr_list(const struct lu_env *env,
2490 struct dt_object *dt,
2492 struct lustre_capa *capa)
2494 struct osd_object *obj = osd_dt_obj(dt);
2495 struct inode *inode = obj->oo_inode;
2496 struct osd_thread_info *info = osd_oti_get(env);
2497 struct dentry *dentry = &info->oti_obj_dentry;
2499 LASSERT(dt_object_exists(dt));
2500 LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
2501 LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2503 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2506 dentry->d_inode = inode;
2507 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
/* Declare journal credits for removing an xattr (same cost as an
 * xattr_set; note it declares the xattr_set op). */
2510 static int osd_declare_xattr_del(const struct lu_env *env,
2511 struct dt_object *dt,
2513 struct thandle *handle)
2515 struct osd_thandle *oh;
2517 LASSERT(dt_object_exists(dt));
2518 LASSERT(handle != NULL);
2520 oh = container_of0(handle, struct osd_thandle, ot_super);
2521 LASSERT(oh->ot_handle == NULL);
2523 OSD_DECLARE_OP(oh, xattr_set);
2524 oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
/* dt_object_operations::do_xattr_del: authorize META_WRITE, then
 * remove the named EA through the inode's removexattr method. */
2530 * Concurrency: @dt is write locked.
2532 static int osd_xattr_del(const struct lu_env *env,
2533 struct dt_object *dt,
2535 struct thandle *handle,
2536 struct lustre_capa *capa)
2538 struct osd_object *obj = osd_dt_obj(dt);
2539 struct inode *inode = obj->oo_inode;
2540 struct osd_thread_info *info = osd_oti_get(env);
2541 struct dentry *dentry = &info->oti_obj_dentry;
2544 LASSERT(dt_object_exists(dt));
2545 LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
2546 LASSERT(osd_write_locked(env, obj));
2547 LASSERT(handle != NULL);
2549 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2552 OSD_EXEC_OP(handle, xattr_set);
2554 dentry->d_inode = inode;
2555 rc = inode->i_op->removexattr(dentry, name);
/* Issue (or renew) a capability for @dt: build the capa in per-thread
 * scratch according to the configured auth mode (plain uid/gid, or
 * encrypted ids for LC_ID_CONVERT), reuse a cached unexpired capa when
 * possible, otherwise sign with the current key and cache it.
 * NOTE(review): some case/labels and return lines are elided in this
 * listing. */
2559 static struct obd_capa *osd_capa_get(const struct lu_env *env,
2560 struct dt_object *dt,
2561 struct lustre_capa *old,
2564 struct osd_thread_info *info = osd_oti_get(env);
2565 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2566 struct osd_object *obj = osd_dt_obj(dt);
2567 struct osd_device *dev = osd_obj2dev(obj);
2568 struct lustre_capa_key *key = &info->oti_capa_key;
2569 struct lustre_capa *capa = &info->oti_capa;
2570 struct obd_capa *oc;
2571 struct md_capainfo *ci;
2575 if (!dev->od_fl_capa)
2576 RETURN(ERR_PTR(-ENOENT));
2578 LASSERT(dt_object_exists(dt));
2579 LINVRNT(osd_invariant(obj));
2581 /* renewal sanity check */
2582 if (old && osd_object_auth(env, dt, old, opc))
2583 RETURN(ERR_PTR(-EACCES));
2585 ci = md_capainfo(env);
2587 RETURN(ERR_PTR(-ENOENT));
2589 switch (ci->mc_auth) {
2593 capa->lc_uid = obj->oo_inode->i_uid;
2594 capa->lc_gid = obj->oo_inode->i_gid;
2595 capa->lc_flags = LC_ID_PLAIN;
2597 case LC_ID_CONVERT: {
/* encrypt uid/gid (padded with random words) into lc_uid/lc_gid */
2600 s[0] = obj->oo_inode->i_uid;
2601 cfs_get_random_bytes(&(s[1]), sizeof(__u32));
2602 s[2] = obj->oo_inode->i_gid;
2603 cfs_get_random_bytes(&(s[3]), sizeof(__u32));
2604 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
2606 RETURN(ERR_PTR(rc));
2608 capa->lc_uid = ((__u64)d[1] << 32) | d[0];
2609 capa->lc_gid = ((__u64)d[3] << 32) | d[2];
2610 capa->lc_flags = LC_ID_CONVERT;
2614 RETURN(ERR_PTR(-EINVAL));
2617 capa->lc_fid = *fid;
2619 capa->lc_flags |= dev->od_capa_alg << 24;
2620 capa->lc_timeout = dev->od_capa_timeout;
2621 capa->lc_expiry = 0;
/* reuse an already-issued, unexpired capa when one is cached */
2623 oc = capa_lookup(dev->od_capa_hash, capa, 1);
2625 LASSERT(!capa_is_expired(oc));
2629 cfs_spin_lock(&capa_lock);
2630 *key = dev->od_capa_keys[1];
2631 cfs_spin_unlock(&capa_lock);
2633 capa->lc_keyid = key->lk_keyid;
2634 capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
2636 rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
2638 DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
2639 RETURN(ERR_PTR(rc));
2642 oc = capa_add(dev->od_capa_hash, capa);
/* Flush the object's data to disk: fake up a struct file around the
 * inode using per-thread scratch, then call its f_op->fsync under the
 * inode mutex. */
2646 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
2649 struct osd_object *obj = osd_dt_obj(dt);
2650 struct inode *inode = obj->oo_inode;
2651 struct osd_thread_info *info = osd_oti_get(env);
2652 struct dentry *dentry = &info->oti_obj_dentry;
2653 struct file *file = &info->oti_file;
2656 dentry->d_inode = inode;
2657 file->f_dentry = dentry;
2658 file->f_mapping = inode->i_mapping;
2659 file->f_op = inode->i_fop;
2660 LOCK_INODE_MUTEX(inode);
2661 rc = file->f_op->fsync(file, dentry, 0);
2662 UNLOCK_INODE_MUTEX(inode);
/* Expose the backing inode as the object's opaque data pointer. */
2666 static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
2669 struct osd_object *obj = osd_dt_obj(dt);
2672 *data = (void *)obj->oo_inode;
/* Check whether the object's existing IAM container is compatible with
 * the requested index features: directory features match by record
 * size; generic features match by key/record size ranges and by the
 * absence of unsupported flags.
 * NOTE(review): some branch/return lines are elided in this listing. */
2680 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
2681 const struct dt_index_features *feat)
2683 struct iam_descr *descr;
2685 if (osd_object_is_root(o))
2686 return feat == &dt_directory_features;
2688 LASSERT(o->oo_dir != NULL);
2690 descr = o->oo_dir->od_container.ic_descr;
2691 if (feat == &dt_directory_features) {
2692 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
2698 feat->dif_keysize_min <= descr->id_key_size &&
2699 descr->id_key_size <= feat->dif_keysize_max &&
2700 feat->dif_recsize_min <= descr->id_rec_size &&
2701 descr->id_rec_size <= feat->dif_recsize_max &&
2702 !(feat->dif_flags & (DT_IND_VARKEY |
2703 DT_IND_VARREC | DT_IND_NONUNQ)) &&
2704 ergo(feat->dif_flags & DT_IND_UPDATE,
2705 1 /* XXX check that object (and file system) is
/*
 * Initialize and set up the IAM container of directory \a dir, then, in
 * interoperability (iop) mode, pre-read the container's root block.
 * On failure the container is torn down again with iam_container_fini().
 */
2710 static int osd_iam_container_init(const struct lu_env *env,
2711 struct osd_object *obj,
2712 struct osd_directory *dir)
2714 struct iam_container *bag = &dir->od_container;
2717 result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
2721 result = iam_container_setup(bag);
/* in iop mode, cache the root buffer head for later IAM accesses */
2725 if (osd_obj2dev(obj)->od_iop_mode) {
2726 u32 ptr = bag->ic_descr->id_ops->id_root_ptr(bag);
2728 bag->ic_root_bh = ldiskfs_bread(NULL, obj->oo_inode,
2734 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
2736 iam_container_fini(bag);
2743 * Concurrency: no external locking is necessary.
/*
 * Decide which index-operations vector the object should use for the
 * requested features: EA-based (interoperability) ops for directories and
 * the root, IAM ops otherwise.  Lazily allocates and initializes the IAM
 * container under oo_guard / oo_ext_idx_sem.
 */
2745 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
2746 const struct dt_index_features *feat)
2750 struct osd_object *obj = osd_dt_obj(dt);
2751 struct osd_device *osd = osd_obj2dev(obj);
2753 LINVRNT(osd_invariant(obj));
2754 LASSERT(dt_object_exists(dt));
/* root directory always uses ldiskfs EA-based directory ops */
2756 if (osd_object_is_root(obj)) {
2757 dt->do_index_ops = &osd_index_ea_ops;
2759 } else if (feat == &dt_directory_features && osd->od_iop_mode) {
2760 dt->do_index_ops = &osd_index_ea_ops;
2761 if (S_ISDIR(obj->oo_inode->i_mode))
2766 } else if (!osd_has_index(obj)) {
2767 struct osd_directory *dir;
/* race: another thread may install oo_dir first; check under lock */
2772 cfs_spin_lock(&obj->oo_guard);
2773 if (obj->oo_dir == NULL)
2777 * Concurrent thread allocated container data.
2780 cfs_spin_unlock(&obj->oo_guard);
2782 * Now, that we have container data, serialize its
2785 cfs_down_write(&obj->oo_ext_idx_sem);
2787 * recheck under lock.
2789 if (!osd_has_index(obj))
2790 result = osd_iam_container_init(env, obj, dir);
2793 cfs_up_write(&obj->oo_ext_idx_sem);
/* verify the chosen IAM container actually supports \a feat */
2801 if (result == 0 && ea_dir == 0) {
2802 if (!osd_iam_index_probe(env, obj, feat))
2805 LINVRNT(osd_invariant(obj));
/*
 * Default dt_object operations vector (IAM/native mode); creation goes
 * through osd_object_create.
 */
2810 static const struct dt_object_operations osd_obj_ops = {
2811 .do_read_lock = osd_object_read_lock,
2812 .do_write_lock = osd_object_write_lock,
2813 .do_read_unlock = osd_object_read_unlock,
2814 .do_write_unlock = osd_object_write_unlock,
2815 .do_write_locked = osd_object_write_locked,
2816 .do_attr_get = osd_attr_get,
2817 .do_declare_attr_set = osd_declare_attr_set,
2818 .do_attr_set = osd_attr_set,
2819 .do_ah_init = osd_ah_init,
2820 .do_declare_create = osd_declare_object_create,
2821 .do_create = osd_object_create,
2822 .do_declare_destroy = osd_declare_object_destroy,
2823 .do_destroy = osd_object_destroy,
2824 .do_index_try = osd_index_try,
2825 .do_declare_ref_add = osd_declare_object_ref_add,
2826 .do_ref_add = osd_object_ref_add,
2827 .do_declare_ref_del = osd_declare_object_ref_del,
2828 .do_ref_del = osd_object_ref_del,
2829 .do_xattr_get = osd_xattr_get,
2830 .do_declare_xattr_set = osd_declare_xattr_set,
2831 .do_xattr_set = osd_xattr_set,
2832 .do_declare_xattr_del = osd_declare_xattr_del,
2833 .do_xattr_del = osd_xattr_del,
2834 .do_xattr_list = osd_xattr_list,
2835 .do_capa_get = osd_capa_get,
2836 .do_object_sync = osd_object_sync,
2837 .do_data_get = osd_data_get,
2841 * dt_object_operations for interoperability mode
2842 * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
/*
 * EA-mode dt_object operations vector; identical to osd_obj_ops except
 * that creation goes through osd_object_ea_create (interop mode, b11826).
 */
2844 static const struct dt_object_operations osd_obj_ea_ops = {
2845 .do_read_lock = osd_object_read_lock,
2846 .do_write_lock = osd_object_write_lock,
2847 .do_read_unlock = osd_object_read_unlock,
2848 .do_write_unlock = osd_object_write_unlock,
2849 .do_write_locked = osd_object_write_locked,
2850 .do_attr_get = osd_attr_get,
2851 .do_declare_attr_set = osd_declare_attr_set,
2852 .do_attr_set = osd_attr_set,
2853 .do_ah_init = osd_ah_init,
2854 .do_declare_create = osd_declare_object_create,
2855 .do_create = osd_object_ea_create,
2856 .do_declare_destroy = osd_declare_object_destroy,
2857 .do_destroy = osd_object_destroy,
2858 .do_index_try = osd_index_try,
2859 .do_declare_ref_add = osd_declare_object_ref_add,
2860 .do_ref_add = osd_object_ref_add,
2861 .do_declare_ref_del = osd_declare_object_ref_del,
2862 .do_ref_del = osd_object_ref_del,
2863 .do_xattr_get = osd_xattr_get,
2864 .do_declare_xattr_set = osd_declare_xattr_set,
2865 .do_xattr_set = osd_xattr_set,
2866 .do_declare_xattr_del = osd_declare_xattr_del,
2867 .do_xattr_del = osd_xattr_del,
2868 .do_xattr_list = osd_xattr_list,
2869 .do_capa_get = osd_capa_get,
2870 .do_object_sync = osd_object_sync,
2871 .do_data_get = osd_data_get,
2879 * XXX: Another layering violation for now.
2881 * We don't want to use ->f_op->read methods, because generic file write
2883 * - serializes on ->i_sem, and
2885 * - does a lot of extra work like balance_dirty_pages(),
2887 * which doesn't work for globally shared files like /last-received.
/*
 * Read a short (inode-body) symlink target straight out of i_data.
 * Caller guarantees buflen fits within i_data (checked in osd_read()).
 */
2889 static int osd_ldiskfs_readlink(struct inode *inode, char *buffer, int buflen)
2891 struct ldiskfs_inode_info *ei = LDISKFS_I(inode);
2893 memcpy(buffer, (char*)ei->i_data, buflen);
/*
 * Read up to \a size bytes at *offs from \a inode via direct buffer-head
 * access (ldiskfs_bread), bypassing the generic file read path.
 * The request is clamped to i_size under i_lock to avoid reading past EOF.
 */
2898 static int osd_ldiskfs_read(struct inode *inode, void *buf, int size,
2901 struct buffer_head *bh;
2902 unsigned long block;
2909 /* prevent reading after eof */
2910 spin_lock(&inode->i_lock);
2911 if (i_size_read(inode) < *offs + size) {
2912 size = i_size_read(inode) - *offs;
2913 spin_unlock(&inode->i_lock);
2915 CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
2916 i_size_read(inode), *offs);
2918 } else if (size == 0) {
2922 spin_unlock(&inode->i_lock);
2925 blocksize = 1 << inode->i_blkbits;
/* split the request on block boundaries: block index + offset-in-block */
2928 block = *offs >> inode->i_blkbits;
2929 boffs = *offs & (blocksize - 1);
2930 csize = min(blocksize - boffs, size);
2931 bh = ldiskfs_bread(NULL, inode, block, 0, &err);
2933 CERROR("can't read block: %d\n", err);
2937 memcpy(buf, bh->b_data + boffs, csize);
/*
 * dt_body_operations::dbo_read — capability-checked object read.
 * Small symlinks are read from the inode body; everything else goes
 * through the block-level reader osd_ldiskfs_read().
 */
2947 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
2948 struct lu_buf *buf, loff_t *pos,
2949 struct lustre_capa *capa)
2951 struct osd_object *obj = osd_dt_obj(dt);
2952 struct inode *inode = obj->oo_inode;
2955 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
2958 /* Read small symlink from inode body as we need to maintain correct
2959 * on-disk symlinks for ldiskfs.
2961 if (S_ISLNK(obj->oo_dt.do_lu.lo_header->loh_attr) &&
2962 (buf->lb_len <= sizeof (LDISKFS_I(inode)->i_data)))
2963 rc = osd_ldiskfs_readlink(inode, buf->lb_buf, buf->lb_len);
2965 rc = osd_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
/*
 * Store a short symlink target directly in the inode body (i_data),
 * update both in-core and on-disk sizes, and mark the inode dirty.
 */
2970 static int osd_ldiskfs_writelink(struct inode *inode, char *buffer, int buflen)
2973 memcpy((char*)&LDISKFS_I(inode)->i_data, (char *)buffer,
2975 LDISKFS_I(inode)->i_disksize = buflen;
2976 i_size_write(inode, buflen);
2977 inode->i_sb->s_op->dirty_inode(inode);
/*
 * Journaled block-level write: copy \a bufsize bytes at *offs into the
 * inode, block by block, under an open ldiskfs transaction \a handle.
 * Each touched buffer gets journal write access before the memcpy and is
 * marked dirty metadata afterwards; i_size/i_disksize are extended when
 * the write grows the file.
 */
2982 static int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
2983 loff_t *offs, handle_t *handle)
2985 struct buffer_head *bh = NULL;
2986 loff_t offset = *offs;
2987 loff_t new_size = i_size_read(inode);
2988 unsigned long block;
2989 int blocksize = 1 << inode->i_blkbits;
2993 int dirty_inode = 0;
2995 while (bufsize > 0) {
/* per-iteration: map the target block (create=1) and bound the copy
 * to what is left of the current block */
2999 block = offset >> inode->i_blkbits;
3000 boffs = offset & (blocksize - 1);
3001 size = min(blocksize - boffs, bufsize);
3002 bh = ldiskfs_bread(handle, inode, block, 1, &err);
3004 CERROR("can't read/create block: %d\n", err);
3008 err = ldiskfs_journal_get_write_access(handle, bh);
3010 CERROR("journal_get_write_access() returned error %d\n",
3014 LASSERTF(boffs + size <= bh->b_size,
3015 "boffs %d size %d bh->b_size %lu",
3016 boffs, size, (unsigned long)bh->b_size);
3017 memcpy(bh->b_data + boffs, buf, size);
3018 err = ldiskfs_journal_dirty_metadata(handle, bh);
/* track the furthest byte written so sizes can be fixed up below */
3022 if (offset + size > new_size)
3023 new_size = offset + size;
3031 /* correct in-core and on-disk sizes */
3032 if (new_size > i_size_read(inode)) {
/* recheck under i_lock — size may have grown concurrently */
3033 spin_lock(&inode->i_lock);
3034 if (new_size > i_size_read(inode))
3035 i_size_write(inode, new_size);
3036 if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
3037 LDISKFS_I(inode)->i_disksize = i_size_read(inode);
3040 spin_unlock(&inode->i_lock);
3042 inode->i_sb->s_op->dirty_inode(inode);
/*
 * Declare transaction credits for a future body write: pick a credit
 * count based on the (hacky) llog size sentinels, then add quota
 * declarations for the inode's owner and group when the inode exists.
 */
3050 static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
3051 const loff_t size, loff_t pos,
3052 struct thandle *handle)
3054 struct osd_thandle *oh;
3057 LASSERT(handle != NULL);
3059 oh = container_of0(handle, struct osd_thandle, ot_super);
/* declaration phase only: the journal handle must not be open yet */
3060 LASSERT(oh->ot_handle == NULL);
3062 /* XXX: size == 0 or INT_MAX indicating a catalog header update or
3063 * llog write, see comment in mdd_declare_llog_record().
3065 * This hack should be removed in 2.3
3067 if (size == DECLARE_LLOG_REWRITE)
3069 else if (size == DECLARE_LLOG_WRITE)
3072 credits = osd_dto_credits_noquota[DTO_WRITE_BLOCK];
3074 OSD_DECLARE_OP(oh, write);
3075 oh->ot_credits += credits;
/* object may not be instantiated yet (e.g. create+write in one tx) */
3077 if (osd_dt_obj(dt)->oo_inode == NULL)
3080 osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
3081 osd_dt_obj(dt)->oo_inode);
3082 osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
3083 osd_dt_obj(dt)->oo_inode);
/*
 * dt_body_operations::dbo_write — capability-checked object write inside
 * an already-started transaction.  Small symlinks go into the inode body;
 * other data goes through the journaled block writer.  On success the
 * full requested length is reported.  Quota enforcement is bypassed via
 * CFS_CAP_SYS_RESOURCE when the caller asked to ignore quota.
 */
3087 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
3088 const struct lu_buf *buf, loff_t *pos,
3089 struct thandle *handle, struct lustre_capa *capa,
3092 struct osd_object *obj = osd_dt_obj(dt);
3093 struct inode *inode = obj->oo_inode;
3094 struct osd_thandle *oh;
3096 #ifdef HAVE_QUOTA_SUPPORT
/* remember current capabilities so they can be restored afterwards */
3097 cfs_cap_t save = cfs_curproc_cap_pack();
3100 LASSERT(handle != NULL);
3102 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
3105 oh = container_of(handle, struct osd_thandle, ot_super);
3106 LASSERT(oh->ot_handle->h_transaction != NULL);
3107 #ifdef HAVE_QUOTA_SUPPORT
3109 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
3111 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
3113 /* Write small symlink to inode body as we need to maintain correct
3114 * on-disk symlinks for ldiskfs.
3116 if(S_ISLNK(obj->oo_dt.do_lu.lo_header->loh_attr) &&
3117 (buf->lb_len < sizeof (LDISKFS_I(inode)->i_data)))
3118 result = osd_ldiskfs_writelink(inode, buf->lb_buf, buf->lb_len);
3120 result = osd_ldiskfs_write_record(inode, buf->lb_buf,
3123 #ifdef HAVE_QUOTA_SUPPORT
3124 cfs_curproc_cap_unpack(save);
3127 result = buf->lb_len;
3132 * in some cases we may need declare methods for objects being created
3133 * e.g., when we create symlink
/* Body ops for not-yet-created objects: only write declaration is valid. */
3135 static const struct dt_body_operations osd_body_ops_new = {
3136 .dbo_declare_write = osd_declare_write,
/* Full body ops for existing objects: read, declare-write and write. */
3139 static const struct dt_body_operations osd_body_ops = {
3140 .dbo_read = osd_read,
3141 .dbo_declare_write = osd_declare_write,
3142 .dbo_write = osd_write
/*
 * Declare credits for deleting one (key, value) pair from an IAM index.
 */
3145 static int osd_index_declare_iam_delete(const struct lu_env *env,
3146 struct dt_object *dt,
3147 const struct dt_key *key,
3148 struct thandle *handle)
3150 struct osd_thandle *oh;
3152 oh = container_of0(handle, struct osd_thandle, ot_super);
/* declaration phase only: journal handle must not be open yet */
3153 LASSERT(oh->ot_handle == NULL);
3155 OSD_DECLARE_OP(oh, delete);
3156 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
3162 * delete a (key, value) pair from index \a dt specified by \a key
3164 * \param dt osd index object
3165 * \param key key for index
3166 * \param rec record reference
3167 * \param handle transaction handler
3170 * \retval -ve failure
/*
 * Delete the entry for \a key from the object's IAM container, inside
 * the caller's transaction, after a capability check.
 */
3173 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
3174 const struct dt_key *key, struct thandle *handle,
3175 struct lustre_capa *capa)
3177 struct osd_object *obj = osd_dt_obj(dt);
3178 struct osd_thandle *oh;
3179 struct iam_path_descr *ipd;
3180 struct iam_container *bag = &obj->oo_dir->od_container;
3185 LINVRNT(osd_invariant(obj));
3186 LASSERT(dt_object_exists(dt));
3187 LASSERT(bag->ic_object == obj->oo_inode);
3188 LASSERT(handle != NULL);
3190 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
3193 OSD_EXEC_OP(handle, delete);
/* per-thread iam path descriptor; released via osd_ipd_put() below */
3195 ipd = osd_idx_ipd_get(env, bag);
3196 if (unlikely(ipd == NULL))
3199 oh = container_of0(handle, struct osd_thandle, ot_super);
/* execution phase: the journal handle must already be open */
3200 LASSERT(oh->ot_handle != NULL);
3201 LASSERT(oh->ot_handle->h_transaction != NULL);
3203 rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
3204 osd_ipd_put(env, bag, ipd);
3205 LINVRNT(osd_invariant(obj));
/*
 * Declare credits (and quota) for deleting a directory entry in EA
 * (interoperability) mode.
 */
3209 static int osd_index_declare_ea_delete(const struct lu_env *env,
3210 struct dt_object *dt,
3211 const struct dt_key *key,
3212 struct thandle *handle)
3214 struct osd_thandle *oh;
3216 LASSERT(dt_object_exists(dt));
3217 LASSERT(handle != NULL);
3219 oh = container_of0(handle, struct osd_thandle, ot_super);
3220 LASSERT(oh->ot_handle == NULL);
3222 OSD_DECLARE_OP(oh, delete);
3223 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
/* quota is charged to the directory inode's owner and group */
3225 LASSERT(osd_dt_obj(dt)->oo_inode);
3226 osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
3227 osd_dt_obj(dt)->oo_inode);
3228 osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
3229 osd_dt_obj(dt)->oo_inode);
/*
 * Extract the packed fid stored after the name in a Lustre-extended
 * ldiskfs dirent (LDISKFS_DIRENT_LUFID) and unpack it into \a fid.
 */
3234 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
3237 struct osd_fid_pack *rec;
3240 if (de->file_type & LDISKFS_DIRENT_LUFID) {
/* packed fid sits right after the NUL-terminated entry name */
3241 rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
3242 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
3248 * Index delete function for interoperability mode (b11826).
3249 * It will remove the directory entry added by osd_index_ea_insert().
3250 * This entry is needed to maintain name->fid mapping.
3252 * \param key, key i.e. file entry to be deleted
3254 * \retval 0, on success
3255 * \retval -ve, on error
/*
 * EA-mode index delete (interop, b11826): find the directory entry named
 * by \a key and remove it with ldiskfs_delete_entry(), serialized either
 * by an htree lock (when available) or by oo_ext_idx_sem.
 */
3257 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
3258 const struct dt_key *key, struct thandle *handle,
3259 struct lustre_capa *capa)
3261 struct osd_object *obj = osd_dt_obj(dt);
3262 struct inode *dir = obj->oo_inode;
3263 struct dentry *dentry;
3264 struct osd_thandle *oh;
3265 struct ldiskfs_dir_entry_2 *de;
3266 struct buffer_head *bh;
3267 struct htree_lock *hlock = NULL;
3273 LINVRNT(osd_invariant(obj));
3274 LASSERT(dt_object_exists(dt));
3275 LASSERT(handle != NULL);
3277 OSD_EXEC_OP(handle, delete);
3279 oh = container_of(handle, struct osd_thandle, ot_super);
3280 LASSERT(oh->ot_handle != NULL);
3281 LASSERT(oh->ot_handle->h_transaction != NULL);
3283 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
/* the key is the entry name; build a transient child dentry for it */
3286 dentry = osd_child_dentry_get(env, obj,
3287 (char *)key, strlen((char *)key));
/* prefer fine-grained htree locking; fall back to the rw-semaphore */
3289 if (obj->oo_hl_head != NULL) {
3290 hlock = osd_oti_get(env)->oti_hlock;
3291 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3292 dir, LDISKFS_HLOCK_DEL);
3294 cfs_down_write(&obj->oo_ext_idx_sem);
3297 bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
3299 rc = ldiskfs_delete_entry(oh->ot_handle,
3306 ldiskfs_htree_unlock(hlock);
3308 cfs_up_write(&obj->oo_ext_idx_sem);
3310 LASSERT(osd_invariant(obj));
3315 * Lookup index for \a key and copy record to \a rec.
3317 * \param dt osd index object
3318 * \param key key for index
3319 * \param rec record reference
3321 * \retval +ve success : exact mach
3322 * \retval 0 return record with key not greater than \a key
3323 * \retval -ve failure
/*
 * Look up \a key in the IAM container and copy the matching record to
 * \a rec.  For directories the on-disk record is a packed fid, so it is
 * staged in a per-thread buffer and unpacked into the caller's rec.
 */
3325 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
3326 struct dt_rec *rec, const struct dt_key *key,
3327 struct lustre_capa *capa)
3329 struct osd_object *obj = osd_dt_obj(dt);
3330 struct iam_path_descr *ipd;
3331 struct iam_container *bag = &obj->oo_dir->od_container;
3332 struct osd_thread_info *oti = osd_oti_get(env);
3333 struct iam_iterator *it = &oti->oti_idx_it;
3334 struct iam_rec *iam_rec;
3338 LASSERT(osd_invariant(obj));
3339 LASSERT(dt_object_exists(dt));
3340 LASSERT(bag->ic_object == obj->oo_inode);
3342 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
3345 ipd = osd_idx_ipd_get(env, bag);
3349 /* got ipd now we can start iterator. */
3350 iam_it_init(it, bag, 0, ipd);
3352 rc = iam_it_get(it, (struct iam_key *)key);
/* directories store packed fids: copy via scratch buffer, then unpack */
3354 if (S_ISDIR(obj->oo_inode->i_mode))
3355 iam_rec = (struct iam_rec *)oti->oti_ldp;
3357 iam_rec = (struct iam_rec *) rec;
3359 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
3360 if (S_ISDIR(obj->oo_inode->i_mode))
3361 osd_fid_unpack((struct lu_fid *) rec,
3362 (struct osd_fid_pack *)iam_rec);
3366 osd_ipd_put(env, bag, ipd);
3368 LINVRNT(osd_invariant(obj));
/*
 * Declare credits for inserting one (key, value) pair into an IAM index.
 */
3373 static int osd_index_declare_iam_insert(const struct lu_env *env,
3374 struct dt_object *dt,
3375 const struct dt_rec *rec,
3376 const struct dt_key *key,
3377 struct thandle *handle)
3379 struct osd_thandle *oh;
3381 LASSERT(dt_object_exists(dt));
3382 LASSERT(handle != NULL);
3384 oh = container_of0(handle, struct osd_thandle, ot_super);
3385 LASSERT(oh->ot_handle == NULL);
3387 OSD_DECLARE_OP(oh, insert);
3388 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
3394 * Inserts (key, value) pair in \a dt index object.
3396 * \param dt osd index object
3397 * \param key key for index
3398 * \param rec record reference
3399 * \param th transaction handler
3402 * \retval -ve failure
/*
 * Insert (key, rec) into the object's IAM container within the caller's
 * transaction.  Directory records are fid-packed into a per-thread buffer
 * first; quota enforcement may be bypassed via CFS_CAP_SYS_RESOURCE.
 */
3404 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
3405 const struct dt_rec *rec, const struct dt_key *key,
3406 struct thandle *th, struct lustre_capa *capa,
3409 struct osd_object *obj = osd_dt_obj(dt);
3410 struct iam_path_descr *ipd;
3411 struct osd_thandle *oh;
3412 struct iam_container *bag = &obj->oo_dir->od_container;
3413 #ifdef HAVE_QUOTA_SUPPORT
/* remember current capabilities so they can be restored afterwards */
3414 cfs_cap_t save = cfs_curproc_cap_pack();
3416 struct osd_thread_info *oti = osd_oti_get(env);
3417 struct iam_rec *iam_rec = (struct iam_rec *)oti->oti_ldp;
3422 LINVRNT(osd_invariant(obj));
3423 LASSERT(dt_object_exists(dt));
3424 LASSERT(bag->ic_object == obj->oo_inode);
3425 LASSERT(th != NULL);
3427 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3430 OSD_EXEC_OP(th, insert);
3432 ipd = osd_idx_ipd_get(env, bag);
3433 if (unlikely(ipd == NULL))
3436 oh = container_of0(th, struct osd_thandle, ot_super);
3437 LASSERT(oh->ot_handle != NULL);
3438 LASSERT(oh->ot_handle->h_transaction != NULL);
3439 #ifdef HAVE_QUOTA_SUPPORT
3441 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
3443 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
/* directories store packed fids; other objects store rec verbatim */
3445 if (S_ISDIR(obj->oo_inode->i_mode))
3446 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
3448 iam_rec = (struct iam_rec *) rec;
3449 rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
3451 #ifdef HAVE_QUOTA_SUPPORT
3452 cfs_curproc_cap_unpack(save);
3454 osd_ipd_put(env, bag, ipd);
3455 LINVRNT(osd_invariant(obj));
3460 * Calls ldiskfs_add_entry() to add directory entry
3461 * into the directory. This is required for
3462 * interoperability mode (b11826)
3464 * \retval 0, on success
3465 * \retval -ve, on error
/*
 * Low-level EA-mode record add: build a child dentry for \a name, attach
 * the fid as a ldiskfs dentry parameter when the fid type supports it,
 * and call osd_ldiskfs_add_entry() under the open transaction.
 */
3467 static int __osd_ea_add_rec(struct osd_thread_info *info,
3468 struct osd_object *pobj,
3469 struct inode *cinode,
3471 const struct dt_rec *fid,
3472 struct htree_lock *hlock,
3475 struct ldiskfs_dentry_param *ldp;
3476 struct dentry *child;
3477 struct osd_thandle *oth;
3480 oth = container_of(th, struct osd_thandle, ot_super);
3481 LASSERT(oth->ot_handle != NULL);
3482 LASSERT(oth->ot_handle->h_transaction != NULL);
3484 child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
/* only igif/normal fids get stored in the dirent; others get no fid */
3486 if (fid_is_igif((struct lu_fid *)fid) ||
3487 fid_is_norm((struct lu_fid *)fid)) {
3488 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3489 osd_get_ldiskfs_dirent_param(ldp, fid);
3490 child->d_fsdata = (void*) ldp;
3492 child->d_fsdata = NULL;
3493 rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
3499 * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
3500 * into the directory.Also sets flags into osd object to
3501 * indicate dot and dotdot are created. This is required for
3502 * interoperability mode (b11826)
3504 * \param dir directory for dot and dotdot fixup.
3505 * \param obj child object for linking
3507 * \retval 0, on success
3508 * \retval -ve, on error
/*
 * Create "." and ".." entries in directory \a dir (interop mode, b11826)
 * and record their creation in oo_compat_dot[dot]_created.  "." is only
 * flagged (ldiskfs creates it implicitly); ".." either goes through
 * ldiskfs_add_dot_dotdot() or, on rename where it already exists,
 * through __osd_ea_add_rec().
 */
3510 static int osd_add_dot_dotdot(struct osd_thread_info *info,
3511 struct osd_object *dir,
3512 struct inode *parent_dir, const char *name,
3513 const struct dt_rec *dot_fid,
3514 const struct dt_rec *dot_dot_fid,
3517 struct inode *inode = dir->oo_inode;
3518 struct ldiskfs_dentry_param *dot_ldp;
3519 struct ldiskfs_dentry_param *dot_dot_ldp;
3520 struct osd_thandle *oth;
3523 oth = container_of(th, struct osd_thandle, ot_super);
3524 LASSERT(oth->ot_handle->h_transaction != NULL);
3525 LASSERT(S_ISDIR(dir->oo_inode->i_mode));
3527 if (strcmp(name, dot) == 0) {
3528 if (dir->oo_compat_dot_created) {
/* "." refers to the directory itself */
3531 LASSERT(inode == parent_dir);
3532 dir->oo_compat_dot_created = 1;
3535 } else if(strcmp(name, dotdot) == 0) {
3536 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3537 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
/* ".." can only be added after "." exists */
3539 if (!dir->oo_compat_dot_created)
3541 if (fid_seq((struct lu_fid *)dot_fid) >= FID_SEQ_NORMAL) {
3542 osd_get_ldiskfs_dirent_param(dot_ldp, dot_fid);
3543 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3548 /* in case of rename, dotdot is already created */
3549 if (dir->oo_compat_dotdot_created) {
3550 return __osd_ea_add_rec(info, dir, parent_dir, name,
3551 dot_dot_fid, NULL, th);
3554 result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3555 inode, dot_ldp, dot_dot_ldp);
3557 dir->oo_compat_dotdot_created = 1;
3565 * It will call the appropriate osd_add* function and return the
3566 * value, return by respective functions.
/*
 * EA-mode add dispatcher: route "."/".." to osd_add_dot_dotdot() and all
 * other names to __osd_ea_add_rec(), taking the htree lock when the
 * parent has one, otherwise oo_ext_idx_sem.
 */
3568 static int osd_ea_add_rec(const struct lu_env *env,
3569 struct osd_object *pobj,
3570 struct inode *cinode,
3572 const struct dt_rec *fid,
3575 struct osd_thread_info *info = osd_oti_get(env);
3576 struct htree_lock *hlock;
3579 hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
/* name is "." or ".."? these have a dedicated creation path */
3581 if (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' &&
3583 if (hlock != NULL) {
3584 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3587 cfs_down_write(&pobj->oo_ext_idx_sem);
3589 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
3590 (struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
3593 if (hlock != NULL) {
3594 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3595 pobj->oo_inode, LDISKFS_HLOCK_ADD);
3597 cfs_down_write(&pobj->oo_ext_idx_sem);
3600 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
3604 ldiskfs_htree_unlock(hlock);
3606 cfs_up_write(&pobj->oo_ext_idx_sem);
3612 * Calls ->lookup() to find dentry. From dentry get inode and
3613 * read inode's ea to get fid. This is required for interoperability
3616 * \retval 0, on success
3617 * \retval -ve, on error
/*
 * EA-mode lookup: find the directory entry for \a key, then obtain the
 * fid either from the dirent's embedded packed fid or, failing that,
 * from the target inode's EA (osd_ea_fid_get).  Read-side locking via
 * htree lock or oo_ext_idx_sem.
 */
3619 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
3620 struct dt_rec *rec, const struct dt_key *key)
3622 struct inode *dir = obj->oo_inode;
3623 struct dentry *dentry;
3624 struct ldiskfs_dir_entry_2 *de;
3625 struct buffer_head *bh;
3626 struct lu_fid *fid = (struct lu_fid *) rec;
3627 struct htree_lock *hlock = NULL;
3631 LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
3633 dentry = osd_child_dentry_get(env, obj,
3634 (char *)key, strlen((char *)key));
3636 if (obj->oo_hl_head != NULL) {
3637 hlock = osd_oti_get(env)->oti_hlock;
3638 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3639 dir, LDISKFS_HLOCK_LOOKUP);
3641 cfs_down_read(&obj->oo_ext_idx_sem);
3644 bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
3646 ino = le32_to_cpu(de->inode);
/* fast path: fid packed in the dirent itself */
3647 rc = osd_get_fid_from_dentry(de, rec);
3649 /* done with de, release bh */
/* slow path: read the fid from the child inode's EA */
3652 rc = osd_ea_fid_get(env, obj, ino, fid);
3658 ldiskfs_htree_unlock(hlock);
3660 cfs_up_read(&obj->oo_ext_idx_sem);
3665 * Find the osd object for given fid.
3667 * \param fid need to find the osd object having this fid
3669 * \retval osd_object on success
3670 * \retval -ve on error
/*
 * Find (and reference) the osd_object for \a fid via lu_object_find().
 * Returns the osd object on success or an ERR_PTR; the reference is
 * dropped again on every failure path.  Release with osd_object_put().
 */
3672 struct osd_object *osd_object_find(const struct lu_env *env,
3673 struct dt_object *dt,
3674 const struct lu_fid *fid)
3676 struct lu_device *ludev = dt->do_lu.lo_dev;
3677 struct osd_object *child = NULL;
3678 struct lu_object *luch;
3679 struct lu_object *lo;
3681 luch = lu_object_find(env, ludev, fid, NULL);
3682 if (!IS_ERR(luch)) {
3683 if (lu_object_exists(luch)) {
/* locate the osd slice of the compound lu_object */
3684 lo = lu_object_locate(luch->lo_header, ludev->ld_type);
3686 child = osd_obj(lo);
3688 LU_OBJECT_DEBUG(D_ERROR, env, luch,
3689 "lu_object can't be located"
3690 ""DFID"\n", PFID(fid));
3692 if (child == NULL) {
3693 lu_object_put(env, luch);
3694 CERROR("Unable to get osd_object\n");
3695 child = ERR_PTR(-ENOENT);
3698 LU_OBJECT_DEBUG(D_ERROR, env, luch,
3699 "lu_object does not exists "DFID"\n",
3701 child = ERR_PTR(-ENOENT);
3704 child = (void *)luch;
3710 * Put the osd object once done with it.
3712 * \param obj osd object that needs to be put
/* Drop the reference taken by osd_object_find(). */
3714 static inline void osd_object_put(const struct lu_env *env,
3715 struct osd_object *obj)
3717 lu_object_put(env, &obj->oo_dt.do_lu);
/*
 * Declare credits (and quota) for inserting a directory entry in EA
 * (interoperability) mode.
 */
3720 static int osd_index_declare_ea_insert(const struct lu_env *env,
3721 struct dt_object *dt,
3722 const struct dt_rec *rec,
3723 const struct dt_key *key,
3724 struct thandle *handle)
3726 struct osd_thandle *oh;
3728 LASSERT(dt_object_exists(dt));
3729 LASSERT(handle != NULL);
3731 oh = container_of0(handle, struct osd_thandle, ot_super);
3732 LASSERT(oh->ot_handle == NULL);
3734 OSD_DECLARE_OP(oh, insert);
3735 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
/* quota is charged to the directory inode's owner and group */
3737 LASSERT(osd_dt_obj(dt)->oo_inode);
3738 osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
3739 osd_dt_obj(dt)->oo_inode);
3740 osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
3741 osd_dt_obj(dt)->oo_inode);
3747 * Index add function for interoperability mode (b11826).
3748 * It will add the directory entry.This entry is needed to
3749 * maintain name->fid mapping.
3751 * \param key it is key i.e. file entry to be inserted
3752 * \param rec it is value of given key i.e. fid
3754 * \retval 0, on success
3755 * \retval -ve, on error
/*
 * EA-mode index insert (interop, b11826): resolve the child object for
 * the fid in \a rec, then add a directory entry name->fid through
 * osd_ea_add_rec().  Quota may be bypassed via CFS_CAP_SYS_RESOURCE.
 */
3757 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
3758 const struct dt_rec *rec,
3759 const struct dt_key *key, struct thandle *th,
3760 struct lustre_capa *capa, int ignore_quota)
3762 struct osd_object *obj = osd_dt_obj(dt);
3763 struct lu_fid *fid = (struct lu_fid *) rec;
3764 const char *name = (const char *)key;
3765 struct osd_object *child;
3766 #ifdef HAVE_QUOTA_SUPPORT
/* remember current capabilities so they can be restored afterwards */
3767 cfs_cap_t save = cfs_curproc_cap_pack();
3773 LASSERT(osd_invariant(obj));
3774 LASSERT(dt_object_exists(dt));
3775 LASSERT(th != NULL);
3777 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
/* take a reference on the child; released via osd_object_put() below */
3780 child = osd_object_find(env, dt, fid);
3781 if (!IS_ERR(child)) {
3782 #ifdef HAVE_QUOTA_SUPPORT
3784 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
3786 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
3788 rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
3789 #ifdef HAVE_QUOTA_SUPPORT
3790 cfs_curproc_cap_unpack(save);
3792 osd_object_put(env, child);
3794 rc = PTR_ERR(child);
3797 LASSERT(osd_invariant(obj));
3802 * Initialize osd Iterator for given osd index object.
3804 * \param dt osd index object
/*
 * Allocate and initialize an IAM index iterator for \a dt after a
 * capability check; fails with -EACCES or -ENOMEM via ERR_PTR.
 */
3807 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
3808 struct dt_object *dt,
3810 struct lustre_capa *capa)
3812 struct osd_it_iam *it;
3813 struct osd_thread_info *oti = osd_oti_get(env);
3814 struct osd_object *obj = osd_dt_obj(dt);
3815 struct lu_object *lo = &dt->do_lu;
3816 struct iam_path_descr *ipd;
3817 struct iam_container *bag = &obj->oo_dir->od_container;
3819 LASSERT(lu_object_exists(lo));
3821 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
3822 return ERR_PTR(-EACCES);
3825 ipd = osd_it_ipd_get(env, bag);
3826 if (likely(ipd != NULL)) {
3830 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
3831 return (struct dt_it *)it;
3833 return ERR_PTR(-ENOMEM);
3837 * free given Iterator.
/*
 * Tear down an IAM iterator: finalize it, release its path descriptor,
 * and drop the object reference held by the iterator.
 */
3840 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
3842 struct osd_it_iam *it = (struct osd_it_iam *)di;
3843 struct osd_object *obj = it->oi_obj;
3845 iam_it_fini(&it->oi_it);
3846 osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
3847 lu_object_put(env, &obj->oo_dt.do_lu);
3851 * Move Iterator to record specified by \a key
3853 * \param di osd iterator
3854 * \param key key for index
3856 * \retval +ve di points to record with least key not larger than key
3857 * \retval 0 di points to exact matched key
3858 * \retval -ve failure
/* Position the IAM iterator at (or near) \a key; thin iam_it_get wrapper. */
3861 static int osd_it_iam_get(const struct lu_env *env,
3862 struct dt_it *di, const struct dt_key *key)
3864 struct osd_it_iam *it = (struct osd_it_iam *)di;
3866 return iam_it_get(&it->oi_it, (const struct iam_key *)key);
3872 * \param di osd iterator
/* Release the iterator's current position; thin iam_it_put wrapper. */
3875 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
3877 struct osd_it_iam *it = (struct osd_it_iam *)di;
3879 iam_it_put(&it->oi_it);
3883 * Move iterator by one record
3885 * \param di osd iterator
3887 * \retval +1 end of container reached
3889 * \retval -ve failure
/* Advance the iterator one record; thin iam_it_next wrapper. */
3892 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
3894 struct osd_it_iam *it = (struct osd_it_iam *)di;
3896 return iam_it_next(&it->oi_it);
3900 * Return pointer to the key under iterator.
/* Return the key under the iterator; thin iam_it_key_get wrapper. */
3903 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
3904 const struct dt_it *di)
3906 struct osd_it_iam *it = (struct osd_it_iam *)di;
3908 return (struct dt_key *)iam_it_key_get(&it->oi_it);
3912 * Return size of key under iterator (in bytes)
/* Return the size in bytes of the key under the iterator. */
3915 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
3917 struct osd_it_iam *it = (struct osd_it_iam *)di;
3919 return iam_it_key_size(&it->oi_it);
/*
 * Append optional Lustre attributes after the dirent name: when LUDA_TYPE
 * is requested, store the file type (aligned to luda_type) and flag it in
 * lde_attrs; lde_attrs is finally converted to little-endian.
 */
3922 static inline void osd_it_append_attrs(struct lu_dirent*ent,
3927 struct luda_type *lt;
3928 const unsigned align = sizeof(struct luda_type) - 1;
3930 /* check if file type is required */
3931 if (attr & LUDA_TYPE) {
/* round the name length up so luda_type is naturally aligned */
3932 len = (len + align) & ~align;
3934 lt = (void *) ent->lde_name + len;
3935 lt->lt_type = cpu_to_le16(CFS_DTTOIF(type));
3936 ent->lde_attrs |= LUDA_TYPE;
3939 ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
3943 * build lu direct from backend fs dirent.
/*
 * Fill a lu_dirent from backend dirent data: little-endian fid, hash,
 * reclen, name and namelen, then append optional attributes.
 */
3946 static inline void osd_it_pack_dirent(struct lu_dirent *ent,
3954 fid_cpu_to_le(&ent->lde_fid, fid);
3955 ent->lde_attrs = LUDA_FID;
3957 ent->lde_hash = cpu_to_le64(offset);
3958 ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
/* name is copied unterminated; lde_namelen carries the length */
3960 strncpy(ent->lde_name, name, namelen);
3961 ent->lde_namelen = cpu_to_le16(namelen);
3963 /* append lustre attributes */
3964 osd_it_append_attrs(ent, attr, namelen, type);
3968 * Return pointer to the record under iterator.
/*
 * Materialize the record under the IAM iterator as a lu_dirent: fetch
 * key (name), packed-fid record and hash cookie, unpack the fid, and
 * pack everything via osd_it_pack_dirent().
 */
3970 static int osd_it_iam_rec(const struct lu_env *env,
3971 const struct dt_it *di,
3972 struct dt_rec *dtrec,
3975 struct osd_it_iam *it = (struct osd_it_iam *)di;
3976 struct osd_thread_info *info = osd_oti_get(env);
3977 struct lu_fid *fid = &info->oti_fid;
3978 const struct osd_fid_pack *rec;
3979 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
3985 name = (char *)iam_it_key_get(&it->oi_it);
3987 RETURN(PTR_ERR(name));
3989 namelen = iam_it_key_size(&it->oi_it);
3991 rec = (const struct osd_fid_pack *) iam_it_rec_get(&it->oi_it);
3993 RETURN(PTR_ERR(rec));
3995 rc = osd_fid_unpack(fid, rec);
3999 hash = iam_it_store(&it->oi_it);
4001 /* IAM does not store object type in IAM index (dir) */
4002 osd_it_pack_dirent(lde, fid, hash, name, namelen,
4009 * Returns cookie for current Iterator position.
/* Return a cookie for the current iterator position (for later load). */
4011 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
4013 struct osd_it_iam *it = (struct osd_it_iam *)di;
4015 return iam_it_store(&it->oi_it);
4019 * Restore iterator from cookie.
4021 * \param di osd iterator
4022 * \param hash Iterator location cookie
4024 * \retval +ve di points to record with least key not larger than key.
4025 * \retval 0 di points to exact matched key
4026 * \retval -ve failure
/* Restore the iterator to a position previously saved as \a hash. */
4029 static int osd_it_iam_load(const struct lu_env *env,
4030 const struct dt_it *di, __u64 hash)
4032 struct osd_it_iam *it = (struct osd_it_iam *)di;
4034 return iam_it_load(&it->oi_it, hash);
/*
 * Index and iterator operations for IAM-formatted directories (native
 * index format; contrast osd_index_ea_ops for interoperability mode).
 */
4037 static const struct dt_index_operations osd_index_iam_ops = {
4038 .dio_lookup = osd_index_iam_lookup,
4039 .dio_declare_insert = osd_index_declare_iam_insert,
4040 .dio_insert = osd_index_iam_insert,
4041 .dio_declare_delete = osd_index_declare_iam_delete,
4042 .dio_delete = osd_index_iam_delete,
4044 .init = osd_it_iam_init,
4045 .fini = osd_it_iam_fini,
4046 .get = osd_it_iam_get,
4047 .put = osd_it_iam_put,
4048 .next = osd_it_iam_next,
4049 .key = osd_it_iam_key,
4050 .key_size = osd_it_iam_key_size,
4051 .rec = osd_it_iam_rec,
4052 .store = osd_it_iam_store,
4053 .load = osd_it_iam_load
/*
 * osd_it_ea_init(): set up the ->readdir()-driven ("EA" interop mode)
 * directory iterator.  The iterator state lives in the per-thread
 * osd_thread_info; a dentry/file pair is synthesized here so the
 * backend fs ->readdir() can be called on the object's inode.
 * NOTE(review): some lines (the 'attr' parameter in the signature,
 * the else keyword before the O_32BITHASH branch, braces) are elided
 * in this extract.
 */
4058 * Creates or initializes iterator context.
4060 * \retval struct osd_it_ea, iterator structure on success
4063 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
4064 struct dt_object *dt,
4066 struct lustre_capa *capa)
4068 struct osd_object *obj = osd_dt_obj(dt);
4069 struct osd_thread_info *info = osd_oti_get(env);
4070 struct osd_it_ea *it = &info->oti_it_ea;
4071 struct lu_object *lo = &dt->do_lu;
4072 struct dentry *obj_dentry = &info->oti_it_dentry;
4074 LASSERT(lu_object_exists(lo));
/* fake dentry pointing at the object's inode for ->readdir() */
4076 obj_dentry->d_inode = obj->oo_inode;
4077 obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
4078 obj_dentry->d_name.hash = 0;
/* reset in-memory dirent cache state */
4080 it->oie_rd_dirent = 0;
4081 it->oie_it_dirent = 0;
4082 it->oie_dirent = NULL;
4083 it->oie_buf = info->oti_it_ea_buf;
4085 it->oie_file.f_pos = 0;
4086 it->oie_file.f_dentry = obj_dentry;
/* client-requested hash width selects the readdir cookie format */
4087 if (attr & LUDA_64BITHASH)
4088 it->oie_file.f_flags = O_64BITHASH;
4090 it->oie_file.f_flags = O_32BITHASH;
4091 it->oie_file.f_mapping = obj->oo_inode->i_mapping;
4092 it->oie_file.f_op = obj->oo_inode->i_fop;
4093 it->oie_file.private_data = NULL;
4095 RETURN((struct dt_it *) it);
/*
 * osd_it_ea_fini(): tear down the EA iterator — let the backend fs
 * release its readdir state via f_op->release(), then drop the object
 * reference taken when the iterator was created.
 */
4099 * Destroy or finishes iterator context.
4101 * \param di iterator structure to be destroyed
4103 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
4105 struct osd_it_ea *it = (struct osd_it_ea *)di;
4106 struct osd_object *obj = it->oie_obj;
4107 struct inode *inode = obj->oo_inode;
4110 it->oie_file.f_op->release(inode, &it->oie_file);
4111 lu_object_put(env, &obj->oo_dt.do_lu);
/*
 * osd_it_ea_get(): position the iterator at a key.  Only the empty key
 * "" is supported (enforced by the LASSERT): it rewinds the iterator to
 * the start of the directory by resetting file position and the cached
 * dirent state.
 * NOTE(review): the trailing RETURN (+1 per the TODO above) is elided
 * in this extract.
 */
4116 * It position the iterator at given key, so that next lookup continues from
4117 * that key Or it is similar to dio_it->load() but based on a key,
4118 * rather than file position.
4120 * As a special convention, osd_it_ea_get(env, di, "") has to rewind iterator
4123 * TODO: Presently return +1 considering it is only used by mdd_dir_is_empty().
4125 static int osd_it_ea_get(const struct lu_env *env,
4126 struct dt_it *di, const struct dt_key *key)
4128 struct osd_it_ea *it = (struct osd_it_ea *)di;
4131 LASSERT(((const char *)key)[0] == '\0');
4132 it->oie_file.f_pos = 0;
4133 it->oie_rd_dirent = 0;
4134 it->oie_it_dirent = 0;
4135 it->oie_dirent = NULL;
/* NOTE(review): body elided in this extract; appears to be a no-op
 * counterpart to osd_it_ea_get() — confirm against full source. */
4143 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
/*
 * osd_ldiskfs_filldir(): filldir_t callback invoked by the backend
 * ->readdir().  'buf' is really the struct osd_it_ea; each entry is
 * appended to the iterator's in-memory buffer as a rounded-up
 * osd_it_ea_dirent record.  Returning non-zero stops readdir (buffer
 * full); 0 asks for the next entry.
 * NOTE(review): several lines are elided here — the d_type parameter,
 * the error/overflow RETURNs, fid_zero() on the no-LUFID path, and the
 * final return value.
 */
4148 * It is called internally by ->readdir(). It fills the
4149 * iterator's in-memory data structure with required
4150 * information i.e. name, namelen, rec_size etc.
4152 * \param buf in which information to be filled in.
4153 * \param name name of the file in given dir
4155 * \retval 0 on success
4156 * \retval 1 on buffer full
4158 static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
4159 loff_t offset, __u64 ino,
4162 struct osd_it_ea *it = (struct osd_it_ea *)buf;
4163 struct osd_it_ea_dirent *ent = it->oie_dirent;
4164 struct lu_fid *fid = &ent->oied_fid;
4165 struct osd_fid_pack *rec;
4168 /* this should never happen */
4169 if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
4170 CERROR("ldiskfs return invalid namelen %d\n", namelen);
/* stop filling when the next record would overrun oie_buf */
4174 if ((void *) ent - it->oie_buf + sizeof(*ent) + namelen >
/* entry carries a packed fid right after the NUL-terminated name */
4178 if (d_type & LDISKFS_DIRENT_LUFID) {
4179 rec = (struct osd_fid_pack*) (name + namelen + 1);
4181 if (osd_fid_unpack(fid, rec) != 0)
/* strip the internal LUFID flag before exposing d_type */
4184 d_type &= ~LDISKFS_DIRENT_LUFID;
4189 ent->oied_ino = ino;
4190 ent->oied_off = offset;
4191 ent->oied_namelen = namelen;
4192 ent->oied_type = d_type;
4194 memcpy(ent->oied_name, name, namelen);
/* advance write cursor to the next rounded record boundary */
4196 it->oie_rd_dirent++;
4197 it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
/*
 * osd_ldiskfs_it_fill(): refill the iterator's in-memory dirent buffer
 * by calling the backend ->readdir() with osd_ldiskfs_filldir as the
 * callback.  Directory access is serialized either by the per-object
 * htree lock (when available) or by oo_ext_idx_sem.
 * NOTE(review): the 'result' declaration, the else branches pairing the
 * two locking schemes, and the EOF/return handling after the
 * oie_rd_dirent == 0 check are elided in this extract.
 */
4202 * Calls ->readdir() to load a directory entry at a time
4203 * and stored it in iterator's in-memory data structure.
4205 * \param di iterator's in memory structure
4207 * \retval 0 on success
4208 * \retval -ve on error
4210 static int osd_ldiskfs_it_fill(const struct lu_env *env,
4211 const struct dt_it *di)
4213 struct osd_it_ea *it = (struct osd_it_ea *)di;
4214 struct osd_object *obj = it->oie_obj;
4215 struct inode *inode = obj->oo_inode;
4216 struct htree_lock *hlock = NULL;
/* reset buffer cursor before refilling */
4220 it->oie_dirent = it->oie_buf;
4221 it->oie_rd_dirent = 0;
/* prefer fine-grained htree read lock, else the whole-index rwsem */
4223 if (obj->oo_hl_head != NULL) {
4224 hlock = osd_oti_get(env)->oti_hlock;
4225 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
4226 inode, LDISKFS_HLOCK_READDIR);
4228 cfs_down_read(&obj->oo_ext_idx_sem);
4231 result = inode->i_fop->readdir(&it->oie_file, it,
4232 (filldir_t) osd_ldiskfs_filldir);
4235 ldiskfs_htree_unlock(hlock);
4237 cfs_up_read(&obj->oo_ext_idx_sem);
4239 if (it->oie_rd_dirent == 0) {
/* got at least one entry: point the read cursor at the first one */
4242 it->oie_dirent = it->oie_buf;
4243 it->oie_it_dirent = 1;
/*
 * osd_it_ea_next(): advance to the next directory entry.  Walk the
 * in-memory buffer while cached entries remain; once exhausted, either
 * report EOF (htree end-of-file position) or refill the buffer via
 * osd_ldiskfs_it_fill().
 * NOTE(review): the assignment target of the buffer-walk expression
 * (it->oie_dirent = ...) and the RETURN statements are elided in this
 * extract.
 */
4250 * It calls osd_ldiskfs_it_fill() which will use ->readdir()
4251 * to load a directory entry at a time and stored it in
4252 * iterator's in-memory data structure.
4254 * \param di iterator's in memory structure
4256 * \retval +ve iterator reached to end
4257 * \retval 0 iterator not reached to end
4258 * \retval -ve on error
4260 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
4262 struct osd_it_ea *it = (struct osd_it_ea *)di;
/* more cached entries: step over the current rounded record */
4267 if (it->oie_it_dirent < it->oie_rd_dirent) {
4269 (void *) it->oie_dirent +
4270 cfs_size_round(sizeof(struct osd_it_ea_dirent) +
4271 it->oie_dirent->oied_namelen);
4272 it->oie_it_dirent++;
/* buffer drained: EOF if readdir position hit htree end marker */
4275 if (it->oie_file.f_pos == LDISKFS_HTREE_EOF)
4278 rc = osd_ldiskfs_it_fill(env, di);
/*
 * osd_it_ea_key(): the key of the current entry is its name, taken
 * straight from the cached osd_it_ea_dirent.
 */
4285 * Returns the key at current position from iterator's in memory structure.
4287 * \param di iterator's in memory structure
4289 * \retval key i.e. struct dt_key on success
4291 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
4292 const struct dt_it *di)
4294 struct osd_it_ea *it = (struct osd_it_ea *)di;
4296 RETURN((struct dt_key *)it->oie_dirent->oied_name);
4300 * Returns the key's size at current position from iterator's in memory structure.
4302 * \param di iterator's in memory structure
4304 * \retval key_size i.e. struct dt_key on success
4306 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
4308 struct osd_it_ea *it = (struct osd_it_ea *)di;
4310 RETURN(it->oie_dirent->oied_namelen);
/*
 * osd_it_ea_rec(): build the lustre dirent for the current position
 * from the cached osd_it_ea_dirent.  If the cached fid is not sane
 * (entry lacked a packed LUFID), fall back to looking it up by inode
 * number via osd_ea_fid_get().
 * NOTE(review): the 'attr' parameter, rc declaration/handling, and the
 * trailing RETURN are elided in this extract.
 */
4315 * Returns the value (i.e. fid/igif) at current position from iterator's
4316 * in memory structure.
4318 * \param di struct osd_it_ea, iterator's in memory structure
4319 * \param attr attr requested for dirent.
4320 * \param lde lustre dirent
4322 * \retval 0 no error and \param lde has correct lustre dirent.
4323 * \retval -ve on error
4325 static inline int osd_it_ea_rec(const struct lu_env *env,
4326 const struct dt_it *di,
4327 struct dt_rec *dtrec,
4330 struct osd_it_ea *it = (struct osd_it_ea *)di;
4331 struct osd_object *obj = it->oie_obj;
4332 struct lu_fid *fid = &it->oie_dirent->oied_fid;
4333 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
/* fid missing from the on-disk entry: resolve it from the inode */
4338 if (!fid_is_sane(fid))
4339 rc = osd_ea_fid_get(env, obj, it->oie_dirent->oied_ino, fid);
4342 osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
4343 it->oie_dirent->oied_name,
4344 it->oie_dirent->oied_namelen,
4345 it->oie_dirent->oied_type,
4351 * Returns a cookie for current position of the iterator head, so that
4352 * user can use this cookie to load/start the iterator next time.
4354 * \param di iterator's in memory structure
4356 * \retval cookie for current position, on success
4358 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
4360 struct osd_it_ea *it = (struct osd_it_ea *)di;
4362 RETURN(it->oie_dirent->oied_off);
/*
 * osd_it_ea_load(): restart iteration from a stored cookie — seek the
 * backend file position to 'hash' and refill the in-memory buffer.
 * NOTE(review): the rc declaration and final return handling are
 * elided in this extract.
 */
4366 * It calls osd_ldiskfs_it_fill() which will use ->readdir()
4367 * to load a directory entry at a time and stored it i inn,
4368 * in iterator's in-memory data structure.
4370 * \param di struct osd_it_ea, iterator's in memory structure
4372 * \retval +ve on success
4373 * \retval -ve on error
4375 static int osd_it_ea_load(const struct lu_env *env,
4376 const struct dt_it *di, __u64 hash)
4378 struct osd_it_ea *it = (struct osd_it_ea *)di;
4382 it->oie_file.f_pos = hash;
4384 rc = osd_ldiskfs_it_fill(env, di);
/*
 * osd_index_ea_lookup(): name -> record lookup for interop-mode (EA)
 * directories.  Validates capability for INDEX_LOOKUP, then delegates
 * to osd_ea_lookup_rec().
 * NOTE(review): the error RETURN after the capa check and the final
 * return are elided in this extract.
 */
4392 * Index lookup function for interoperability mode (b11826).
4394 * \param key, key i.e. file name to be searched
4396 * \retval +ve, on success
4397 * \retval -ve, on error
4399 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
4400 struct dt_rec *rec, const struct dt_key *key,
4401 struct lustre_capa *capa)
4403 struct osd_object *obj = osd_dt_obj(dt);
4408 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
4409 LINVRNT(osd_invariant(obj));
4411 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
4414 rc = osd_ea_lookup_rec(env, obj, rec, key);
/*
 * Operation vector for EA (ldiskfs-native) directories; mirrors
 * osd_index_iam_ops above but backed by ->readdir()/dirent code.
 */
4422 * Index and Iterator operations for interoperability
4423 * mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
4425 static const struct dt_index_operations osd_index_ea_ops = {
4426 .dio_lookup = osd_index_ea_lookup,
4427 .dio_declare_insert = osd_index_declare_ea_insert,
4428 .dio_insert = osd_index_ea_insert,
4429 .dio_declare_delete = osd_index_declare_ea_delete,
4430 .dio_delete = osd_index_ea_delete,
4432 .init = osd_it_ea_init,
4433 .fini = osd_it_ea_fini,
4434 .get = osd_it_ea_get,
4435 .put = osd_it_ea_put,
4436 .next = osd_it_ea_next,
4437 .key = osd_it_ea_key,
4438 .key_size = osd_it_ea_key_size,
4439 .rec = osd_it_ea_rec,
4440 .store = osd_it_ea_store,
4441 .load = osd_it_ea_load
/*
 * osd_key_init(): allocate the per-thread osd context: the info struct
 * itself, the readdir buffer, and (when the kernel supports it) an
 * htree lock.  Failure paths free what was already allocated and
 * return ERR_PTR(-ENOMEM).
 * NOTE(review): the NULL checks, goto labels and intermediate frees
 * between the visible lines are elided in this extract.
 */
4445 static void *osd_key_init(const struct lu_context *ctx,
4446 struct lu_context_key *key)
4448 struct osd_thread_info *info;
4450 OBD_ALLOC_PTR(info);
4452 return ERR_PTR(-ENOMEM);
4454 OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4455 if (info->oti_it_ea_buf == NULL)
4458 info->oti_env = container_of(ctx, struct lu_env, le_ctx);
4460 info->oti_hlock = ldiskfs_htree_lock_alloc();
4461 if (info->oti_hlock == NULL)
/* error path: undo the EA-buffer allocation before failing */
4467 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
4470 return ERR_PTR(-ENOMEM);
/*
 * osd_key_fini(): release per-thread context resources allocated in
 * osd_key_init() — htree lock (if any) and the readdir buffer.
 * NOTE(review): the final OBD_FREE_PTR(info) is elided in this extract.
 */
4473 static void osd_key_fini(const struct lu_context *ctx,
4474 struct lu_context_key *key, void* data)
4476 struct osd_thread_info *info = data;
4478 if (info->oti_hlock != NULL)
4479 ldiskfs_htree_lock_free(info->oti_hlock);
4480 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
/*
 * osd_key_exit(): sanity-check on context exit — the thread must not
 * leave with outstanding read/write locks or open transactions.
 */
4484 static void osd_key_exit(const struct lu_context *ctx,
4485 struct lu_context_key *key, void *data)
4487 struct osd_thread_info *info = data;
4489 LASSERT(info->oti_r_locks == 0);
4490 LASSERT(info->oti_w_locks == 0);
4491 LASSERT(info->oti_txns == 0);
4494 /* type constructor/destructor: osd_type_init, osd_type_fini */
4495 LU_TYPE_INIT_FINI(osd, &osd_key);
/*
 * Context key describing the per-thread osd_thread_info; valid for both
 * DT and MD thread contexts.
 */
4497 static struct lu_context_key osd_key = {
4498 .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD,
4499 .lct_init = osd_key_init,
4500 .lct_fini = osd_key_fini,
4501 .lct_exit = osd_key_exit
/*
 * Device-level init: currently just registers the OSD's procfs entries
 * under the given device name; 'env' and 'next' are unused.
 */
static int osd_device_init(const struct lu_env *env, struct lu_device *d,
                           const char *name, struct lu_device *next)
{
        struct osd_device *osd = osd_dev(d);

        return osd_procfs_init(osd, name);
}
/*
 * osd_shutdown(): release run-time state — drop the remote-object-dir
 * reference and tear down the OI table, if initialized.
 * NOTE(review): ENTRY/RETURN and some cleanup lines are elided in this
 * extract.
 */
4511 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
4513 struct osd_thread_info *info = osd_oti_get(env);
4515 if (o->od_obj_area != NULL) {
4516 lu_object_put(env, &o->od_obj_area->do_lu);
4517 o->od_obj_area = NULL;
4519 if (o->od_oi_table != NULL)
4520 osd_oi_fini(info, &o->od_oi_table, o->od_oi_count);
/*
 * osd_mount(): attach the osd_device to an already-performed server
 * mount (looked up by device name).  Rejects double mounts, saves the
 * lustre_mount_info, and warns if the on-disk flags request IAM
 * directory mode.
 * NOTE(review): error RETURNs, the od_mount assignment, the ldd
 * initialization from lsi, and the final return are elided in this
 * extract.
 */
4525 static int osd_mount(const struct lu_env *env,
4526 struct osd_device *o, struct lustre_cfg *cfg)
4528 struct lustre_mount_info *lmi;
4529 const char *dev = lustre_cfg_string(cfg, 0);
4530 struct lustre_disk_data *ldd;
4531 struct lustre_sb_info *lsi;
4534 if (o->od_mount != NULL) {
4535 CERROR("Already mounted (%s)\n", dev);
/* get mount already performed by lustre */
4540 lmi = server_get_mount(dev);
4542 CERROR("Cannot get mount info for %s!\n", dev);
4546 LASSERT(lmi != NULL);
4547 /* save lustre_mount_info in dt_device */
4550 lsi = s2lsi(lmi->lmi_sb);
4553 if (ldd->ldd_flags & LDD_F_IAM_DIR) {
4555 LCONSOLE_WARN("OSD: IAM mode enabled\n");
/* remote object dir is opened later, in osd_prepare() */
4559 o->od_obj_area = NULL;
/*
 * osd_device_fini(): flush dcache and sync the device, unregister
 * procfs, then release the server mount reference taken in
 * osd_mount().
 * NOTE(review): rc declaration, ENTRY, and the trailing RETURN are
 * elided in this extract.
 */
4563 static struct lu_device *osd_device_fini(const struct lu_env *env,
4564 struct lu_device *d)
4569 shrink_dcache_sb(osd_sb(osd_dev(d)));
4570 osd_sync(env, lu2dt_dev(d));
4572 rc = osd_procfs_fini(osd_dev(d));
4574 CERROR("proc fini error %d \n", rc);
4575 RETURN (ERR_PTR(rc));
4578 if (osd_dev(d)->od_mount)
4579 server_put_mount(osd_dev(d)->od_mount->lmi_name,
4580 osd_dev(d)->od_mount->lmi_mnt);
4581 osd_dev(d)->od_mount = NULL;
/*
 * osd_device_alloc(): allocate and initialize an osd_device — dt
 * device init, ops vectors, osfs lock/age, and the capability hash.
 * Returns the embedded lu_device or an ERR_PTR on failure.
 * NOTE(review): the OBD_ALLOC of 'o', the assignment of 'l' from the
 * embedded lu_device, goto labels and the final return are elided in
 * this extract.
 */
4586 static struct lu_device *osd_device_alloc(const struct lu_env *env,
4587 struct lu_device_type *t,
4588 struct lustre_cfg *cfg)
4590 struct lu_device *l;
4591 struct osd_device *o;
4597 result = dt_device_init(&o->od_dt_dev, t);
4600 l->ld_ops = &osd_lu_ops;
4601 o->od_dt_dev.dd_ops = &osd_dt_ops;
4602 cfs_spin_lock_init(&o->od_osfs_lock);
/* force first statfs to hit the disk (age set in the past) */
4603 o->od_osfs_age = cfs_time_shift_64(-1000);
4604 o->od_capa_hash = init_capa_hash();
4605 if (o->od_capa_hash == NULL) {
4606 dt_device_fini(&o->od_dt_dev);
4607 l = ERR_PTR(-ENOMEM);
/* dt_device_init() failed */
4610 l = ERR_PTR(result);
/* allocation of 'o' itself failed */
4615 l = ERR_PTR(-ENOMEM);
/*
 * osd_device_free(): counterpart of osd_device_alloc() — tear down the
 * capability hash and the dt device.
 * NOTE(review): OBD_FREE_PTR(o) and the RETURN are elided in this
 * extract.
 */
4619 static struct lu_device *osd_device_free(const struct lu_env *env,
4620 struct lu_device *d)
4622 struct osd_device *o = osd_dev(d);
4625 cleanup_capa_hash(o->od_capa_hash);
4626 dt_device_fini(&o->od_dt_dev);
/*
 * osd_process_config(): dispatch lustre_cfg commands — the visible
 * cases mount the device (osd_mount) and shut it down (osd_shutdown).
 * NOTE(review): the case labels (presumably LCFG_SETUP/LCFG_CLEANUP —
 * confirm against full source), default branch and RETURN are elided
 * in this extract.
 */
4631 static int osd_process_config(const struct lu_env *env,
4632 struct lu_device *d, struct lustre_cfg *cfg)
4634 struct osd_device *o = osd_dev(d);
4638 switch(cfg->lcfg_command) {
4640 err = osd_mount(env, o, cfg);
4643 err = osd_shutdown(env, o);
/*
 * ldo_recovery_complete hook.
 * NOTE(review): body entirely elided in this extract; likely a trivial
 * RETURN(0) — confirm against full source.
 */
4652 static int osd_recovery_complete(const struct lu_env *env,
4653 struct lu_device *d)
/*
 * osd_prepare(): post-init setup in three steps (numbered in the code):
 * 1) initialize the OI table, 2) set up local objects, 3) open the
 * remote-object directory and cache it in od_obj_area.
 * NOTE(review): result declaration, ENTRY, intermediate error gotos and
 * the final RETURN are elided in this extract; lsi/ldd/lmi usage beyond
 * what is visible cannot be confirmed here.
 */
4658 static int osd_prepare(const struct lu_env *env,
4659 struct lu_device *pdev,
4660 struct lu_device *dev)
4662 struct osd_device *osd = osd_dev(dev);
4663 struct lustre_sb_info *lsi;
4664 struct lustre_disk_data *ldd;
4665 struct lustre_mount_info *lmi;
4666 struct osd_thread_info *oti = osd_oti_get(env);
4667 struct dt_object *d;
4671 /* 1. initialize oi before any file create or file open */
4672 result = osd_oi_init(oti, &osd->od_oi_table,
4673 &osd->od_dt_dev, lu2md_dev(pdev));
4677 LASSERT(result > 0);
4678 osd->od_oi_count = result;
4680 lmi = osd->od_mount;
4681 lsi = s2lsi(lmi->lmi_sb);
4684 /* 2. setup local objects */
4685 result = llo_local_objects_setup(env, lu2md_dev(pdev), lu2dt_dev(dev));
4689 /* 3. open remote object dir */
4690 d = dt_store_open(env, lu2dt_dev(dev), "",
4691 remote_obj_dir, &oti->oti_fid);
4693 osd->od_obj_area = d;
/* dt_store_open() failed: record error, leave od_obj_area unset */
4696 result = PTR_ERR(d);
4697 osd->od_obj_area = NULL;
/* lu_object lifecycle operations for osd objects. */
4704 static const struct lu_object_operations osd_lu_obj_ops = {
4705 .loo_object_init = osd_object_init,
4706 .loo_object_delete = osd_object_delete,
4707 .loo_object_release = osd_object_release,
4708 .loo_object_free = osd_object_free,
4709 .loo_object_print = osd_object_print,
4710 .loo_object_invariant = osd_object_invariant
/* lu_device operations: object allocation, config, recovery, prepare. */
4713 static const struct lu_device_operations osd_lu_ops = {
4714 .ldo_object_alloc = osd_object_alloc,
4715 .ldo_process_config = osd_process_config,
4716 .ldo_recovery_complete = osd_recovery_complete,
4717 .ldo_prepare = osd_prepare,
/* Device-type operations: type and device lifecycle entry points. */
4720 static const struct lu_device_type_operations osd_device_type_ops = {
4721 .ldto_init = osd_type_init,
4722 .ldto_fini = osd_type_fini,
4724 .ldto_start = osd_type_start,
4725 .ldto_stop = osd_type_stop,
4727 .ldto_device_alloc = osd_device_alloc,
4728 .ldto_device_free = osd_device_free,
4730 .ldto_device_init = osd_device_init,
4731 .ldto_device_fini = osd_device_fini
/* Device type registered with the class subsystem in osd_mod_init(). */
4734 static struct lu_device_type osd_device_type = {
4735 .ldt_tags = LU_DEVICE_DT,
4736 .ldt_name = LUSTRE_OSD_NAME,
4737 .ldt_ops = &osd_device_type_ops,
4738 .ldt_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
/* Minimal obd_ops vector: only the owner field, kept for lprocfs. */
4742 * lprocfs legacy support.
4744 static struct obd_ops osd_obd_device_ops = {
4745 .o_owner = THIS_MODULE
/*
 * Descriptor for the remote-object directory local object, registered
 * in osd_mod_init() and opened in osd_prepare() step 3.
 */
4748 static struct lu_local_obj_desc llod_osd_rem_obj_dir = {
4749 .llod_name = remote_obj_dir,
4750 .llod_oid = OSD_REM_OBJ_DIR_OID,
4752 .llod_feat = &dt_directory_features,
/*
 * Module entry point: register the remote-object-dir local object and
 * the OSD device type (with its lprocfs variables).
 */
4755 static int __init osd_mod_init(void)
4757 struct lprocfs_static_vars lvars;
4760 llo_local_obj_register(&llod_osd_rem_obj_dir);
4761 lprocfs_osd_init_vars(&lvars);
4762 return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
4763 LUSTRE_OSD_NAME, &osd_device_type);
/* Module exit: undo osd_mod_init() registrations, in reverse order. */
4766 static void __exit osd_mod_exit(void)
4768 llo_local_obj_unregister(&llod_osd_rem_obj_dir);
4769 class_unregister_type(LUSTRE_OSD_NAME);
/* Module metadata and entry-point registration. */
4772 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4773 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_NAME")");
4774 MODULE_LICENSE("GPL");
4776 cfs_module(osd, "0.0.2", osd_mod_init, osd_mod_exit);