1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011, 2012, Whamcloud, Inc.
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
38 * lustre/osd/osd_handler.c
40 * Top-level entry points into osd module
42 * Author: Nikita Danilov <nikita@clusterfs.com>
43 * Pravin Shelar <pravin.shelar@sun.com> : Added fid in dirent
47 # define EXPORT_SYMTAB
49 #define DEBUG_SUBSYSTEM S_MDS
51 #include <linux/module.h>
53 /* LUSTRE_VERSION_CODE */
54 #include <lustre_ver.h>
55 /* prerequisite for linux/xattr.h */
56 #include <linux/types.h>
57 /* prerequisite for linux/xattr.h */
59 /* XATTR_{REPLACE,CREATE} */
60 #include <linux/xattr.h>
65 * struct OBD_{ALLOC,FREE}*()
68 #include <obd_support.h>
69 /* struct ptlrpc_thread */
70 #include <lustre_net.h>
73 #include <lustre_fid.h>
75 #include "osd_internal.h"
78 /* llo_* api support */
79 #include <md_object.h>
81 #ifdef HAVE_LDISKFS_PDO
83 CFS_MODULE_PARM(ldiskfs_pdo, "i", int, 0644,
84 "ldiskfs with parallel directory operations");
89 static const char dot[] = ".";
90 static const char dotdot[] = "..";
91 static const char remote_obj_dir[] = "REM_OBJ_DIR";
93 static const struct lu_object_operations osd_lu_obj_ops;
94 static const struct dt_object_operations osd_obj_ops;
95 static const struct dt_object_operations osd_obj_ea_ops;
96 static const struct dt_index_operations osd_index_iam_ops;
97 static const struct dt_index_operations osd_index_ea_ops;
99 static int osd_has_index(const struct osd_object *obj)
101 return obj->oo_dt.do_index_ops != NULL;
104 static int osd_object_invariant(const struct lu_object *l)
106 return osd_invariant(osd_obj(l));
109 #ifdef HAVE_QUOTA_SUPPORT
111 osd_push_ctxt(const struct lu_env *env, struct osd_ctxt *save)
113 struct md_ucred *uc = md_ucred(env);
118 save->oc_uid = current_fsuid();
119 save->oc_gid = current_fsgid();
120 save->oc_cap = current_cap();
121 if ((tc = prepare_creds())) {
122 tc->fsuid = uc->mu_fsuid;
123 tc->fsgid = uc->mu_fsgid;
126 /* XXX suboptimal */
127 cfs_curproc_cap_unpack(uc->mu_cap);
131 osd_pop_ctxt(struct osd_ctxt *save)
135 if ((tc = prepare_creds())) {
136 tc->fsuid = save->oc_uid;
137 tc->fsgid = save->oc_gid;
138 tc->cap_effective = save->oc_cap;
145 * Concurrency: doesn't matter
147 static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
149 return osd_oti_get(env)->oti_r_locks > 0;
153 * Concurrency: doesn't matter
155 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
157 struct osd_thread_info *oti = osd_oti_get(env);
158 return oti->oti_w_locks > 0 && o->oo_owner == env;
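/*
 * Lock-state bookkeeping (as visible in the code above and in the lock
 * methods further below): oti_r_locks/oti_w_locks count the read/write
 * locks taken by the current thread, while oo_owner records the
 * environment that currently holds the object write-locked.  The
 * osd_read_locked()/osd_write_locked() helpers and the unlock paths
 * assert against these fields.
 */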
162 * Concurrency: doesn't access mutable data
164 static int osd_root_get(const struct lu_env *env,
165 struct dt_device *dev, struct lu_fid *f)
167 lu_local_obj_fid(f, OSD_FS_ROOT_OID);
171 static inline int osd_qid_type(struct osd_thandle *oh, int i)
173 return (oh->ot_id_type & (1 << i)) ? GRPQUOTA : USRQUOTA;
176 static inline void osd_qid_set_type(struct osd_thandle *oh, int i, int type)
178 oh->ot_id_type |= ((type == GRPQUOTA) ? (1 << i) : 0);
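/*
 * Note on the encoding used above: bit i of oh->ot_id_type records the
 * quota type of the id stored in oh->ot_id_array[i] (1 = GRPQUOTA,
 * 0 = USRQUOTA), which is exactly what osd_qid_type() decodes.
 */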
181 void osd_declare_qid(struct dt_object *dt, struct osd_thandle *oh,
182 int type, uid_t id, struct inode *inode)
185 int i, allocated = 0;
186 struct osd_object *obj;
190 LASSERTF(oh->ot_id_cnt <= OSD_MAX_UGID_CNT, "count=%u",
193 /* id entry is allocated in the quota file */
194 if (inode && inode->i_dquot[type] && inode->i_dquot[type]->dq_off)
197 for (i = 0; i < oh->ot_id_cnt; i++) {
198 if (oh->ot_id_array[i] == id && osd_qid_type(oh, i) == type)
202 if (unlikely(i >= OSD_MAX_UGID_CNT)) {
203 CERROR("more than %d uid/gids for a transaction?\n", i);
207 oh->ot_id_array[i] = id;
208 osd_qid_set_type(oh, i, type);
210 obj = osd_dt_obj(dt);
211 oh->ot_credits += (allocated || id == 0) ?
212 1 : LDISKFS_QUOTA_INIT_BLOCKS(osd_sb(osd_obj2dev(obj)));
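/*
 * The credit accounting above charges a single block when the id already
 * has an entry allocated in the quota file (dq_off != 0) or when id == 0,
 * and the full LDISKFS_QUOTA_INIT_BLOCKS otherwise, since a new quota
 * entry may have to be allocated within this transaction.
 */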
217 * OSD object methods.
221 * Concurrency: no concurrent access is possible that early in object
224 static struct lu_object *osd_object_alloc(const struct lu_env *env,
225 const struct lu_object_header *hdr,
228 struct osd_object *mo;
234 l = &mo->oo_dt.do_lu;
235 dt_object_init(&mo->oo_dt, NULL, d);
236 if (osd_dev(d)->od_iop_mode)
237 mo->oo_dt.do_ops = &osd_obj_ea_ops;
239 mo->oo_dt.do_ops = &osd_obj_ops;
241 l->lo_ops = &osd_lu_obj_ops;
242 cfs_init_rwsem(&mo->oo_sem);
243 cfs_init_rwsem(&mo->oo_ext_idx_sem);
244 cfs_spin_lock_init(&mo->oo_guard);
252 * retrieve object from backend ext fs.
254 struct inode *osd_iget(struct osd_thread_info *info,
255 struct osd_device *dev,
256 const struct osd_inode_id *id)
258 struct inode *inode = NULL;
260 inode = ldiskfs_iget(osd_sb(dev), id->oii_ino);
262 CERROR("Cannot get inode, rc = %li\n", PTR_ERR(inode));
263 } else if (id->oii_gen != OSD_OII_NOGEN &&
264 inode->i_generation != id->oii_gen) {
266 inode = ERR_PTR(-ESTALE);
267 } else if (inode->i_nlink == 0) {
268 /* due to parallel readdir and unlink,
269 * we can have a dead inode here. */
270 CWARN("stale inode\n");
271 make_bad_inode(inode);
273 inode = ERR_PTR(-ESTALE);
274 } else if (is_bad_inode(inode)) {
275 CERROR("bad inode %lx\n",inode->i_ino);
277 inode = ERR_PTR(-ENOENT);
279 /* Do not update file c/mtime in ldiskfs.
280 * NB: we don't have any lock to protect this because we don't
281 * have a reference on the osd_object yet, but contention with
282 * another lookup + attr_set can't happen in the tiny window
283 * between the if (...) check and setting S_NOCMTIME. */
284 if (!(inode->i_flags & S_NOCMTIME))
285 inode->i_flags |= S_NOCMTIME;
290 static int osd_fid_lookup(const struct lu_env *env,
291 struct osd_object *obj, const struct lu_fid *fid)
293 struct osd_thread_info *info;
294 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
295 struct osd_device *dev;
296 struct osd_inode_id *id;
300 LINVRNT(osd_invariant(obj));
301 LASSERT(obj->oo_inode == NULL);
302 LASSERTF(fid_is_sane(fid) || fid_is_idif(fid), DFID, PFID(fid));
304 * This assertion checks that the osd layer sees only local
305 * fids. Unfortunately it is somewhat expensive (it does a
306 * cache lookup), so it is disabled for production/acceptance-testing.
308 LASSERT(1 || fid_is_local(env, ldev->ld_site, fid));
312 info = osd_oti_get(env);
317 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
320 result = osd_oi_lookup(info, dev, fid, id);
322 if (result == -ENOENT)
327 inode = osd_iget(info, dev, id);
330 * If the fid wasn't found in the OI, an inode-less object is
331 * created, for which lu_object_exists() returns
332 * false. This is used in the (frequent) case when
333 * objects are created as locking anchors or
334 * placeholders for objects yet to be created.
336 result = PTR_ERR(inode);
340 obj->oo_inode = inode;
341 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
342 if (dev->od_iop_mode) {
343 obj->oo_compat_dot_created = 1;
344 obj->oo_compat_dotdot_created = 1;
347 if (!S_ISDIR(inode->i_mode) || !ldiskfs_pdo) /* done */
350 LASSERT(obj->oo_hl_head == NULL);
351 obj->oo_hl_head = ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
352 if (obj->oo_hl_head == NULL) {
353 obj->oo_inode = NULL;
358 LINVRNT(osd_invariant(obj));
363 * Concurrency: shouldn't matter.
365 static void osd_object_init0(struct osd_object *obj)
367 LASSERT(obj->oo_inode != NULL);
368 obj->oo_dt.do_body_ops = &osd_body_ops;
369 obj->oo_dt.do_lu.lo_header->loh_attr |=
370 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
374 * Concurrency: no concurrent access is possible that early in object
377 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
378 const struct lu_object_conf *unused)
380 struct osd_object *obj = osd_obj(l);
383 LINVRNT(osd_invariant(obj));
385 result = osd_fid_lookup(env, obj, lu_object_fid(l));
386 obj->oo_dt.do_body_ops = &osd_body_ops_new;
388 if (obj->oo_inode != NULL)
389 osd_object_init0(obj);
391 LINVRNT(osd_invariant(obj));
396 * Concurrency: no concurrent access is possible that late in object
399 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
401 struct osd_object *obj = osd_obj(l);
403 LINVRNT(osd_invariant(obj));
405 dt_object_fini(&obj->oo_dt);
406 if (obj->oo_hl_head != NULL)
407 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
412 * Concurrency: no concurrent access is possible that late in object
415 static void osd_index_fini(struct osd_object *o)
417 struct iam_container *bag;
419 if (o->oo_dir != NULL) {
420 bag = &o->oo_dir->od_container;
421 if (o->oo_inode != NULL) {
422 if (bag->ic_object == o->oo_inode)
423 iam_container_fini(bag);
425 OBD_FREE_PTR(o->oo_dir);
431 * Concurrency: no concurrent access is possible that late in object
432 * life-cycle (for all existing callers, that is. New callers have to provide
433 * their own locking.)
435 static int osd_inode_unlinked(const struct inode *inode)
437 return inode->i_nlink == 0;
441 OSD_TXN_OI_DELETE_CREDITS = 20,
442 OSD_TXN_INODE_DELETE_CREDITS = 20
449 #if OSD_THANDLE_STATS
451 * Set time when the handle is allocated
453 static void osd_th_alloced(struct osd_thandle *oth)
455 oth->oth_alloced = cfs_time_current();
459 * Set time when the handle started
461 static void osd_th_started(struct osd_thandle *oth)
463 oth->oth_started = cfs_time_current();
467 * Helper function to convert time interval to microseconds packed in
468 * long int (default time units for the counter in "stats" initialized
469 * by lu_time_init() )
471 static long interval_to_usec(cfs_time_t start, cfs_time_t end)
475 cfs_duration_usec(cfs_time_sub(end, start), &val);
476 return val.tv_sec * 1000000 + val.tv_usec;
480 * Check whether we have been dealing with this handle for too long.
482 static void __osd_th_check_slow(void *oth, struct osd_device *dev,
483 cfs_time_t alloced, cfs_time_t started,
486 cfs_time_t now = cfs_time_current();
488 LASSERT(dev != NULL);
490 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_STARTING,
491 interval_to_usec(alloced, started));
492 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_OPEN,
493 interval_to_usec(started, closed));
494 lprocfs_counter_add(dev->od_stats, LPROC_OSD_THANDLE_CLOSING,
495 interval_to_usec(closed, now));
497 if (cfs_time_before(cfs_time_add(alloced, cfs_time_seconds(30)), now)) {
498 CWARN("transaction handle %p was open for too long: "
500 "alloced "CFS_TIME_T" ,"
501 "started "CFS_TIME_T" ,"
502 "closed "CFS_TIME_T"\n",
503 oth, now, alloced, started, closed);
504 libcfs_debug_dumpstack(NULL);
508 #define OSD_CHECK_SLOW_TH(oth, dev, expr) \
510 cfs_time_t __closed = cfs_time_current(); \
511 cfs_time_t __alloced = oth->oth_alloced; \
512 cfs_time_t __started = oth->oth_started; \
515 __osd_th_check_slow(oth, dev, __alloced, __started, __closed); \
518 #else /* OSD_THANDLE_STATS */
520 #define osd_th_alloced(h) do {} while(0)
521 #define osd_th_started(h) do {} while(0)
522 #define OSD_CHECK_SLOW_TH(oth, dev, expr) expr
524 #endif /* OSD_THANDLE_STATS */
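/*
 * When OSD_THANDLE_STATS is enabled, the helpers above timestamp each
 * transaction handle at allocation and start time; OSD_CHECK_SLOW_TH()
 * then feeds the alloc->start, start->stop and stop->now intervals into
 * the lprocfs counters and dumps a stack trace if a handle stayed open
 * for more than 30 seconds.  Without the option they compile away.
 */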
527 * Concurrency: doesn't access mutable data.
529 static int osd_param_is_sane(const struct osd_device *dev,
530 const struct thandle *th)
532 struct osd_thandle *oh;
533 oh = container_of0(th, struct osd_thandle, ot_super);
534 return oh->ot_credits <= osd_journal(dev)->j_max_transaction_buffers;
538 * Concurrency: shouldn't matter.
540 #ifdef HAVE_LDISKFS_JOURNAL_CALLBACK_ADD
541 static void osd_trans_commit_cb(struct super_block *sb,
542 struct journal_callback *jcb, int error)
544 static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
547 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
548 struct thandle *th = &oh->ot_super;
549 struct lu_device *lud = &th->th_dev->dd_lu_dev;
550 struct dt_txn_commit_cb *dcb, *tmp;
552 LASSERT(oh->ot_handle == NULL);
555 CERROR("transaction @0x%p commit error: %d\n", th, error);
557 dt_txn_hook_commit(th);
559 /* call per-transaction callbacks if any */
560 cfs_list_for_each_entry_safe(dcb, tmp, &oh->ot_dcb_list, dcb_linkage)
561 dcb->dcb_func(NULL, th, dcb, error);
563 lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
567 lu_context_exit(&th->th_ctx);
568 lu_context_fini(&th->th_ctx);
572 static struct thandle *osd_trans_create(const struct lu_env *env,
575 struct osd_thread_info *oti = osd_oti_get(env);
576 struct osd_iobuf *iobuf = &oti->oti_iobuf;
577 struct osd_thandle *oh;
581 /* no pending IO in this thread should be left over from the previous request */
582 LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
584 th = ERR_PTR(-ENOMEM);
585 OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
590 th->th_tags = LCT_TX_HANDLE;
592 oti->oti_dev = osd_dt_dev(d);
593 CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
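        /* ot_dcb_list collects the commit callbacks registered through
         * osd_trans_cb_add(); they are invoked from osd_trans_commit_cb()
         * once the journal commits this transaction. */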
600 * Concurrency: shouldn't matter.
602 int osd_trans_start(const struct lu_env *env, struct dt_device *d,
605 struct osd_thread_info *oti = osd_oti_get(env);
606 struct osd_device *dev = osd_dt_dev(d);
608 struct osd_thandle *oh;
613 LASSERT(current->journal_info == NULL);
615 oh = container_of0(th, struct osd_thandle, ot_super);
617 LASSERT(oh->ot_handle == NULL);
619 rc = dt_txn_hook_start(env, d, th);
623 if (!osd_param_is_sane(dev, th)) {
624 CWARN("%s: too many transaction credits (%d > %d)\n",
625 d->dd_lu_dev.ld_obd->obd_name, oh->ot_credits,
626 osd_journal(dev)->j_max_transaction_buffers);
627 /* XXX Limit the credits to 'max_transaction_buffers', and
628 * let the underlying filesystem catch the error if
629 * we really need so many credits.
631 * This should be removed once we can calculate the
632 * credits precisely. */
633 oh->ot_credits = osd_journal(dev)->j_max_transaction_buffers;
634 #ifdef OSD_TRACK_DECLARES
635 CERROR(" attr_set: %d, punch: %d, xattr_set: %d,\n",
636 oh->ot_declare_attr_set, oh->ot_declare_punch,
637 oh->ot_declare_xattr_set);
638 CERROR(" create: %d, ref_add: %d, ref_del: %d, write: %d\n",
639 oh->ot_declare_create, oh->ot_declare_ref_add,
640 oh->ot_declare_ref_del, oh->ot_declare_write);
641 CERROR(" insert: %d, delete: %d, destroy: %d\n",
642 oh->ot_declare_insert, oh->ot_declare_delete,
643 oh->ot_declare_destroy);
648 * XXX temporary stuff. Some abstraction layer should
651 jh = ldiskfs_journal_start_sb(osd_sb(dev), oh->ot_credits);
655 LASSERT(oti->oti_txns == 0);
656 lu_context_init(&th->th_ctx, th->th_tags);
657 lu_context_enter(&th->th_ctx);
659 lu_device_get(&d->dd_lu_dev);
660 oh->ot_dev_link = lu_ref_add(&d->dd_lu_dev.ld_reference,
664 * XXX: the current rule is that we first start the tx and
665 * then lock the object(s), but we can't use
666 * this rule for data (due to locking specifics
667 * in ldiskfs). Also, in the long term we'd like to
668 * use the usual (locks; tx) ordering. So the
669 * UGLY thing is that we'll use one ordering for
670 * data (ofd) and the reverse ordering for metadata
671 * (mdd). Then at some point we'll fix the latter.
673 if (lu_device_is_md(&d->dd_lu_dev)) {
674 LASSERT(oti->oti_r_locks == 0);
675 LASSERT(oti->oti_w_locks == 0);
688 * Concurrency: shouldn't matter.
690 static int osd_trans_stop(const struct lu_env *env, struct thandle *th)
693 struct osd_thandle *oh;
694 struct osd_thread_info *oti = osd_oti_get(env);
695 struct osd_iobuf *iobuf = &oti->oti_iobuf;
699 oh = container_of0(th, struct osd_thandle, ot_super);
701 if (oh->ot_handle != NULL) {
702 handle_t *hdl = oh->ot_handle;
704 hdl->h_sync = th->th_sync;
707 * add commit callback
708 * notice we don't do this in osd_trans_start()
709 * as the underlying transaction can change during truncate
711 osd_journal_callback_set(hdl, osd_trans_commit_cb,
714 LASSERT(oti->oti_txns == 1);
717 * XXX: the current rule is that we first start the tx and
718 * then lock the object(s), but we can't use
719 * this rule for data (due to locking specifics
720 * in ldiskfs). Also, in the long term we'd like to
721 * use the usual (locks; tx) ordering. So the
722 * UGLY thing is that we'll use one ordering for
723 * data (ofd) and the reverse ordering for metadata
724 * (mdd). Then at some point we'll fix the latter.
726 if (lu_device_is_md(&th->th_dev->dd_lu_dev)) {
727 LASSERT(oti->oti_r_locks == 0);
728 LASSERT(oti->oti_w_locks == 0);
730 rc = dt_txn_hook_stop(env, th);
732 CERROR("Failure in transaction hook: %d\n", rc);
733 oh->ot_handle = NULL;
734 OSD_CHECK_SLOW_TH(oh, oti->oti_dev,
735 rc = ldiskfs_journal_stop(hdl));
737 CERROR("Failure to stop transaction: %d\n", rc);
742 /* as we want journal IO and data IO to be concurrent, we don't block
743 * awaiting data IO completion in osd_do_bio(); instead we wait here
744 * once the transaction is submitted to the journal. all regular requests
745 * don't do direct IO (except read/write), thus this wait_event becomes
748 * IMPORTANT: we have to wait till any IO submitted by this thread is
749 * completed, otherwise the iobuf may be corrupted by a different request
751 cfs_wait_event(iobuf->dr_wait,
752 cfs_atomic_read(&iobuf->dr_numreqs) == 0);
754 rc = iobuf->dr_error;
759 static int osd_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb)
761 struct osd_thandle *oh = container_of0(th, struct osd_thandle,
764 cfs_list_add(&dcb->dcb_linkage, &oh->ot_dcb_list);
770 * Called just before object is freed. Releases all resources except for
771 * object itself (that is released by osd_object_free()).
773 * Concurrency: no concurrent access is possible that late in object
776 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
778 struct osd_object *obj = osd_obj(l);
779 struct inode *inode = obj->oo_inode;
781 LINVRNT(osd_invariant(obj));
784 * If the object is unlinked, remove the fid->ino mapping from the object index.
790 obj->oo_inode = NULL;
795 * Concurrency: ->loo_object_release() is called under site spin-lock.
797 static void osd_object_release(const struct lu_env *env,
803 * Concurrency: shouldn't matter.
805 static int osd_object_print(const struct lu_env *env, void *cookie,
806 lu_printer_t p, const struct lu_object *l)
808 struct osd_object *o = osd_obj(l);
811 if (o->oo_dir != NULL)
812 d = o->oo_dir->od_container.ic_descr;
815 return (*p)(env, cookie, LUSTRE_OSD_NAME"-object@%p(i:%p:%lu/%u)[%s]",
817 o->oo_inode ? o->oo_inode->i_ino : 0UL,
818 o->oo_inode ? o->oo_inode->i_generation : 0,
819 d ? d->id_ops->id_name : "plain");
823 * Concurrency: shouldn't matter.
825 int osd_statfs(const struct lu_env *env, struct dt_device *d,
826 struct obd_statfs *sfs)
828 struct osd_device *osd = osd_dt_dev(d);
829 struct super_block *sb = osd_sb(osd);
830 struct kstatfs *ksfs;
833 /* osd_lproc.c calls this without an env; allocate ksfs for that case */
834 if (unlikely(env == NULL)) {
839 ksfs = &osd_oti_get(env)->oti_ksfs;
842 cfs_spin_lock(&osd->od_osfs_lock);
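        /* statfs results are cached in od_statfs and refreshed at most
         * roughly once per second (od_osfs_age), under od_osfs_lock, so
         * frequent dt_statfs() and lprocfs callers don't hammer the
         * filesystem */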
844 if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
845 result = ll_do_statfs(sb, ksfs);
846 if (likely(result == 0)) { /* N.B. statfs can't really fail */
847 osd->od_osfs_age = cfs_time_current_64();
848 statfs_pack(&osd->od_statfs, ksfs);
852 if (likely(result == 0))
853 *sfs = osd->od_statfs;
854 cfs_spin_unlock(&osd->od_osfs_lock);
856 if (unlikely(env == NULL))
863 * Concurrency: doesn't access mutable data.
865 static void osd_conf_get(const struct lu_env *env,
866 const struct dt_device *dev,
867 struct dt_device_param *param)
869 struct super_block *sb = osd_sb(osd_dt_dev(dev));
872 * XXX should be taken from a not-yet-existing fs abstraction layer.
874 param->ddp_max_name_len = LDISKFS_NAME_LEN;
875 param->ddp_max_nlink = LDISKFS_LINK_MAX;
876 param->ddp_block_shift = osd_sb(osd_dt_dev(dev))->s_blocksize_bits;
877 param->ddp_mntopts = 0;
878 if (test_opt(sb, XATTR_USER))
879 param->ddp_mntopts |= MNTOPT_USERXATTR;
880 if (test_opt(sb, POSIX_ACL))
881 param->ddp_mntopts |= MNTOPT_ACL;
883 #if defined(LDISKFS_FEATURE_INCOMPAT_EA_INODE)
884 if (LDISKFS_HAS_INCOMPAT_FEATURE(sb, LDISKFS_FEATURE_INCOMPAT_EA_INODE))
885 param->ddp_max_ea_size = LDISKFS_XATTR_MAX_LARGE_EA_SIZE;
888 param->ddp_max_ea_size = sb->s_blocksize;
893 * Helper function to get and fill the buffer with input values.
895 static struct lu_buf *osd_buf_get(const struct lu_env *env, void *area, ssize_t len)
899 buf = &osd_oti_get(env)->oti_buf;
906 * Concurrency: shouldn't matter.
908 static int osd_sync(const struct lu_env *env, struct dt_device *d)
910 CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_NAME);
911 return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
915 * Start commit for OSD device.
917 * An implementation of dt_commit_async method for OSD device.
918 * Asynchronously starts the underlying fs sync and thereby a transaction
921 * \param env environment
924 * \see dt_device_operations
926 static int osd_commit_async(const struct lu_env *env,
929 struct super_block *s = osd_sb(osd_dt_dev(d));
932 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_NAME);
933 RETURN(s->s_op->sync_fs(s, 0));
937 * Concurrency: shouldn't matter.
940 static int osd_ro(const struct lu_env *env, struct dt_device *d)
942 struct super_block *sb = osd_sb(osd_dt_dev(d));
946 CERROR("*** setting device %s read-only ***\n", LUSTRE_OSD_NAME);
948 rc = __lvfs_set_rdonly(sb->s_bdev, LDISKFS_SB(sb)->journal_bdev);
953 * Concurrency: serialization provided by callers.
955 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
956 int mode, unsigned long timeout, __u32 alg,
957 struct lustre_capa_key *keys)
959 struct osd_device *dev = osd_dt_dev(d);
962 dev->od_fl_capa = mode;
963 dev->od_capa_timeout = timeout;
964 dev->od_capa_alg = alg;
965 dev->od_capa_keys = keys;
970 * Concurrency: serialization provided by callers.
972 static void osd_init_quota_ctxt(const struct lu_env *env, struct dt_device *d,
973 struct dt_quota_ctxt *ctxt, void *data)
975 struct obd_device *obd = (void *)ctxt;
976 struct vfsmount *mnt = (struct vfsmount *)data;
979 obd->u.obt.obt_sb = mnt->mnt_root->d_inode->i_sb;
980 OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
981 obd->obd_lvfs_ctxt.pwdmnt = mnt;
982 obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
983 obd->obd_lvfs_ctxt.fs = get_ds();
989 * Note: quota credits are not counted here.
990 * If we mount with --data_journal we may need more.
992 const int osd_dto_credits_noquota[DTO_NR] = {
995 * INDEX_EXTRA_TRANS_BLOCKS(8) +
996 * SINGLEDATA_TRANS_BLOCKS(8)
997 * XXX Note: maybe iam needs more, since iam has more levels than
1000 [DTO_INDEX_INSERT] = 16,
1001 [DTO_INDEX_DELETE] = 16,
1005 [DTO_INDEX_UPDATE] = 16,
1007 * Create an object. The same as object creation in EXT3.
1008 * DATA_TRANS_BLOCKS(14) +
1009 * INDEX_EXTRA_BLOCKS(8) +
1010 * 3(inode bits, groups, GDT)
1012 [DTO_OBJECT_CREATE] = 25,
1014 * XXX: real credits to be fixed
1016 [DTO_OBJECT_DELETE] = 25,
1018 * Attr set credits (inode)
1020 [DTO_ATTR_SET_BASE] = 1,
1022 * Xattr set. The same as xattr of EXT3.
1023 * DATA_TRANS_BLOCKS(14)
1024 * XXX Note: in the original MDS implementation INDEX_EXTRA_TRANS_BLOCKS
1025 * is also counted in. It is not clear why.
1027 [DTO_XATTR_SET] = 14,
1030 * credits for inode change during write.
1032 [DTO_WRITE_BASE] = 3,
1034 * credits for single block write.
1036 [DTO_WRITE_BLOCK] = 14,
1038 * Attr set credits for chown.
1039 * These are extra credits for setattr; they are zero without quota support.
1041 [DTO_ATTR_SET_CHOWN]= 0
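        /*
         * The ->dt_declare_*() methods below add these per-operation values
         * into osd_thandle::ot_credits.  For example, creating a directory
         * with a normal fid declares roughly DTO_OBJECT_CREATE (25) +
         * DTO_INDEX_INSERT (16, for the OI insert) + DTO_WRITE_BASE (3, for
         * the "." and ".." block), quota credits excluded.
         */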
1044 static const struct dt_device_operations osd_dt_ops = {
1045 .dt_root_get = osd_root_get,
1046 .dt_statfs = osd_statfs,
1047 .dt_trans_create = osd_trans_create,
1048 .dt_trans_start = osd_trans_start,
1049 .dt_trans_stop = osd_trans_stop,
1050 .dt_trans_cb_add = osd_trans_cb_add,
1051 .dt_conf_get = osd_conf_get,
1052 .dt_sync = osd_sync,
1054 .dt_commit_async = osd_commit_async,
1055 .dt_init_capa_ctxt = osd_init_capa_ctxt,
1056 .dt_init_quota_ctxt= osd_init_quota_ctxt,
1059 static void osd_object_read_lock(const struct lu_env *env,
1060 struct dt_object *dt, unsigned role)
1062 struct osd_object *obj = osd_dt_obj(dt);
1063 struct osd_thread_info *oti = osd_oti_get(env);
1065 LINVRNT(osd_invariant(obj));
1067 LASSERT(obj->oo_owner != env);
1068 cfs_down_read_nested(&obj->oo_sem, role);
1070 LASSERT(obj->oo_owner == NULL);
1074 static void osd_object_write_lock(const struct lu_env *env,
1075 struct dt_object *dt, unsigned role)
1077 struct osd_object *obj = osd_dt_obj(dt);
1078 struct osd_thread_info *oti = osd_oti_get(env);
1080 LINVRNT(osd_invariant(obj));
1082 LASSERT(obj->oo_owner != env);
1083 cfs_down_write_nested(&obj->oo_sem, role);
1085 LASSERT(obj->oo_owner == NULL);
1086 obj->oo_owner = env;
1090 static void osd_object_read_unlock(const struct lu_env *env,
1091 struct dt_object *dt)
1093 struct osd_object *obj = osd_dt_obj(dt);
1094 struct osd_thread_info *oti = osd_oti_get(env);
1096 LINVRNT(osd_invariant(obj));
1098 LASSERT(oti->oti_r_locks > 0);
1100 cfs_up_read(&obj->oo_sem);
1103 static void osd_object_write_unlock(const struct lu_env *env,
1104 struct dt_object *dt)
1106 struct osd_object *obj = osd_dt_obj(dt);
1107 struct osd_thread_info *oti = osd_oti_get(env);
1109 LINVRNT(osd_invariant(obj));
1111 LASSERT(obj->oo_owner == env);
1112 LASSERT(oti->oti_w_locks > 0);
1114 obj->oo_owner = NULL;
1115 cfs_up_write(&obj->oo_sem);
1118 static int osd_object_write_locked(const struct lu_env *env,
1119 struct dt_object *dt)
1121 struct osd_object *obj = osd_dt_obj(dt);
1123 LINVRNT(osd_invariant(obj));
1125 return obj->oo_owner == env;
1128 static int capa_is_sane(const struct lu_env *env,
1129 struct osd_device *dev,
1130 struct lustre_capa *capa,
1131 struct lustre_capa_key *keys)
1133 struct osd_thread_info *oti = osd_oti_get(env);
1134 struct lustre_capa *tcapa = &oti->oti_capa;
1135 struct obd_capa *oc;
1139 oc = capa_lookup(dev->od_capa_hash, capa, 0);
1141 if (capa_is_expired(oc)) {
1142 DEBUG_CAPA(D_ERROR, capa, "expired");
1149 if (capa_is_expired_sec(capa)) {
1150 DEBUG_CAPA(D_ERROR, capa, "expired");
1154 cfs_spin_lock(&capa_lock);
1155 for (i = 0; i < 2; i++) {
1156 if (keys[i].lk_keyid == capa->lc_keyid) {
1157 oti->oti_capa_key = keys[i];
1161 cfs_spin_unlock(&capa_lock);
1164 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1168 rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1172 if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1173 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1177 oc = capa_add(dev->od_capa_hash, capa);
1183 int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1184 struct lustre_capa *capa, __u64 opc)
1186 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1187 struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1188 struct md_capainfo *ci;
1191 if (!dev->od_fl_capa)
1194 if (capa == BYPASS_CAPA)
1197 ci = md_capainfo(env);
1201 if (ci->mc_auth == LC_ID_NONE)
1205 CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
1209 if (!lu_fid_eq(fid, &capa->lc_fid)) {
1210 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1215 if (!capa_opc_supported(capa, opc)) {
1216 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1220 if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
1221 DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
1228 static struct timespec *osd_inode_time(const struct lu_env *env,
1229 struct inode *inode, __u64 seconds)
1231 struct osd_thread_info *oti = osd_oti_get(env);
1232 struct timespec *t = &oti->oti_time;
1234 t->tv_sec = seconds;
1236 *t = timespec_trunc(*t, get_sb_time_gran(inode->i_sb));
1241 static void osd_inode_getattr(const struct lu_env *env,
1242 struct inode *inode, struct lu_attr *attr)
1244 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
1245 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
1246 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
1248 attr->la_atime = LTIME_S(inode->i_atime);
1249 attr->la_mtime = LTIME_S(inode->i_mtime);
1250 attr->la_ctime = LTIME_S(inode->i_ctime);
1251 attr->la_mode = inode->i_mode;
1252 attr->la_size = i_size_read(inode);
1253 attr->la_blocks = inode->i_blocks;
1254 attr->la_uid = inode->i_uid;
1255 attr->la_gid = inode->i_gid;
1256 attr->la_flags = LDISKFS_I(inode)->i_flags;
1257 attr->la_nlink = inode->i_nlink;
1258 attr->la_rdev = inode->i_rdev;
1259 attr->la_blksize = ll_inode_blksize(inode);
1260 attr->la_blkbits = inode->i_blkbits;
1263 static int osd_attr_get(const struct lu_env *env,
1264 struct dt_object *dt,
1265 struct lu_attr *attr,
1266 struct lustre_capa *capa)
1268 struct osd_object *obj = osd_dt_obj(dt);
1270 LASSERT(dt_object_exists(dt));
1271 LINVRNT(osd_invariant(obj));
1273 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1276 cfs_spin_lock(&obj->oo_guard);
1277 osd_inode_getattr(env, obj->oo_inode, attr);
1278 cfs_spin_unlock(&obj->oo_guard);
1282 static int osd_declare_attr_set(const struct lu_env *env,
1283 struct dt_object *dt,
1284 const struct lu_attr *attr,
1285 struct thandle *handle)
1287 struct osd_thandle *oh;
1288 struct osd_object *obj;
1290 LASSERT(dt != NULL);
1291 LASSERT(handle != NULL);
1293 obj = osd_dt_obj(dt);
1294 LASSERT(osd_invariant(obj));
1296 oh = container_of0(handle, struct osd_thandle, ot_super);
1297 LASSERT(oh->ot_handle == NULL);
1299 OSD_DECLARE_OP(oh, attr_set);
1300 oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
1302 if (attr && attr->la_valid & LA_UID) {
1304 osd_declare_qid(dt, oh, USRQUOTA, obj->oo_inode->i_uid,
1306 osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
1308 if (attr && attr->la_valid & LA_GID) {
1310 osd_declare_qid(dt, oh, GRPQUOTA, obj->oo_inode->i_gid,
1312 osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
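        /* for a chown/chgrp, quota is declared for both the current owner
         * (inode passed, so an existing quota entry can be detected) and
         * the new id (NULL inode, which may need quota-file initialization
         * credits) */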
1318 static int osd_inode_setattr(const struct lu_env *env,
1319 struct inode *inode, const struct lu_attr *attr)
1323 bits = attr->la_valid;
1325 LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
1327 #ifdef HAVE_QUOTA_SUPPORT
1328 if ((bits & LA_UID && attr->la_uid != inode->i_uid) ||
1329 (bits & LA_GID && attr->la_gid != inode->i_gid)) {
1330 struct osd_ctxt *save = &osd_oti_get(env)->oti_ctxt;
1336 iattr.ia_valid |= ATTR_UID;
1338 iattr.ia_valid |= ATTR_GID;
1339 iattr.ia_uid = attr->la_uid;
1340 iattr.ia_gid = attr->la_gid;
1341 osd_push_ctxt(env, save);
1342 rc = ll_vfs_dq_transfer(inode, &iattr) ? -EDQUOT : 0;
1349 if (bits & LA_ATIME)
1350 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1351 if (bits & LA_CTIME)
1352 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1353 if (bits & LA_MTIME)
1354 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1355 if (bits & LA_SIZE) {
1356 LDISKFS_I(inode)->i_disksize = attr->la_size;
1357 i_size_write(inode, attr->la_size);
1361 /* OSD should not change "i_blocks" which is used by quota.
1362 * "i_blocks" should be changed by ldiskfs only. */
1363 if (bits & LA_BLOCKS)
1364 inode->i_blocks = attr->la_blocks;
1367 inode->i_mode = (inode->i_mode & S_IFMT) |
1368 (attr->la_mode & ~S_IFMT);
1370 inode->i_uid = attr->la_uid;
1372 inode->i_gid = attr->la_gid;
1373 if (bits & LA_NLINK)
1374 inode->i_nlink = attr->la_nlink;
1376 inode->i_rdev = attr->la_rdev;
1378 if (bits & LA_FLAGS) {
1379 /* always keep S_NOCMTIME */
1380 inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
1386 static int osd_attr_set(const struct lu_env *env,
1387 struct dt_object *dt,
1388 const struct lu_attr *attr,
1389 struct thandle *handle,
1390 struct lustre_capa *capa)
1392 struct osd_object *obj = osd_dt_obj(dt);
1395 LASSERT(handle != NULL);
1396 LASSERT(dt_object_exists(dt));
1397 LASSERT(osd_invariant(obj));
1399 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1402 OSD_EXEC_OP(handle, attr_set);
1404 cfs_spin_lock(&obj->oo_guard);
1405 rc = osd_inode_setattr(env, obj->oo_inode, attr);
1406 cfs_spin_unlock(&obj->oo_guard);
1409 obj->oo_inode->i_sb->s_op->dirty_inode(obj->oo_inode);
1416 * XXX temporary solution.
1418 static int osd_create_pre(struct osd_thread_info *info, struct osd_object *obj,
1419 struct lu_attr *attr, struct thandle *th)
1424 static int osd_create_post(struct osd_thread_info *info, struct osd_object *obj,
1425 struct lu_attr *attr, struct thandle *th)
1427 osd_object_init0(obj);
1428 if (obj->oo_inode && (obj->oo_inode->i_state & I_NEW))
1429 unlock_new_inode(obj->oo_inode);
1433 struct dentry *osd_child_dentry_get(const struct lu_env *env,
1434 struct osd_object *obj,
1435 const char *name, const int namelen)
1437 return osd_child_dentry_by_inode(env, obj->oo_inode, name, namelen);
1440 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1442 struct dt_allocation_hint *hint,
1446 struct osd_device *osd = osd_obj2dev(obj);
1447 struct osd_thandle *oth;
1448 struct dt_object *parent = NULL;
1449 struct inode *inode;
1450 #ifdef HAVE_QUOTA_SUPPORT
1451 struct osd_ctxt *save = &info->oti_ctxt;
1454 LINVRNT(osd_invariant(obj));
1455 LASSERT(obj->oo_inode == NULL);
1456 LASSERT(obj->oo_hl_head == NULL);
1458 if (S_ISDIR(mode) && ldiskfs_pdo) {
1459 obj->oo_hl_head =ldiskfs_htree_lock_head_alloc(HTREE_HBITS_DEF);
1460 if (obj->oo_hl_head == NULL)
1464 oth = container_of(th, struct osd_thandle, ot_super);
1465 LASSERT(oth->ot_handle->h_transaction != NULL);
1467 if (hint && hint->dah_parent)
1468 parent = hint->dah_parent;
1470 #ifdef HAVE_QUOTA_SUPPORT
1471 osd_push_ctxt(info->oti_env, save);
1473 inode = ldiskfs_create_inode(oth->ot_handle,
1474 parent ? osd_dt_obj(parent)->oo_inode :
1475 osd_sb(osd)->s_root->d_inode,
1477 #ifdef HAVE_QUOTA_SUPPORT
1480 if (!IS_ERR(inode)) {
1481 /* Do not update file c/mtime in ldiskfs.
1482 * NB: don't need any lock because no contention at this
1484 inode->i_flags |= S_NOCMTIME;
1485 obj->oo_inode = inode;
1488 if (obj->oo_hl_head != NULL) {
1489 ldiskfs_htree_lock_head_free(obj->oo_hl_head);
1490 obj->oo_hl_head = NULL;
1492 result = PTR_ERR(inode);
1494 LINVRNT(osd_invariant(obj));
1502 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1503 struct lu_attr *attr,
1504 struct dt_allocation_hint *hint,
1505 struct dt_object_format *dof,
1509 struct osd_thandle *oth;
1510 struct osd_device *osd = osd_obj2dev(obj);
1511 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1513 LASSERT(S_ISDIR(attr->la_mode));
1515 oth = container_of(th, struct osd_thandle, ot_super);
1516 LASSERT(oth->ot_handle->h_transaction != NULL);
1517 result = osd_mkfile(info, obj, mode, hint, th);
1518 if (result == 0 && osd->od_iop_mode == 0) {
1519 LASSERT(obj->oo_inode != NULL);
1521 * XXX uh-oh... call low-level iam function directly.
1524 result = iam_lvar_create(obj->oo_inode, OSD_NAME_LEN, 4,
1525 sizeof (struct osd_fid_pack),
1531 static int osd_mk_index(struct osd_thread_info *info, struct osd_object *obj,
1532 struct lu_attr *attr,
1533 struct dt_allocation_hint *hint,
1534 struct dt_object_format *dof,
1538 struct osd_thandle *oth;
1539 const struct dt_index_features *feat = dof->u.dof_idx.di_feat;
1541 __u32 mode = (attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX));
1543 LASSERT(S_ISREG(attr->la_mode));
1545 oth = container_of(th, struct osd_thandle, ot_super);
1546 LASSERT(oth->ot_handle->h_transaction != NULL);
1548 result = osd_mkfile(info, obj, mode, hint, th);
1550 LASSERT(obj->oo_inode != NULL);
1551 if (feat->dif_flags & DT_IND_VARKEY)
1552 result = iam_lvar_create(obj->oo_inode,
1553 feat->dif_keysize_max,
1555 feat->dif_recsize_max,
1558 result = iam_lfix_create(obj->oo_inode,
1559 feat->dif_keysize_max,
1561 feat->dif_recsize_max,
1568 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1569 struct lu_attr *attr,
1570 struct dt_allocation_hint *hint,
1571 struct dt_object_format *dof,
1574 LASSERT(S_ISREG(attr->la_mode));
1575 return osd_mkfile(info, obj, (attr->la_mode &
1576 (S_IFMT | S_IRWXUGO | S_ISVTX)), hint, th);
1579 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1580 struct lu_attr *attr,
1581 struct dt_allocation_hint *hint,
1582 struct dt_object_format *dof,
1585 LASSERT(S_ISLNK(attr->la_mode));
1586 return osd_mkfile(info, obj, (attr->la_mode &
1587 (S_IFMT | S_IRWXUGO | S_ISVTX)), hint, th);
1590 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1591 struct lu_attr *attr,
1592 struct dt_allocation_hint *hint,
1593 struct dt_object_format *dof,
1596 cfs_umode_t mode = attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX);
1599 LINVRNT(osd_invariant(obj));
1600 LASSERT(obj->oo_inode == NULL);
1601 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1602 S_ISFIFO(mode) || S_ISSOCK(mode));
1604 result = osd_mkfile(info, obj, mode, hint, th);
1606 LASSERT(obj->oo_inode != NULL);
1607 init_special_inode(obj->oo_inode, mode, attr->la_rdev);
1609 LINVRNT(osd_invariant(obj));
1613 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1615 struct dt_allocation_hint *hint,
1616 struct dt_object_format *dof,
1619 static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
1621 osd_obj_type_f result;
1637 result = osd_mk_index;
1648 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1649 struct dt_object *parent, cfs_umode_t child_mode)
1653 memset(ah, 0, sizeof(*ah));
1654 ah->dah_parent = parent;
1655 ah->dah_mode = child_mode;
1659 * Helper function for osd_object_create()
1661 * \retval 0, on success
1663 static int __osd_object_create(struct osd_thread_info *info,
1664 struct osd_object *obj, struct lu_attr *attr,
1665 struct dt_allocation_hint *hint,
1666 struct dt_object_format *dof,
1672 result = osd_create_pre(info, obj, attr, th);
1674 result = osd_create_type_f(dof->dof_type)(info, obj,
1675 attr, hint, dof, th);
1677 result = osd_create_post(info, obj, attr, th);
1683 * Helper function for osd_object_create()
1685 * \retval 0, on success
1687 static int __osd_oi_insert(const struct lu_env *env, struct osd_object *obj,
1688 const struct lu_fid *fid, struct thandle *th)
1690 struct osd_thread_info *info = osd_oti_get(env);
1691 struct osd_inode_id *id = &info->oti_id;
1692 struct osd_device *osd = osd_obj2dev(obj);
1693 struct md_ucred *uc = md_ucred(env);
1695 LASSERT(obj->oo_inode != NULL);
1696 LASSERT(uc != NULL);
1698 id->oii_ino = obj->oo_inode->i_ino;
1699 id->oii_gen = obj->oo_inode->i_generation;
1701 return osd_oi_insert(info, osd, fid, id, th,
1702 uc->mu_cap & CFS_CAP_SYS_RESOURCE_MASK);
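/*
 * The OI insert above records the fid -> (inode number, generation) mapping
 * that osd_oi_lookup()/osd_fid_lookup() later use to locate the inode.
 */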
1705 static int osd_declare_object_create(const struct lu_env *env,
1706 struct dt_object *dt,
1707 struct lu_attr *attr,
1708 struct dt_allocation_hint *hint,
1709 struct dt_object_format *dof,
1710 struct thandle *handle)
1712 struct osd_thandle *oh;
1714 LASSERT(handle != NULL);
1716 oh = container_of0(handle, struct osd_thandle, ot_super);
1717 LASSERT(oh->ot_handle == NULL);
1719 OSD_DECLARE_OP(oh, create);
1720 oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_CREATE];
1721 /* XXX: So far, only a normal fid needs to be inserted into the OI;
1722 * things could change later. Revise the following code then. */
1723 if (fid_is_norm(lu_object_fid(&dt->do_lu))) {
1724 OSD_DECLARE_OP(oh, insert);
1725 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
1727 /* If this is a directory, then we expect . and .. to be inserted as
1728 * well. The one directory block always needs to be created for the
1729 * directory, so we could use DTO_WRITE_BASE here (GDT, block bitmap,
1730 * block); there is no danger of needing a tree for the first block.
1732 if (attr && S_ISDIR(attr->la_mode)) {
1733 OSD_DECLARE_OP(oh, insert);
1734 OSD_DECLARE_OP(oh, insert);
1735 oh->ot_credits += osd_dto_credits_noquota[DTO_WRITE_BASE];
1739 osd_declare_qid(dt, oh, USRQUOTA, attr->la_uid, NULL);
1740 osd_declare_qid(dt, oh, GRPQUOTA, attr->la_gid, NULL);
1745 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1746 struct lu_attr *attr,
1747 struct dt_allocation_hint *hint,
1748 struct dt_object_format *dof,
1751 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1752 struct osd_object *obj = osd_dt_obj(dt);
1753 struct osd_thread_info *info = osd_oti_get(env);
1758 LINVRNT(osd_invariant(obj));
1759 LASSERT(!dt_object_exists(dt));
1760 LASSERT(osd_write_locked(env, obj));
1761 LASSERT(th != NULL);
1763 OSD_EXEC_OP(th, create);
1765 result = __osd_object_create(info, obj, attr, hint, dof, th);
1767 result = __osd_oi_insert(env, obj, fid, th);
1769 LASSERT(ergo(result == 0, dt_object_exists(dt)));
1770 LASSERT(osd_invariant(obj));
1775 * Called to destroy on-disk representation of the object
1777 * Concurrency: must be locked
1779 static int osd_declare_object_destroy(const struct lu_env *env,
1780 struct dt_object *dt,
1783 struct osd_object *obj = osd_dt_obj(dt);
1784 struct inode *inode = obj->oo_inode;
1785 struct osd_thandle *oh;
1789 oh = container_of0(th, struct osd_thandle, ot_super);
1790 LASSERT(oh->ot_handle == NULL);
1793 OSD_DECLARE_OP(oh, destroy);
1794 OSD_DECLARE_OP(oh, delete);
1795 oh->ot_credits += osd_dto_credits_noquota[DTO_OBJECT_DELETE];
1796 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
1798 osd_declare_qid(dt, oh, USRQUOTA, inode->i_uid, inode);
1799 osd_declare_qid(dt, oh, GRPQUOTA, inode->i_gid, inode);
1804 static int osd_object_destroy(const struct lu_env *env,
1805 struct dt_object *dt,
1808 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1809 struct osd_object *obj = osd_dt_obj(dt);
1810 struct inode *inode = obj->oo_inode;
1811 struct osd_device *osd = osd_obj2dev(obj);
1812 struct osd_thandle *oh;
1816 oh = container_of0(th, struct osd_thandle, ot_super);
1817 LASSERT(oh->ot_handle);
1819 LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
1821 if (S_ISDIR(inode->i_mode)) {
1822 LASSERT(osd_inode_unlinked(inode) ||
1823 inode->i_nlink == 1);
1824 cfs_spin_lock(&obj->oo_guard);
1826 cfs_spin_unlock(&obj->oo_guard);
1827 inode->i_sb->s_op->dirty_inode(inode);
1829 LASSERT(osd_inode_unlinked(inode));
1832 OSD_EXEC_OP(th, destroy);
1834 result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
1836 /* XXX: add to ext3 orphan list */
1837 /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
1839 /* not needed in the cache anymore */
1840 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
1846 * Helper function for osd_xattr_set()
1848 static int __osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
1849 const struct lu_buf *buf, const char *name, int fl)
1851 struct osd_object *obj = osd_dt_obj(dt);
1852 struct inode *inode = obj->oo_inode;
1853 struct osd_thread_info *info = osd_oti_get(env);
1854 struct dentry *dentry = &info->oti_child_dentry;
1858 LASSERT(dt_object_exists(dt));
1859 LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
1860 LASSERT(osd_write_locked(env, obj));
1862 if (fl & LU_XATTR_REPLACE)
1863 fs_flags |= XATTR_REPLACE;
1865 if (fl & LU_XATTR_CREATE)
1866 fs_flags |= XATTR_CREATE;
1868 dentry->d_inode = inode;
1869 rc = inode->i_op->setxattr(dentry, name, buf->lb_buf,
1870 buf->lb_len, fs_flags);
1875 * Put the fid into lustre_mdt_attrs, and then place the structure in the
1876 * inode's EA. This fid should not be altered during the lifetime
1879 * \retval +ve, on success
1880 * \retval -ve, on error
1882 * FIXME: it would be better to use ldiskfs_xattr_set_handle() here
1884 static int osd_ea_fid_set(const struct lu_env *env, struct dt_object *dt,
1885 const struct lu_fid *fid)
1887 struct osd_thread_info *info = osd_oti_get(env);
1888 struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
1890 lustre_lma_init(mdt_attrs, fid);
1891 lustre_lma_swab(mdt_attrs);
1892 return __osd_xattr_set(env, dt,
1893 osd_buf_get(env, mdt_attrs, sizeof *mdt_attrs),
1894 XATTR_NAME_LMA, LU_XATTR_CREATE);
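/*
 * The LMA is stored in disk byte order (lustre_lma_swab() above) under the
 * XATTR_NAME_LMA extended attribute; osd_ea_fid_get() below reads it back
 * and falls back to building an igif fid when the xattr is absent.
 */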
1899 * Helper function to form igif
1901 static inline void osd_igif_get(const struct lu_env *env, struct inode *inode,
1904 LU_IGIF_BUILD(fid, inode->i_ino, inode->i_generation);
1908 * ldiskfs supports a fid in the dirent; it is passed in dentry->d_fsdata.
1909 * lustre 1.8 also uses d_fsdata for passing other info to ldiskfs.
1910 * To be compatible with the 1.8 ldiskfs driver we need to have a
1911 * magic number at the start of the fid data.
1912 * \ldiskfs_dentry_param is used only to pass fid from osd to ldiskfs.
1915 void osd_get_ldiskfs_dirent_param(struct ldiskfs_dentry_param *param,
1916 const struct dt_rec *fid)
1918 param->edp_magic = LDISKFS_LUFID_MAGIC;
1919 param->edp_len = sizeof(struct lu_fid) + 1;
1921 fid_cpu_to_be((struct lu_fid *)param->edp_data,
1922 (struct lu_fid *)fid);
1926 * Try to read the fid from the inode EA into dt_rec; if the return value
1927 * (rc) is positive then we got the fid, otherwise we have to form an igif.
1929 * \param fid object fid.
1931 * \retval 0 on success
1933 static int osd_ea_fid_get(const struct lu_env *env, struct osd_object *obj,
1934 __u32 ino, struct lu_fid *fid)
1936 struct osd_thread_info *info = osd_oti_get(env);
1937 struct lustre_mdt_attrs *mdt_attrs = &info->oti_mdt_attrs;
1938 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
1939 struct dentry *dentry = &info->oti_child_dentry;
1940 struct osd_inode_id *id = &info->oti_id;
1941 struct osd_device *dev;
1942 struct inode *inode;
1946 dev = osd_dev(ldev);
1949 id->oii_gen = OSD_OII_NOGEN;
1951 inode = osd_iget(info, dev, id);
1952 if (IS_ERR(inode)) {
1953 rc = PTR_ERR(inode);
1956 dentry->d_inode = inode;
1958 LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
1959 rc = inode->i_op->getxattr(dentry, XATTR_NAME_LMA, (void *)mdt_attrs,
1962 /* Check LMA compatibility */
1964 (mdt_attrs->lma_incompat & ~cpu_to_le32(LMA_INCOMPAT_SUPP))) {
1965 CWARN("Inode %lx: Unsupported incompat LMA feature(s) %#x\n",
1966 inode->i_ino, le32_to_cpu(mdt_attrs->lma_incompat) &
1967 ~LMA_INCOMPAT_SUPP);
1972 lustre_lma_swab(mdt_attrs);
1973 memcpy(fid, &mdt_attrs->lma_self_fid, sizeof(*fid));
1975 } else if (rc == -ENODATA) {
1976 osd_igif_get(env, inode, fid);
1985 * OSD layer object create function for interoperability mode (b11826).
1986 * This is mostly similar to osd_object_create(). The only difference is
1987 * that the fid is inserted into the inode EA here.
1989 * \retval 0, on success
1990 * \retval -ve, on error
1992 static int osd_object_ea_create(const struct lu_env *env, struct dt_object *dt,
1993 struct lu_attr *attr,
1994 struct dt_allocation_hint *hint,
1995 struct dt_object_format *dof,
1998 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1999 struct osd_object *obj = osd_dt_obj(dt);
2000 struct osd_thread_info *info = osd_oti_get(env);
2005 LASSERT(osd_invariant(obj));
2006 LASSERT(!dt_object_exists(dt));
2007 LASSERT(osd_write_locked(env, obj));
2008 LASSERT(th != NULL);
2010 OSD_EXEC_OP(th, create);
2012 result = __osd_object_create(info, obj, attr, hint, dof, th);
2013 /* objects under the osd root should have an igif fid, so don't add the fid EA */
2014 if (result == 0 && fid_seq(fid) >= FID_SEQ_NORMAL)
2015 result = osd_ea_fid_set(env, dt, fid);
2018 result = __osd_oi_insert(env, obj, fid, th);
2020 LASSERT(ergo(result == 0, dt_object_exists(dt)));
2021 LINVRNT(osd_invariant(obj));
2025 static int osd_declare_object_ref_add(const struct lu_env *env,
2026 struct dt_object *dt,
2027 struct thandle *handle)
2029 struct osd_thandle *oh;
2031 /* it's possible that object doesn't exist yet */
2032 LASSERT(handle != NULL);
2034 oh = container_of0(handle, struct osd_thandle, ot_super);
2035 LASSERT(oh->ot_handle == NULL);
2037 OSD_DECLARE_OP(oh, ref_add);
2038 oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
2044 * Concurrency: @dt is write locked.
2046 static int osd_object_ref_add(const struct lu_env *env,
2047 struct dt_object *dt, struct thandle *th)
2049 struct osd_object *obj = osd_dt_obj(dt);
2050 struct inode *inode = obj->oo_inode;
2052 LINVRNT(osd_invariant(obj));
2053 LASSERT(dt_object_exists(dt));
2054 LASSERT(osd_write_locked(env, obj));
2055 LASSERT(th != NULL);
2057 OSD_EXEC_OP(th, ref_add);
2060 * DIR_NLINK feature is set for compatibility reasons if:
2061 * 1) nlinks > LDISKFS_LINK_MAX, or
2062 * 2) nlinks == 2, since this indicates i_nlink was previously 1.
2064 * It is easier to always set this flag (rather than check and set),
2065 * since it has less overhead, and the superblock will be dirtied
2066 * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
2067 * do not actually care whether this flag is set or not.
2069 cfs_spin_lock(&obj->oo_guard);
2071 if (S_ISDIR(inode->i_mode) && inode->i_nlink > 1) {
2072 if (inode->i_nlink >= LDISKFS_LINK_MAX ||
2073 inode->i_nlink == 2)
2076 LASSERT(inode->i_nlink < LDISKFS_LINK_MAX);
2077 cfs_spin_unlock(&obj->oo_guard);
2078 inode->i_sb->s_op->dirty_inode(inode);
2079 LINVRNT(osd_invariant(obj));
2084 static int osd_declare_object_ref_del(const struct lu_env *env,
2085 struct dt_object *dt,
2086 struct thandle *handle)
2088 struct osd_thandle *oh;
2090 LASSERT(dt_object_exists(dt));
2091 LASSERT(handle != NULL);
2093 oh = container_of0(handle, struct osd_thandle, ot_super);
2094 LASSERT(oh->ot_handle == NULL);
2096 OSD_DECLARE_OP(oh, ref_del);
2097 oh->ot_credits += osd_dto_credits_noquota[DTO_ATTR_SET_BASE];
2103 * Concurrency: @dt is write locked.
2105 static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
2108 struct osd_object *obj = osd_dt_obj(dt);
2109 struct inode *inode = obj->oo_inode;
2111 LINVRNT(osd_invariant(obj));
2112 LASSERT(dt_object_exists(dt));
2113 LASSERT(osd_write_locked(env, obj));
2114 LASSERT(th != NULL);
2116 OSD_EXEC_OP(th, ref_del);
2118 cfs_spin_lock(&obj->oo_guard);
2119 LASSERT(inode->i_nlink > 0);
2121 /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
2122 * then the nlink count is 1. Don't let it be set to 0 or the directory
2123 * inode will be deleted incorrectly. */
2124 if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
2126 cfs_spin_unlock(&obj->oo_guard);
2127 inode->i_sb->s_op->dirty_inode(inode);
2128 LINVRNT(osd_invariant(obj));
2134 * Get the 64-bit version for an inode.
2136 static int osd_object_version_get(const struct lu_env *env,
2137 struct dt_object *dt, dt_obj_version_t *ver)
2139 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2141 CDEBUG(D_INODE, "Get version "LPX64" for inode %lu\n",
2142 LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2143 *ver = LDISKFS_I(inode)->i_fs_version;
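        /* the 64-bit object version is kept in the ldiskfs inode's
         * i_fs_version and is exposed through the XATTR_NAME_VERSION
         * pseudo-xattr handled in osd_xattr_get()/osd_xattr_set() */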
2148 * Concurrency: @dt is read locked.
2150 static int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
2151 struct lu_buf *buf, const char *name,
2152 struct lustre_capa *capa)
2154 struct osd_object *obj = osd_dt_obj(dt);
2155 struct inode *inode = obj->oo_inode;
2156 struct osd_thread_info *info = osd_oti_get(env);
2157 struct dentry *dentry = &info->oti_obj_dentry;
2159 /* version get is not real XATTR but uses xattr API */
2160 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2161 /* for version we are just using xattr API but change inode
2163 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2164 osd_object_version_get(env, dt, buf->lb_buf);
2165 return sizeof(dt_obj_version_t);
2168 LASSERT(dt_object_exists(dt));
2169 LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
2170 LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2172 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2175 dentry->d_inode = inode;
2176 return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
2180 static int osd_declare_xattr_set(const struct lu_env *env,
2181 struct dt_object *dt,
2182 const struct lu_buf *buf, const char *name,
2183 int fl, struct thandle *handle)
2185 struct osd_thandle *oh;
2187 LASSERT(handle != NULL);
2189 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2190 /* no credits for version */
2194 oh = container_of0(handle, struct osd_thandle, ot_super);
2195 LASSERT(oh->ot_handle == NULL);
2197 OSD_DECLARE_OP(oh, xattr_set);
2198 oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
2204 * Set the 64-bit version for object
2206 static void osd_object_version_set(const struct lu_env *env,
2207 struct dt_object *dt,
2208 dt_obj_version_t *new_version)
2210 struct inode *inode = osd_dt_obj(dt)->oo_inode;
2212 CDEBUG(D_INODE, "Set version "LPX64" (old "LPX64") for inode %lu\n",
2213 *new_version, LDISKFS_I(inode)->i_fs_version, inode->i_ino);
2215 LDISKFS_I(inode)->i_fs_version = *new_version;
2216 /** Version is set after all inode operations are finished,
2217 * so we should mark it dirty here */
2218 inode->i_sb->s_op->dirty_inode(inode);
2222 * Concurrency: @dt is write locked.
2224 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
2225 const struct lu_buf *buf, const char *name, int fl,
2226 struct thandle *handle, struct lustre_capa *capa)
2228 LASSERT(handle != NULL);
2230 /* version set is not real XATTR */
2231 if (strcmp(name, XATTR_NAME_VERSION) == 0) {
2232 /* for version we are just using xattr API but change inode
2234 LASSERT(buf->lb_len == sizeof(dt_obj_version_t));
2235 osd_object_version_set(env, dt, buf->lb_buf);
2236 return sizeof(dt_obj_version_t);
2239 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2242 OSD_EXEC_OP(handle, xattr_set);
2243 return __osd_xattr_set(env, dt, buf, name, fl);
2247 * Concurrency: @dt is read locked.
2249 static int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
2250 struct lu_buf *buf, struct lustre_capa *capa)
2252 struct osd_object *obj = osd_dt_obj(dt);
2253 struct inode *inode = obj->oo_inode;
2254 struct osd_thread_info *info = osd_oti_get(env);
2255 struct dentry *dentry = &info->oti_obj_dentry;
2257 LASSERT(dt_object_exists(dt));
2258 LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
2259 LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
2261 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
2264 dentry->d_inode = inode;
2265 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
2268 static int osd_declare_xattr_del(const struct lu_env *env,
2269 struct dt_object *dt, const char *name,
2270 struct thandle *handle)
2272 struct osd_thandle *oh;
2274 LASSERT(dt_object_exists(dt));
2275 LASSERT(handle != NULL);
2277 oh = container_of0(handle, struct osd_thandle, ot_super);
2278 LASSERT(oh->ot_handle == NULL);
2280 OSD_DECLARE_OP(oh, xattr_set);
2281 oh->ot_credits += osd_dto_credits_noquota[DTO_XATTR_SET];
2287 * Concurrency: @dt is write locked.
2289 static int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
2290 const char *name, struct thandle *handle,
2291 struct lustre_capa *capa)
2293 struct osd_object *obj = osd_dt_obj(dt);
2294 struct inode *inode = obj->oo_inode;
2295 struct osd_thread_info *info = osd_oti_get(env);
2296 struct dentry *dentry = &info->oti_obj_dentry;
2299 LASSERT(dt_object_exists(dt));
2300 LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
2301 LASSERT(osd_write_locked(env, obj));
2302 LASSERT(handle != NULL);
2304 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
2307 OSD_EXEC_OP(handle, xattr_set);
2309 dentry->d_inode = inode;
2310 rc = inode->i_op->removexattr(dentry, name);
2314 static struct obd_capa *osd_capa_get(const struct lu_env *env,
2315 struct dt_object *dt,
2316 struct lustre_capa *old,
2319 struct osd_thread_info *info = osd_oti_get(env);
2320 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2321 struct osd_object *obj = osd_dt_obj(dt);
2322 struct osd_device *dev = osd_obj2dev(obj);
2323 struct lustre_capa_key *key = &info->oti_capa_key;
2324 struct lustre_capa *capa = &info->oti_capa;
2325 struct obd_capa *oc;
2326 struct md_capainfo *ci;
2330 if (!dev->od_fl_capa)
2331 RETURN(ERR_PTR(-ENOENT));
2333 LASSERT(dt_object_exists(dt));
2334 LINVRNT(osd_invariant(obj));
2336 /* renewal sanity check */
2337 if (old && osd_object_auth(env, dt, old, opc))
2338 RETURN(ERR_PTR(-EACCES));
2340 ci = md_capainfo(env);
2342 RETURN(ERR_PTR(-ENOENT));
2344 switch (ci->mc_auth) {
2348 capa->lc_uid = obj->oo_inode->i_uid;
2349 capa->lc_gid = obj->oo_inode->i_gid;
2350 capa->lc_flags = LC_ID_PLAIN;
2352 case LC_ID_CONVERT: {
2355 s[0] = obj->oo_inode->i_uid;
2356 cfs_get_random_bytes(&(s[1]), sizeof(__u32));
2357 s[2] = obj->oo_inode->i_gid;
2358 cfs_get_random_bytes(&(s[3]), sizeof(__u32));
2359 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
2361 RETURN(ERR_PTR(rc));
2363 capa->lc_uid = ((__u64)d[1] << 32) | d[0];
2364 capa->lc_gid = ((__u64)d[3] << 32) | d[2];
2365 capa->lc_flags = LC_ID_CONVERT;
2369 RETURN(ERR_PTR(-EINVAL));
2372 capa->lc_fid = *fid;
2374 capa->lc_flags |= dev->od_capa_alg << 24;
2375 capa->lc_timeout = dev->od_capa_timeout;
2376 capa->lc_expiry = 0;
2378 oc = capa_lookup(dev->od_capa_hash, capa, 1);
2380 LASSERT(!capa_is_expired(oc));
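/* no valid cached capability: sign a fresh one with the current
 * capability key (od_capa_keys[1]) and add it to the device's capa hash. */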
2384 cfs_spin_lock(&capa_lock);
2385 *key = dev->od_capa_keys[1];
2386 cfs_spin_unlock(&capa_lock);
2388 capa->lc_keyid = key->lk_keyid;
2389 capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
2391 rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
2393 DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
2394 RETURN(ERR_PTR(rc));
2397 oc = capa_add(dev->od_capa_hash, capa);
2401 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
2403 struct osd_object *obj = osd_dt_obj(dt);
2404 struct inode *inode = obj->oo_inode;
2405 struct osd_thread_info *info = osd_oti_get(env);
2406 struct dentry *dentry = &info->oti_obj_dentry;
2407 struct file *file = &info->oti_file;
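/* set up a transient dentry/file pair on the thread info so the backing
 * filesystem's ->fsync() method can be called without an open file. */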
2412 dentry->d_inode = inode;
2413 file->f_dentry = dentry;
2414 file->f_mapping = inode->i_mapping;
2415 file->f_op = inode->i_fop;
2416 LOCK_INODE_MUTEX(inode);
2417 rc = file->f_op->fsync(file, dentry, 0);
2418 UNLOCK_INODE_MUTEX(inode);
2422 static int osd_data_get(const struct lu_env *env, struct dt_object *dt,
2425 struct osd_object *obj = osd_dt_obj(dt);
2428 *data = (void *)obj->oo_inode;
2436 static int osd_iam_index_probe(const struct lu_env *env, struct osd_object *o,
2437 const struct dt_index_features *feat)
2439 struct iam_descr *descr;
2441 if (osd_object_is_root(o))
2442 return feat == &dt_directory_features;
2444 LASSERT(o->oo_dir != NULL);
2446 descr = o->oo_dir->od_container.ic_descr;
2447 if (feat == &dt_directory_features) {
2448 if (descr->id_rec_size == sizeof(struct osd_fid_pack))
2454 feat->dif_keysize_min <= descr->id_key_size &&
2455 descr->id_key_size <= feat->dif_keysize_max &&
2456 feat->dif_recsize_min <= descr->id_rec_size &&
2457 descr->id_rec_size <= feat->dif_recsize_max &&
2458 !(feat->dif_flags & (DT_IND_VARKEY |
2459 DT_IND_VARREC | DT_IND_NONUNQ)) &&
2460 ergo(feat->dif_flags & DT_IND_UPDATE,
2461 1 /* XXX check that object (and file system) is
2466 static int osd_iam_container_init(const struct lu_env *env,
2467 struct osd_object *obj,
2468 struct osd_directory *dir)
2470 struct iam_container *bag = &dir->od_container;
2473 result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
2477 result = iam_container_setup(bag);
2481 if (osd_obj2dev(obj)->od_iop_mode) {
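/* when running in od_iop_mode the IAM root block is read explicitly
 * via ldiskfs_bread() and cached in the container (ic_root_bh). */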
2482 u32 ptr = bag->ic_descr->id_ops->id_root_ptr(bag);
2484 bag->ic_root_bh = ldiskfs_bread(NULL, obj->oo_inode,
2490 obj->oo_dt.do_index_ops = &osd_index_iam_ops;
2492 iam_container_fini(bag);
2499 * Concurrency: no external locking is necessary.
2501 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
2502 const struct dt_index_features *feat)
2506 struct osd_object *obj = osd_dt_obj(dt);
2507 struct osd_device *osd = osd_obj2dev(obj);
2509 LINVRNT(osd_invariant(obj));
2510 LASSERT(dt_object_exists(dt));
2512 if (osd_object_is_root(obj)) {
2513 dt->do_index_ops = &osd_index_ea_ops;
2515 } else if (feat == &dt_directory_features && osd->od_iop_mode) {
2516 dt->do_index_ops = &osd_index_ea_ops;
2517 if (S_ISDIR(obj->oo_inode->i_mode))
2522 } else if (!osd_has_index(obj)) {
2523 struct osd_directory *dir;
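/* lazily set up the IAM container for this object: publish oo_dir under
 * the oo_guard spinlock (re-checking for a concurrent winner), then
 * serialize the actual container initialization with oo_ext_idx_sem. */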
2528 cfs_spin_lock(&obj->oo_guard);
2529 if (obj->oo_dir == NULL)
2533 * Concurrent thread allocated container data.
2536 cfs_spin_unlock(&obj->oo_guard);
2538 * Now that we have container data, serialize its
2541 cfs_down_write(&obj->oo_ext_idx_sem);
2543 * recheck under lock.
2545 if (!osd_has_index(obj))
2546 result = osd_iam_container_init(env, obj, dir);
2549 cfs_up_write(&obj->oo_ext_idx_sem);
2557 if (result == 0 && ea_dir == 0) {
2558 if (!osd_iam_index_probe(env, obj, feat))
2561 LINVRNT(osd_invariant(obj));
2566 static const struct dt_object_operations osd_obj_ops = {
2567 .do_read_lock = osd_object_read_lock,
2568 .do_write_lock = osd_object_write_lock,
2569 .do_read_unlock = osd_object_read_unlock,
2570 .do_write_unlock = osd_object_write_unlock,
2571 .do_write_locked = osd_object_write_locked,
2572 .do_attr_get = osd_attr_get,
2573 .do_declare_attr_set = osd_declare_attr_set,
2574 .do_attr_set = osd_attr_set,
2575 .do_ah_init = osd_ah_init,
2576 .do_declare_create = osd_declare_object_create,
2577 .do_create = osd_object_create,
2578 .do_declare_destroy = osd_declare_object_destroy,
2579 .do_destroy = osd_object_destroy,
2580 .do_index_try = osd_index_try,
2581 .do_declare_ref_add = osd_declare_object_ref_add,
2582 .do_ref_add = osd_object_ref_add,
2583 .do_declare_ref_del = osd_declare_object_ref_del,
2584 .do_ref_del = osd_object_ref_del,
2585 .do_xattr_get = osd_xattr_get,
2586 .do_declare_xattr_set = osd_declare_xattr_set,
2587 .do_xattr_set = osd_xattr_set,
2588 .do_declare_xattr_del = osd_declare_xattr_del,
2589 .do_xattr_del = osd_xattr_del,
2590 .do_xattr_list = osd_xattr_list,
2591 .do_capa_get = osd_capa_get,
2592 .do_object_sync = osd_object_sync,
2593 .do_data_get = osd_data_get,
2597 * dt_object_operations for interoperability mode
2598 * (i.e. to run 2.0 mds on 1.8 disk) (b11826)
2600 static const struct dt_object_operations osd_obj_ea_ops = {
2601 .do_read_lock = osd_object_read_lock,
2602 .do_write_lock = osd_object_write_lock,
2603 .do_read_unlock = osd_object_read_unlock,
2604 .do_write_unlock = osd_object_write_unlock,
2605 .do_write_locked = osd_object_write_locked,
2606 .do_attr_get = osd_attr_get,
2607 .do_declare_attr_set = osd_declare_attr_set,
2608 .do_attr_set = osd_attr_set,
2609 .do_ah_init = osd_ah_init,
2610 .do_declare_create = osd_declare_object_create,
2611 .do_create = osd_object_ea_create,
2612 .do_declare_destroy = osd_declare_object_destroy,
2613 .do_destroy = osd_object_destroy,
2614 .do_index_try = osd_index_try,
2615 .do_declare_ref_add = osd_declare_object_ref_add,
2616 .do_ref_add = osd_object_ref_add,
2617 .do_declare_ref_del = osd_declare_object_ref_del,
2618 .do_ref_del = osd_object_ref_del,
2619 .do_xattr_get = osd_xattr_get,
2620 .do_declare_xattr_set = osd_declare_xattr_set,
2621 .do_xattr_set = osd_xattr_set,
2622 .do_declare_xattr_del = osd_declare_xattr_del,
2623 .do_xattr_del = osd_xattr_del,
2624 .do_xattr_list = osd_xattr_list,
2625 .do_capa_get = osd_capa_get,
2626 .do_object_sync = osd_object_sync,
2627 .do_data_get = osd_data_get,
2630 static int osd_index_declare_iam_delete(const struct lu_env *env,
2631 struct dt_object *dt,
2632 const struct dt_key *key,
2633 struct thandle *handle)
2635 struct osd_thandle *oh;
2637 oh = container_of0(handle, struct osd_thandle, ot_super);
2638 LASSERT(oh->ot_handle == NULL);
2640 OSD_DECLARE_OP(oh, delete);
2641 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
2647 * delete a (key, value) pair from index \a dt specified by \a key
2649 * \param dt osd index object
2650 * \param key key for index
2651 * \param rec record reference
2652 * \param handle transaction handler
2655 * \retval -ve failure
2658 static int osd_index_iam_delete(const struct lu_env *env, struct dt_object *dt,
2659 const struct dt_key *key,
2660 struct thandle *handle,
2661 struct lustre_capa *capa)
2663 struct osd_object *obj = osd_dt_obj(dt);
2664 struct osd_thandle *oh;
2665 struct iam_path_descr *ipd;
2666 struct iam_container *bag = &obj->oo_dir->od_container;
2671 LINVRNT(osd_invariant(obj));
2672 LASSERT(dt_object_exists(dt));
2673 LASSERT(bag->ic_object == obj->oo_inode);
2674 LASSERT(handle != NULL);
2676 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2679 OSD_EXEC_OP(handle, delete);
2681 ipd = osd_idx_ipd_get(env, bag);
2682 if (unlikely(ipd == NULL))
2685 oh = container_of0(handle, struct osd_thandle, ot_super);
2686 LASSERT(oh->ot_handle != NULL);
2687 LASSERT(oh->ot_handle->h_transaction != NULL);
2689 rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
2690 osd_ipd_put(env, bag, ipd);
2691 LINVRNT(osd_invariant(obj));
2695 static int osd_index_declare_ea_delete(const struct lu_env *env,
2696 struct dt_object *dt,
2697 const struct dt_key *key,
2698 struct thandle *handle)
2700 struct osd_thandle *oh;
2702 LASSERT(dt_object_exists(dt));
2703 LASSERT(handle != NULL);
2705 oh = container_of0(handle, struct osd_thandle, ot_super);
2706 LASSERT(oh->ot_handle == NULL);
2708 OSD_DECLARE_OP(oh, delete);
2709 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_DELETE];
2711 LASSERT(osd_dt_obj(dt)->oo_inode);
2712 osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
2713 osd_dt_obj(dt)->oo_inode);
2714 osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
2715 osd_dt_obj(dt)->oo_inode);
2720 static inline int osd_get_fid_from_dentry(struct ldiskfs_dir_entry_2 *de,
2723 struct osd_fid_pack *rec;
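/* when LDISKFS_DIRENT_LUFID is set, the packed FID is stored right
 * after the NUL-terminated name inside the ldiskfs dirent. */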
2726 if (de->file_type & LDISKFS_DIRENT_LUFID) {
2727 rec = (struct osd_fid_pack *) (de->name + de->name_len + 1);
2728 rc = osd_fid_unpack((struct lu_fid *)fid, rec);
2734 * Index delete function for interoperability mode (b11826).
2735 * It will remove the directory entry added by osd_index_ea_insert().
2736 * This entry is needed to maintain name->fid mapping.
2738 * \param key name of the directory entry to be deleted
2740 * \retval 0, on success
2741 * \retval -ve, on error
2743 static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
2744 const struct dt_key *key,
2745 struct thandle *handle,
2746 struct lustre_capa *capa)
2748 struct osd_object *obj = osd_dt_obj(dt);
2749 struct inode *dir = obj->oo_inode;
2750 struct dentry *dentry;
2751 struct osd_thandle *oh;
2752 struct ldiskfs_dir_entry_2 *de;
2753 struct buffer_head *bh;
2754 struct htree_lock *hlock = NULL;
2759 LINVRNT(osd_invariant(obj));
2760 LASSERT(dt_object_exists(dt));
2761 LASSERT(handle != NULL);
2763 OSD_EXEC_OP(handle, delete);
2765 oh = container_of(handle, struct osd_thandle, ot_super);
2766 LASSERT(oh->ot_handle != NULL);
2767 LASSERT(oh->ot_handle->h_transaction != NULL);
2769 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2772 dentry = osd_child_dentry_get(env, obj,
2773 (char *)key, strlen((char *)key));
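/* take the per-directory htree lock when parallel directory operations
 * are enabled for this object, otherwise fall back to the
 * whole-directory oo_ext_idx_sem. */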
2775 if (obj->oo_hl_head != NULL) {
2776 hlock = osd_oti_get(env)->oti_hlock;
2777 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
2778 dir, LDISKFS_HLOCK_DEL);
2780 cfs_down_write(&obj->oo_ext_idx_sem);
2783 bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
2785 rc = ldiskfs_delete_entry(oh->ot_handle,
2792 ldiskfs_htree_unlock(hlock);
2794 cfs_up_write(&obj->oo_ext_idx_sem);
2796 LASSERT(osd_invariant(obj));
2801 * Lookup index for \a key and copy record to \a rec.
2803 * \param dt osd index object
2804 * \param key key for index
2805 * \param rec record reference
2807 * \retval +ve success : exact match
2808 * \retval 0 return record with key not greater than \a key
2809 * \retval -ve failure
2811 static int osd_index_iam_lookup(const struct lu_env *env, struct dt_object *dt,
2812 struct dt_rec *rec, const struct dt_key *key,
2813 struct lustre_capa *capa)
2815 struct osd_object *obj = osd_dt_obj(dt);
2816 struct iam_path_descr *ipd;
2817 struct iam_container *bag = &obj->oo_dir->od_container;
2818 struct osd_thread_info *oti = osd_oti_get(env);
2819 struct iam_iterator *it = &oti->oti_idx_it;
2820 struct iam_rec *iam_rec;
2825 LASSERT(osd_invariant(obj));
2826 LASSERT(dt_object_exists(dt));
2827 LASSERT(bag->ic_object == obj->oo_inode);
2829 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
2832 ipd = osd_idx_ipd_get(env, bag);
2836 /* got ipd; now we can start the iterator. */
2837 iam_it_init(it, bag, 0, ipd);
2839 rc = iam_it_get(it, (struct iam_key *)key);
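/* for directories the stored record is a packed FID: copy it into the
 * oti_ldp scratch buffer first and unpack it into the caller's rec;
 * other indices copy the record straight into rec. */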
2841 if (S_ISDIR(obj->oo_inode->i_mode))
2842 iam_rec = (struct iam_rec *)oti->oti_ldp;
2844 iam_rec = (struct iam_rec *) rec;
2846 iam_reccpy(&it->ii_path.ip_leaf, (struct iam_rec *)iam_rec);
2847 if (S_ISDIR(obj->oo_inode->i_mode))
2848 osd_fid_unpack((struct lu_fid *) rec,
2849 (struct osd_fid_pack *)iam_rec);
2853 osd_ipd_put(env, bag, ipd);
2855 LINVRNT(osd_invariant(obj));
2860 static int osd_index_declare_iam_insert(const struct lu_env *env,
2861 struct dt_object *dt,
2862 const struct dt_rec *rec,
2863 const struct dt_key *key,
2864 struct thandle *handle)
2866 struct osd_thandle *oh;
2868 LASSERT(dt_object_exists(dt));
2869 LASSERT(handle != NULL);
2871 oh = container_of0(handle, struct osd_thandle, ot_super);
2872 LASSERT(oh->ot_handle == NULL);
2874 OSD_DECLARE_OP(oh, insert);
2875 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
2881 * Inserts (key, value) pair in \a dt index object.
2883 * \param dt osd index object
2884 * \param key key for index
2885 * \param rec record reference
2886 * \param th transaction handler
2889 * \retval -ve failure
2891 static int osd_index_iam_insert(const struct lu_env *env, struct dt_object *dt,
2892 const struct dt_rec *rec,
2893 const struct dt_key *key, struct thandle *th,
2894 struct lustre_capa *capa, int ignore_quota)
2896 struct osd_object *obj = osd_dt_obj(dt);
2897 struct iam_path_descr *ipd;
2898 struct osd_thandle *oh;
2899 struct iam_container *bag = &obj->oo_dir->od_container;
2900 #ifdef HAVE_QUOTA_SUPPORT
2901 cfs_cap_t save = cfs_curproc_cap_pack();
2903 struct osd_thread_info *oti = osd_oti_get(env);
2904 struct iam_rec *iam_rec = (struct iam_rec *)oti->oti_ldp;
2909 LINVRNT(osd_invariant(obj));
2910 LASSERT(dt_object_exists(dt));
2911 LASSERT(bag->ic_object == obj->oo_inode);
2912 LASSERT(th != NULL);
2914 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
2917 OSD_EXEC_OP(th, insert);
2919 ipd = osd_idx_ipd_get(env, bag);
2920 if (unlikely(ipd == NULL))
2923 oh = container_of0(th, struct osd_thandle, ot_super);
2924 LASSERT(oh->ot_handle != NULL);
2925 LASSERT(oh->ot_handle->h_transaction != NULL);
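/* with quota support, CFS_CAP_SYS_RESOURCE is raised or lowered around
 * the insert (depending on whether quota is to be ignored) and the
 * saved capability mask is restored afterwards. */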
2926 #ifdef HAVE_QUOTA_SUPPORT
2928 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
2930 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
2932 if (S_ISDIR(obj->oo_inode->i_mode))
2933 osd_fid_pack((struct osd_fid_pack *)iam_rec, rec, &oti->oti_fid);
2935 iam_rec = (struct iam_rec *) rec;
2936 rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
2938 #ifdef HAVE_QUOTA_SUPPORT
2939 cfs_curproc_cap_unpack(save);
2941 osd_ipd_put(env, bag, ipd);
2942 LINVRNT(osd_invariant(obj));
2947 * Calls ldiskfs_add_entry() to add a directory entry
2948 * into the directory. This is required for
2949 * interoperability mode (b11826)
2951 * \retval 0, on success
2952 * \retval -ve, on error
2954 static int __osd_ea_add_rec(struct osd_thread_info *info,
2955 struct osd_object *pobj, struct inode *cinode,
2956 const char *name, const struct dt_rec *fid,
2957 struct htree_lock *hlock, struct thandle *th)
2959 struct ldiskfs_dentry_param *ldp;
2960 struct dentry *child;
2961 struct osd_thandle *oth;
2964 oth = container_of(th, struct osd_thandle, ot_super);
2965 LASSERT(oth->ot_handle != NULL);
2966 LASSERT(oth->ot_handle->h_transaction != NULL);
2968 child = osd_child_dentry_get(info->oti_env, pobj, name, strlen(name));
2970 /* XXX: remove fid_is_igif() check here.
2971 * The IGIF check is just to handle insertion of '..' when it is 'ROOT':
2972 * it is an IGIF now, but still needs a FID in the dir entry for readdir
2974 * LU-838 should fix that and remove the fid_is_igif() check */
2975 if (fid_is_igif((struct lu_fid *)fid) ||
2976 fid_is_norm((struct lu_fid *)fid)) {
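/* pass the packed FID to ldiskfs through d_fsdata so that
 * osd_ldiskfs_add_entry() can store it in the new dirent. */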
2977 ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
2978 osd_get_ldiskfs_dirent_param(ldp, fid);
2979 child->d_fsdata = (void *)ldp;
2981 child->d_fsdata = NULL;
2983 rc = osd_ldiskfs_add_entry(oth->ot_handle, child, cinode, hlock);
2989 * Calls ldiskfs_add_dot_dotdot() to add dot and dotdot entries
2990 * into the directory. Also sets flags in the osd object to
2991 * indicate that dot and dotdot have been created. This is required for
2992 * interoperability mode (b11826)
2994 * \param dir directory for dot and dotdot fixup.
2995 * \param obj child object for linking
2997 * \retval 0, on success
2998 * \retval -ve, on error
3000 static int osd_add_dot_dotdot(struct osd_thread_info *info,
3001 struct osd_object *dir,
3002 struct inode *parent_dir, const char *name,
3003 const struct dt_rec *dot_fid,
3004 const struct dt_rec *dot_dot_fid,
3007 struct inode *inode = dir->oo_inode;
3008 struct ldiskfs_dentry_param *dot_ldp;
3009 struct ldiskfs_dentry_param *dot_dot_ldp;
3010 struct osd_thandle *oth;
3013 oth = container_of(th, struct osd_thandle, ot_super);
3014 LASSERT(oth->ot_handle->h_transaction != NULL);
3015 LASSERT(S_ISDIR(dir->oo_inode->i_mode));
3017 if (strcmp(name, dot) == 0) {
3018 if (dir->oo_compat_dot_created) {
3021 LASSERT(inode == parent_dir);
3022 dir->oo_compat_dot_created = 1;
3025 } else if (strcmp(name, dotdot) == 0) {
3026 dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp;
3027 dot_dot_ldp = (struct ldiskfs_dentry_param *)info->oti_ldp2;
3029 if (!dir->oo_compat_dot_created)
3031 if (!fid_is_igif((struct lu_fid *)dot_fid)) {
3032 osd_get_ldiskfs_dirent_param(dot_ldp, dot_fid);
3033 osd_get_ldiskfs_dirent_param(dot_dot_ldp, dot_dot_fid);
3038 /* in case of rename, dotdot is already created */
3039 if (dir->oo_compat_dotdot_created) {
3040 return __osd_ea_add_rec(info, dir, parent_dir, name,
3041 dot_dot_fid, NULL, th);
3044 result = ldiskfs_add_dot_dotdot(oth->ot_handle, parent_dir,
3045 inode, dot_ldp, dot_dot_ldp);
3047 dir->oo_compat_dotdot_created = 1;
3055 * It calls the appropriate osd_add* function and returns the
3056 * value returned by that function.
3058 static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
3059 struct inode *cinode, const char *name,
3060 const struct dt_rec *fid, struct thandle *th)
3062 struct osd_thread_info *info = osd_oti_get(env);
3063 struct htree_lock *hlock;
3066 hlock = pobj->oo_hl_head != NULL ? info->oti_hlock : NULL;
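/* "." and ".." are handled by osd_add_dot_dotdot(); any other name is
 * added as a regular directory entry via __osd_ea_add_rec(). Both paths
 * run under the htree lock (or the oo_ext_idx_sem fallback when no
 * htree lock head is allocated). */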
3068 if (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' &&
3070 if (hlock != NULL) {
3071 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3074 cfs_down_write(&pobj->oo_ext_idx_sem);
3076 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
3077 (struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
3080 if (hlock != NULL) {
3081 ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
3082 pobj->oo_inode, LDISKFS_HLOCK_ADD);
3084 cfs_down_write(&pobj->oo_ext_idx_sem);
3087 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
3091 ldiskfs_htree_unlock(hlock);
3093 cfs_up_write(&pobj->oo_ext_idx_sem);
3099 * Calls ->lookup() to find the dentry, then gets the inode from the dentry
3100 * and reads the inode's EA to get the fid. This is required for interoperability
3103 * \retval 0, on success
3104 * \retval -ve, on error
3106 static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
3107 struct dt_rec *rec, const struct dt_key *key)
3109 struct inode *dir = obj->oo_inode;
3110 struct dentry *dentry;
3111 struct ldiskfs_dir_entry_2 *de;
3112 struct buffer_head *bh;
3113 struct lu_fid *fid = (struct lu_fid *) rec;
3114 struct htree_lock *hlock = NULL;
3118 LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
3120 dentry = osd_child_dentry_get(env, obj,
3121 (char *)key, strlen((char *)key));
3123 if (obj->oo_hl_head != NULL) {
3124 hlock = osd_oti_get(env)->oti_hlock;
3125 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3126 dir, LDISKFS_HLOCK_LOOKUP);
3128 cfs_down_read(&obj->oo_ext_idx_sem);
3131 bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
3133 ino = le32_to_cpu(de->inode);
3134 rc = osd_get_fid_from_dentry(de, rec);
3136 /* done with de, release bh */
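/* if the dirent did not carry a packed FID, fall back to reading the
 * FID from the inode's extended attributes via osd_ea_fid_get(). */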
3139 rc = osd_ea_fid_get(env, obj, ino, fid);
3145 ldiskfs_htree_unlock(hlock);
3147 cfs_up_read(&obj->oo_ext_idx_sem);
3152 * Find the osd object for given fid.
3154 * \param fid fid of the osd object to find
3156 * \retval osd_object on success
3157 * \retval -ve on error
3159 struct osd_object *osd_object_find(const struct lu_env *env,
3160 struct dt_object *dt,
3161 const struct lu_fid *fid)
3163 struct lu_device *ludev = dt->do_lu.lo_dev;
3164 struct osd_object *child = NULL;
3165 struct lu_object *luch;
3166 struct lu_object *lo;
3168 luch = lu_object_find(env, ludev, fid, NULL);
3169 if (!IS_ERR(luch)) {
3170 if (lu_object_exists(luch)) {
3171 lo = lu_object_locate(luch->lo_header, ludev->ld_type);
3173 child = osd_obj(lo);
3175 LU_OBJECT_DEBUG(D_ERROR, env, luch,
3176 "lu_object can't be located"
3177 ""DFID"\n", PFID(fid));
3179 if (child == NULL) {
3180 lu_object_put(env, luch);
3181 CERROR("Unable to get osd_object\n");
3182 child = ERR_PTR(-ENOENT);
3185 LU_OBJECT_DEBUG(D_ERROR, env, luch,
3186 "lu_object does not exists "DFID"\n",
3188 child = ERR_PTR(-ENOENT);
3191 child = (void *)luch;
3197 * Put the osd object once done with it.
3199 * \param obj osd object that needs to be put
3201 static inline void osd_object_put(const struct lu_env *env,
3202 struct osd_object *obj)
3204 lu_object_put(env, &obj->oo_dt.do_lu);
3207 static int osd_index_declare_ea_insert(const struct lu_env *env,
3208 struct dt_object *dt,
3209 const struct dt_rec *rec,
3210 const struct dt_key *key,
3211 struct thandle *handle)
3213 struct osd_thandle *oh;
3215 LASSERT(dt_object_exists(dt));
3216 LASSERT(handle != NULL);
3218 oh = container_of0(handle, struct osd_thandle, ot_super);
3219 LASSERT(oh->ot_handle == NULL);
3221 OSD_DECLARE_OP(oh, insert);
3222 oh->ot_credits += osd_dto_credits_noquota[DTO_INDEX_INSERT];
3224 LASSERT(osd_dt_obj(dt)->oo_inode);
3225 osd_declare_qid(dt, oh, USRQUOTA, osd_dt_obj(dt)->oo_inode->i_uid,
3226 osd_dt_obj(dt)->oo_inode);
3227 osd_declare_qid(dt, oh, GRPQUOTA, osd_dt_obj(dt)->oo_inode->i_gid,
3228 osd_dt_obj(dt)->oo_inode);
3234 * Index add function for interoperability mode (b11826).
3235 * It will add the directory entry. This entry is needed to
3236 * maintain the name->fid mapping.
3238 * \param key name of the directory entry to be inserted
3239 * \param rec value for the given key, i.e. the fid
3241 * \retval 0, on success
3242 * \retval -ve, on error
3244 static int osd_index_ea_insert(const struct lu_env *env, struct dt_object *dt,
3245 const struct dt_rec *rec,
3246 const struct dt_key *key, struct thandle *th,
3247 struct lustre_capa *capa, int ignore_quota)
3249 struct osd_object *obj = osd_dt_obj(dt);
3250 struct lu_fid *fid = (struct lu_fid *) rec;
3251 const char *name = (const char *)key;
3252 struct osd_object *child;
3253 #ifdef HAVE_QUOTA_SUPPORT
3254 cfs_cap_t save = cfs_curproc_cap_pack();
3260 LASSERT(osd_invariant(obj));
3261 LASSERT(dt_object_exists(dt));
3262 LASSERT(th != NULL);
3264 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
3267 child = osd_object_find(env, dt, fid);
3268 if (!IS_ERR(child)) {
3269 #ifdef HAVE_QUOTA_SUPPORT
3271 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
3273 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
3275 rc = osd_ea_add_rec(env, obj, child->oo_inode, name, rec, th);
3276 #ifdef HAVE_QUOTA_SUPPORT
3277 cfs_curproc_cap_unpack(save);
3279 osd_object_put(env, child);
3281 rc = PTR_ERR(child);
3284 LASSERT(osd_invariant(obj));
3289 * Initialize osd Iterator for given osd index object.
3291 * \param dt osd index object
3294 static struct dt_it *osd_it_iam_init(const struct lu_env *env,
3295 struct dt_object *dt,
3297 struct lustre_capa *capa)
3299 struct osd_it_iam *it;
3300 struct osd_thread_info *oti = osd_oti_get(env);
3301 struct osd_object *obj = osd_dt_obj(dt);
3302 struct lu_object *lo = &dt->do_lu;
3303 struct iam_path_descr *ipd;
3304 struct iam_container *bag = &obj->oo_dir->od_container;
3306 LASSERT(lu_object_exists(lo));
3308 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
3309 return ERR_PTR(-EACCES);
3312 ipd = osd_it_ipd_get(env, bag);
3313 if (likely(ipd != NULL)) {
3317 iam_it_init(&it->oi_it, bag, IAM_IT_MOVE, ipd);
3318 return (struct dt_it *)it;
3320 return ERR_PTR(-ENOMEM);
3324 * Free the given iterator.
3327 static void osd_it_iam_fini(const struct lu_env *env, struct dt_it *di)
3329 struct osd_it_iam *it = (struct osd_it_iam *)di;
3330 struct osd_object *obj = it->oi_obj;
3332 iam_it_fini(&it->oi_it);
3333 osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
3334 lu_object_put(env, &obj->oo_dt.do_lu);
3338 * Move Iterator to record specified by \a key
3340 * \param di osd iterator
3341 * \param key key for index
3343 * \retval +ve di points to record with least key not larger than key
3344 * \retval 0 di points to exact matched key
3345 * \retval -ve failure
3348 static int osd_it_iam_get(const struct lu_env *env,
3349 struct dt_it *di, const struct dt_key *key)
3351 struct osd_it_iam *it = (struct osd_it_iam *)di;
3353 return iam_it_get(&it->oi_it, (const struct iam_key *)key);
3359 * \param di osd iterator
3362 static void osd_it_iam_put(const struct lu_env *env, struct dt_it *di)
3364 struct osd_it_iam *it = (struct osd_it_iam *)di;
3366 iam_it_put(&it->oi_it);
3370 * Move iterator by one record
3372 * \param di osd iterator
3374 * \retval +1 end of container reached
3376 * \retval -ve failure
3379 static int osd_it_iam_next(const struct lu_env *env, struct dt_it *di)
3381 struct osd_it_iam *it = (struct osd_it_iam *)di;
3383 return iam_it_next(&it->oi_it);
3387 * Return pointer to the key under iterator.
3390 static struct dt_key *osd_it_iam_key(const struct lu_env *env,
3391 const struct dt_it *di)
3393 struct osd_it_iam *it = (struct osd_it_iam *)di;
3395 return (struct dt_key *)iam_it_key_get(&it->oi_it);
3399 * Return size of key under iterator (in bytes)
3402 static int osd_it_iam_key_size(const struct lu_env *env, const struct dt_it *di)
3404 struct osd_it_iam *it = (struct osd_it_iam *)di;
3406 return iam_it_key_size(&it->oi_it);
3409 static inline void osd_it_append_attrs(struct lu_dirent *ent, __u32 attr,
3410 int len, __u16 type)
3412 struct luda_type *lt;
3413 const unsigned align = sizeof(struct luda_type) - 1;
3415 /* check if file type is required */
3416 if (attr & LUDA_TYPE) {
3417 len = (len + align) & ~align;
3419 lt = (void *) ent->lde_name + len;
3420 lt->lt_type = cpu_to_le16(CFS_DTTOIF(type));
3421 ent->lde_attrs |= LUDA_TYPE;
3424 ent->lde_attrs = cpu_to_le32(ent->lde_attrs);
3428 * Build a lu_dirent from a backend fs dirent.
3431 static inline void osd_it_pack_dirent(struct lu_dirent *ent,
3432 struct lu_fid *fid, __u64 offset,
3433 char *name, __u16 namelen,
3434 __u16 type, __u32 attr)
3436 fid_cpu_to_le(&ent->lde_fid, fid);
3437 ent->lde_attrs = LUDA_FID;
3439 ent->lde_hash = cpu_to_le64(offset);
3440 ent->lde_reclen = cpu_to_le16(lu_dirent_calc_size(namelen, attr));
3442 strncpy(ent->lde_name, name, namelen);
3443 ent->lde_namelen = cpu_to_le16(namelen);
3445 /* append lustre attributes */
3446 osd_it_append_attrs(ent, attr, namelen, type);
3450 * Return pointer to the record under iterator.
3452 static int osd_it_iam_rec(const struct lu_env *env,
3453 const struct dt_it *di,
3454 struct dt_rec *dtrec, __u32 attr)
3456 struct osd_it_iam *it = (struct osd_it_iam *)di;
3457 struct osd_thread_info *info = osd_oti_get(env);
3458 struct lu_fid *fid = &info->oti_fid;
3459 const struct osd_fid_pack *rec;
3460 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
3466 name = (char *)iam_it_key_get(&it->oi_it);
3468 RETURN(PTR_ERR(name));
3470 namelen = iam_it_key_size(&it->oi_it);
3472 rec = (const struct osd_fid_pack *) iam_it_rec_get(&it->oi_it);
3474 RETURN(PTR_ERR(rec));
3476 rc = osd_fid_unpack(fid, rec);
3480 hash = iam_it_store(&it->oi_it);
3482 /* IAM does not store object type in IAM index (dir) */
3483 osd_it_pack_dirent(lde, fid, hash, name, namelen,
3490 * Returns cookie for current Iterator position.
3492 static __u64 osd_it_iam_store(const struct lu_env *env, const struct dt_it *di)
3494 struct osd_it_iam *it = (struct osd_it_iam *)di;
3496 return iam_it_store(&it->oi_it);
3500 * Restore iterator from cookie.
3502 * \param di osd iterator
3503 * \param hash Iterator location cookie
3505 * \retval +ve di points to record with least key not larger than key.
3506 * \retval 0 di points to exact matched key
3507 * \retval -ve failure
3510 static int osd_it_iam_load(const struct lu_env *env,
3511 const struct dt_it *di, __u64 hash)
3513 struct osd_it_iam *it = (struct osd_it_iam *)di;
3515 return iam_it_load(&it->oi_it, hash);
3518 static const struct dt_index_operations osd_index_iam_ops = {
3519 .dio_lookup = osd_index_iam_lookup,
3520 .dio_declare_insert = osd_index_declare_iam_insert,
3521 .dio_insert = osd_index_iam_insert,
3522 .dio_declare_delete = osd_index_declare_iam_delete,
3523 .dio_delete = osd_index_iam_delete,
3525 .init = osd_it_iam_init,
3526 .fini = osd_it_iam_fini,
3527 .get = osd_it_iam_get,
3528 .put = osd_it_iam_put,
3529 .next = osd_it_iam_next,
3530 .key = osd_it_iam_key,
3531 .key_size = osd_it_iam_key_size,
3532 .rec = osd_it_iam_rec,
3533 .store = osd_it_iam_store,
3534 .load = osd_it_iam_load
3539 * Creates or initializes iterator context.
3541 * \retval struct osd_it_ea, iterator structure on success
3544 static struct dt_it *osd_it_ea_init(const struct lu_env *env,
3545 struct dt_object *dt,
3547 struct lustre_capa *capa)
3549 struct osd_object *obj = osd_dt_obj(dt);
3550 struct osd_thread_info *info = osd_oti_get(env);
3551 struct osd_it_ea *it = &info->oti_it_ea;
3552 struct lu_object *lo = &dt->do_lu;
3553 struct dentry *obj_dentry = &info->oti_it_dentry;
3555 LASSERT(lu_object_exists(lo));
3557 obj_dentry->d_inode = obj->oo_inode;
3558 obj_dentry->d_sb = osd_sb(osd_obj2dev(obj));
3559 obj_dentry->d_name.hash = 0;
3561 it->oie_rd_dirent = 0;
3562 it->oie_it_dirent = 0;
3563 it->oie_dirent = NULL;
3564 it->oie_buf = info->oti_it_ea_buf;
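/* emulate an open file on the iterator so the backing filesystem's
 * ->readdir() can be driven later; the hash width flag (32 vs 64 bit)
 * follows the caller's LUDA_64BITHASH request. */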
3566 it->oie_file.f_pos = 0;
3567 it->oie_file.f_dentry = obj_dentry;
3568 if (attr & LUDA_64BITHASH)
3569 it->oie_file.f_flags = O_64BITHASH;
3571 it->oie_file.f_flags = O_32BITHASH;
3572 it->oie_file.f_mapping = obj->oo_inode->i_mapping;
3573 it->oie_file.f_op = obj->oo_inode->i_fop;
3574 it->oie_file.private_data = NULL;
3576 RETURN((struct dt_it *) it);
3580 * Destroys or finishes the iterator context.
3582 * \param di iterator structure to be destroyed
3584 static void osd_it_ea_fini(const struct lu_env *env, struct dt_it *di)
3586 struct osd_it_ea *it = (struct osd_it_ea *)di;
3587 struct osd_object *obj = it->oie_obj;
3588 struct inode *inode = obj->oo_inode;
3591 it->oie_file.f_op->release(inode, &it->oie_file);
3592 lu_object_put(env, &obj->oo_dt.do_lu);
3597 * It positions the iterator at the given key, so that the next lookup
3598 * continues from that key. It is similar to dio_it->load(), but based on
3599 * a key rather than a file position.
3601 * As a special convention, osd_it_ea_get(env, di, "") has to rewind iterator
3604 * TODO: Presently returns +1, since it is only used by mdd_dir_is_empty().
3606 static int osd_it_ea_get(const struct lu_env *env,
3607 struct dt_it *di, const struct dt_key *key)
3609 struct osd_it_ea *it = (struct osd_it_ea *)di;
3612 LASSERT(((const char *)key)[0] == '\0');
3613 it->oie_file.f_pos = 0;
3614 it->oie_rd_dirent = 0;
3615 it->oie_it_dirent = 0;
3616 it->oie_dirent = NULL;
3624 static void osd_it_ea_put(const struct lu_env *env, struct dt_it *di)
3629 * It is called internally by ->readdir(). It fills the
3630 * iterator's in-memory data structure with the required
3631 * information, i.e. name, namelen, rec_size, etc.
3633 * \param buf in which information to be filled in.
3634 * \param name name of the file in given dir
3636 * \retval 0 on success
3637 * \retval 1 on buffer full
3639 static int osd_ldiskfs_filldir(char *buf, const char *name, int namelen,
3640 loff_t offset, __u64 ino,
3643 struct osd_it_ea *it = (struct osd_it_ea *)buf;
3644 struct osd_it_ea_dirent *ent = it->oie_dirent;
3645 struct lu_fid *fid = &ent->oied_fid;
3646 struct osd_fid_pack *rec;
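/* filldir callback invoked by ->readdir(): copy one dirent into the
 * iterator's buffer, extracting the packed FID when the entry carries
 * LDISKFS_DIRENT_LUFID, and return 1 once the buffer is full so that
 * readdir stops. */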
3649 /* this should never happen */
3650 if (unlikely(namelen == 0 || namelen > LDISKFS_NAME_LEN)) {
3651 CERROR("ldiskfs return invalid namelen %d\n", namelen);
3655 if ((void *) ent - it->oie_buf + sizeof(*ent) + namelen >
3659 if (d_type & LDISKFS_DIRENT_LUFID) {
3660 rec = (struct osd_fid_pack*) (name + namelen + 1);
3662 if (osd_fid_unpack(fid, rec) != 0)
3665 d_type &= ~LDISKFS_DIRENT_LUFID;
3670 ent->oied_ino = ino;
3671 ent->oied_off = offset;
3672 ent->oied_namelen = namelen;
3673 ent->oied_type = d_type;
3675 memcpy(ent->oied_name, name, namelen);
3677 it->oie_rd_dirent++;
3678 it->oie_dirent = (void *) ent + cfs_size_round(sizeof(*ent) + namelen);
3683 * Calls ->readdir() to load one directory entry at a time
3684 * and store it in the iterator's in-memory data structure.
3686 * \param di iterator's in memory structure
3688 * \retval 0 on success
3689 * \retval -ve on error
3691 static int osd_ldiskfs_it_fill(const struct lu_env *env,
3692 const struct dt_it *di)
3694 struct osd_it_ea *it = (struct osd_it_ea *)di;
3695 struct osd_object *obj = it->oie_obj;
3696 struct inode *inode = obj->oo_inode;
3697 struct htree_lock *hlock = NULL;
3701 it->oie_dirent = it->oie_buf;
3702 it->oie_rd_dirent = 0;
3704 if (obj->oo_hl_head != NULL) {
3705 hlock = osd_oti_get(env)->oti_hlock;
3706 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
3707 inode, LDISKFS_HLOCK_READDIR);
3709 cfs_down_read(&obj->oo_ext_idx_sem);
3712 result = inode->i_fop->readdir(&it->oie_file, it,
3713 (filldir_t) osd_ldiskfs_filldir);
3716 ldiskfs_htree_unlock(hlock);
3718 cfs_up_read(&obj->oo_ext_idx_sem);
3720 if (it->oie_rd_dirent == 0) {
3723 it->oie_dirent = it->oie_buf;
3724 it->oie_it_dirent = 1;
3731 * It calls osd_ldiskfs_it_fill(), which uses ->readdir()
3732 * to load one directory entry at a time and store it in
3733 * the iterator's in-memory data structure.
3735 * \param di iterator's in memory structure
3737 * \retval +ve iterator has reached the end
3738 * \retval 0 iterator has not reached the end
3739 * \retval -ve on error
3741 static int osd_it_ea_next(const struct lu_env *env, struct dt_it *di)
3743 struct osd_it_ea *it = (struct osd_it_ea *)di;
3748 if (it->oie_it_dirent < it->oie_rd_dirent) {
3750 (void *) it->oie_dirent +
3751 cfs_size_round(sizeof(struct osd_it_ea_dirent) +
3752 it->oie_dirent->oied_namelen);
3753 it->oie_it_dirent++;
3756 if (it->oie_file.f_pos == LDISKFS_HTREE_EOF)
3759 rc = osd_ldiskfs_it_fill(env, di);
3766 * Returns the key at current position from iterator's in memory structure.
3768 * \param di iterator's in memory structure
3770 * \retval key i.e. struct dt_key on success
3772 static struct dt_key *osd_it_ea_key(const struct lu_env *env,
3773 const struct dt_it *di)
3775 struct osd_it_ea *it = (struct osd_it_ea *)di;
3777 return (struct dt_key *)it->oie_dirent->oied_name;
3781 * Returns the key's size at current position from iterator's in memory structure.
3783 * \param di iterator's in memory structure
3785 * \retval key_size size of the key in bytes, on success
3787 static int osd_it_ea_key_size(const struct lu_env *env, const struct dt_it *di)
3789 struct osd_it_ea *it = (struct osd_it_ea *)di;
3791 return it->oie_dirent->oied_namelen;
3796 * Returns the value (i.e. fid/igif) at current position from iterator's
3797 * in memory structure.
3799 * \param di struct osd_it_ea, iterator's in memory structure
3800 * \param attr attr requested for dirent.
3801 * \param lde lustre dirent
3803 * \retval 0 no error and \a lde holds a valid lustre dirent.
3804 * \retval -ve on error
3806 static inline int osd_it_ea_rec(const struct lu_env *env,
3807 const struct dt_it *di,
3808 struct dt_rec *dtrec, __u32 attr)
3810 struct osd_it_ea *it = (struct osd_it_ea *)di;
3811 struct osd_object *obj = it->oie_obj;
3812 struct lu_fid *fid = &it->oie_dirent->oied_fid;
3813 struct lu_dirent *lde = (struct lu_dirent *)dtrec;
3818 if (!fid_is_sane(fid))
3819 rc = osd_ea_fid_get(env, obj, it->oie_dirent->oied_ino, fid);
3822 osd_it_pack_dirent(lde, fid, it->oie_dirent->oied_off,
3823 it->oie_dirent->oied_name,
3824 it->oie_dirent->oied_namelen,
3825 it->oie_dirent->oied_type,
3831 * Returns a cookie for current position of the iterator head, so that
3832 * user can use this cookie to load/start the iterator next time.
3834 * \param di iterator's in memory structure
3836 * \retval cookie for current position, on success
3838 static __u64 osd_it_ea_store(const struct lu_env *env, const struct dt_it *di)
3840 struct osd_it_ea *it = (struct osd_it_ea *)di;
3842 return it->oie_dirent->oied_off;
3846 * It calls osd_ldiskfs_it_fill(), which uses ->readdir()
3847 * to load one directory entry at a time and store it
3848 * in the iterator's in-memory data structure.
3850 * \param di struct osd_it_ea, iterator's in memory structure
3852 * \retval +ve on success
3853 * \retval -ve on error
3855 static int osd_it_ea_load(const struct lu_env *env,
3856 const struct dt_it *di, __u64 hash)
3858 struct osd_it_ea *it = (struct osd_it_ea *)di;
3862 it->oie_file.f_pos = hash;
3864 rc = osd_ldiskfs_it_fill(env, di);
3872 * Index lookup function for interoperability mode (b11826).
3874 * \param key file name to be looked up
3876 * \retval +ve, on success
3877 * \retval -ve, on error
3879 static int osd_index_ea_lookup(const struct lu_env *env, struct dt_object *dt,
3880 struct dt_rec *rec, const struct dt_key *key,
3881 struct lustre_capa *capa)
3883 struct osd_object *obj = osd_dt_obj(dt);
3888 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
3889 LINVRNT(osd_invariant(obj));
3891 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
3894 rc = osd_ea_lookup_rec(env, obj, rec, key);
3902 * Index and Iterator operations for interoperability
3903 * mode (i.e. to run 2.0 mds on 1.8 disk) (b11826)
3905 static const struct dt_index_operations osd_index_ea_ops = {
3906 .dio_lookup = osd_index_ea_lookup,
3907 .dio_declare_insert = osd_index_declare_ea_insert,
3908 .dio_insert = osd_index_ea_insert,
3909 .dio_declare_delete = osd_index_declare_ea_delete,
3910 .dio_delete = osd_index_ea_delete,
3912 .init = osd_it_ea_init,
3913 .fini = osd_it_ea_fini,
3914 .get = osd_it_ea_get,
3915 .put = osd_it_ea_put,
3916 .next = osd_it_ea_next,
3917 .key = osd_it_ea_key,
3918 .key_size = osd_it_ea_key_size,
3919 .rec = osd_it_ea_rec,
3920 .store = osd_it_ea_store,
3921 .load = osd_it_ea_load
3925 static void *osd_key_init(const struct lu_context *ctx,
3926 struct lu_context_key *key)
3928 struct osd_thread_info *info;
3930 OBD_ALLOC_PTR(info);
3932 return ERR_PTR(-ENOMEM);
3934 OBD_ALLOC(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
3935 if (info->oti_it_ea_buf == NULL)
3938 info->oti_env = container_of(ctx, struct lu_env, le_ctx);
3940 info->oti_hlock = ldiskfs_htree_lock_alloc();
3941 if (info->oti_hlock == NULL)
3947 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
3950 return ERR_PTR(-ENOMEM);
3953 static void osd_key_fini(const struct lu_context *ctx,
3954 struct lu_context_key *key, void* data)
3956 struct osd_thread_info *info = data;
3958 if (info->oti_hlock != NULL)
3959 ldiskfs_htree_lock_free(info->oti_hlock);
3960 OBD_FREE(info->oti_it_ea_buf, OSD_IT_EA_BUFSIZE);
3964 static void osd_key_exit(const struct lu_context *ctx,
3965 struct lu_context_key *key, void *data)
3967 struct osd_thread_info *info = data;
3969 LASSERT(info->oti_r_locks == 0);
3970 LASSERT(info->oti_w_locks == 0);
3971 LASSERT(info->oti_txns == 0);
3974 /* type constructor/destructor: osd_type_init, osd_type_fini */
3975 LU_TYPE_INIT_FINI(osd, &osd_key);
3977 struct lu_context_key osd_key = {
3978 .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD,
3979 .lct_init = osd_key_init,
3980 .lct_fini = osd_key_fini,
3981 .lct_exit = osd_key_exit
3985 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
3986 const char *name, struct lu_device *next)
3988 return osd_procfs_init(osd_dev(d), name);
3991 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
3993 struct osd_thread_info *info = osd_oti_get(env);
3997 if (o->od_oi_table != NULL)
3998 osd_oi_fini(info, o);
4001 fsfilt_put_ops(o->od_fsops);
4008 static int osd_mount(const struct lu_env *env,
4009 struct osd_device *o, struct lustre_cfg *cfg)
4011 struct lustre_mount_info *lmi;
4012 const char *dev = lustre_cfg_string(cfg, 0);
4013 struct lustre_disk_data *ldd;
4014 struct lustre_sb_info *lsi;
4019 o->od_fsops = fsfilt_get_ops(mt_str(LDD_MT_LDISKFS));
4020 if (o->od_fsops == NULL) {
4021 CERROR("Can't find fsfilt_ldiskfs\n");
4025 if (o->od_mount != NULL) {
4026 CERROR("Already mounted (%s)\n", dev);
4031 lmi = server_get_mount(dev);
4033 CERROR("Cannot get mount info for %s!\n", dev);
4037 LASSERT(lmi != NULL);
4038 /* save lustre_mount_info in dt_device */
4040 o->od_mnt = lmi->lmi_mnt;
4042 lsi = s2lsi(lmi->lmi_sb);
4045 if (ldd->ldd_flags & LDD_F_IAM_DIR) {
4047 LCONSOLE_WARN("%s: OSD: IAM mode enabled\n", dev);
4051 if (ldd->ldd_flags & LDD_F_SV_TYPE_OST) {
4052 rc = osd_compat_init(o);
4054 CERROR("%s: can't initialize compats: %d\n", dev, rc);
4060 static struct lu_device *osd_device_fini(const struct lu_env *env,
4061 struct lu_device *d)
4066 osd_compat_fini(osd_dev(d));
4068 shrink_dcache_sb(osd_sb(osd_dev(d)));
4069 osd_sync(env, lu2dt_dev(d));
4071 rc = osd_procfs_fini(osd_dev(d));
4073 CERROR("proc fini error %d \n", rc);
4074 RETURN (ERR_PTR(rc));
4077 if (osd_dev(d)->od_mount)
4078 server_put_mount(osd_dev(d)->od_mount->lmi_name,
4079 osd_dev(d)->od_mount->lmi_mnt);
4080 osd_dev(d)->od_mount = NULL;
4085 static struct lu_device *osd_device_alloc(const struct lu_env *env,
4086 struct lu_device_type *t,
4087 struct lustre_cfg *cfg)
4089 struct lu_device *l;
4090 struct osd_device *o;
4096 result = dt_device_init(&o->od_dt_dev, t);
4099 l->ld_ops = &osd_lu_ops;
4100 o->od_dt_dev.dd_ops = &osd_dt_ops;
4101 cfs_spin_lock_init(&o->od_osfs_lock);
4102 o->od_osfs_age = cfs_time_shift_64(-1000);
4103 o->od_capa_hash = init_capa_hash();
4104 if (o->od_capa_hash == NULL) {
4105 dt_device_fini(&o->od_dt_dev);
4106 l = ERR_PTR(-ENOMEM);
4109 l = ERR_PTR(result);
4114 l = ERR_PTR(-ENOMEM);
4118 static struct lu_device *osd_device_free(const struct lu_env *env,
4119 struct lu_device *d)
4121 struct osd_device *o = osd_dev(d);
4124 cleanup_capa_hash(o->od_capa_hash);
4125 dt_device_fini(&o->od_dt_dev);
4130 static int osd_process_config(const struct lu_env *env,
4131 struct lu_device *d, struct lustre_cfg *cfg)
4133 struct osd_device *o = osd_dev(d);
4137 switch(cfg->lcfg_command) {
4139 err = osd_mount(env, o, cfg);
4142 err = osd_shutdown(env, o);
4151 static int osd_recovery_complete(const struct lu_env *env,
4152 struct lu_device *d)
4157 static int osd_prepare(const struct lu_env *env, struct lu_device *pdev,
4158 struct lu_device *dev)
4160 struct osd_device *osd = osd_dev(dev);
4161 struct osd_thread_info *oti = osd_oti_get(env);
4166 /* 1. initialize oi before any file create or file open */
4167 result = osd_oi_init(oti, osd);
4171 if (!lu_device_is_md(pdev))
4174 /* 2. setup local objects */
4175 result = llo_local_objects_setup(env, lu2md_dev(pdev), lu2dt_dev(dev));
4179 static const struct lu_object_operations osd_lu_obj_ops = {
4180 .loo_object_init = osd_object_init,
4181 .loo_object_delete = osd_object_delete,
4182 .loo_object_release = osd_object_release,
4183 .loo_object_free = osd_object_free,
4184 .loo_object_print = osd_object_print,
4185 .loo_object_invariant = osd_object_invariant
4188 const struct lu_device_operations osd_lu_ops = {
4189 .ldo_object_alloc = osd_object_alloc,
4190 .ldo_process_config = osd_process_config,
4191 .ldo_recovery_complete = osd_recovery_complete,
4192 .ldo_prepare = osd_prepare,
4195 static const struct lu_device_type_operations osd_device_type_ops = {
4196 .ldto_init = osd_type_init,
4197 .ldto_fini = osd_type_fini,
4199 .ldto_start = osd_type_start,
4200 .ldto_stop = osd_type_stop,
4202 .ldto_device_alloc = osd_device_alloc,
4203 .ldto_device_free = osd_device_free,
4205 .ldto_device_init = osd_device_init,
4206 .ldto_device_fini = osd_device_fini
4209 static struct lu_device_type osd_device_type = {
4210 .ldt_tags = LU_DEVICE_DT,
4211 .ldt_name = LUSTRE_OSD_NAME,
4212 .ldt_ops = &osd_device_type_ops,
4213 .ldt_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
4217 * lprocfs legacy support.
4219 static struct obd_ops osd_obd_device_ops = {
4220 .o_owner = THIS_MODULE
4223 static int __init osd_mod_init(void)
4225 struct lprocfs_static_vars lvars;
4228 lprocfs_osd_init_vars(&lvars);
4229 return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
4230 LUSTRE_OSD_NAME, &osd_device_type);
4233 static void __exit osd_mod_exit(void)
4235 class_unregister_type(LUSTRE_OSD_NAME);
4238 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4239 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_NAME")");
4240 MODULE_LICENSE("GPL");
4242 cfs_module(osd, "0.1.0", osd_mod_init, osd_mod_exit);