1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/osd/osd_handler.c
38 * Top-level entry points into osd module
40 * Author: Nikita Danilov <nikita@clusterfs.com>
44 # define EXPORT_SYMTAB
46 #define DEBUG_SUBSYSTEM S_MDS
48 #include <linux/module.h>
50 /* LUSTRE_VERSION_CODE */
51 #include <lustre_ver.h>
52 /* prerequisite for linux/xattr.h */
53 #include <linux/types.h>
54 /* prerequisite for linux/xattr.h */
56 /* XATTR_{REPLACE,CREATE} */
57 #include <linux/xattr.h>
59 * XXX temporary stuff: direct access to ldiskfs/jdb. Interface between osd
60 * and file system is not yet specified.
62 /* handle_t, journal_start(), journal_stop() */
63 #include <linux/jbd.h>
65 #include <linux/ldiskfs_fs.h>
66 #include <linux/ldiskfs_jbd.h>
71 * struct OBD_{ALLOC,FREE}*()
74 #include <obd_support.h>
75 /* struct ptlrpc_thread */
76 #include <lustre_net.h>
79 #include <lustre_fid.h>
80 #include <linux/lustre_iam.h>
82 #include "osd_internal.h"
/* Directory index state: iam container + descriptor, serialized by od_sem. */
85 struct osd_directory {
86 struct iam_container od_container;
87 struct iam_descr od_descr;
88 struct semaphore od_sem;
/* NOTE(review): the struct osd_object header line is missing from this
 * listing; the fields below (oo_dt, oo_inode, ...) belong to osd_object. */
92 struct dt_object oo_dt;
94 * Inode for file system object represented by this osd_object. This
95 * inode is pinned for the whole duration of lu_object life.
97 * Not modified concurrently (either setup early during object
98 * creation, or assigned by osd_object_create() under write lock).
100 struct inode *oo_inode;
101 struct rw_semaphore oo_sem;
102 struct osd_directory *oo_dir;
103 /** protects inode attributes. */
105 const struct lu_env *oo_owner;
106 #ifdef CONFIG_LOCKDEP
107 struct lockdep_map oo_dep_map;
111 static int osd_root_get (const struct lu_env *env,
112 struct dt_device *dev, struct lu_fid *f);
114 static int lu_device_is_osd (const struct lu_device *d);
115 static void osd_mod_exit (void) __exit;
116 static int osd_mod_init (void) __init;
117 static int osd_type_init (struct lu_device_type *t);
118 static void osd_type_fini (struct lu_device_type *t);
119 static int osd_object_init (const struct lu_env *env,
121 const struct lu_object_conf *_);
122 static void osd_object_release(const struct lu_env *env,
123 struct lu_object *l);
124 static int osd_object_print (const struct lu_env *env, void *cookie,
125 lu_printer_t p, const struct lu_object *o);
126 static struct lu_device *osd_device_free (const struct lu_env *env,
127 struct lu_device *m);
128 static void *osd_key_init (const struct lu_context *ctx,
129 struct lu_context_key *key);
130 static void osd_key_fini (const struct lu_context *ctx,
131 struct lu_context_key *key, void *data);
132 static void osd_key_exit (const struct lu_context *ctx,
133 struct lu_context_key *key, void *data);
134 static int osd_has_index (const struct osd_object *obj);
135 static void osd_object_init0 (struct osd_object *obj);
136 static int osd_device_init (const struct lu_env *env,
137 struct lu_device *d, const char *,
139 static int osd_fid_lookup (const struct lu_env *env,
140 struct osd_object *obj,
141 const struct lu_fid *fid);
142 static void osd_inode_getattr (const struct lu_env *env,
143 struct inode *inode, struct lu_attr *attr);
144 static int osd_inode_setattr (const struct lu_env *env,
145 struct inode *inode, const struct lu_attr *attr);
146 static int osd_param_is_sane (const struct osd_device *dev,
147 const struct txn_param *param);
148 static int osd_index_lookup (const struct lu_env *env,
149 struct dt_object *dt,
150 struct dt_rec *rec, const struct dt_key *key,
151 struct lustre_capa *capa);
152 static int osd_index_insert (const struct lu_env *env,
153 struct dt_object *dt,
154 const struct dt_rec *rec,
155 const struct dt_key *key,
156 struct thandle *handle,
157 struct lustre_capa *capa,
159 static int osd_index_delete (const struct lu_env *env,
160 struct dt_object *dt, const struct dt_key *key,
161 struct thandle *handle,
162 struct lustre_capa *capa);
163 static int osd_index_probe (const struct lu_env *env,
164 struct osd_object *o,
165 const struct dt_index_features *feat);
166 static int osd_index_try (const struct lu_env *env,
167 struct dt_object *dt,
168 const struct dt_index_features *feat);
169 static void osd_index_fini (struct osd_object *o);
171 static void osd_it_fini (const struct lu_env *env, struct dt_it *di);
172 static int osd_it_get (const struct lu_env *env,
173 struct dt_it *di, const struct dt_key *key);
174 static void osd_it_put (const struct lu_env *env, struct dt_it *di);
175 static int osd_it_next (const struct lu_env *env, struct dt_it *di);
176 static int osd_it_del (const struct lu_env *env, struct dt_it *di,
178 static int osd_it_key_size (const struct lu_env *env,
179 const struct dt_it *di);
180 static void osd_conf_get (const struct lu_env *env,
181 const struct dt_device *dev,
182 struct dt_device_param *param);
183 static void osd_trans_stop (const struct lu_env *env,
185 static int osd_object_is_root(const struct osd_object *obj);
187 static struct osd_object *osd_obj (const struct lu_object *o);
188 static struct osd_device *osd_dev (const struct lu_device *d);
189 static struct osd_device *osd_dt_dev (const struct dt_device *d);
190 static struct osd_object *osd_dt_obj (const struct dt_object *d);
191 static struct osd_device *osd_obj2dev (const struct osd_object *o);
192 static struct lu_device *osd2lu_dev (struct osd_device *osd);
193 static struct lu_device *osd_device_fini (const struct lu_env *env,
194 struct lu_device *d);
195 static struct lu_device *osd_device_alloc (const struct lu_env *env,
196 struct lu_device_type *t,
197 struct lustre_cfg *cfg);
198 static struct lu_object *osd_object_alloc (const struct lu_env *env,
199 const struct lu_object_header *hdr,
200 struct lu_device *d);
201 static struct inode *osd_iget (struct osd_thread_info *info,
202 struct osd_device *dev,
203 const struct osd_inode_id *id);
204 static struct super_block *osd_sb (const struct osd_device *dev);
205 static struct dt_it *osd_it_init (const struct lu_env *env,
206 struct dt_object *dt, int wable,
207 struct lustre_capa *capa);
208 static struct dt_key *osd_it_key (const struct lu_env *env,
209 const struct dt_it *di);
210 static struct dt_rec *osd_it_rec (const struct lu_env *env,
211 const struct dt_it *di);
212 static struct timespec *osd_inode_time (const struct lu_env *env,
215 static struct thandle *osd_trans_start (const struct lu_env *env,
217 struct txn_param *p);
218 static journal_t *osd_journal (const struct osd_device *dev);
220 static const struct lu_device_type_operations osd_device_type_ops;
221 static struct lu_device_type osd_device_type;
222 static const struct lu_object_operations osd_lu_obj_ops;
223 static struct obd_ops osd_obd_device_ops;
224 static const struct lu_device_operations osd_lu_ops;
225 static struct lu_context_key osd_key;
226 static const struct dt_object_operations osd_obj_ops;
227 static const struct dt_body_operations osd_body_ops;
228 static const struct dt_index_operations osd_index_ops;
229 static const struct dt_index_operations osd_index_compat_ops;
/* OSD transaction handle: wraps struct thandle, adds the jbd commit
 * callback and a lu_ref link pinning the device until commit. */
232 struct thandle ot_super;
234 struct journal_callback ot_jcb;
235 /* Link to the device, for debugging. */
236 struct lu_ref_link *ot_dev_link;
240 #ifdef HAVE_QUOTA_SUPPORT
/* Temporarily switch current->fs{uid,gid} and effective capabilities to
 * the request user's credentials, saving originals in @save. */
242 osd_push_ctxt(const struct lu_env *env, struct osd_ctxt *save)
244 struct md_ucred *uc = md_ucred(env);
248 save->oc_uid = current->fsuid;
249 save->oc_gid = current->fsgid;
250 save->oc_cap = current->cap_effective;
251 current->fsuid = uc->mu_fsuid;
252 current->fsgid = uc->mu_fsgid;
253 current->cap_effective = uc->mu_cap;
/* Restore the credentials saved by osd_push_ctxt(). */
257 osd_pop_ctxt(struct osd_ctxt *save)
259 current->fsuid = save->oc_uid;
260 current->fsgid = save->oc_gid;
261 current->cap_effective = save->oc_cap;
/*
 * Invariants, assertions.
 *
 * XXX: do not enable this, until invariant checking code is made thread safe
 * in the face of pdirops locking.
 */
#define OSD_INVARIANT_CHECKS (0)

#if OSD_INVARIANT_CHECKS
/*
 * Object invariant: when an inode is attached it belongs to this device's
 * super-block and is pinned (i_count > 0); when the directory iam container
 * is bound to an inode, it is bound to this object's inode.
 *
 * Fix: the original referenced "od_conationer", a misspelling of the
 * od_container member declared in struct osd_directory (and used by
 * osd_index_fini()); it would fail to compile once this check is enabled.
 */
static int osd_invariant(const struct osd_object *obj)
{
        return
                ergo(obj->oo_inode != NULL,
                     obj->oo_inode->i_sb == osd_sb(osd_obj2dev(obj)) &&
                     atomic_read(&obj->oo_inode->i_count) > 0) &&
                ergo(obj->oo_dir != NULL &&
                     obj->oo_dir->od_container.ic_object != NULL,
                     obj->oo_dir->od_container.ic_object == obj->oo_inode);
}
#else
#define osd_invariant(obj) (1)
#endif
/* Per-thread OSD scratch state stored in the lu_env context. */
291 static inline struct osd_thread_info *osd_oti_get(const struct lu_env *env)
293 return lu_context_key_get(&env->le_ctx, &osd_key);
297 * Concurrency: doesn't matter
/* True iff this thread currently holds at least one osd read lock. */
299 static int osd_read_locked(const struct lu_env *env, struct osd_object *o)
301 return osd_oti_get(env)->oti_r_locks > 0;
305 * Concurrency: doesn't matter
/* True iff this thread holds a write lock and owns @o in particular. */
307 static int osd_write_locked(const struct lu_env *env, struct osd_object *o)
309 struct osd_thread_info *oti = osd_oti_get(env);
310 return oti->oti_w_locks > 0 && o->oo_owner == env;
314 * Concurrency: doesn't access mutable data
/* Build the root fid as an igif from the root inode's ino/generation. */
316 static int osd_root_get(const struct lu_env *env,
317 struct dt_device *dev, struct lu_fid *f)
321 inode = osd_sb(osd_dt_dev(dev))->s_root->d_inode;
322 lu_igif_build(f, inode->i_ino, inode->i_generation);
327 * OSD object methods.
331 * Concurrency: no concurrent access is possible that early in object
/* Allocate a new osd_object for @hdr and wire up its dt/lu operations and
 * locks. (Allocation and error-path lines are missing from this listing.) */
334 static struct lu_object *osd_object_alloc(const struct lu_env *env,
335 const struct lu_object_header *hdr,
338 struct osd_object *mo;
344 l = &mo->oo_dt.do_lu;
345 dt_object_init(&mo->oo_dt, NULL, d);
346 mo->oo_dt.do_ops = &osd_obj_ops;
347 l->lo_ops = &osd_lu_obj_ops;
348 init_rwsem(&mo->oo_sem);
349 spin_lock_init(&mo->oo_guard);
356 * Concurrency: shouldn't matter.
/* Second-stage init once oo_inode is attached: install body ops and
 * publish existence + file type bits in the header attributes. */
358 static void osd_object_init0(struct osd_object *obj)
360 LASSERT(obj->oo_inode != NULL);
361 obj->oo_dt.do_body_ops = &osd_body_ops;
362 obj->oo_dt.do_lu.lo_header->loh_attr |=
363 (LOHA_EXISTS | (obj->oo_inode->i_mode & S_IFMT));
367 * Concurrency: no concurrent access is possible that early in object
/* Look up the backing inode by fid; finish initialization if found. */
370 static int osd_object_init(const struct lu_env *env, struct lu_object *l,
371 const struct lu_object_conf *_)
373 struct osd_object *obj = osd_obj(l);
376 LINVRNT(osd_invariant(obj));
378 result = osd_fid_lookup(env, obj, lu_object_fid(l));
380 if (obj->oo_inode != NULL)
381 osd_object_init0(obj);
383 LINVRNT(osd_invariant(obj));
388 * Concurrency: no concurrent access is possible that late in object
391 static void osd_object_free(const struct lu_env *env, struct lu_object *l)
393 struct osd_object *obj = osd_obj(l);
395 LINVRNT(osd_invariant(obj));
397 dt_object_fini(&obj->oo_dt);
/* Allocate an iam path descriptor from the per-thread scratch area. */
401 static struct iam_path_descr *osd_ipd_get(const struct lu_env *env,
402 const struct iam_container *bag)
404 return bag->ic_descr->id_ops->id_ipd_alloc(bag,
405 osd_oti_get(env)->oti_ipd);
408 static void osd_ipd_put(const struct lu_env *env,
409 const struct iam_container *bag,
410 struct iam_path_descr *ipd)
412 bag->ic_descr->id_ops->id_ipd_free(ipd);
416 * Concurrency: no concurrent access is possible that late in object
/* Tear down the iam container (only if it is bound to our inode) and free
 * the directory state. */
419 static void osd_index_fini(struct osd_object *o)
421 struct iam_container *bag;
423 if (o->oo_dir != NULL) {
424 bag = &o->oo_dir->od_container;
425 if (o->oo_inode != NULL) {
426 if (bag->ic_object == o->oo_inode)
427 iam_container_fini(bag);
429 OBD_FREE_PTR(o->oo_dir);
435 * Concurrency: no concurrent access is possible that late in object
436 * life-cycle (for all existing callers, that is. New callers have to provide
437 * their own locking.)
439 static int osd_inode_unlinked(const struct inode *inode)
441 return inode->i_nlink == 0;
/* Journal credit estimates for deleting the OI mapping and the inode. */
445 OSD_TXN_OI_DELETE_CREDITS = 20,
446 OSD_TXN_INODE_DELETE_CREDITS = 20
450 * Concurrency: no concurrent access is possible that late in object
/* Remove the fid->ino mapping of an unlinked object inside its own small
 * transaction. */
453 static int osd_inode_remove(const struct lu_env *env, struct osd_object *obj)
455 const struct lu_fid *fid = lu_object_fid(&obj->oo_dt.do_lu);
456 struct osd_device *osd = osd_obj2dev(obj);
457 struct osd_thread_info *oti = osd_oti_get(env);
458 struct txn_param *prm = &oti->oti_txn;
462 txn_param_init(prm, OSD_TXN_OI_DELETE_CREDITS +
463 OSD_TXN_INODE_DELETE_CREDITS);
464 th = osd_trans_start(env, &osd->od_dt_dev, prm);
466 result = osd_oi_delete(oti, &osd->od_oi, fid, th);
467 osd_trans_stop(env, th);
469 result = PTR_ERR(th);
474 * Called just before object is freed. Releases all resources except for
475 * object itself (that is released by osd_object_free()).
477 * Concurrency: no concurrent access is possible that late in object
480 static void osd_object_delete(const struct lu_env *env, struct lu_object *l)
482 struct osd_object *obj = osd_obj(l);
483 struct inode *inode = obj->oo_inode;
485 LINVRNT(osd_invariant(obj));
488 * If object is unlinked remove fid->ino mapping from object index.
490 * File body will be deleted by iput().
497 if (osd_inode_unlinked(inode)) {
498 result = osd_inode_remove(env, obj);
500 LU_OBJECT_DEBUG(D_ERROR, env, l,
501 "Failed to cleanup: %d\n",
505 obj->oo_inode = NULL;
510 * Concurrency: ->loo_object_release() is called under site spin-lock.
/* Objects whose inode is already unlinked are marked for immediate
 * destruction instead of being cached. */
512 static void osd_object_release(const struct lu_env *env,
515 struct osd_object *o = osd_obj(l);
517 LASSERT(!lu_object_is_dying(l->lo_header));
518 if (o->oo_inode != NULL && osd_inode_unlinked(o->oo_inode))
519 set_bit(LU_OBJECT_HEARD_BANSHEE, &l->lo_header->loh_flags);
523 * Concurrency: shouldn't matter.
/* Debug printer: inode pointer, ino/generation and index flavour. */
525 static int osd_object_print(const struct lu_env *env, void *cookie,
526 lu_printer_t p, const struct lu_object *l)
528 struct osd_object *o = osd_obj(l);
531 if (o->oo_dir != NULL)
532 d = o->oo_dir->od_container.ic_descr;
535 return (*p)(env, cookie, LUSTRE_OSD_NAME"-object@%p(i:%p:%lu/%u)[%s]",
537 o->oo_inode ? o->oo_inode->i_ino : 0UL,
538 o->oo_inode ? o->oo_inode->i_generation : 0,
539 d ? d->id_ops->id_name : "plain");
543 * Concurrency: shouldn't matter.
/* statfs with a one-second cache of the kernel result, all under
 * od_osfs_lock. */
545 int osd_statfs(const struct lu_env *env, struct dt_device *d,
548 struct osd_device *osd = osd_dt_dev(d);
549 struct super_block *sb = osd_sb(osd);
552 spin_lock(&osd->od_osfs_lock);
554 if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
555 result = ll_do_statfs(sb, &osd->od_kstatfs);
556 if (likely(result == 0)) /* N.B. statfs can't really fail */
557 osd->od_osfs_age = cfs_time_current_64();
560 if (likely(result == 0))
561 *sfs = osd->od_kstatfs;
562 spin_unlock(&osd->od_osfs_lock);
568 * Concurrency: doesn't access mutable data.
/* Report static per-device parameters (name length, link max, block
 * shift) taken from the ldiskfs super-block. */
570 static void osd_conf_get(const struct lu_env *env,
571 const struct dt_device *dev,
572 struct dt_device_param *param)
575 * XXX should be taken from not-yet-existing fs abstraction layer.
577 param->ddp_max_name_len = LDISKFS_NAME_LEN;
578 param->ddp_max_nlink = LDISKFS_LINK_MAX;
579 param->ddp_block_shift = osd_sb(osd_dt_dev(dev))->s_blocksize_bits;
587 * Concurrency: doesn't access mutable data.
/* A transaction is sane iff its credits fit in one journal transaction. */
589 static int osd_param_is_sane(const struct osd_device *dev,
590 const struct txn_param *param)
592 return param->tp_credits <= osd_journal(dev)->j_max_transaction_buffers;
596 * Concurrency: shouldn't matter.
/* jbd commit callback: run dt txn commit hooks in the device's private
 * commit env, then drop the device reference taken at trans start. */
598 static void osd_trans_commit_cb(struct journal_callback *jcb, int error)
600 struct osd_thandle *oh = container_of0(jcb, struct osd_thandle, ot_jcb);
601 struct thandle *th = &oh->ot_super;
602 struct dt_device *dev = th->th_dev;
603 struct lu_device *lud = &dev->dd_lu_dev;
605 LASSERT(dev != NULL);
606 LASSERT(oh->ot_handle == NULL);
609 CERROR("transaction @0x%p commit error: %d\n", th, error);
611 struct lu_env *env = &osd_dt_dev(dev)->od_env_for_commit;
613 * This od_env_for_commit is only for commit usage. see
616 lu_context_enter(&env->le_ctx);
617 dt_txn_hook_commit(env, th);
618 lu_context_exit(&env->le_ctx);
621 lu_ref_del_at(&lud->ld_reference, oh->ot_dev_link, "osd-tx", th);
625 lu_context_exit(&th->th_ctx);
626 lu_context_fini(&th->th_ctx);
631 * Concurrency: shouldn't matter.
/* Start a journal transaction sized by @p; on success register the commit
 * callback and pin the device (via lu_ref) until commit. The thread must
 * hold no osd locks and have no transaction already open. */
633 static struct thandle *osd_trans_start(const struct lu_env *env,
637 struct osd_device *dev = osd_dt_dev(d);
639 struct osd_thandle *oh;
645 hook_res = dt_txn_hook_start(env, d, p);
647 RETURN(ERR_PTR(hook_res));
649 if (osd_param_is_sane(dev, p)) {
650 OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
652 struct osd_thread_info *oti = osd_oti_get(env);
655 * XXX temporary stuff. Some abstraction layer should
659 jh = journal_start(osd_journal(dev), p->tp_credits);
665 jh->h_sync = p->tp_sync;
666 lu_device_get(&d->dd_lu_dev);
667 oh->ot_dev_link = lu_ref_add
668 (&d->dd_lu_dev.ld_reference,
670 /* add commit callback */
671 lu_context_init(&th->th_ctx, LCT_TX_HANDLE);
672 lu_context_enter(&th->th_ctx);
673 journal_callback_set(jh, osd_trans_commit_cb,
674 (struct journal_callback *)&oh->ot_jcb);
675 LASSERT(oti->oti_txns == 0);
676 LASSERT(oti->oti_r_locks == 0);
677 LASSERT(oti->oti_w_locks == 0);
684 th = ERR_PTR(-ENOMEM);
686 CERROR("Invalid transaction parameters\n");
687 th = ERR_PTR(-EINVAL);
694 * Concurrency: shouldn't matter.
/* Stop a transaction: run stop hooks then journal_stop(). Asserts that
 * exactly one txn is open in this thread and no osd locks are held. */
696 static void osd_trans_stop(const struct lu_env *env, struct thandle *th)
699 struct osd_thandle *oh;
700 struct osd_thread_info *oti = osd_oti_get(env);
704 oh = container_of0(th, struct osd_thandle, ot_super);
705 if (oh->ot_handle != NULL) {
706 handle_t *hdl = oh->ot_handle;
708 LASSERT(oti->oti_txns == 1);
710 LASSERT(oti->oti_r_locks == 0);
711 LASSERT(oti->oti_w_locks == 0);
712 result = dt_txn_hook_stop(env, th);
714 CERROR("Failure in transaction hook: %d\n", result);
715 oh->ot_handle = NULL;
716 result = journal_stop(hdl);
718 CERROR("Failure to stop transaction: %d\n", result);
724 * Concurrency: shouldn't matter.
/* Synchronously force a journal commit on the backing ldiskfs. */
726 static int osd_sync(const struct lu_env *env, struct dt_device *d)
728 CDEBUG(D_HA, "syncing OSD %s\n", LUSTRE_OSD_NAME)
729 return ldiskfs_force_commit(osd_sb(osd_dt_dev(d)));
733 * Start commit for OSD device.
735 * An implementation of dt_commit_async method for OSD device.
736 * Asynchronously starts underlying fs sync and thereby a transaction
739 * \param env environment
742 * \see dt_device_operations
744 static int osd_commit_async(const struct lu_env *env,
747 struct super_block *s = osd_sb(osd_dt_dev(d));
750 CDEBUG(D_HA, "async commit OSD %s\n", LUSTRE_OSD_NAME);
751 RETURN(s->s_op->sync_fs(s, 0));
755 * Concurrency: shouldn't matter.
757 lvfs_sbdev_type fsfilt_ldiskfs_journal_sbdev(struct super_block *);
/* Force the data and journal block devices read-only (failover aid). */
759 static void osd_ro(const struct lu_env *env, struct dt_device *d)
763 CERROR("*** setting device %s read-only ***\n", LUSTRE_OSD_NAME);
765 __lvfs_set_rdonly(lvfs_sbdev(osd_sb(osd_dt_dev(d))),
766 fsfilt_ldiskfs_journal_sbdev(osd_sb(osd_dt_dev(d))));
771 * Concurrency: serialization provided by callers.
/* Record capability-checking parameters on the device. */
773 static int osd_init_capa_ctxt(const struct lu_env *env, struct dt_device *d,
774 int mode, unsigned long timeout, __u32 alg,
775 struct lustre_capa_key *keys)
777 struct osd_device *dev = osd_dt_dev(d);
780 dev->od_fl_capa = mode;
781 dev->od_capa_timeout = timeout;
782 dev->od_capa_alg = alg;
783 dev->od_capa_keys = keys;
788 * Concurrency: serialization provided by callers.
/* Populate an lvfs context for the quota code from the mount's vfsmount.
 * @ctxt is actually the obd_device (cast below). */
790 static void osd_init_quota_ctxt(const struct lu_env *env, struct dt_device *d,
791 struct dt_quota_ctxt *ctxt, void *data)
793 struct obd_device *obd = (void *)ctxt;
794 struct vfsmount *mnt = (struct vfsmount *)data;
797 obd->u.obt.obt_sb = mnt->mnt_root->d_inode->i_sb;
798 OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
799 obd->obd_lvfs_ctxt.pwdmnt = mnt;
800 obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
801 obd->obd_lvfs_ctxt.fs = get_ds();
807 * Note: we do not count into QUOTA here.
808 * If we mount with --data_journal we may need more.
810 static const int osd_dto_credits_noquota[DTO_NR] = {
813 * INDEX_EXTRA_TRANS_BLOCKS(8) +
814 * SINGLEDATA_TRANS_BLOCKS(8)
815 * XXX Note: maybe iam need more, since iam have more level than
818 [DTO_INDEX_INSERT] = 16,
819 [DTO_INDEX_DELETE] = 16,
/* NOTE(review): "IDNEX" is the spelling of the enum constant as declared
 * in the dt_object header; do not "fix" it here alone. */
823 [DTO_IDNEX_UPDATE] = 16,
825 * Create an object. The same as create object in EXT3.
826 * DATA_TRANS_BLOCKS(14) +
827 * INDEX_EXTRA_BLOCKS(8) +
828 * 3(inode bits, groups, GDT)
830 [DTO_OBJECT_CREATE] = 25,
834 [DTO_OBJECT_DELETE] = 25,
837 * 3(inode bits, group, GDT)
839 [DTO_ATTR_SET_BASE] = 3,
841 * Xattr set. The same as xattr of EXT3.
842 * DATA_TRANS_BLOCKS(14)
843 * XXX Note: in original MDS implementation INDEX_EXTRA_TRANS_BLOCKS are
844 * also counted in. Do not know why?
846 [DTO_XATTR_SET] = 14,
849 * credits for inode change during write.
851 [DTO_WRITE_BASE] = 3,
853 * credits for single block write.
855 [DTO_WRITE_BLOCK] = 14,
857 * Attr set credits for chown.
858 * 3 (inode bit, group, GDT)
860 [DTO_ATTR_SET_CHOWN]= 3
864 * Note: we count into QUOTA here.
865 * If we mount with --data_journal we may need more.
867 static const int osd_dto_credits_quota[DTO_NR] = {
869 * INDEX_EXTRA_TRANS_BLOCKS(8) +
870 * SINGLEDATA_TRANS_BLOCKS(8) +
871 * 2 * QUOTA_TRANS_BLOCKS(2)
873 [DTO_INDEX_INSERT] = 20,
875 * INDEX_EXTRA_TRANS_BLOCKS(8) +
876 * SINGLEDATA_TRANS_BLOCKS(8) +
877 * 2 * QUOTA_TRANS_BLOCKS(2)
879 [DTO_INDEX_DELETE] = 20,
883 [DTO_IDNEX_UPDATE] = 16,
885 * Create an object. Same as create object in EXT3 filesystem.
886 * DATA_TRANS_BLOCKS(16) +
887 * INDEX_EXTRA_BLOCKS(8) +
888 * 3(inode bits, groups, GDT) +
889 * 2 * QUOTA_INIT_BLOCKS(25)
891 [DTO_OBJECT_CREATE] = 77,
894 * DATA_TRANS_BLOCKS(16) +
895 * INDEX_EXTRA_BLOCKS(8) +
896 * 3(inode bits, groups, GDT) +
899 [DTO_OBJECT_DELETE] = 27,
902 * 3 (inode bit, group, GDT) +
904 [DTO_ATTR_SET_BASE] = 3,
906 * Xattr set. The same as xattr of EXT3.
907 * DATA_TRANS_BLOCKS(16)
908 * XXX Note: in original MDS implementation INDEX_EXTRA_TRANS_BLOCKS are
909 * also counted in. Do not know why?
911 [DTO_XATTR_SET] = 16,
914 * credits for inode change during write.
916 [DTO_WRITE_BASE] = 3,
918 * credits for single block write.
920 [DTO_WRITE_BLOCK] = 16,
922 * Attr set credits for chown.
923 * 3 (inode bit, group, GDT) +
924 * 2 * QUOTA_INIT_BLOCKS(25) +
925 * 2 * QUOTA_DEL_BLOCKS(9)
927 [DTO_ATTR_SET_CHOWN]= 71
/* Select the per-operation credit estimate; the quota table applies only
 * when the filesystem is mounted with quota enabled. */
930 static int osd_credit_get(const struct lu_env *env, struct dt_device *d,
933 LASSERT(ARRAY_SIZE(osd_dto_credits_noquota) ==
934 ARRAY_SIZE(osd_dto_credits_quota));
935 LASSERT(0 <= op && op < ARRAY_SIZE(osd_dto_credits_noquota));
936 #ifdef HAVE_QUOTA_SUPPORT
937 if (test_opt(osd_sb(osd_dt_dev(d)), QUOTA))
938 return osd_dto_credits_quota[op];
941 return osd_dto_credits_noquota[op];
/* dt_device operations vector for the OSD device. */
944 static const struct dt_device_operations osd_dt_ops = {
945 .dt_root_get = osd_root_get,
946 .dt_statfs = osd_statfs,
947 .dt_trans_start = osd_trans_start,
948 .dt_trans_stop = osd_trans_stop,
949 .dt_conf_get = osd_conf_get,
952 .dt_commit_async = osd_commit_async,
953 .dt_credit_get = osd_credit_get,
954 .dt_init_capa_ctxt = osd_init_capa_ctxt,
955 .dt_init_quota_ctxt= osd_init_quota_ctxt,
/* Read-lock @dt; @role is the lockdep nesting level. The caller must not
 * already own the object's write lock. */
958 static void osd_object_read_lock(const struct lu_env *env,
959 struct dt_object *dt, unsigned role)
961 struct osd_object *obj = osd_dt_obj(dt);
962 struct osd_thread_info *oti = osd_oti_get(env);
964 LINVRNT(osd_invariant(obj));
966 LASSERT(obj->oo_owner != env);
967 down_read_nested(&obj->oo_sem, role);
969 LASSERT(obj->oo_owner == NULL);
/* Write-lock @dt; ownership is recorded in oo_owner (lines recording it
 * are missing from this listing). */
973 static void osd_object_write_lock(const struct lu_env *env,
974 struct dt_object *dt, unsigned role)
976 struct osd_object *obj = osd_dt_obj(dt);
977 struct osd_thread_info *oti = osd_oti_get(env);
979 LINVRNT(osd_invariant(obj));
981 LASSERT(obj->oo_owner != env);
982 down_write_nested(&obj->oo_sem, role);
984 LASSERT(obj->oo_owner == NULL);
989 static void osd_object_read_unlock(const struct lu_env *env,
990 struct dt_object *dt)
992 struct osd_object *obj = osd_dt_obj(dt);
993 struct osd_thread_info *oti = osd_oti_get(env);
995 LINVRNT(osd_invariant(obj));
997 LASSERT(oti->oti_r_locks > 0);
999 up_read(&obj->oo_sem);
1002 static void osd_object_write_unlock(const struct lu_env *env,
1003 struct dt_object *dt)
1005 struct osd_object *obj = osd_dt_obj(dt);
1006 struct osd_thread_info *oti = osd_oti_get(env);
1008 LINVRNT(osd_invariant(obj));
1010 LASSERT(obj->oo_owner == env);
1011 LASSERT(oti->oti_w_locks > 0);
1013 obj->oo_owner = NULL;
1014 up_write(&obj->oo_sem);
/* Validate a capability: consult the capa cache, check expiry, find the
 * matching key under capa_lock, verify the HMAC, and cache on success. */
1017 static int capa_is_sane(const struct lu_env *env,
1018 struct osd_device *dev,
1019 struct lustre_capa *capa,
1020 struct lustre_capa_key *keys)
1022 struct osd_thread_info *oti = osd_oti_get(env);
1023 struct lustre_capa *tcapa = &oti->oti_capa;
1024 struct obd_capa *oc;
1028 oc = capa_lookup(dev->od_capa_hash, capa, 0);
1030 if (capa_is_expired(oc)) {
1031 DEBUG_CAPA(D_ERROR, capa, "expired");
1038 if (capa_is_expired_sec(capa)) {
1039 DEBUG_CAPA(D_ERROR, capa, "expired");
1043 spin_lock(&capa_lock);
1044 for (i = 0; i < 2; i++) {
1045 if (keys[i].lk_keyid == capa->lc_keyid) {
1046 oti->oti_capa_key = keys[i];
1050 spin_unlock(&capa_lock);
1053 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
1057 rc = capa_hmac(tcapa->lc_hmac, capa, oti->oti_capa_key.lk_key);
1061 if (memcmp(tcapa->lc_hmac, capa->lc_hmac, sizeof(capa->lc_hmac))) {
1062 DEBUG_CAPA(D_ERROR, capa, "HMAC mismatch");
1066 oc = capa_add(dev->od_capa_hash, capa);
/* Authorize operation @opc on @dt against the supplied capability;
 * short-circuits when capabilities are disabled or bypassed. */
1072 static int osd_object_auth(const struct lu_env *env, struct dt_object *dt,
1073 struct lustre_capa *capa, __u64 opc)
1075 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1076 struct osd_device *dev = osd_dev(dt->do_lu.lo_dev);
1077 struct md_capainfo *ci;
1080 if (!dev->od_fl_capa)
1083 if (capa == BYPASS_CAPA)
1086 ci = md_capainfo(env);
1090 if (ci->mc_auth == LC_ID_NONE)
1094 CERROR("no capability is provided for fid "DFID"\n", PFID(fid));
1098 if (!lu_fid_eq(fid, &capa->lc_fid)) {
1099 DEBUG_CAPA(D_ERROR, capa, "fid "DFID" mismatch with",
1104 if (!capa_opc_supported(capa, opc)) {
1105 DEBUG_CAPA(D_ERROR, capa, "opc "LPX64" not supported by", opc);
1109 if ((rc = capa_is_sane(env, dev, capa, dev->od_capa_keys))) {
1110 DEBUG_CAPA(D_ERROR, capa, "insane (rc %d)", rc);
/* Read inode attributes under oo_guard after capability check. */
1117 static int osd_attr_get(const struct lu_env *env,
1118 struct dt_object *dt,
1119 struct lu_attr *attr,
1120 struct lustre_capa *capa)
1122 struct osd_object *obj = osd_dt_obj(dt);
1124 LASSERT(dt_object_exists(dt));
1125 LINVRNT(osd_invariant(obj));
1127 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1130 spin_lock(&obj->oo_guard);
1131 osd_inode_getattr(env, obj->oo_inode, attr);
1132 spin_unlock(&obj->oo_guard);
/* Set inode attributes under oo_guard, then mark the inode dirty. */
1136 static int osd_attr_set(const struct lu_env *env,
1137 struct dt_object *dt,
1138 const struct lu_attr *attr,
1139 struct thandle *handle,
1140 struct lustre_capa *capa)
1142 struct osd_object *obj = osd_dt_obj(dt);
1145 LASSERT(handle != NULL);
1146 LASSERT(dt_object_exists(dt));
/* NOTE(review): sibling methods use LINVRNT() for this check; LASSERT here
 * keeps the invariant compiled into all builds — confirm intentional. */
1147 LASSERT(osd_invariant(obj));
1149 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1152 spin_lock(&obj->oo_guard);
1153 rc = osd_inode_setattr(env, obj->oo_inode, attr);
1154 spin_unlock(&obj->oo_guard);
1157 mark_inode_dirty(obj->oo_inode);
/* Convert @seconds into the per-thread timespec, truncated to the
 * filesystem's time granularity. Returned pointer is per-thread scratch:
 * consume it before the next call with the same env. */
1161 static struct timespec *osd_inode_time(const struct lu_env *env,
1162 struct inode *inode, __u64 seconds)
1164 struct osd_thread_info *oti = osd_oti_get(env);
1165 struct timespec *t = &oti->oti_time;
1167 t->tv_sec = seconds;
1169 *t = timespec_trunc(*t, get_sb_time_gran(inode->i_sb));
/* Apply the LA_* valid bits of @attr to the in-core inode; uid/gid
 * changes go through DQUOT_TRANSFER under the user's credentials. */
1173 static int osd_inode_setattr(const struct lu_env *env,
1174 struct inode *inode, const struct lu_attr *attr)
1178 bits = attr->la_valid;
1180 LASSERT(!(bits & LA_TYPE)); /* Huh? You want too much. */
1182 #ifdef HAVE_QUOTA_SUPPORT
1183 if ((bits & LA_UID && attr->la_uid != inode->i_uid) ||
1184 (bits & LA_GID && attr->la_gid != inode->i_gid)) {
1185 struct osd_ctxt *save = &osd_oti_get(env)->oti_ctxt;
1189 iattr.ia_valid = bits & (LA_UID | LA_GID);
1190 iattr.ia_uid = attr->la_uid;
1191 iattr.ia_gid = attr->la_gid;
1192 osd_push_ctxt(env, save);
1193 rc = DQUOT_TRANSFER(inode, &iattr) ? -EDQUOT : 0;
1200 if (bits & LA_ATIME)
1201 inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
1202 if (bits & LA_CTIME)
1203 inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
1204 if (bits & LA_MTIME)
1205 inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
1206 if (bits & LA_SIZE) {
1207 LDISKFS_I(inode)->i_disksize = attr->la_size;
1208 i_size_write(inode, attr->la_size);
1212 * OSD should not change "i_blocks" which is used by quota.
1213 * "i_blocks" should be changed by ldiskfs only.
1214 * Disable this assignment until SOM to fix some EA field. */
1215 if (bits & LA_BLOCKS)
1216 inode->i_blocks = attr->la_blocks;
1219 inode->i_mode = (inode->i_mode & S_IFMT) |
1220 (attr->la_mode & ~S_IFMT);
1222 inode->i_uid = attr->la_uid;
1224 inode->i_gid = attr->la_gid;
1225 if (bits & LA_NLINK)
1226 inode->i_nlink = attr->la_nlink;
1228 inode->i_rdev = attr->la_rdev;
1230 if (bits & LA_FLAGS) {
1231 struct ldiskfs_inode_info *li = LDISKFS_I(inode);
1233 li->i_flags = (li->i_flags & ~LDISKFS_FL_USER_MODIFIABLE) |
1234 (attr->la_flags & LDISKFS_FL_USER_MODIFIABLE);
1242 * XXX temporary solution.
1245 static int osd_create_pre(struct osd_thread_info *info, struct osd_object *obj,
1246 struct lu_attr *attr, struct thandle *th)
/* Post-create hook: finish object initialization once the inode exists. */
1251 static int osd_create_post(struct osd_thread_info *info, struct osd_object *obj,
1252 struct lu_attr *attr, struct thandle *th)
1254 LASSERT(obj->oo_inode != NULL);
1256 osd_object_init0(obj);
1260 extern struct inode *ldiskfs_create_inode(handle_t *handle,
1261 struct inode * dir, int mode);
/* Create the backing ldiskfs inode for @obj under the allocation-hint
 * parent (or the device object area) inside the caller's transaction. */
1263 static int osd_mkfile(struct osd_thread_info *info, struct osd_object *obj,
1265 struct dt_allocation_hint *hint,
1269 struct osd_device *osd = osd_obj2dev(obj);
1270 struct osd_thandle *oth;
1271 struct inode *parent;
1272 struct inode *inode;
1273 #ifdef HAVE_QUOTA_SUPPORT
1274 struct osd_ctxt *save = &info->oti_ctxt;
1277 LINVRNT(osd_invariant(obj));
1278 LASSERT(obj->oo_inode == NULL);
1279 LASSERT(osd->od_obj_area != NULL);
1281 oth = container_of(th, struct osd_thandle, ot_super);
1282 LASSERT(oth->ot_handle->h_transaction != NULL);
1284 if (hint && hint->dah_parent)
1285 parent = osd_dt_obj(hint->dah_parent)->oo_inode;
1287 parent = osd->od_obj_area->d_inode;
1288 LASSERT(parent->i_op != NULL);
1290 #ifdef HAVE_QUOTA_SUPPORT
1291 osd_push_ctxt(info->oti_env, save);
1293 inode = ldiskfs_create_inode(oth->ot_handle, parent, mode);
1294 #ifdef HAVE_QUOTA_SUPPORT
1297 if (!IS_ERR(inode)) {
1298 obj->oo_inode = inode;
1301 result = PTR_ERR(inode);
1302 LINVRNT(osd_invariant(obj));
1307 extern int iam_lvar_create(struct inode *obj, int keysize, int ptrsize,
1308 int recsize, handle_t *handle);
/* Create a directory object: a regular inode plus an iam "lvar" container
 * for the name -> fid index. */
1314 static int osd_mkdir(struct osd_thread_info *info, struct osd_object *obj,
1315 struct lu_attr *attr,
1316 struct dt_allocation_hint *hint,
1320 struct osd_thandle *oth;
1322 LASSERT(S_ISDIR(attr->la_mode));
1324 oth = container_of(th, struct osd_thandle, ot_super);
1325 LASSERT(oth->ot_handle->h_transaction != NULL);
1326 result = osd_mkfile(info, obj, (attr->la_mode &
1327 (S_IFMT | S_IRWXUGO | S_ISVTX)), hint, th);
1329 LASSERT(obj->oo_inode != NULL);
1331 * XXX uh-oh... call low-level iam function directly.
1333 result = iam_lvar_create(obj->oo_inode, OSD_NAME_LEN, 4,
1334 sizeof (struct lu_fid_pack),
/* Create a regular file. */
1340 static int osd_mkreg(struct osd_thread_info *info, struct osd_object *obj,
1341 struct lu_attr *attr,
1342 struct dt_allocation_hint *hint,
1345 LASSERT(S_ISREG(attr->la_mode));
1346 return osd_mkfile(info, obj, (attr->la_mode &
1347 (S_IFMT | S_IRWXUGO | S_ISVTX)), hint, th);
/* Create a symlink object. */
1350 static int osd_mksym(struct osd_thread_info *info, struct osd_object *obj,
1351 struct lu_attr *attr,
1352 struct dt_allocation_hint *hint,
1355 LASSERT(S_ISLNK(attr->la_mode));
1356 return osd_mkfile(info, obj, (attr->la_mode &
1357 (S_IFMT | S_IRWXUGO | S_ISVTX)), hint, th);
/* Create a special file (char/block device, fifo or socket). */
1360 static int osd_mknod(struct osd_thread_info *info, struct osd_object *obj,
1361 struct lu_attr *attr,
1362 struct dt_allocation_hint *hint,
1366 struct osd_device *osd = osd_obj2dev(obj);
1368 umode_t mode = attr->la_mode & (S_IFMT | S_IRWXUGO | S_ISVTX);
1370 LINVRNT(osd_invariant(obj));
1371 LASSERT(obj->oo_inode == NULL);
1372 LASSERT(osd->od_obj_area != NULL);
1373 LASSERT(S_ISCHR(mode) || S_ISBLK(mode) ||
1374 S_ISFIFO(mode) || S_ISSOCK(mode));
1376 dir = osd->od_obj_area->d_inode;
1377 LASSERT(dir->i_op != NULL);
1379 result = osd_mkfile(info, obj, mode, hint, th);
1381 LASSERT(obj->oo_inode != NULL);
1382 init_special_inode(obj->oo_inode, mode, attr->la_rdev);
1384 LINVRNT(osd_invariant(obj));
/* Dispatch table type: per-file-type object creation routine. */
1388 typedef int (*osd_obj_type_f)(struct osd_thread_info *, struct osd_object *,
1390 struct dt_allocation_hint *hint,
/* Map S_IFMT bits of @mode to the matching creation routine. */
1393 static osd_obj_type_f osd_create_type_f(__u32 mode)
1395 osd_obj_type_f result;
/* Initialize an allocation hint with parent and child mode. */
1421 static void osd_ah_init(const struct lu_env *env, struct dt_allocation_hint *ah,
1422 struct dt_object *parent, umode_t child_mode)
1426 memset(ah, 0, sizeof(*ah));
1427 ah->dah_parent = parent;
1428 ah->dah_mode = child_mode;
1433 * Concurrency: @dt is write locked.
/*
 * dt_object_operations::do_create implementation: run the pre hook,
 * dispatch to the mode-specific constructor, run the post hook, then
 * insert the fid -> (ino, generation) mapping into the object index.
 */
1435 static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
1436 struct lu_attr *attr,
1437 struct dt_allocation_hint *hint,
1440 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1441 struct osd_object *obj = osd_dt_obj(dt);
1442 struct osd_device *osd = osd_obj2dev(obj);
1443 struct osd_thread_info *info = osd_oti_get(env);
/* Must be called on a not-yet-existing, write-locked object in a txn. */
1448 LINVRNT(osd_invariant(obj));
1449 LASSERT(!dt_object_exists(dt));
1450 LASSERT(osd_write_locked(env, obj));
1451 LASSERT(th != NULL);
1454 * XXX missing: Quote handling.
1457 result = osd_create_pre(info, obj, attr, th);
/* Pick constructor by file type bits of the requested mode. */
1459 result = osd_create_type_f(attr->la_mode & S_IFMT)(info, obj,
1462 result = osd_create_post(info, obj, attr, th);
1465 struct osd_inode_id *id = &info->oti_id;
1466 struct md_ucred *uc = md_ucred(env);
1468 LASSERT(obj->oo_inode != NULL);
1469 LASSERT(uc != NULL);
/* Record the inode identity for the OI (object index) entry. */
1471 id->oii_ino = obj->oo_inode->i_ino;
1472 id->oii_gen = obj->oo_inode->i_generation;
1474 result = osd_oi_insert(info, &osd->od_oi, fid, id, th,
1475 uc->mu_cap & CFS_CAP_SYS_RESOURCE_MASK);
/* Success implies the object is now visible as existing. */
1478 LASSERT(ergo(result == 0, dt_object_exists(dt)));
1479 LINVRNT(osd_invariant(obj));
1484 * Concurrency: @dt is write locked.
/*
 * Bump the inode link count under oo_guard and mark the inode dirty.
 * NOTE(review): the actual i_nlink increment line is elided from this
 * extract; only the surrounding lock/assert/dirty lines are visible.
 */
1486 static void osd_object_ref_add(const struct lu_env *env,
1487 struct dt_object *dt,
1490 struct osd_object *obj = osd_dt_obj(dt);
1491 struct inode *inode = obj->oo_inode;
1493 LINVRNT(osd_invariant(obj));
1494 LASSERT(dt_object_exists(dt));
1495 LASSERT(osd_write_locked(env, obj));
1496 LASSERT(th != NULL);
/* oo_guard serializes link-count changes against concurrent readers. */
1498 spin_lock(&obj->oo_guard);
1499 LASSERT(inode->i_nlink < LDISKFS_LINK_MAX);
1501 spin_unlock(&obj->oo_guard);
1502 mark_inode_dirty(inode);
1503 LINVRNT(osd_invariant(obj));
1507 * Concurrency: @dt is write locked.
/*
 * Drop one inode link under oo_guard and mark the inode dirty.
 * NOTE(review): the i_nlink decrement line is likewise elided here.
 */
1509 static void osd_object_ref_del(const struct lu_env *env,
1510 struct dt_object *dt,
1513 struct osd_object *obj = osd_dt_obj(dt);
1514 struct inode *inode = obj->oo_inode;
1516 LINVRNT(osd_invariant(obj));
1517 LASSERT(dt_object_exists(dt));
1518 LASSERT(osd_write_locked(env, obj));
1519 LASSERT(th != NULL);
1521 spin_lock(&obj->oo_guard);
/* Must never underflow the link count. */
1522 LASSERT(inode->i_nlink > 0);
1524 spin_unlock(&obj->oo_guard);
1525 mark_inode_dirty(inode);
1526 LINVRNT(osd_invariant(obj));
1530 * Concurrency: @dt is read locked.
/*
 * Read an extended attribute by delegating to the backing inode's
 * i_op->getxattr, using the per-thread scratch dentry as the carrier.
 */
1532 static int osd_xattr_get(const struct lu_env *env,
1533 struct dt_object *dt,
1536 struct lustre_capa *capa)
1538 struct osd_object *obj = osd_dt_obj(dt);
1539 struct inode *inode = obj->oo_inode;
1540 struct osd_thread_info *info = osd_oti_get(env);
1541 struct dentry *dentry = &info->oti_dentry;
1543 LASSERT(dt_object_exists(dt));
1544 LASSERT(inode->i_op != NULL && inode->i_op->getxattr != NULL);
1545 LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
/* Capability check: caller must be authorized for metadata reads. */
1547 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
/* Point the scratch dentry at our inode; getxattr only needs d_inode. */
1550 dentry->d_inode = inode;
1551 return inode->i_op->getxattr(dentry, name, buf->lb_buf, buf->lb_len);
1555 * Concurrency: @dt is write locked.
/*
 * Set an extended attribute via i_op->setxattr, translating LU_XATTR_*
 * flags to the kernel's XATTR_* flags, and restoring ctime afterwards
 * so server-side time does not leak into the inode.
 */
1557 static int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
1558 const struct lu_buf *buf, const char *name, int fl,
1559 struct thandle *handle, struct lustre_capa *capa)
1561 struct osd_object *obj = osd_dt_obj(dt);
1562 struct inode *inode = obj->oo_inode;
1563 struct osd_thread_info *info = osd_oti_get(env);
1564 struct dentry *dentry = &info->oti_dentry;
1565 struct timespec *t = &info->oti_time;
1566 int fs_flags = 0, rc;
1568 LASSERT(dt_object_exists(dt));
1569 LASSERT(inode->i_op != NULL && inode->i_op->setxattr != NULL);
1570 LASSERT(osd_write_locked(env, obj));
1571 LASSERT(handle != NULL);
1573 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
/* Map Lustre replace/create semantics onto VFS xattr flags. */
1576 if (fl & LU_XATTR_REPLACE)
1577 fs_flags |= XATTR_REPLACE;
1579 if (fl & LU_XATTR_CREATE)
1580 fs_flags |= XATTR_CREATE;
1582 dentry->d_inode = inode;
/* Save ctime: setxattr will bump it with local time, which we undo. */
1583 *t = inode->i_ctime;
1584 rc = inode->i_op->setxattr(dentry, name,
1585 buf->lb_buf, buf->lb_len, fs_flags);
1586 if (likely(rc == 0)) {
1587 /* ctime should not be updated with server-side time. */
1588 spin_lock(&obj->oo_guard);
1589 inode->i_ctime = *t;
1590 spin_unlock(&obj->oo_guard);
1591 mark_inode_dirty(inode);
1597 * Concurrency: @dt is read locked.
/* List all extended attribute names via i_op->listxattr. */
1599 static int osd_xattr_list(const struct lu_env *env,
1600 struct dt_object *dt,
1602 struct lustre_capa *capa)
1604 struct osd_object *obj = osd_dt_obj(dt);
1605 struct inode *inode = obj->oo_inode;
1606 struct osd_thread_info *info = osd_oti_get(env);
1607 struct dentry *dentry = &info->oti_dentry;
1609 LASSERT(dt_object_exists(dt));
1610 LASSERT(inode->i_op != NULL && inode->i_op->listxattr != NULL);
1611 LASSERT(osd_read_locked(env, obj) || osd_write_locked(env, obj));
1613 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
1616 dentry->d_inode = inode;
1617 return inode->i_op->listxattr(dentry, buf->lb_buf, buf->lb_len);
1621 * Concurrency: @dt is write locked.
/*
 * Remove an extended attribute via i_op->removexattr, preserving ctime
 * on success for the same server-side-time reason as osd_xattr_set().
 */
1623 static int osd_xattr_del(const struct lu_env *env,
1624 struct dt_object *dt,
1626 struct thandle *handle,
1627 struct lustre_capa *capa)
1629 struct osd_object *obj = osd_dt_obj(dt);
1630 struct inode *inode = obj->oo_inode;
1631 struct osd_thread_info *info = osd_oti_get(env);
1632 struct dentry *dentry = &info->oti_dentry;
1633 struct timespec *t = &info->oti_time;
1636 LASSERT(dt_object_exists(dt));
1637 LASSERT(inode->i_op != NULL && inode->i_op->removexattr != NULL);
1638 LASSERT(osd_write_locked(env, obj));
1639 LASSERT(handle != NULL);
1641 if (osd_object_auth(env, dt, capa, CAPA_OPC_META_WRITE))
1644 dentry->d_inode = inode;
1645 *t = inode->i_ctime;
1646 rc = inode->i_op->removexattr(dentry, name);
1647 if (likely(rc == 0)) {
1648 /* ctime should not be updated with server-side time. */
1649 spin_lock(&obj->oo_guard);
1650 inode->i_ctime = *t;
1651 spin_unlock(&obj->oo_guard);
1652 mark_inode_dirty(inode);
/*
 * Issue (or renew) a capability for @dt.  Builds a candidate capa in the
 * per-thread scratch area, fills owner identity according to the
 * client's authentication mode, reuses a cached unexpired capa when
 * possible, otherwise signs a fresh one with the current capa key.
 */
1657 static struct obd_capa *osd_capa_get(const struct lu_env *env,
1658 struct dt_object *dt,
1659 struct lustre_capa *old,
1662 struct osd_thread_info *info = osd_oti_get(env);
1663 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1664 struct osd_object *obj = osd_dt_obj(dt);
1665 struct osd_device *dev = osd_obj2dev(obj);
1666 struct lustre_capa_key *key = &info->oti_capa_key;
1667 struct lustre_capa *capa = &info->oti_capa;
1668 struct obd_capa *oc;
1669 struct md_capainfo *ci;
/* Capabilities disabled on this device: nothing to hand out. */
1673 if (!dev->od_fl_capa)
1674 RETURN(ERR_PTR(-ENOENT));
1676 LASSERT(dt_object_exists(dt));
1677 LINVRNT(osd_invariant(obj));
1679 /* renewal sanity check */
1680 if (old && osd_object_auth(env, dt, old, opc))
1681 RETURN(ERR_PTR(-EACCES));
1683 ci = md_capainfo(env);
1685 RETURN(ERR_PTR(-ENOENT));
/* Fill uid/gid per the client's id-authentication mode. */
1687 switch (ci->mc_auth) {
1691 capa->lc_uid = obj->oo_inode->i_uid;
1692 capa->lc_gid = obj->oo_inode->i_gid;
1693 capa->lc_flags = LC_ID_PLAIN;
1695 case LC_ID_CONVERT: {
/* Encrypt (uid, gid) with random padding so ids aren't sent in clear. */
1698 s[0] = obj->oo_inode->i_uid;
1699 get_random_bytes(&(s[1]), sizeof(__u32));
1700 s[2] = obj->oo_inode->i_gid;
1701 get_random_bytes(&(s[3]), sizeof(__u32));
1702 rc = capa_encrypt_id(d, s, key->lk_key, CAPA_HMAC_KEY_MAX_LEN);
1704 RETURN(ERR_PTR(rc));
1706 capa->lc_uid = ((__u64)d[1] << 32) | d[0];
1707 capa->lc_gid = ((__u64)d[3] << 32) | d[2];
1708 capa->lc_flags = LC_ID_CONVERT;
1712 RETURN(ERR_PTR(-EINVAL));
1715 capa->lc_fid = *fid;
/* HMAC algorithm id is stored in the top byte of lc_flags. */
1717 capa->lc_flags |= dev->od_capa_alg << 24;
1718 capa->lc_timeout = dev->od_capa_timeout;
1719 capa->lc_expiry = 0;
/* Fast path: reuse a cached capability that is still valid. */
1721 oc = capa_lookup(dev->od_capa_hash, capa, 1);
1723 LASSERT(!capa_is_expired(oc));
/* Slow path: sign a new capa with the newest key under capa_lock. */
1727 spin_lock(&capa_lock);
1728 *key = dev->od_capa_keys[1];
1729 spin_unlock(&capa_lock);
1731 capa->lc_keyid = key->lk_keyid;
1732 capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
1734 rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
1736 DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
1737 RETURN(ERR_PTR(rc));
1740 oc = capa_add(dev->od_capa_hash, capa);
/*
 * Flush an object to disk by faking up a struct file over the inode and
 * calling its f_op->fsync under the inode mutex.  Uses the per-thread
 * scratch dentry/file so no allocation is needed.
 */
1744 static int osd_object_sync(const struct lu_env *env, struct dt_object *dt)
1747 struct osd_object *obj = osd_dt_obj(dt);
1748 struct inode *inode = obj->oo_inode;
1749 struct osd_thread_info *info = osd_oti_get(env);
1750 struct dentry *dentry = &info->oti_dentry;
1751 struct file *file = &info->oti_file;
/* Populate just the fields fsync implementations look at. */
1754 dentry->d_inode = inode;
1755 file->f_dentry = dentry;
1756 file->f_mapping = inode->i_mapping;
1757 file->f_op = inode->i_fop;
1758 LOCK_INODE_MUTEX(inode);
1759 rc = file->f_op->fsync(file, dentry, 0);
1760 UNLOCK_INODE_MUTEX(inode);
/* Method table wiring osd implementations into dt_object_operations. */
1764 static const struct dt_object_operations osd_obj_ops = {
1765 .do_read_lock = osd_object_read_lock,
1766 .do_write_lock = osd_object_write_lock,
1767 .do_read_unlock = osd_object_read_unlock,
1768 .do_write_unlock = osd_object_write_unlock,
1769 .do_attr_get = osd_attr_get,
1770 .do_attr_set = osd_attr_set,
1771 .do_ah_init = osd_ah_init,
1772 .do_create = osd_object_create,
1773 .do_index_try = osd_index_try,
1774 .do_ref_add = osd_object_ref_add,
1775 .do_ref_del = osd_object_ref_del,
1776 .do_xattr_get = osd_xattr_get,
1777 .do_xattr_set = osd_xattr_set,
1778 .do_xattr_del = osd_xattr_del,
1779 .do_xattr_list = osd_xattr_list,
1780 .do_capa_get = osd_capa_get,
1781 .do_object_sync = osd_object_sync,
1789 * XXX: Another layering violation for now.
1791 * We don't want to use ->f_op->read methods, because generic file write
1793 * - serializes on ->i_sem, and
1795 * - does a lot of extra work like balance_dirty_pages(),
1797 * which doesn't work for globally shared files like /last-received.
/* fsfilt helpers that bypass the VFS read/write path (see XXX above). */
1799 int fsfilt_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs);
1800 int fsfilt_ldiskfs_write_handle(struct inode *inode, void *buf, int bufsize,
1801 loff_t *offs, handle_t *handle);
/* Body read: capability-checked, then straight to fsfilt_ldiskfs_read(). */
1803 static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
1804 struct lu_buf *buf, loff_t *pos,
1805 struct lustre_capa *capa)
1807 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1809 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_READ))
1812 return fsfilt_ldiskfs_read(inode, buf->lb_buf, buf->lb_len, pos);
/*
 * Body write within transaction @handle.  Under quota support the
 * CAP_SYS_RESOURCE mask is temporarily toggled around the write and
 * restored afterwards.
 */
1815 static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
1816 const struct lu_buf *buf, loff_t *pos,
1817 struct thandle *handle, struct lustre_capa *capa,
1820 struct inode *inode = osd_dt_obj(dt)->oo_inode;
1821 struct osd_thandle *oh;
1823 #ifdef HAVE_QUOTA_SUPPORT
/* Save caller capabilities so they can be restored after the write. */
1824 cfs_cap_t save = current->cap_effective;
1827 LASSERT(handle != NULL);
1829 if (osd_object_auth(env, dt, capa, CAPA_OPC_BODY_WRITE))
1832 oh = container_of(handle, struct osd_thandle, ot_super);
1833 LASSERT(oh->ot_handle->h_transaction != NULL);
1834 #ifdef HAVE_QUOTA_SUPPORT
1836 current->cap_effective |= CFS_CAP_SYS_RESOURCE_MASK;
1838 current->cap_effective &= ~CFS_CAP_SYS_RESOURCE_MASK;
1840 result = fsfilt_ldiskfs_write_handle(inode, buf->lb_buf, buf->lb_len,
1841 pos, oh->ot_handle);
1842 #ifdef HAVE_QUOTA_SUPPORT
1843 current->cap_effective = save;
/* On success report the full requested length as written. */
1846 result = buf->lb_len;
/* Body method table: plain read/write entry points. */
1850 static const struct dt_body_operations osd_body_ops = {
1851 .dbo_read = osd_read,
1852 .dbo_write = osd_write
/* True iff @obj is the filesystem root inode of the backing device. */
1859 static int osd_object_is_root(const struct osd_object *obj)
1861 return osd_sb(osd_obj2dev(obj))->s_root->d_inode == obj->oo_inode;
/*
 * Check whether the object's existing index is compatible with the
 * requested feature set @feat (key/record sizes, flag constraints).
 * The root directory only matches the generic directory features.
 */
1864 static int osd_index_probe(const struct lu_env *env, struct osd_object *o,
1865 const struct dt_index_features *feat)
1867 struct iam_descr *descr;
1869 if (osd_object_is_root(o))
1870 return feat == &dt_directory_features;
1872 LASSERT(o->oo_dir != NULL);
1874 descr = o->oo_dir->od_container.ic_descr;
/* Directory request: accept htree-compat or fid-pack record layout. */
1875 if (feat == &dt_directory_features)
1876 return descr == &iam_htree_compat_param ||
1877 (descr->id_rec_size == sizeof(struct lu_fid_pack) &&
1879 * XXX check that index looks like directory.
/* Generic index: sizes must fall inside the requested min/max ranges. */
1884 feat->dif_keysize_min <= descr->id_key_size &&
1885 descr->id_key_size <= feat->dif_keysize_max &&
1886 feat->dif_recsize_min <= descr->id_rec_size &&
1887 descr->id_rec_size <= feat->dif_recsize_max &&
1888 !(feat->dif_flags & (DT_IND_VARKEY |
1889 DT_IND_VARREC | DT_IND_NONUNQ)) &&
1890 ergo(feat->dif_flags & DT_IND_UPDATE,
1891 1 /* XXX check that object (and file system) is
/*
 * Bind the object's inode to an IAM container and, on success, install
 * the index operations vector; tear the container down on failure.
 */
1895 static int osd_container_init(const struct lu_env *env,
1896 struct osd_object *obj,
1897 struct osd_directory *dir)
1900 struct iam_container *bag;
1902 bag = &dir->od_container;
1903 result = iam_container_init(bag, &dir->od_descr, obj->oo_inode);
1905 result = iam_container_setup(bag);
1907 obj->oo_dt.do_index_ops = &osd_index_ops;
1909 iam_container_fini(bag);
1915 * Concurrency: no external locking is necessary.
/*
 * dt_object_operations::do_index_try: lazily set up index state for the
 * object.  Root gets compat ops; otherwise allocate osd_directory data
 * (racing allocators resolved under oo_guard, init serialized on
 * od_sem), then validate against @feat via osd_index_probe().
 */
1917 static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
1918 const struct dt_index_features *feat)
1921 struct osd_object *obj = osd_dt_obj(dt);
1923 LINVRNT(osd_invariant(obj));
1924 LASSERT(dt_object_exists(dt));
1926 if (osd_object_is_root(obj)) {
1927 dt->do_index_ops = &osd_index_compat_ops;
1929 } else if (!osd_has_index(obj)) {
1930 struct osd_directory *dir;
1934 sema_init(&dir->od_sem, 1);
/* Publish oo_dir exactly once; losers of the race free their copy. */
1936 spin_lock(&obj->oo_guard);
1937 if (obj->oo_dir == NULL)
1941 * Concurrent thread allocated container data.
1944 spin_unlock(&obj->oo_guard);
1946 * Now, that we have container data, serialize its
1949 down(&obj->oo_dir->od_sem);
1951 * recheck under lock.
1953 if (!osd_has_index(obj))
1954 result = osd_container_init(env, obj, dir);
1957 up(&obj->oo_dir->od_sem);
1964 if (!osd_index_probe(env, obj, feat))
1967 LINVRNT(osd_invariant(obj));
/*
 * Delete one record from the object's IAM index inside transaction
 * @handle.  Uses a per-env iam_path_descr obtained from osd_ipd_get().
 */
1972 static int osd_index_delete(const struct lu_env *env, struct dt_object *dt,
1973 const struct dt_key *key, struct thandle *handle,
1974 struct lustre_capa *capa)
1976 struct osd_object *obj = osd_dt_obj(dt);
1977 struct osd_thandle *oh;
1978 struct iam_path_descr *ipd;
1979 struct iam_container *bag = &obj->oo_dir->od_container;
1984 LINVRNT(osd_invariant(obj));
1985 LASSERT(dt_object_exists(dt));
1986 LASSERT(bag->ic_object == obj->oo_inode);
1987 LASSERT(handle != NULL);
1989 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
1992 ipd = osd_ipd_get(env, bag);
1993 if (unlikely(ipd == NULL))
1996 oh = container_of0(handle, struct osd_thandle, ot_super);
1997 LASSERT(oh->ot_handle != NULL);
1998 LASSERT(oh->ot_handle->h_transaction != NULL);
2000 rc = iam_delete(oh->ot_handle, bag, (const struct iam_key *)key, ipd);
2001 osd_ipd_put(env, bag, ipd);
2002 LINVRNT(osd_invariant(obj));
/* Look up @key in the IAM index and copy the record into @rec. */
2006 static int osd_index_lookup(const struct lu_env *env, struct dt_object *dt,
2007 struct dt_rec *rec, const struct dt_key *key,
2008 struct lustre_capa *capa)
2010 struct osd_object *obj = osd_dt_obj(dt);
2011 struct iam_path_descr *ipd;
2012 struct iam_container *bag = &obj->oo_dir->od_container;
2017 LINVRNT(osd_invariant(obj));
2018 LASSERT(dt_object_exists(dt));
2019 LASSERT(bag->ic_object == obj->oo_inode);
2021 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
2024 ipd = osd_ipd_get(env, bag);
2025 if (unlikely(ipd == NULL))
2028 rc = iam_lookup(bag, (const struct iam_key *)key,
2029 (struct iam_rec *)rec, ipd);
2030 osd_ipd_put(env, bag, ipd);
2031 LINVRNT(osd_invariant(obj));
/*
 * Insert (@key, @rec) into the IAM index inside transaction @th.  Under
 * quota support the CAP_SYS_RESOURCE mask is toggled around the insert
 * and restored afterwards (same pattern as osd_write()).
 */
2036 static int osd_index_insert(const struct lu_env *env, struct dt_object *dt,
2037 const struct dt_rec *rec, const struct dt_key *key,
2038 struct thandle *th, struct lustre_capa *capa,
2041 struct osd_object *obj = osd_dt_obj(dt);
2042 struct iam_path_descr *ipd;
2043 struct osd_thandle *oh;
2044 struct iam_container *bag = &obj->oo_dir->od_container;
2045 #ifdef HAVE_QUOTA_SUPPORT
2046 cfs_cap_t save = current->cap_effective;
2052 LINVRNT(osd_invariant(obj));
2053 LASSERT(dt_object_exists(dt));
2054 LASSERT(bag->ic_object == obj->oo_inode);
2055 LASSERT(th != NULL);
2057 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
2060 ipd = osd_ipd_get(env, bag);
2061 if (unlikely(ipd == NULL))
2064 oh = container_of0(th, struct osd_thandle, ot_super);
2065 LASSERT(oh->ot_handle != NULL);
2066 LASSERT(oh->ot_handle->h_transaction != NULL);
2067 #ifdef HAVE_QUOTA_SUPPORT
2069 current->cap_effective |= CFS_CAP_SYS_RESOURCE_MASK;
2071 current->cap_effective &= ~CFS_CAP_SYS_RESOURCE_MASK;
2073 rc = iam_insert(oh->ot_handle, bag, (const struct iam_key *)key,
2074 (struct iam_rec *)rec, ipd);
2075 #ifdef HAVE_QUOTA_SUPPORT
2076 current->cap_effective = save;
2078 osd_ipd_put(env, bag, ipd);
2079 LINVRNT(osd_invariant(obj));
2084 * Iterator operations.
/* Iterator state: owning object, its path descriptor, and the IAM cursor. */
2087 struct osd_object *oi_obj;
2088 struct iam_path_descr *oi_ipd;
2089 struct iam_iterator oi_it;
/*
 * Create an index iterator over @dt.  Capability-checked; acquires an
 * iam_path_descr from the env scratch area, so the iterator must stay
 * within a single thread (see XXX below).
 */
2092 static struct dt_it *osd_it_init(const struct lu_env *env,
2093 struct dt_object *dt, int writable,
2094 struct lustre_capa *capa)
2097 struct osd_object *obj = osd_dt_obj(dt);
2098 struct lu_object *lo = &dt->do_lu;
2099 struct iam_path_descr *ipd;
2100 struct iam_container *bag = &obj->oo_dir->od_container;
2103 LASSERT(lu_object_exists(lo));
2105 if (osd_object_auth(env, dt, capa, writable ? CAPA_OPC_BODY_WRITE :
2106 CAPA_OPC_BODY_READ))
2107 return ERR_PTR(-EACCES);
/* Writable iterators additionally request IAM_IT_WRITE. */
2109 flags = writable ? IAM_IT_MOVE|IAM_IT_WRITE : IAM_IT_MOVE;
2113 * XXX: as ipd is allocated within osd_thread_info, assignment
2114 * below implies that iterator usage is confined within single
2117 ipd = osd_ipd_get(env, bag);
2118 if (likely(ipd != NULL)) {
2122 iam_it_init(&it->oi_it, bag, flags, ipd);
2123 return (struct dt_it *)it;
2127 return ERR_PTR(-ENOMEM);
/* Destroy an iterator: release IAM state, ipd, and the object ref. */
2130 static void osd_it_fini(const struct lu_env *env, struct dt_it *di)
2132 struct osd_it *it = (struct osd_it *)di;
2133 struct osd_object *obj = it->oi_obj;
2135 iam_it_fini(&it->oi_it);
2136 osd_ipd_put(env, &obj->oo_dir->od_container, it->oi_ipd);
2137 lu_object_put(env, &obj->oo_dt.do_lu);
/* Position the iterator at @key. */
2141 static int osd_it_get(const struct lu_env *env,
2142 struct dt_it *di, const struct dt_key *key)
2144 struct osd_it *it = (struct osd_it *)di;
2146 return iam_it_get(&it->oi_it, (const struct iam_key *)key);
/* Release the iterator's current position. */
2149 static void osd_it_put(const struct lu_env *env, struct dt_it *di)
2151 struct osd_it *it = (struct osd_it *)di;
2153 iam_it_put(&it->oi_it);
/* Advance the iterator to the next record. */
2156 static int osd_it_next(const struct lu_env *env, struct dt_it *di)
2158 struct osd_it *it = (struct osd_it *)di;
2160 return iam_it_next(&it->oi_it);
/* Delete the record at the current iterator position within @th. */
2163 static int osd_it_del(const struct lu_env *env, struct dt_it *di,
2166 struct osd_it *it = (struct osd_it *)di;
2167 struct osd_thandle *oh;
2169 LASSERT(th != NULL);
2171 oh = container_of0(th, struct osd_thandle, ot_super);
2172 LASSERT(oh->ot_handle != NULL);
2173 LASSERT(oh->ot_handle->h_transaction != NULL);
2175 return iam_it_rec_delete(oh->ot_handle, &it->oi_it);
/* Return the key at the current position. */
2178 static struct dt_key *osd_it_key(const struct lu_env *env,
2179 const struct dt_it *di)
2181 struct osd_it *it = (struct osd_it *)di;
2183 return (struct dt_key *)iam_it_key_get(&it->oi_it);
/* Return the size of the key at the current position. */
2186 static int osd_it_key_size(const struct lu_env *env, const struct dt_it *di)
2188 struct osd_it *it = (struct osd_it *)di;
2190 return iam_it_key_size(&it->oi_it);
/* Return the record at the current position. */
2193 static struct dt_rec *osd_it_rec(const struct lu_env *env,
2194 const struct dt_it *di)
2196 struct osd_it *it = (struct osd_it *)di;
2198 return (struct dt_rec *)iam_it_rec_get(&it->oi_it);
/* Return a cookie (hash) encoding the current position for later reload. */
2201 static __u64 osd_it_store(const struct lu_env *env, const struct dt_it *di)
2203 struct osd_it *it = (struct osd_it *)di;
2205 return iam_it_store(&it->oi_it);
/* Reposition the iterator from a cookie previously returned by store. */
2208 static int osd_it_load(const struct lu_env *env,
2209 const struct dt_it *di, __u64 hash)
2211 struct osd_it *it = (struct osd_it *)di;
2213 return iam_it_load(&it->oi_it, hash);
/* Index + iterator method table for IAM-backed indices. */
2216 static const struct dt_index_operations osd_index_ops = {
2217 .dio_lookup = osd_index_lookup,
2218 .dio_insert = osd_index_insert,
2219 .dio_delete = osd_index_delete,
2221 .init = osd_it_init,
2222 .fini = osd_it_fini,
2226 .next = osd_it_next,
2228 .key_size = osd_it_key_size,
2230 .store = osd_it_store,
/* Compat delete: capability-checked but not implemented (-EOPNOTSUPP). */
2235 static int osd_index_compat_delete(const struct lu_env *env,
2236 struct dt_object *dt,
2237 const struct dt_key *key,
2238 struct thandle *handle,
2239 struct lustre_capa *capa)
2241 struct osd_object *obj = osd_dt_obj(dt);
2243 LASSERT(handle != NULL);
2244 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2248 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_DELETE))
2252 RETURN(-EOPNOTSUPP);
2256 * Compatibility index operations.
/*
 * Build an igif fid from @dentry's inode (ino + generation) and pack it
 * big-endian into @pack for on-disk/wire use.
 */
2260 static void osd_build_pack(const struct lu_env *env, struct osd_device *osd,
2261 struct dentry *dentry, struct lu_fid_pack *pack)
2263 struct inode *inode = dentry->d_inode;
2264 struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
2266 lu_igif_build(fid, inode->i_ino, inode->i_generation);
2267 fid_cpu_to_be(fid, fid);
2268 pack->fp_len = sizeof *fid + 1;
2269 memcpy(pack->fp_area, fid, sizeof *fid);
/*
 * Compat lookup: resolve @key as a file name through the directory
 * inode's i_op->lookup (via temporary dentries) and, on a hit, pack the
 * child's igif fid into @rec.
 */
2272 static int osd_index_compat_lookup(const struct lu_env *env,
2273 struct dt_object *dt,
2274 struct dt_rec *rec, const struct dt_key *key,
2275 struct lustre_capa *capa)
2277 struct osd_object *obj = osd_dt_obj(dt);
2279 struct osd_device *osd = osd_obj2dev(obj);
2280 struct osd_thread_info *info = osd_oti_get(env);
2286 * XXX temporary solution.
2288 struct dentry *dentry;
2289 struct dentry *parent;
2291 LINVRNT(osd_invariant(obj));
2292 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2293 LASSERT(osd_has_index(obj));
2295 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_LOOKUP))
/* The key is a NUL-terminated name; wrap it in the scratch qstr. */
2298 info->oti_str.name = (const char *)key;
2299 info->oti_str.len = strlen((const char *)key);
2301 dir = obj->oo_inode;
2302 LASSERT(dir->i_op != NULL && dir->i_op->lookup != NULL);
2304 parent = d_alloc_root(dir);
2308 dentry = d_alloc(parent, &info->oti_str);
2309 if (dentry != NULL) {
2313 * XXX passing NULL for nameidata should work for
2316 d = dir->i_op->lookup(dir, dentry, NULL);
2319 * normal case, result is in @dentry.
2321 if (dentry->d_inode != NULL) {
2322 osd_build_pack(env, osd, dentry,
2323 (struct lu_fid_pack *)rec);
2328 /* What? Disconnected alias? Ppheeeww... */
2329 CERROR("Aliasing where not expected\n");
2337 LINVRNT(osd_invariant(obj));
/*
 * Link @inode into directory @dir under @name using the directory's
 * i_op->link, wiring up temporary dentries for old and new names.
 * The i_count LASSERTs trace dentry refcounting through the sequence.
 */
2341 static int osd_add_rec(struct osd_thread_info *info, struct osd_device *dev,
2342 struct inode *dir, struct inode *inode, const char *name)
2346 struct dentry *parent;
2350 info->oti_str.name = name;
2351 info->oti_str.len = strlen(name);
2353 LASSERT(atomic_read(&dir->i_count) > 0);
2355 old = d_alloc(dev->od_obj_area, &info->oti_str);
2357 d_instantiate(old, inode);
2359 LASSERT(atomic_read(&dir->i_count) > 0);
2360 parent = d_alloc_root(dir);
2361 if (parent != NULL) {
2363 LASSERT(atomic_read(&dir->i_count) > 1);
2364 new = d_alloc(parent, &info->oti_str);
2365 LASSERT(atomic_read(&dir->i_count) > 1);
2367 LASSERT(atomic_read(&dir->i_count) > 1);
2368 result = dir->i_op->link(old, dir, new);
2369 LASSERT(atomic_read(&dir->i_count) > 1);
2371 LASSERT(atomic_read(&dir->i_count) > 1);
2373 LASSERT(atomic_read(&dir->i_count) > 1);
2375 LASSERT(atomic_read(&dir->i_count) > 0);
2379 LASSERT(atomic_read(&dir->i_count) > 0);
2385 * XXX Temporary stuff.
/*
 * Compat insert: unpack the fid from @rec, find the corresponding
 * object through lu_object_find(), and hard-link its inode into this
 * directory under @key via osd_add_rec().
 */
2387 static int osd_index_compat_insert(const struct lu_env *env,
2388 struct dt_object *dt,
2389 const struct dt_rec *rec,
2390 const struct dt_key *key, struct thandle *th,
2391 struct lustre_capa *capa,
2394 struct osd_object *obj = osd_dt_obj(dt);
2396 const char *name = (const char *)key;
2398 struct lu_device *ludev = dt->do_lu.lo_dev;
2399 struct lu_object *luch;
2401 struct osd_thread_info *info = osd_oti_get(env);
2402 const struct lu_fid_pack *pack = (const struct lu_fid_pack *)rec;
2403 struct lu_fid *fid = &osd_oti_get(env)->oti_fid;
2407 LASSERT(S_ISDIR(obj->oo_inode->i_mode));
2408 LINVRNT(osd_invariant(obj));
2409 LASSERT(th != NULL);
2411 if (osd_object_auth(env, dt, capa, CAPA_OPC_INDEX_INSERT))
2414 result = fid_unpack(pack, fid);
2418 luch = lu_object_find(env, ludev, fid, NULL);
2419 if (!IS_ERR(luch)) {
2420 if (lu_object_exists(luch)) {
2421 struct osd_object *child;
/* Locate the osd slice of the found object stack. */
2423 child = osd_obj(lu_object_locate(luch->lo_header,
2426 result = osd_add_rec(info, osd_obj2dev(obj),
2428 child->oo_inode, name);
2430 CERROR("No osd slice.\n");
2433 LINVRNT(osd_invariant(obj));
2434 LINVRNT(osd_invariant(child));
2439 lu_object_put(env, luch);
2441 result = PTR_ERR(luch);
2442 LINVRNT(osd_invariant(obj));
/* Compat method table: lookup/insert work, delete is a stub. */
2446 static const struct dt_index_operations osd_index_compat_ops = {
2447 .dio_lookup = osd_index_compat_lookup,
2448 .dio_insert = osd_index_compat_insert,
2449 .dio_delete = osd_index_compat_delete
2452 /* type constructor/destructor: osd_type_init, osd_type_fini */
2453 LU_TYPE_INIT_FINI(osd, &osd_key);
/* Per-thread context key: osd_thread_info lives in DT and MD contexts. */
2455 static struct lu_context_key osd_key = {
2456 .lct_tags = LCT_DT_THREAD | LCT_MD_THREAD,
2457 .lct_init = osd_key_init,
2458 .lct_fini = osd_key_fini,
2459 .lct_exit = osd_key_exit
/* Allocate per-thread osd_thread_info and remember its owning lu_env. */
2462 static void *osd_key_init(const struct lu_context *ctx,
2463 struct lu_context_key *key)
2465 struct osd_thread_info *info;
2467 OBD_ALLOC_PTR(info);
2469 info->oti_env = container_of(ctx, struct lu_env, le_ctx);
2471 info = ERR_PTR(-ENOMEM);
2475 /* context key destructor: osd_key_fini */
2476 LU_KEY_FINI(osd, struct osd_thread_info);
/*
 * Context exit hook: verify the thread released all osd locks and
 * closed all transactions before its context is recycled.
 */
2478 static void osd_key_exit(const struct lu_context *ctx,
2479 struct lu_context_key *key, void *data)
2481 struct osd_thread_info *info = data;
2483 LASSERT(info->oti_r_locks == 0);
2484 LASSERT(info->oti_w_locks == 0);
2485 LASSERT(info->oti_txns == 0);
/*
 * Device init: set up the private lu_context used by journal commit
 * hooks and register the device's procfs entries.
 */
2488 static int osd_device_init(const struct lu_env *env, struct lu_device *d,
2489 const char *name, struct lu_device *next)
2492 struct lu_context *ctx;
2494 /* context for commit hooks */
2495 ctx = &osd_dev(d)->od_env_for_commit.le_ctx;
2496 rc = lu_context_init(ctx, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
2498 rc = osd_procfs_init(osd_dev(d), name);
/* Magic cookie marking this context; purpose not visible in this extract. */
2499 ctx->lc_cookie = 0x3;
/* Shutdown: drop the object-area dentry and finalize the object index. */
2504 static int osd_shutdown(const struct lu_env *env, struct osd_device *o)
2506 struct osd_thread_info *info = osd_oti_get(env);
2508 if (o->od_obj_area != NULL) {
2509 dput(o->od_obj_area);
2510 o->od_obj_area = NULL;
2512 osd_oi_fini(info, &o->od_oi);
/*
 * Mount: attach to an already-mounted lustre filesystem by name,
 * initialize the object index, and create the "*OBJ-TEMP*" object-area
 * directory under the fs root.  Fails if already mounted.
 */
2517 static int osd_mount(const struct lu_env *env,
2518 struct osd_device *o, struct lustre_cfg *cfg)
2520 struct lustre_mount_info *lmi;
2521 const char *dev = lustre_cfg_string(cfg, 0);
2522 struct osd_thread_info *info = osd_oti_get(env);
2527 if (o->od_mount != NULL) {
2528 CERROR("Already mounted (%s)\n", dev);
2533 lmi = server_get_mount(dev);
2535 CERROR("Cannot get mount info for %s!\n", dev);
2539 LASSERT(lmi != NULL);
2540 /* save lustre_mount_info in dt_device */
2543 result = osd_oi_init(info, &o->od_oi, &o->od_dt_dev);
2547 d = simple_mkdir(osd_sb(o)->s_root, lmi->lmi_mnt, "*OBJ-TEMP*",
2552 result = PTR_ERR(d);
/* On failure unwind everything set up so far. */
2555 osd_shutdown(env, o);
/*
 * Device fini: flush dcache and sync, tear down procfs, release the
 * server mount reference, and finalize the commit-hook context.
 */
2559 static struct lu_device *osd_device_fini(const struct lu_env *env,
2560 struct lu_device *d)
2565 shrink_dcache_sb(osd_sb(osd_dev(d)));
2566 osd_sync(env, lu2dt_dev(d));
2568 rc = osd_procfs_fini(osd_dev(d));
2570 CERROR("proc fini error %d \n", rc);
2571 RETURN (ERR_PTR(rc));
2574 if (osd_dev(d)->od_mount)
2575 server_put_mount(osd_dev(d)->od_mount->lmi_name,
2576 osd_dev(d)->od_mount->lmi_mnt);
2577 osd_dev(d)->od_mount = NULL;
2579 lu_context_fini(&osd_dev(d)->od_env_for_commit.le_ctx);
/* Direct ldiskfs hook used to reclaim orphan inodes after recovery. */
2648 extern void ldiskfs_orphan_cleanup (struct super_block * sb,
2649 struct ldiskfs_super_block * es);
/* Recovery-complete callback: run ldiskfs orphan cleanup on the device. */
2651 static int osd_recovery_complete(const struct lu_env *env,
2652 struct lu_device *d)
2654 struct osd_device *o = osd_dev(d);
2656 /* TODO: orphans handling */
2657 ldiskfs_orphan_cleanup(osd_sb(o), LDISKFS_SB(osd_sb(o))->s_es);
/*
 * Fetch the inode identified by @id (ino + expected generation) from
 * the backing superblock, rejecting bad and stale (generation-mismatch)
 * inodes.
 */
2661 static struct inode *osd_iget(struct osd_thread_info *info,
2662 struct osd_device *dev,
2663 const struct osd_inode_id *id)
2665 struct inode *inode;
2667 inode = iget(osd_sb(dev), id->oii_ino);
2668 if (inode == NULL) {
2669 CERROR("no inode\n");
2670 inode = ERR_PTR(-EACCES);
2671 } else if (is_bad_inode(inode)) {
2672 CERROR("bad inode\n");
2674 inode = ERR_PTR(-ENOENT);
/* Generation mismatch means the ino was reused: the mapping is stale. */
2675 } else if (inode->i_generation != id->oii_gen) {
2676 CERROR("stale inode\n");
2678 inode = ERR_PTR(-ESTALE);
/*
 * Resolve @fid through the object index to an inode and attach it to
 * @obj.  A fid absent from the OI is not an error: the object simply
 * stays inode-less (a placeholder/locking anchor).
 */
2685 static int osd_fid_lookup(const struct lu_env *env,
2686 struct osd_object *obj, const struct lu_fid *fid)
2688 struct osd_thread_info *info;
2689 struct lu_device *ldev = obj->oo_dt.do_lu.lo_dev;
2690 struct osd_device *dev;
2691 struct osd_inode_id *id;
2693 struct inode *inode;
2696 LINVRNT(osd_invariant(obj));
2697 LASSERT(obj->oo_inode == NULL);
2698 LASSERT(fid_is_sane(fid));
2700 * This assertion checks that osd layer sees only local
2701 * fids. Unfortunately it is somewhat expensive (does a
2702 * cache-lookup). Disabling it for production/acceptance-testing.
2704 LASSERT(1 || fid_is_local(ldev->ld_site, fid));
2708 info = osd_oti_get(env);
2709 dev = osd_dev(ldev);
/* Fault-injection point used by recovery tests. */
2713 if (OBD_FAIL_CHECK(OBD_FAIL_OST_ENOENT))
2716 result = osd_oi_lookup(info, oi, fid, id);
2718 inode = osd_iget(info, dev, id);
2719 if (!IS_ERR(inode)) {
2720 obj->oo_inode = inode;
2721 LASSERT(obj->oo_inode->i_sb == osd_sb(dev));
2725 * If fid wasn't found in oi, inode-less object is
2726 * created, for which lu_object_exists() returns
2727 * false. This is used in a (frequent) case when
2728 * objects are created as locking anchors or
2729 * place holders for objects yet to be created.
2731 result = PTR_ERR(inode);
2732 } else if (result == -ENOENT)
2734 LINVRNT(osd_invariant(obj));
/* Copy every attribute the inode carries into the generic lu_attr. */
2738 static void osd_inode_getattr(const struct lu_env *env,
2739 struct inode *inode, struct lu_attr *attr)
2741 attr->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE |
2742 LA_SIZE | LA_BLOCKS | LA_UID | LA_GID |
2743 LA_FLAGS | LA_NLINK | LA_RDEV | LA_BLKSIZE;
2745 attr->la_atime = LTIME_S(inode->i_atime);
2746 attr->la_mtime = LTIME_S(inode->i_mtime);
2747 attr->la_ctime = LTIME_S(inode->i_ctime);
2748 attr->la_mode = inode->i_mode;
2749 attr->la_size = i_size_read(inode);
2750 attr->la_blocks = inode->i_blocks;
2751 attr->la_uid = inode->i_uid;
2752 attr->la_gid = inode->i_gid;
2753 attr->la_flags = LDISKFS_I(inode)->i_flags;
2754 attr->la_nlink = inode->i_nlink;
2755 attr->la_rdev = inode->i_rdev;
2756 attr->la_blksize = ll_inode_blksize(inode);
2757 attr->la_blkbits = inode->i_blkbits;
2764 static int lu_device_is_osd(const struct lu_device *d)
2766 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &osd_lu_ops);
2769 static struct osd_object *osd_obj(const struct lu_object *o)
2771 LASSERT(lu_device_is_osd(o->lo_dev));
2772 return container_of0(o, struct osd_object, oo_dt.do_lu);
2775 static struct osd_device *osd_dt_dev(const struct dt_device *d)
2777 LASSERT(lu_device_is_osd(&d->dd_lu_dev));
2778 return container_of0(d, struct osd_device, od_dt_dev);
2781 static struct osd_device *osd_dev(const struct lu_device *d)
2783 LASSERT(lu_device_is_osd(d));
2784 return osd_dt_dev(container_of0(d, struct dt_device, dd_lu_dev));
2787 static struct osd_object *osd_dt_obj(const struct dt_object *d)
2789 return osd_obj(&d->do_lu);
2792 static struct osd_device *osd_obj2dev(const struct osd_object *o)
2794 return osd_dev(o->oo_dt.do_lu.lo_dev);
2797 static struct lu_device *osd2lu_dev(struct osd_device *osd)
2799 return &osd->od_dt_dev.dd_lu_dev;
2802 static struct super_block *osd_sb(const struct osd_device *dev)
2804 return dev->od_mount->lmi_mnt->mnt_sb;
2807 static journal_t *osd_journal(const struct osd_device *dev)
2809 return LDISKFS_SB(osd_sb(dev))->s_journal;
2812 static int osd_has_index(const struct osd_object *obj)
2814 return obj->oo_dt.do_index_ops != NULL;
/* lu_object invariant hook: delegate to the osd-level invariant check. */
static int osd_object_invariant(const struct lu_object *l)
{
        struct osd_object *obj = osd_obj(l);

        return osd_invariant(obj);
}
/* lu_object operations vector for osd objects: full object lifecycle
 * (init/delete/release/free) plus debugging hooks (print/invariant). */
2822 static const struct lu_object_operations osd_lu_obj_ops = {
2823 .loo_object_init = osd_object_init,
2824 .loo_object_delete = osd_object_delete,
2825 .loo_object_release = osd_object_release,
2826 .loo_object_free = osd_object_free,
2827 .loo_object_print = osd_object_print,
2828 .loo_object_invariant = osd_object_invariant
/* lu_device operations: object allocation, llog/config processing and
 * recovery-completion notification for the osd layer. */
2831 static const struct lu_device_operations osd_lu_ops = {
2832 .ldo_object_alloc = osd_object_alloc,
2833 .ldo_process_config = osd_process_config,
2834 .ldo_recovery_complete = osd_recovery_complete
/* Device-type operations: type-wide init/fini and start/stop, plus
 * per-device alloc/free and init/fini, paired in setup/teardown order. */
2837 static const struct lu_device_type_operations osd_device_type_ops = {
2838 .ldto_init = osd_type_init,
2839 .ldto_fini = osd_type_fini,
2841 .ldto_start = osd_type_start,
2842 .ldto_stop = osd_type_stop,
2844 .ldto_device_alloc = osd_device_alloc,
2845 .ldto_device_free = osd_device_free,
2847 .ldto_device_init = osd_device_init,
2848 .ldto_device_fini = osd_device_fini
/* lu device type descriptor registered with the class machinery; tagged
 * as a dt (storage) device usable from both MD and DT thread contexts. */
2851 static struct lu_device_type osd_device_type = {
2852 .ldt_tags = LU_DEVICE_DT,
2853 .ldt_name = LUSTRE_OSD_NAME,
2854 .ldt_ops = &osd_device_type_ops,
2855 .ldt_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
2859 * lprocfs legacy support.
/* Minimal obd_ops: only the owner field is needed so the legacy obd
 * machinery can pin this module while the type is registered. */
2861 static struct obd_ops osd_obd_device_ops = {
2862 .o_owner = THIS_MODULE
2865 static int __init osd_mod_init(void)
2867 struct lprocfs_static_vars lvars;
2869 lprocfs_osd_init_vars(&lvars);
2870 return class_register_type(&osd_obd_device_ops, NULL, lvars.module_vars,
2871 LUSTRE_OSD_NAME, &osd_device_type);
2874 static void __exit osd_mod_exit(void)
2876 class_unregister_type(LUSTRE_OSD_NAME);
/* Standard kernel-module metadata; cfs_module() wires osd_mod_init() /
 * osd_mod_exit() in as the module init/exit hooks (version "0.0.2"). */
2879 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2880 MODULE_DESCRIPTION("Lustre Object Storage Device ("LUSTRE_OSD_NAME")");
2881 MODULE_LICENSE("GPL");
2883 cfs_module(osd, "0.0.2", osd_mod_init, osd_mod_exit);