4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #define DEBUG_SUBSYSTEM S_ECHO
39 #include <libcfs/libcfs.h>
41 #include <liblustre.h>
45 #include <obd_support.h>
46 #include <obd_class.h>
47 #include <lustre_debug.h>
48 #include <lprocfs_status.h>
49 #include <cl_object.h>
50 #include <md_object.h>
51 #include <lustre_fid.h>
52 #include <lustre_acl.h>
53 #include <lustre_ioctl.h>
54 #include <lustre_net.h>
56 #include "echo_internal.h"
58 /** \defgroup echo_client Echo Client
63 struct cl_device ed_cl;
64 struct echo_client_obd *ed_ec;
66 struct cl_site ed_site_myself;
67 struct cl_site *ed_site;
68 struct lu_device *ed_next;
71 struct lu_client_seq *ed_cl_seq;
75 struct cl_object eo_cl;
76 struct cl_object_header eo_hdr;
78 struct echo_device *eo_dev;
79 struct list_head eo_obj_chain;
80 struct lov_stripe_md *eo_lsm;
85 struct echo_object_conf {
86 struct cl_object_conf eoc_cl;
87 struct lov_stripe_md **eoc_md;
91 struct cl_page_slice ep_cl;
96 struct cl_lock_slice el_cl;
97 struct list_head el_chain;
98 struct echo_object *el_object;
100 atomic_t el_refcount;
103 static int echo_client_setup(const struct lu_env *env,
104 struct obd_device *obddev,
105 struct lustre_cfg *lcfg);
106 static int echo_client_cleanup(struct obd_device *obddev);
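/*
 * Rough picture of how the structures above fit together: there is one
 * echo_device per obd instance; every echo_object it creates is linked
 * through eo_obj_chain onto the client's ec_objects list; each object in
 * turn owns its pages (echo_page, serialized by a per-page mutex) and its
 * locks (echo_lock, kept on el_chain and refcounted so they can later be
 * cancelled by cookie).
 */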
109 /** \defgroup echo_helpers Helper functions
112 static inline struct echo_device *cl2echo_dev(const struct cl_device *dev)
114 return container_of0(dev, struct echo_device, ed_cl);
117 static inline struct cl_device *echo_dev2cl(struct echo_device *d)
122 static inline struct echo_device *obd2echo_dev(const struct obd_device *obd)
124 return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev));
127 static inline struct cl_object *echo_obj2cl(struct echo_object *eco)
132 static inline struct echo_object *cl2echo_obj(const struct cl_object *o)
134 return container_of(o, struct echo_object, eo_cl);
137 static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s)
139 return container_of(s, struct echo_page, ep_cl);
142 static inline struct echo_lock *cl2echo_lock(const struct cl_lock_slice *s)
144 return container_of(s, struct echo_lock, el_cl);
147 static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl)
149 return ecl->el_cl.cls_lock;
152 static struct lu_context_key echo_thread_key;
153 static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
155 struct echo_thread_info *info;
156 info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
157 LASSERT(info != NULL);
162 struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
164 return container_of(c, struct echo_object_conf, eoc_cl);
167 /** @} echo_helpers */
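/*
 * The cl2echo_*() helpers above all rely on the same idiom: each echo
 * structure embeds its generic counterpart (slice or header), so the
 * generic pointer can be converted back with container_of().  A minimal
 * sketch of the idiom with a hypothetical foo_page layer, for
 * illustration only:
 *
 *	struct foo_page {
 *		struct cl_page_slice fp_cl;	// embedded generic slice
 *		int                  fp_flags;	// layer-private state
 *	};
 *
 *	static inline struct foo_page *cl2foo_page(const struct cl_page_slice *s)
 *	{
 *		return container_of(s, struct foo_page, fp_cl);
 *	}
 */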
169 static struct echo_object *cl_echo_object_find(struct echo_device *d,
170 struct lov_stripe_md **lsm);
171 static int cl_echo_object_put(struct echo_object *eco);
172 static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
173 struct page **pages, int npages, int async);
175 struct echo_thread_info {
176 struct echo_object_conf eti_conf;
177 struct lustre_md eti_md;
179 struct cl_2queue eti_queue;
181 struct cl_lock_descr eti_descr;
182 struct lu_fid eti_fid;
183 struct lu_fid eti_fid2;
184 #ifdef HAVE_SERVER_SUPPORT
185 struct md_op_spec eti_spec;
186 struct lov_mds_md_v3 eti_lmm;
187 struct lov_user_md_v3 eti_lum;
188 struct md_attr eti_ma;
189 struct lu_name eti_lname;
190 /* per-thread values, can be re-used */
194 struct lu_buf eti_buf;
195 char eti_xattr_buf[LUSTRE_POSIX_ACL_MAX_SIZE];
199 /* No session used right now */
200 struct echo_session_info {
204 static struct kmem_cache *echo_lock_kmem;
205 static struct kmem_cache *echo_object_kmem;
206 static struct kmem_cache *echo_thread_kmem;
207 static struct kmem_cache *echo_session_kmem;
208 /* static struct kmem_cache *echo_req_kmem; */
210 static struct lu_kmem_descr echo_caches[] = {
212 .ckd_cache = &echo_lock_kmem,
213 .ckd_name = "echo_lock_kmem",
214 .ckd_size = sizeof (struct echo_lock)
217 .ckd_cache = &echo_object_kmem,
218 .ckd_name = "echo_object_kmem",
219 .ckd_size = sizeof (struct echo_object)
222 .ckd_cache = &echo_thread_kmem,
223 .ckd_name = "echo_thread_kmem",
224 .ckd_size = sizeof (struct echo_thread_info)
227 .ckd_cache = &echo_session_kmem,
228 .ckd_name = "echo_session_kmem",
229 .ckd_size = sizeof (struct echo_session_info)
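/*
 * These lu_kmem_descr entries are meant to be registered and released in
 * one go, presumably via lu_kmem_init(echo_caches) at module setup and
 * lu_kmem_fini(echo_caches) on teardown (the call sites are elsewhere in
 * the obdecho setup path).
 */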
236 /** \defgroup echo_page Page operations
238 * Echo page operations.
242 static int echo_page_own(const struct lu_env *env,
243 const struct cl_page_slice *slice,
244 struct cl_io *io, int nonblock)
246 struct echo_page *ep = cl2echo_page(slice);
249 mutex_lock(&ep->ep_lock);
250 else if (!mutex_trylock(&ep->ep_lock))
255 static void echo_page_disown(const struct lu_env *env,
256 const struct cl_page_slice *slice,
259 struct echo_page *ep = cl2echo_page(slice);
261 LASSERT(mutex_is_locked(&ep->ep_lock));
262 mutex_unlock(&ep->ep_lock);
265 static void echo_page_discard(const struct lu_env *env,
266 const struct cl_page_slice *slice,
267 struct cl_io *unused)
269 cl_page_delete(env, slice->cpl_page);
272 static int echo_page_is_vmlocked(const struct lu_env *env,
273 const struct cl_page_slice *slice)
275 if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
280 static void echo_page_completion(const struct lu_env *env,
281 const struct cl_page_slice *slice,
284 LASSERT(slice->cpl_page->cp_sync_io != NULL);
287 static void echo_page_fini(const struct lu_env *env,
288 struct cl_page_slice *slice)
290 struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
293 atomic_dec(&eco->eo_npages);
294 page_cache_release(slice->cpl_page->cp_vmpage);
298 static int echo_page_prep(const struct lu_env *env,
299 const struct cl_page_slice *slice,
300 struct cl_io *unused)
305 static int echo_page_print(const struct lu_env *env,
306 const struct cl_page_slice *slice,
307 void *cookie, lu_printer_t printer)
309 struct echo_page *ep = cl2echo_page(slice);
311 (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
312 ep, mutex_is_locked(&ep->ep_lock),
313 slice->cpl_page->cp_vmpage);
317 static const struct cl_page_operations echo_page_ops = {
318 .cpo_own = echo_page_own,
319 .cpo_disown = echo_page_disown,
320 .cpo_discard = echo_page_discard,
321 .cpo_fini = echo_page_fini,
322 .cpo_print = echo_page_print,
323 .cpo_is_vmlocked = echo_page_is_vmlocked,
326 .cpo_prep = echo_page_prep,
327 .cpo_completion = echo_page_completion,
330 .cpo_prep = echo_page_prep,
331 .cpo_completion = echo_page_completion,
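/*
 * None of the methods above is called directly by echo code: the cl_page
 * core walks the page's slice list and dispatches into them, so e.g. a
 * generic cl_page_own() ends up in echo_page_own(), which takes the
 * per-page ep_lock on behalf of the caller.
 */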
337 /** \defgroup echo_lock Locking
339 * Echo lock operations.
343 static void echo_lock_fini(const struct lu_env *env,
344 struct cl_lock_slice *slice)
346 struct echo_lock *ecl = cl2echo_lock(slice);
348 LASSERT(list_empty(&ecl->el_chain));
349 OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
352 static void echo_lock_delete(const struct lu_env *env,
353 const struct cl_lock_slice *slice)
355 struct echo_lock *ecl = cl2echo_lock(slice);
357 LASSERT(list_empty(&ecl->el_chain));
360 static int echo_lock_fits_into(const struct lu_env *env,
361 const struct cl_lock_slice *slice,
362 const struct cl_lock_descr *need,
363 const struct cl_io *unused)
368 static struct cl_lock_operations echo_lock_ops = {
369 .clo_fini = echo_lock_fini,
370 .clo_delete = echo_lock_delete,
371 .clo_fits_into = echo_lock_fits_into
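/*
 * Likewise for locks: the cl_lock core calls into this table through the
 * slice added in echo_lock_init(); the echo layer itself only tracks the
 * lock on the client's ec_locks list (el_chain) and returns it to
 * echo_lock_kmem when the last reference goes away.
 */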
376 /** \defgroup echo_cl_ops cl_object operations
378 * Operations for cl_object.
382 static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
383 struct cl_page *page, pgoff_t index)
385 struct echo_page *ep = cl_object_page_slice(obj, page);
386 struct echo_object *eco = cl2echo_obj(obj);
389 page_cache_get(page->cp_vmpage);
390 mutex_init(&ep->ep_lock);
391 cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
392 atomic_inc(&eco->eo_npages);
396 static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
402 static int echo_lock_init(const struct lu_env *env,
403 struct cl_object *obj, struct cl_lock *lock,
404 const struct cl_io *unused)
406 struct echo_lock *el;
409 OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, GFP_NOFS);
411 cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
412 el->el_object = cl2echo_obj(obj);
413 INIT_LIST_HEAD(&el->el_chain);
414 atomic_set(&el->el_refcount, 0);
416 RETURN(el == NULL ? -ENOMEM : 0);
419 static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
420 const struct cl_object_conf *conf)
425 static const struct cl_object_operations echo_cl_obj_ops = {
426 .coo_page_init = echo_page_init,
427 .coo_lock_init = echo_lock_init,
428 .coo_io_init = echo_io_init,
429 .coo_conf_set = echo_conf_set
431 /** @} echo_cl_ops */
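/*
 * Net effect of this table: every page of an echo object gets an
 * echo_page slice (coo_page_init) and every lock an echo_lock slice
 * (coo_lock_init), while IO initialization and configuration changes are
 * essentially no-ops at this layer.
 */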
433 /** \defgroup echo_lu_ops lu_object operations
435 * Operations for the echo lu object.
439 static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
440 const struct lu_object_conf *conf)
442 struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
443 struct echo_client_obd *ec = ed->ed_ec;
444 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
448 struct lu_object *below;
449 struct lu_device *under;
452 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
456 lu_object_add(obj, below);
459 if (!ed->ed_next_ismd) {
460 const struct cl_object_conf *cconf = lu2cl_conf(conf);
461 struct echo_object_conf *econf = cl2echo_conf(cconf);
463 LASSERT(econf->eoc_md);
464 eco->eo_lsm = *econf->eoc_md;
465 /* clear the lsm pointer so that it won't get freed. */
466 *econf->eoc_md = NULL;
472 atomic_set(&eco->eo_npages, 0);
473 cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
475 spin_lock(&ec->ec_lock);
476 list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
477 spin_unlock(&ec->ec_lock);
482 /* taken from osc_unpackmd() */
483 static int echo_alloc_memmd(struct echo_device *ed,
484 struct lov_stripe_md **lsmp)
490 /* If export is lov/osc then use their obd method */
491 if (ed->ed_next != NULL)
492 return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
493 /* OFD has no unpackmd method, do everything here */
494 lsm_size = lov_stripe_md_size(1);
496 LASSERT(*lsmp == NULL);
497 OBD_ALLOC(*lsmp, lsm_size);
501 OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
502 if ((*lsmp)->lsm_oinfo[0] == NULL) {
503 OBD_FREE(*lsmp, lsm_size);
507 loi_init((*lsmp)->lsm_oinfo[0]);
508 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
509 ostid_set_seq_echo(&(*lsmp)->lsm_oi);
514 static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
520 /* If export is lov/osc then use their obd method */
521 if (ed->ed_next != NULL)
522 return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
523 /* OFD has no unpackmd method, do everything here */
524 lsm_size = lov_stripe_md_size(1);
526 LASSERT(*lsmp != NULL);
527 OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
528 OBD_FREE(*lsmp, lsm_size);
533 static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
535 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
536 struct echo_client_obd *ec = eco->eo_dev->ed_ec;
539 LASSERT(atomic_read(&eco->eo_npages) == 0);
541 spin_lock(&ec->ec_lock);
542 list_del_init(&eco->eo_obj_chain);
543 spin_unlock(&ec->ec_lock);
546 lu_object_header_fini(obj->lo_header);
549 echo_free_memmd(eco->eo_dev, &eco->eo_lsm);
550 OBD_SLAB_FREE_PTR(eco, echo_object_kmem);
554 static int echo_object_print(const struct lu_env *env, void *cookie,
555 lu_printer_t p, const struct lu_object *o)
557 struct echo_object *obj = cl2echo_obj(lu2cl(o));
559 return (*p)(env, cookie, "echoclient-object@%p", obj);
562 static const struct lu_object_operations echo_lu_obj_ops = {
563 .loo_object_init = echo_object_init,
564 .loo_object_delete = NULL,
565 .loo_object_release = NULL,
566 .loo_object_free = echo_object_free,
567 .loo_object_print = echo_object_print,
568 .loo_object_invariant = NULL
570 /** @} echo_lu_ops */
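/*
 * Lifecycle summary: echo_object_init() allocates and stacks the
 * lower-layer object obtained from ed_next, records the lsm handed in
 * through the configuration, and links the object on ec_objects;
 * echo_object_free() reverses all of that and drops the lsm through
 * echo_free_memmd().
 */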
572 /** \defgroup echo_lu_dev_ops lu_device operations
574 * Operations for echo lu device.
578 static struct lu_object *echo_object_alloc(const struct lu_env *env,
579 const struct lu_object_header *hdr,
580 struct lu_device *dev)
582 struct echo_object *eco;
583 struct lu_object *obj = NULL;
586 /* we're the top dev. */
587 LASSERT(hdr == NULL);
588 OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, GFP_NOFS);
590 struct cl_object_header *hdr = &eco->eo_hdr;
592 obj = &echo_obj2cl(eco)->co_lu;
593 cl_object_header_init(hdr);
594 hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
596 lu_object_init(obj, &hdr->coh_lu, dev);
597 lu_object_add_top(&hdr->coh_lu, obj);
599 eco->eo_cl.co_ops = &echo_cl_obj_ops;
600 obj->lo_ops = &echo_lu_obj_ops;
605 static struct lu_device_operations echo_device_lu_ops = {
606 .ldo_object_alloc = echo_object_alloc,
609 /** @} echo_lu_dev_ops */
611 static struct cl_device_operations echo_device_cl_ops = {
614 /** \defgroup echo_init Setup and teardown
616 * Init and fini functions for echo client.
620 static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
622 struct cl_site *site = &ed->ed_site_myself;
625 /* initialize site */
626 rc = cl_site_init(site, &ed->ed_cl);
628 CERROR("Cannot initilize site for echo client(%d)\n", rc);
632 rc = lu_site_init_finish(&site->cs_lu);
640 static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
643 if (!ed->ed_next_ismd)
644 cl_site_fini(ed->ed_site);
649 static void *echo_thread_key_init(const struct lu_context *ctx,
650 struct lu_context_key *key)
652 struct echo_thread_info *info;
654 OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, GFP_NOFS);
656 info = ERR_PTR(-ENOMEM);
660 static void echo_thread_key_fini(const struct lu_context *ctx,
661 struct lu_context_key *key, void *data)
663 struct echo_thread_info *info = data;
664 OBD_SLAB_FREE_PTR(info, echo_thread_kmem);
667 static void echo_thread_key_exit(const struct lu_context *ctx,
668 struct lu_context_key *key, void *data)
672 static struct lu_context_key echo_thread_key = {
673 .lct_tags = LCT_CL_THREAD,
674 .lct_init = echo_thread_key_init,
675 .lct_fini = echo_thread_key_fini,
676 .lct_exit = echo_thread_key_exit
679 static void *echo_session_key_init(const struct lu_context *ctx,
680 struct lu_context_key *key)
682 struct echo_session_info *session;
684 OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, GFP_NOFS);
686 session = ERR_PTR(-ENOMEM);
690 static void echo_session_key_fini(const struct lu_context *ctx,
691 struct lu_context_key *key, void *data)
693 struct echo_session_info *session = data;
694 OBD_SLAB_FREE_PTR(session, echo_session_kmem);
697 static void echo_session_key_exit(const struct lu_context *ctx,
698 struct lu_context_key *key, void *data)
702 static struct lu_context_key echo_session_key = {
703 .lct_tags = LCT_SESSION,
704 .lct_init = echo_session_key_init,
705 .lct_fini = echo_session_key_fini,
706 .lct_exit = echo_session_key_exit
709 LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
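/*
 * LU_TYPE_INIT_FINI() is assumed to generate the echo_type_init/fini/
 * start/stop helpers that register and unregister the two context keys;
 * those generated names are what echo_device_type_ops below plugs into
 * the lu device type.
 */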
711 #ifdef HAVE_SERVER_SUPPORT
712 # define ECHO_SEQ_WIDTH 0xffffffff
713 static int echo_fid_init(struct echo_device *ed, char *obd_name,
714 struct seq_server_site *ss)
720 OBD_ALLOC_PTR(ed->ed_cl_seq);
721 if (ed->ed_cl_seq == NULL)
724 OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
726 GOTO(out_free_seq, rc = -ENOMEM);
728 snprintf(prefix, MAX_OBD_NAME + 5, "srv-%s", obd_name);
730 /* Initialize the client-side sequence manager */
731 rc = seq_client_init(ed->ed_cl_seq, NULL,
733 prefix, ss->ss_server_seq);
734 ed->ed_cl_seq->lcs_width = ECHO_SEQ_WIDTH;
735 OBD_FREE(prefix, MAX_OBD_NAME + 5);
737 GOTO(out_free_seq, rc);
742 OBD_FREE_PTR(ed->ed_cl_seq);
743 ed->ed_cl_seq = NULL;
747 static int echo_fid_fini(struct obd_device *obddev)
749 struct echo_device *ed = obd2echo_dev(obddev);
752 if (ed->ed_cl_seq != NULL) {
753 seq_client_fini(ed->ed_cl_seq);
754 OBD_FREE_PTR(ed->ed_cl_seq);
755 ed->ed_cl_seq = NULL;
760 #endif /* HAVE_SERVER_SUPPORT */
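/*
 * The pair above gives the MD echo client its own FID source: a client
 * sequence (ed_cl_seq) backed by the target's server-side sequence, with
 * a very wide range (ECHO_SEQ_WIDTH) so that test runs are unlikely to
 * exhaust it.
 */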
762 static struct lu_device *echo_device_alloc(const struct lu_env *env,
763 struct lu_device_type *t,
764 struct lustre_cfg *cfg)
766 struct lu_device *next;
767 struct echo_device *ed;
768 struct cl_device *cd;
769 struct obd_device *obd = NULL; /* to keep compiler happy */
770 struct obd_device *tgt;
771 const char *tgt_type_name;
778 GOTO(out, rc = -ENOMEM);
782 rc = cl_device_init(cd, t);
786 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
787 cd->cd_ops = &echo_device_cl_ops;
790 obd = class_name2obd(lustre_cfg_string(cfg, 0));
791 LASSERT(obd != NULL);
792 LASSERT(env != NULL);
794 tgt = class_name2obd(lustre_cfg_string(cfg, 1));
796 CERROR("Can not find tgt device %s\n",
797 lustre_cfg_string(cfg, 1));
798 GOTO(out, rc = -ENODEV);
801 next = tgt->obd_lu_dev;
802 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
803 ed->ed_next_ismd = 1;
805 ed->ed_next_ismd = 0;
806 rc = echo_site_init(env, ed);
812 rc = echo_client_setup(env, obd, cfg);
816 ed->ed_ec = &obd->u.echo_client;
819 if (ed->ed_next_ismd) {
820 #ifdef HAVE_SERVER_SUPPORT
821 /* Supposed to connect to some metadata layer */
823 struct lu_device *ld;
827 CERROR("%s is not lu device type!\n",
828 lustre_cfg_string(cfg, 1));
829 GOTO(out, rc = -EINVAL);
832 tgt_type_name = lustre_cfg_string(cfg, 2);
833 if (!tgt_type_name) {
834 CERROR("%s no type name for echo %s setup\n",
835 lustre_cfg_string(cfg, 1),
836 tgt->obd_type->typ_name);
837 GOTO(out, rc = -EINVAL);
842 spin_lock(&ls->ls_ld_lock);
843 list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
844 if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
849 spin_unlock(&ls->ls_ld_lock);
852 CERROR("%s is not lu device type!\n",
853 lustre_cfg_string(cfg, 1));
854 GOTO(out, rc = -EINVAL);
858 /* The MD echo client uses the site of the MDS stack */
859 ed->ed_site_myself.cs_lu = *ls;
860 ed->ed_site = &ed->ed_site_myself;
861 ed->ed_cl.cd_lu_dev.ld_site = &ed->ed_site_myself.cs_lu;
862 rc = echo_fid_init(ed, obd->obd_name, lu_site2seq(ls));
864 CERROR("echo fid init error %d\n", rc);
867 #else /* !HAVE_SERVER_SUPPORT */
868 CERROR("Local operations are NOT supported on client side. "
869 "Only remote operations are supported. Metadata client "
870 "must be run on server side.\n");
871 GOTO(out, rc = -EOPNOTSUPP);
874 /* If the echo client is to be stacked upon an OST device, next is
875 * NULL, since OST is not a CLIO device so far */
876 if (next != NULL && !lu_device_is_cl(next))
879 tgt_type_name = tgt->obd_type->typ_name;
881 LASSERT(next != NULL);
882 if (next->ld_site != NULL)
883 GOTO(out, rc = -EBUSY);
885 next->ld_site = &ed->ed_site->cs_lu;
886 rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
887 next->ld_type->ldt_name,
892 /* Tricky case: we have to determine the obd type, since
893 * CLIO uses different parameters to initialize
894 * objects for lov & osc. */
895 if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0)
896 ed->ed_next_islov = 1;
898 LASSERT(strcmp(tgt_type_name,
899 LUSTRE_OSC_NAME) == 0);
901 LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
905 RETURN(&cd->cd_lu_dev);
910 rc2 = echo_client_cleanup(obd);
912 CERROR("Cleanup obd device %s error(%d)\n",
917 echo_site_fini(env, ed);
919 cl_device_fini(&ed->ed_cl);
929 static int echo_device_init(const struct lu_env *env, struct lu_device *d,
930 const char *name, struct lu_device *next)
936 static struct lu_device *echo_device_fini(const struct lu_env *env,
939 struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
940 struct lu_device *next = ed->ed_next;
942 while (next && !ed->ed_next_ismd)
943 next = next->ld_type->ldt_ops->ldto_device_fini(env, next);
947 static void echo_lock_release(const struct lu_env *env,
948 struct echo_lock *ecl,
951 struct cl_lock *clk = echo_lock2cl(ecl);
955 cl_lock_release(env, clk, "ec enqueue", ecl->el_object);
957 cl_lock_mutex_get(env, clk);
958 cl_lock_cancel(env, clk);
959 cl_lock_delete(env, clk);
960 cl_lock_mutex_put(env, clk);
962 cl_lock_put(env, clk);
965 static struct lu_device *echo_device_free(const struct lu_env *env,
968 struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
969 struct echo_client_obd *ec = ed->ed_ec;
970 struct echo_object *eco;
971 struct lu_device *next = ed->ed_next;
973 CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
976 lu_site_purge(env, &ed->ed_site->cs_lu, -1);
978 /* check if there are objects still alive.
979 * There shouldn't be any, because lu_site_purge() cleans up
980 * all cached objects. If some remain, the echo device is probably
981 * being accessed in parallel.
983 spin_lock(&ec->ec_lock);
984 list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
986 spin_unlock(&ec->ec_lock);
989 lu_site_purge(env, &ed->ed_site->cs_lu, -1);
992 "Waiting for the reference of echo object to be dropped\n");
994 /* Wait for the last reference to be dropped. */
995 spin_lock(&ec->ec_lock);
996 while (!list_empty(&ec->ec_objects)) {
997 spin_unlock(&ec->ec_lock);
998 CERROR("echo_client still has objects at cleanup time, "
999 "wait for 1 second\n");
1000 schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
1001 cfs_time_seconds(1));
1002 lu_site_purge(env, &ed->ed_site->cs_lu, -1);
1003 spin_lock(&ec->ec_lock);
1005 spin_unlock(&ec->ec_lock);
1007 LASSERT(list_empty(&ec->ec_locks));
1009 CDEBUG(D_INFO, "No object exists, exiting...\n");
1011 echo_client_cleanup(d->ld_obd);
1012 #ifdef HAVE_SERVER_SUPPORT
1013 echo_fid_fini(d->ld_obd);
1015 while (next && !ed->ed_next_ismd)
1016 next = next->ld_type->ldt_ops->ldto_device_free(env, next);
1018 LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
1019 echo_site_fini(env, ed);
1020 cl_device_fini(&ed->ed_cl);
1026 static const struct lu_device_type_operations echo_device_type_ops = {
1027 .ldto_init = echo_type_init,
1028 .ldto_fini = echo_type_fini,
1030 .ldto_start = echo_type_start,
1031 .ldto_stop = echo_type_stop,
1033 .ldto_device_alloc = echo_device_alloc,
1034 .ldto_device_free = echo_device_free,
1035 .ldto_device_init = echo_device_init,
1036 .ldto_device_fini = echo_device_fini
1039 static struct lu_device_type echo_device_type = {
1040 .ldt_tags = LU_DEVICE_CL,
1041 .ldt_name = LUSTRE_ECHO_CLIENT_NAME,
1042 .ldt_ops = &echo_device_type_ops,
1043 .ldt_ctx_tags = LCT_CL_THREAD | LCT_MD_THREAD | LCT_DT_THREAD,
1047 /** \defgroup echo_exports Exported operations
1049 * Functions exported to the echo client.
1054 /* Interfaces to echo client obd device */
1055 static struct echo_object *cl_echo_object_find(struct echo_device *d,
1056 struct lov_stripe_md **lsmp)
1059 struct echo_thread_info *info;
1060 struct echo_object_conf *conf;
1061 struct lov_stripe_md *lsm;
1062 struct echo_object *eco;
1063 struct cl_object *obj;
1072 LASSERTF(ostid_id(&lsm->lsm_oi) != 0, DOSTID"\n", POSTID(&lsm->lsm_oi));
1073 LASSERTF(ostid_seq(&lsm->lsm_oi) == FID_SEQ_ECHO, DOSTID"\n",
1074 POSTID(&lsm->lsm_oi));
1076 /* Never return an object if the obd is to be freed. */
1077 if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
1078 RETURN(ERR_PTR(-ENODEV));
1080 env = cl_env_get(&refcheck);
1082 RETURN((void *)env);
1084 info = echo_env_info(env);
1085 conf = &info->eti_conf;
1087 if (!d->ed_next_islov) {
1088 struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
1089 LASSERT(oinfo != NULL);
1090 oinfo->loi_oi = lsm->lsm_oi;
1091 conf->eoc_cl.u.coc_oinfo = oinfo;
1093 struct lustre_md *md;
1095 memset(md, 0, sizeof *md);
1097 conf->eoc_cl.u.coc_md = md;
1100 conf->eoc_md = lsmp;
1102 fid = &info->eti_fid;
1103 rc = ostid_to_fid(fid, &lsm->lsm_oi, 0);
1105 GOTO(out, eco = ERR_PTR(rc));
1107 /* In the function below, .hs_keycmp resolves to
1108 * lu_obj_hop_keycmp() */
1109 /* coverity[overrun-buffer-val] */
1110 obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
1112 GOTO(out, eco = (void*)obj);
1114 eco = cl2echo_obj(obj);
1115 if (eco->eo_deleted) {
1116 cl_object_put(env, obj);
1117 eco = ERR_PTR(-EAGAIN);
1121 cl_env_put(env, &refcheck);
1125 static int cl_echo_object_put(struct echo_object *eco)
1128 struct cl_object *obj = echo_obj2cl(eco);
1132 env = cl_env_get(&refcheck);
1134 RETURN(PTR_ERR(env));
1136 /* an external function to kill an object? */
1137 if (eco->eo_deleted) {
1138 struct lu_object_header *loh = obj->co_lu.lo_header;
1139 LASSERT(&eco->eo_hdr == luh2coh(loh));
1140 set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
1143 cl_object_put(env, obj);
1144 cl_env_put(env, &refcheck);
1148 static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
1149 obd_off start, obd_off end, int mode,
1150 __u64 *cookie, __u32 enqflags)
1153 struct cl_lock *lck;
1154 struct cl_object *obj;
1155 struct cl_lock_descr *descr;
1156 struct echo_thread_info *info;
1160 info = echo_env_info(env);
1162 descr = &info->eti_descr;
1163 obj = echo_obj2cl(eco);
1165 descr->cld_obj = obj;
1166 descr->cld_start = cl_index(obj, start);
1167 descr->cld_end = cl_index(obj, end);
1168 descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
1169 descr->cld_enq_flags = enqflags;
1172 lck = cl_lock_request(env, io, descr, "ec enqueue", eco);
1174 struct echo_client_obd *ec = eco->eo_dev->ed_ec;
1175 struct echo_lock *el;
1177 rc = cl_wait(env, lck);
1179 el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
1180 spin_lock(&ec->ec_lock);
1181 if (list_empty(&el->el_chain)) {
1182 list_add(&el->el_chain, &ec->ec_locks);
1183 el->el_cookie = ++ec->ec_unique;
1185 atomic_inc(&el->el_refcount);
1186 *cookie = el->el_cookie;
1187 spin_unlock(&ec->ec_lock);
1189 cl_lock_release(env, lck, "ec enqueue", current);
1195 static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
1198 struct echo_client_obd *ec = ed->ed_ec;
1199 struct echo_lock *ecl = NULL;
1200 struct list_head *el;
1201 int found = 0, still_used = 0;
1204 LASSERT(ec != NULL);
1205 spin_lock(&ec->ec_lock);
1206 list_for_each(el, &ec->ec_locks) {
1207 ecl = list_entry(el, struct echo_lock, el_chain);
1208 CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
1209 found = (ecl->el_cookie == cookie);
1211 if (atomic_dec_and_test(&ecl->el_refcount))
1212 list_del_init(&ecl->el_chain);
1218 spin_unlock(&ec->ec_lock);
1223 echo_lock_release(env, ecl, still_used);
1227 static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
1228 struct cl_page *page)
1230 struct echo_thread_info *info;
1231 struct cl_2queue *queue;
1233 info = echo_env_info(env);
1234 LASSERT(io == &info->eti_io);
1236 queue = &info->eti_queue;
1237 cl_page_list_add(&queue->c2_qout, page);
1240 static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
1241 struct page **pages, int npages, int async)
1244 struct echo_thread_info *info;
1245 struct cl_object *obj = echo_obj2cl(eco);
1246 struct echo_device *ed = eco->eo_dev;
1247 struct cl_2queue *queue;
1249 struct cl_page *clp;
1250 struct lustre_handle lh = { 0 };
1251 int page_size = cl_page_size(obj);
1257 LASSERT((offset & ~CFS_PAGE_MASK) == 0);
1258 LASSERT(ed->ed_next != NULL);
1259 env = cl_env_get(&refcheck);
1261 RETURN(PTR_ERR(env));
1263 info = echo_env_info(env);
1265 queue = &info->eti_queue;
1267 cl_2queue_init(queue);
1269 io->ci_ignore_layout = 1;
1270 rc = cl_io_init(env, io, CIT_MISC, obj);
1276 rc = cl_echo_enqueue0(env, eco, offset,
1277 offset + npages * PAGE_CACHE_SIZE - 1,
1278 rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
1281 GOTO(error_lock, rc);
1283 for (i = 0; i < npages; i++) {
1285 clp = cl_page_find(env, obj, cl_index(obj, offset),
1286 pages[i], CPT_TRANSIENT);
1291 LASSERT(clp->cp_type == CPT_TRANSIENT);
1293 rc = cl_page_own(env, io, clp);
1295 LASSERT(clp->cp_state == CPS_FREEING);
1296 cl_page_put(env, clp);
1300 cl_2queue_add(queue, clp);
1302 /* drop the reference taken by cl_page_find(), so that the page
1303 * will be freed in cl_2queue_fini(). */
1304 cl_page_put(env, clp);
1305 cl_page_clip(env, clp, 0, page_size);
1307 offset += page_size;
1311 enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE;
1313 async = async && (typ == CRT_WRITE);
1315 rc = cl_io_commit_async(env, io, &queue->c2_qin,
1317 echo_commit_callback);
1319 rc = cl_io_submit_sync(env, io, typ, queue, 0);
1320 CDEBUG(D_INFO, "echo_client %s write returns %d\n",
1321 async ? "async" : "sync", rc);
1324 cl_echo_cancel0(env, ed, lh.cookie);
1327 cl_2queue_discard(env, io, queue);
1328 cl_2queue_disown(env, io, queue);
1329 cl_2queue_fini(env, queue);
1330 cl_io_fini(env, io);
1332 cl_env_put(env, &refcheck);
1335 /** @} echo_exports */
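/*
 * Sketch of the exported IO path: echo_client_kbrw() further down pins a
 * set of kernel pages and calls cl_echo_object_brw(), which takes a
 * covering cl_lock via cl_echo_enqueue0(), owns and queues each cl_page,
 * submits the queue synchronously (or commits it asynchronously for
 * writes), and finally cancels the lock again by cookie.
 */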
1338 static obd_id last_object_id;
1341 echo_copyin_lsm (struct echo_device *ed, struct lov_stripe_md *lsm,
1342 void *ulsm, int ulsm_nob)
1344 struct echo_client_obd *ec = ed->ed_ec;
1347 if (ulsm_nob < sizeof(*lsm))
1350 if (copy_from_user(lsm, ulsm, sizeof(*lsm)))
1353 if (lsm->lsm_stripe_count > ec->ec_nstripes ||
1354 lsm->lsm_magic != LOV_MAGIC ||
1355 (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 ||
1356 ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
1360 for (i = 0; i < lsm->lsm_stripe_count; i++) {
1361 if (copy_from_user(lsm->lsm_oinfo[i],
1362 ((struct lov_stripe_md *)ulsm)-> \
1364 sizeof(lsm->lsm_oinfo[0])))
1370 #ifdef HAVE_SERVER_SUPPORT
1371 static inline void echo_md_build_name(struct lu_name *lname, char *name,
1374 sprintf(name, LPU64, id);
1375 lname->ln_name = name;
1376 lname->ln_namelen = strlen(name);
1379 /* similar to mdt_attr_get_complex */
1380 static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
1383 struct echo_thread_info *info = echo_env_info(env);
1388 LASSERT(ma->ma_lmm_size > 0);
1390 rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LOV);
1394 /* big_lmm may need to be grown */
1395 if (info->eti_big_lmmsize < rc) {
1396 int size = size_roundup_power2(rc);
1398 if (info->eti_big_lmmsize > 0) {
1399 /* free old buffer */
1400 LASSERT(info->eti_big_lmm);
1401 OBD_FREE_LARGE(info->eti_big_lmm,
1402 info->eti_big_lmmsize);
1403 info->eti_big_lmm = NULL;
1404 info->eti_big_lmmsize = 0;
1407 OBD_ALLOC_LARGE(info->eti_big_lmm, size);
1408 if (info->eti_big_lmm == NULL)
1410 info->eti_big_lmmsize = size;
1412 LASSERT(info->eti_big_lmmsize >= rc);
1414 info->eti_buf.lb_buf = info->eti_big_lmm;
1415 info->eti_buf.lb_len = info->eti_big_lmmsize;
1416 rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LOV);
1420 ma->ma_valid |= MA_LOV;
1421 ma->ma_lmm = info->eti_big_lmm;
1422 ma->ma_lmm_size = rc;
1427 static int echo_attr_get_complex(const struct lu_env *env,
1428 struct md_object *next,
1431 struct echo_thread_info *info = echo_env_info(env);
1432 struct lu_buf *buf = &info->eti_buf;
1433 umode_t mode = lu_object_attr(&next->mo_lu);
1434 int need = ma->ma_need;
1441 if (need & MA_INODE) {
1442 ma->ma_need = MA_INODE;
1443 rc = mo_attr_get(env, next, ma);
1446 ma->ma_valid |= MA_INODE;
1449 if (need & MA_LOV) {
1450 if (S_ISREG(mode) || S_ISDIR(mode)) {
1451 LASSERT(ma->ma_lmm_size > 0);
1452 buf->lb_buf = ma->ma_lmm;
1453 buf->lb_len = ma->ma_lmm_size;
1454 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LOV);
1456 ma->ma_lmm_size = rc2;
1457 ma->ma_valid |= MA_LOV;
1458 } else if (rc2 == -ENODATA) {
1460 ma->ma_lmm_size = 0;
1461 } else if (rc2 == -ERANGE) {
1462 rc2 = echo_big_lmm_get(env, next, ma);
1464 GOTO(out, rc = rc2);
1466 GOTO(out, rc = rc2);
1471 #ifdef CONFIG_FS_POSIX_ACL
1472 if (need & MA_ACL_DEF && S_ISDIR(mode)) {
1473 buf->lb_buf = ma->ma_acl;
1474 buf->lb_len = ma->ma_acl_size;
1475 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
1477 ma->ma_acl_size = rc2;
1478 ma->ma_valid |= MA_ACL_DEF;
1479 } else if (rc2 == -ENODATA) {
1481 ma->ma_acl_size = 0;
1483 GOTO(out, rc = rc2);
1489 CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
1490 rc, ma->ma_valid, ma->ma_lmm);
1495 echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
1496 struct md_object *parent, struct lu_fid *fid,
1497 struct lu_name *lname, struct md_op_spec *spec,
1500 struct lu_object *ec_child, *child;
1501 struct lu_device *ld = ed->ed_next;
1502 struct echo_thread_info *info = echo_env_info(env);
1503 struct lu_fid *fid2 = &info->eti_fid2;
1504 struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
1509 rc = mdo_lookup(env, parent, lname, fid2, spec);
1512 else if (rc != -ENOENT)
1515 ec_child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev,
1517 if (IS_ERR(ec_child)) {
1518 CERROR("Can not find the child "DFID": rc = %ld\n", PFID(fid),
1520 RETURN(PTR_ERR(ec_child));
1523 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1524 if (child == NULL) {
1525 CERROR("Can not locate the child "DFID"\n", PFID(fid));
1526 GOTO(out_put, rc = -EINVAL);
1529 CDEBUG(D_RPCTRACE, "Start creating object "DFID" %s %p\n",
1530 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
1533 * Do not perform the lookup sanity check; we know the name does not exist.
1535 spec->sp_cr_lookup = 0;
1536 rc = mdo_create(env, parent, lname, lu2md(child), spec, ma);
1538 CERROR("Can not create child "DFID": rc = %d\n", PFID(fid), rc);
1541 CDEBUG(D_RPCTRACE, "End creating object "DFID" %s %p rc = %d\n",
1542 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent, rc);
1545 lu_object_put(env, ec_child);
1549 static int echo_set_lmm_size(const struct lu_env *env, struct lu_device *ld,
1552 struct echo_thread_info *info = echo_env_info(env);
1554 if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
1555 ma->ma_lmm = (void *)&info->eti_lmm;
1556 ma->ma_lmm_size = sizeof(info->eti_lmm);
1558 LASSERT(info->eti_big_lmmsize);
1559 ma->ma_lmm = info->eti_big_lmm;
1560 ma->ma_lmm_size = info->eti_big_lmmsize;
1566 static int echo_create_md_object(const struct lu_env *env,
1567 struct echo_device *ed,
1568 struct lu_object *ec_parent,
1570 char *name, int namelen,
1571 __u64 id, __u32 mode, int count,
1572 int stripe_count, int stripe_offset)
1574 struct lu_object *parent;
1575 struct echo_thread_info *info = echo_env_info(env);
1576 struct lu_name *lname = &info->eti_lname;
1577 struct md_op_spec *spec = &info->eti_spec;
1578 struct md_attr *ma = &info->eti_ma;
1579 struct lu_device *ld = ed->ed_next;
1585 if (ec_parent == NULL)
1587 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1591 memset(ma, 0, sizeof(*ma));
1592 memset(spec, 0, sizeof(*spec));
1593 if (stripe_count != 0) {
1594 spec->sp_cr_flags |= FMODE_WRITE;
1595 echo_set_lmm_size(env, ld, ma);
1596 if (stripe_count != -1) {
1597 struct lov_user_md_v3 *lum = &info->eti_lum;
1599 lum->lmm_magic = LOV_USER_MAGIC_V3;
1600 lum->lmm_stripe_count = stripe_count;
1601 lum->lmm_stripe_offset = stripe_offset;
1602 lum->lmm_pattern = 0;
1603 spec->u.sp_ea.eadata = lum;
1604 spec->u.sp_ea.eadatalen = sizeof(*lum);
1605 spec->sp_cr_flags |= MDS_OPEN_HAS_EA;
1609 ma->ma_attr.la_mode = mode;
1610 ma->ma_attr.la_valid = LA_CTIME | LA_MODE;
1611 ma->ma_attr.la_ctime = cfs_time_current_64();
1614 lname->ln_name = name;
1615 lname->ln_namelen = namelen;
1616 /* If a name is specified, create only one object, by that name */
1617 rc = echo_md_create_internal(env, ed, lu2md(parent), fid, lname,
1622 /* Create multiple objects, named sequentially by id */
1623 for (i = 0; i < count; i++) {
1624 char *tmp_name = info->eti_name;
1626 echo_md_build_name(lname, tmp_name, id);
1628 rc = echo_md_create_internal(env, ed, lu2md(parent), fid, lname,
1631 CERROR("Can not create child %s: rc = %d\n", tmp_name,
1642 static struct lu_object *echo_md_lookup(const struct lu_env *env,
1643 struct echo_device *ed,
1644 struct md_object *parent,
1645 struct lu_name *lname)
1647 struct echo_thread_info *info = echo_env_info(env);
1648 struct lu_fid *fid = &info->eti_fid;
1649 struct lu_object *child;
1653 CDEBUG(D_INFO, "lookup %s in parent "DFID" %p\n", lname->ln_name,
1655 rc = mdo_lookup(env, parent, lname, fid, NULL);
1657 CERROR("lookup %s: rc = %d\n", lname->ln_name, rc);
1658 RETURN(ERR_PTR(rc));
1661 /* In the function below, .hs_keycmp resolves to
1662 * lu_obj_hop_keycmp() */
1663 /* coverity[overrun-buffer-val] */
1664 child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
1669 static int echo_setattr_object(const struct lu_env *env,
1670 struct echo_device *ed,
1671 struct lu_object *ec_parent,
1672 __u64 id, int count)
1674 struct lu_object *parent;
1675 struct echo_thread_info *info = echo_env_info(env);
1676 struct lu_name *lname = &info->eti_lname;
1677 char *name = info->eti_name;
1678 struct lu_device *ld = ed->ed_next;
1679 struct lu_buf *buf = &info->eti_buf;
1685 if (ec_parent == NULL)
1687 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1691 for (i = 0; i < count; i++) {
1692 struct lu_object *ec_child, *child;
1694 echo_md_build_name(lname, name, id);
1696 ec_child = echo_md_lookup(env, ed, lu2md(parent), lname);
1697 if (IS_ERR(ec_child)) {
1698 CERROR("Can't find child %s: rc = %ld\n",
1699 lname->ln_name, PTR_ERR(ec_child));
1700 RETURN(PTR_ERR(ec_child));
1703 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1704 if (child == NULL) {
1705 CERROR("Can not locate the child %s\n", lname->ln_name);
1706 lu_object_put(env, ec_child);
1711 CDEBUG(D_RPCTRACE, "Start setattr object "DFID"\n",
1712 PFID(lu_object_fid(child)));
1714 buf->lb_buf = info->eti_xattr_buf;
1715 buf->lb_len = sizeof(info->eti_xattr_buf);
1717 sprintf(name, "%s.test1", XATTR_USER_PREFIX);
1718 rc = mo_xattr_set(env, lu2md(child), buf, name,
1721 CERROR("Can not setattr child "DFID": rc = %d\n",
1722 PFID(lu_object_fid(child)), rc);
1723 lu_object_put(env, ec_child);
1726 CDEBUG(D_RPCTRACE, "End setattr object "DFID"\n",
1727 PFID(lu_object_fid(child)));
1729 lu_object_put(env, ec_child);
1734 static int echo_getattr_object(const struct lu_env *env,
1735 struct echo_device *ed,
1736 struct lu_object *ec_parent,
1737 __u64 id, int count)
1739 struct lu_object *parent;
1740 struct echo_thread_info *info = echo_env_info(env);
1741 struct lu_name *lname = &info->eti_lname;
1742 char *name = info->eti_name;
1743 struct md_attr *ma = &info->eti_ma;
1744 struct lu_device *ld = ed->ed_next;
1750 if (ec_parent == NULL)
1752 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1756 memset(ma, 0, sizeof(*ma));
1757 ma->ma_need |= MA_INODE | MA_LOV | MA_PFID | MA_HSM | MA_ACL_DEF;
1758 ma->ma_acl = info->eti_xattr_buf;
1759 ma->ma_acl_size = sizeof(info->eti_xattr_buf);
1761 for (i = 0; i < count; i++) {
1762 struct lu_object *ec_child, *child;
1765 echo_md_build_name(lname, name, id);
1766 echo_set_lmm_size(env, ld, ma);
1768 ec_child = echo_md_lookup(env, ed, lu2md(parent), lname);
1769 if (IS_ERR(ec_child)) {
1770 CERROR("Can't find child %s: rc = %ld\n",
1771 lname->ln_name, PTR_ERR(ec_child));
1772 RETURN(PTR_ERR(ec_child));
1775 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1776 if (child == NULL) {
1777 CERROR("Can not locate the child %s\n", lname->ln_name);
1778 lu_object_put(env, ec_child);
1782 CDEBUG(D_RPCTRACE, "Start getattr object "DFID"\n",
1783 PFID(lu_object_fid(child)));
1784 rc = echo_attr_get_complex(env, lu2md(child), ma);
1786 CERROR("Can not getattr child "DFID": rc = %d\n",
1787 PFID(lu_object_fid(child)), rc);
1788 lu_object_put(env, ec_child);
1791 CDEBUG(D_RPCTRACE, "End getattr object "DFID"\n",
1792 PFID(lu_object_fid(child)));
1794 lu_object_put(env, ec_child);
1800 static int echo_lookup_object(const struct lu_env *env,
1801 struct echo_device *ed,
1802 struct lu_object *ec_parent,
1803 __u64 id, int count)
1805 struct lu_object *parent;
1806 struct echo_thread_info *info = echo_env_info(env);
1807 struct lu_name *lname = &info->eti_lname;
1808 char *name = info->eti_name;
1809 struct lu_fid *fid = &info->eti_fid;
1810 struct lu_device *ld = ed->ed_next;
1814 if (ec_parent == NULL)
1816 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1820 /* prepare the requests */
1821 for (i = 0; i < count; i++) {
1822 echo_md_build_name(lname, name, id);
1824 CDEBUG(D_RPCTRACE, "Start lookup object "DFID" %s %p\n",
1825 PFID(lu_object_fid(parent)), lname->ln_name, parent);
1827 rc = mdo_lookup(env, lu2md(parent), lname, fid, NULL);
1829 CERROR("Can not lookup child %s: rc = %d\n", name, rc);
1832 CDEBUG(D_RPCTRACE, "End lookup object "DFID" %s %p\n",
1833 PFID(lu_object_fid(parent)), lname->ln_name, parent);
1840 static int echo_md_destroy_internal(const struct lu_env *env,
1841 struct echo_device *ed,
1842 struct md_object *parent,
1843 struct lu_name *lname,
1846 struct lu_device *ld = ed->ed_next;
1847 struct lu_object *ec_child;
1848 struct lu_object *child;
1853 ec_child = echo_md_lookup(env, ed, parent, lname);
1854 if (IS_ERR(ec_child)) {
1855 CERROR("Can't find child %s: rc = %ld\n", lname->ln_name,
1857 RETURN(PTR_ERR(ec_child));
1860 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1861 if (child == NULL) {
1862 CERROR("Can not locate the child %s\n", lname->ln_name);
1863 GOTO(out_put, rc = -EINVAL);
1866 if (lu_object_remote(child)) {
1867 CERROR("Can not destroy remote object %s: rc = %d\n",
1868 lname->ln_name, -EPERM);
1869 GOTO(out_put, rc = -EPERM);
1871 CDEBUG(D_RPCTRACE, "Start destroy object "DFID" %s %p\n",
1872 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
1874 rc = mdo_unlink(env, parent, lu2md(child), lname, ma, 0);
1876 CERROR("Can not unlink child %s: rc = %d\n",
1877 lname->ln_name, rc);
1880 CDEBUG(D_RPCTRACE, "End destroy object "DFID" %s %p\n",
1881 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
1883 lu_object_put(env, ec_child);
1887 static int echo_destroy_object(const struct lu_env *env,
1888 struct echo_device *ed,
1889 struct lu_object *ec_parent,
1890 char *name, int namelen,
1891 __u64 id, __u32 mode,
1894 struct echo_thread_info *info = echo_env_info(env);
1895 struct lu_name *lname = &info->eti_lname;
1896 struct md_attr *ma = &info->eti_ma;
1897 struct lu_device *ld = ed->ed_next;
1898 struct lu_object *parent;
1903 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1907 memset(ma, 0, sizeof(*ma));
1908 ma->ma_attr.la_mode = mode;
1909 ma->ma_attr.la_valid = LA_CTIME;
1910 ma->ma_attr.la_ctime = cfs_time_current_64();
1911 ma->ma_need = MA_INODE;
1915 lname->ln_name = name;
1916 lname->ln_namelen = namelen;
1917 rc = echo_md_destroy_internal(env, ed, lu2md(parent), lname,
1922 /* prepare the requests */
1923 for (i = 0; i < count; i++) {
1924 char *tmp_name = info->eti_name;
1927 echo_md_build_name(lname, tmp_name, id);
1929 rc = echo_md_destroy_internal(env, ed, lu2md(parent), lname,
1932 CERROR("Can not unlink child %s: rc = %d\n", name, rc);
1941 static struct lu_object *echo_resolve_path(const struct lu_env *env,
1942 struct echo_device *ed, char *path,
1945 struct lu_device *ld = ed->ed_next;
1946 struct md_device *md = lu2md_dev(ld);
1947 struct echo_thread_info *info = echo_env_info(env);
1948 struct lu_fid *fid = &info->eti_fid;
1949 struct lu_name *lname = &info->eti_lname;
1950 struct lu_object *parent = NULL;
1951 struct lu_object *child = NULL;
1955 /* Only the MDD layer is supported right now */
1956 rc = md->md_ops->mdo_root_get(env, md, fid);
1958 CERROR("get root error: rc = %d\n", rc);
1959 RETURN(ERR_PTR(rc));
1962 /* In the function below, .hs_keycmp resolves to
1963 * lu_obj_hop_keycmp() */
1964 /* coverity[overrun-buffer-val] */
1965 parent = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
1966 if (IS_ERR(parent)) {
1967 CERROR("Can not find the parent "DFID": rc = %ld\n",
1968 PFID(fid), PTR_ERR(parent));
1973 struct lu_object *ld_parent;
1976 e = strsep(&path, "/");
1981 if (!path || path[0] == '\0')
1987 lname->ln_namelen = strlen(e);
1989 ld_parent = lu_object_locate(parent->lo_header, ld->ld_type);
1990 if (ld_parent == NULL) {
1991 lu_object_put(env, parent);
1996 child = echo_md_lookup(env, ed, lu2md(ld_parent), lname);
1997 lu_object_put(env, parent);
1998 if (IS_ERR(child)) {
1999 rc = (int)PTR_ERR(child);
2000 CERROR("lookup %s under parent "DFID": rc = %d\n",
2001 lname->ln_name, PFID(lu_object_fid(ld_parent)),
2008 RETURN(ERR_PTR(rc));
2013 static void echo_ucred_init(struct lu_env *env)
2015 struct lu_ucred *ucred = lu_ucred(env);
2017 ucred->uc_valid = UCRED_INVALID;
2019 ucred->uc_suppgids[0] = -1;
2020 ucred->uc_suppgids[1] = -1;
2022 ucred->uc_uid = ucred->uc_o_uid =
2023 from_kuid(&init_user_ns, current_uid());
2024 ucred->uc_gid = ucred->uc_o_gid =
2025 from_kgid(&init_user_ns, current_gid());
2026 ucred->uc_fsuid = ucred->uc_o_fsuid =
2027 from_kuid(&init_user_ns, current_fsuid());
2028 ucred->uc_fsgid = ucred->uc_o_fsgid =
2029 from_kgid(&init_user_ns, current_fsgid());
2030 ucred->uc_cap = cfs_curproc_cap_pack();
2032 /* remove fs privileges for non-root users. */
2033 if (ucred->uc_fsuid)
2034 ucred->uc_cap &= ~CFS_CAP_FS_MASK;
2035 ucred->uc_valid = UCRED_NEW;
2038 static void echo_ucred_fini(struct lu_env *env)
2040 struct lu_ucred *ucred = lu_ucred(env);
2041 ucred->uc_valid = UCRED_INIT;
2044 #define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
2045 #define ECHO_MD_SES_TAG (LCT_REMEMBER | LCT_SESSION | LCT_SERVER_SESSION)
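/*
 * These tags are used to refill the client-side cl_env with the contexts
 * a metadata stack expects (an MD thread context plus a server session)
 * before echo_md_handler() calls down into MDD; see the
 * lu_env_refill_by_tags() call below.
 */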
2046 static int echo_md_handler(struct echo_device *ed, int command,
2047 char *path, int path_len, __u64 id, int count,
2048 struct obd_ioctl_data *data)
2050 struct echo_thread_info *info;
2051 struct lu_device *ld = ed->ed_next;
2054 struct lu_object *parent;
2056 int namelen = data->ioc_plen2;
2061 CERROR("MD echo client is not being initialized properly\n");
2065 if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
2066 CERROR("Only support MDD layer right now!\n");
2070 env = cl_env_get(&refcheck);
2072 RETURN(PTR_ERR(env));
2074 rc = lu_env_refill_by_tags(env, ECHO_MD_CTX_TAG, ECHO_MD_SES_TAG);
2078 /* init big_lmm buffer */
2079 info = echo_env_info(env);
2080 LASSERT(info->eti_big_lmm == NULL);
2081 OBD_ALLOC_LARGE(info->eti_big_lmm, MIN_MD_SIZE);
2082 if (info->eti_big_lmm == NULL)
2083 GOTO(out_env, rc = -ENOMEM);
2084 info->eti_big_lmmsize = MIN_MD_SIZE;
2086 parent = echo_resolve_path(env, ed, path, path_len);
2087 if (IS_ERR(parent)) {
2088 CERROR("Can not resolve the path %s: rc = %ld\n", path,
2090 GOTO(out_free, rc = PTR_ERR(parent));
2094 OBD_ALLOC(name, namelen + 1);
2096 GOTO(out_put, rc = -ENOMEM);
2097 if (copy_from_user(name, data->ioc_pbuf2, namelen))
2098 GOTO(out_name, rc = -EFAULT);
2101 echo_ucred_init(env);
2104 case ECHO_MD_CREATE:
2105 case ECHO_MD_MKDIR: {
2106 struct echo_thread_info *info = echo_env_info(env);
2107 __u32 mode = data->ioc_obdo2.o_mode;
2108 struct lu_fid *fid = &info->eti_fid;
2109 int stripe_count = (int)data->ioc_obdo2.o_misc;
2110 int stripe_index = (int)data->ioc_obdo2.o_stripe_idx;
2112 rc = ostid_to_fid(fid, &data->ioc_obdo1.o_oi, 0);
2116 /* In the function below, .hs_keycmp resolves to
2117 * lu_obj_hop_keycmp() */
2118 /* coverity[overrun-buffer-val] */
2119 rc = echo_create_md_object(env, ed, parent, fid, name, namelen,
2120 id, mode, count, stripe_count,
2124 case ECHO_MD_DESTROY:
2125 case ECHO_MD_RMDIR: {
2126 __u32 mode = data->ioc_obdo2.o_mode;
2128 rc = echo_destroy_object(env, ed, parent, name, namelen,
2132 case ECHO_MD_LOOKUP:
2133 rc = echo_lookup_object(env, ed, parent, id, count);
2135 case ECHO_MD_GETATTR:
2136 rc = echo_getattr_object(env, ed, parent, id, count);
2138 case ECHO_MD_SETATTR:
2139 rc = echo_setattr_object(env, ed, parent, id, count);
2142 CERROR("unknown command %d\n", command);
2146 echo_ucred_fini(env);
2150 OBD_FREE(name, namelen + 1);
2152 lu_object_put(env, parent);
2154 LASSERT(info->eti_big_lmm);
2155 OBD_FREE_LARGE(info->eti_big_lmm, info->eti_big_lmmsize);
2156 info->eti_big_lmm = NULL;
2157 info->eti_big_lmmsize = 0;
2159 cl_env_put(env, &refcheck);
2162 #endif /* HAVE_SERVER_SUPPORT */
2164 static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
2165 int on_target, struct obdo *oa, void *ulsm,
2166 int ulsm_nob, struct obd_trans_info *oti)
2168 struct echo_object *eco;
2169 struct echo_client_obd *ec = ed->ed_ec;
2170 struct lov_stripe_md *lsm = NULL;
2175 if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */
2176 (on_target || /* set_stripe */
2177 ec->ec_nstripes != 0)) { /* LOV */
2178 CERROR ("No valid oid\n");
2182 rc = echo_alloc_memmd(ed, &lsm);
2184 CERROR("Cannot allocate md: rc = %d\n", rc);
2191 rc = echo_copyin_lsm (ed, lsm, ulsm, ulsm_nob);
2195 if (lsm->lsm_stripe_count == 0)
2196 lsm->lsm_stripe_count = ec->ec_nstripes;
2198 if (lsm->lsm_stripe_size == 0)
2199 lsm->lsm_stripe_size = PAGE_CACHE_SIZE;
2203 /* setup stripes: indices + default ids if required */
2204 for (i = 0; i < lsm->lsm_stripe_count; i++) {
2205 if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0)
2206 lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi;
2208 lsm->lsm_oinfo[i]->loi_ost_idx =
2209 (idx + i) % ec->ec_nstripes;
2213 /* setup object ID here for !on_target and LOV hint */
2214 if (oa->o_valid & OBD_MD_FLID) {
2215 LASSERT(oa->o_valid & OBD_MD_FLGROUP);
2216 lsm->lsm_oi = oa->o_oi;
2219 if (ostid_id(&lsm->lsm_oi) == 0)
2220 ostid_set_id(&lsm->lsm_oi, ++last_object_id);
2224 /* Only echo objects are allowed to be created */
2225 LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
2226 (ostid_seq(&oa->o_oi) == FID_SEQ_ECHO));
2227 rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
2229 CERROR("Cannot create objects: rc = %d\n", rc);
2235 /* See what object ID we were given */
2236 oa->o_oi = lsm->lsm_oi;
2237 oa->o_valid |= OBD_MD_FLID;
2239 eco = cl_echo_object_find(ed, &lsm);
2241 GOTO(failed, rc = PTR_ERR(eco));
2242 cl_echo_object_put(eco);
2244 CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
2249 obd_destroy(env, ec->ec_exp, oa, lsm, oti, NULL, NULL);
2251 echo_free_memmd(ed, &lsm);
2253 CERROR("create object failed with: rc = %d\n", rc);
2257 static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
2260 struct lov_stripe_md *lsm = NULL;
2261 struct echo_object *eco;
2265 if ((oa->o_valid & OBD_MD_FLID) == 0 || ostid_id(&oa->o_oi) == 0) {
2266 /* disallow use of object id 0 */
2267 CERROR ("No valid oid\n");
2271 rc = echo_alloc_memmd(ed, &lsm);
2275 lsm->lsm_oi = oa->o_oi;
2276 if (!(oa->o_valid & OBD_MD_FLGROUP))
2277 ostid_set_seq_echo(&lsm->lsm_oi);
2280 eco = cl_echo_object_find(ed, &lsm);
2286 echo_free_memmd(ed, &lsm);
2290 static void echo_put_object(struct echo_object *eco)
2292 if (cl_echo_object_put(eco))
2293 CERROR("echo client: drop an object failed");
2297 echo_get_stripe_off_id (struct lov_stripe_md *lsm, obd_off *offp, obd_id *idp)
2299 unsigned long stripe_count;
2300 unsigned long stripe_size;
2301 unsigned long width;
2302 unsigned long woffset;
2306 if (lsm->lsm_stripe_count <= 1)
2310 stripe_size = lsm->lsm_stripe_size;
2311 stripe_count = lsm->lsm_stripe_count;
2313 /* width = # bytes in all stripes */
2314 width = stripe_size * stripe_count;
2316 /* woffset = offset within a width; offset = whole number of widths */
2317 woffset = do_div (offset, width);
2319 stripe_index = woffset / stripe_size;
2321 *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi);
2322 *offp = offset * stripe_size + woffset % stripe_size;
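/*
 * Worked example of the math above, with a hypothetical layout of
 * stripe_size = 1MB and stripe_count = 4: for a file offset of 5MB + 4KB,
 * width = 4MB, do_div() leaves offset = 1 (whole widths) and
 * woffset = 1MB + 4KB, so stripe_index = 1 and the returned per-object
 * offset is 1 * 1MB + 4KB = 1MB + 4KB on the second stripe.
 */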
2326 echo_client_page_debug_setup(struct lov_stripe_md *lsm,
2327 struct page *page, int rw, obd_id id,
2328 obd_off offset, obd_off count)
2335 /* no partial pages on the client */
2336 LASSERT(count == PAGE_CACHE_SIZE);
2340 for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
2341 if (rw == OBD_BRW_WRITE) {
2342 stripe_off = offset + delta;
2344 echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
2346 stripe_off = 0xdeadbeef00c0ffeeULL;
2347 stripe_id = 0xdeadbeef00c0ffeeULL;
2349 block_debug_setup(addr + delta, OBD_ECHO_BLOCK_SIZE,
2350 stripe_off, stripe_id);
2356 static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
2357 struct page *page, obd_id id,
2358 obd_off offset, obd_off count)
2367 /* no partial pages on the client */
2368 LASSERT(count == PAGE_CACHE_SIZE);
2372 for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
2373 stripe_off = offset + delta;
2375 echo_get_stripe_off_id (lsm, &stripe_off, &stripe_id);
2377 rc2 = block_debug_check("test_brw",
2378 addr + delta, OBD_ECHO_BLOCK_SIZE,
2379 stripe_off, stripe_id);
2381 CERROR ("Error in echo object "LPX64"\n", id);
2390 static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
2391 struct echo_object *eco, obd_off offset,
2392 obd_size count, int async,
2393 struct obd_trans_info *oti)
2395 struct lov_stripe_md *lsm = eco->eo_lsm;
2397 struct brw_page *pga;
2398 struct brw_page *pgp;
2399 struct page **pages;
2408 verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
2409 (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
2410 (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
2412 gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER;
2414 LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
2415 LASSERT(lsm != NULL);
2416 LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi));
2419 (count & (~CFS_PAGE_MASK)) != 0)
2422 /* XXX think again with misaligned I/O */
2423 npages = count >> PAGE_CACHE_SHIFT;
2425 if (rw == OBD_BRW_WRITE)
2426 brw_flags = OBD_BRW_ASYNC;
2428 OBD_ALLOC(pga, npages * sizeof(*pga));
2432 OBD_ALLOC(pages, npages * sizeof(*pages));
2433 if (pages == NULL) {
2434 OBD_FREE(pga, npages * sizeof(*pga));
2438 for (i = 0, pgp = pga, off = offset;
2440 i++, pgp++, off += PAGE_CACHE_SIZE) {
2442 LASSERT (pgp->pg == NULL); /* for cleanup */
2445 OBD_PAGE_ALLOC(pgp->pg, gfp_mask);
2446 if (pgp->pg == NULL)
2450 pgp->count = PAGE_CACHE_SIZE;
2452 pgp->flag = brw_flags;
2455 echo_client_page_debug_setup(lsm, pgp->pg, rw,
2456 ostid_id(&oa->o_oi), off,
2460 /* brw mode can only be used on the client */
2461 LASSERT(ed->ed_next != NULL);
2462 rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
2465 if (rc != 0 || rw != OBD_BRW_READ)
2468 for (i = 0, pgp = pga; i < npages; i++, pgp++) {
2469 if (pgp->pg == NULL)
2474 vrc = echo_client_page_debug_check(lsm, pgp->pg,
2475 ostid_id(&oa->o_oi),
2476 pgp->off, pgp->count);
2477 if (vrc != 0 && rc == 0)
2480 OBD_PAGE_FREE(pgp->pg);
2482 OBD_FREE(pga, npages * sizeof(*pga));
2483 OBD_FREE(pages, npages * sizeof(*pages));
2487 static int echo_client_prep_commit(const struct lu_env *env,
2488 struct obd_export *exp, int rw,
2489 struct obdo *oa, struct echo_object *eco,
2490 obd_off offset, obd_size count,
2491 obd_size batch, struct obd_trans_info *oti,
2494 struct lov_stripe_md *lsm = eco->eo_lsm;
2495 struct obd_ioobj ioo;
2496 struct niobuf_local *lnb;
2497 struct niobuf_remote *rnb;
2499 obd_size npages, tot_pages;
2500 int i, ret = 0, brw_flags = 0;
2504 if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
2505 (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
2508 npages = batch >> PAGE_CACHE_SHIFT;
2509 tot_pages = count >> PAGE_CACHE_SHIFT;
2511 OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local));
2512 OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote));
2514 if (lnb == NULL || rnb == NULL)
2515 GOTO(out, ret = -ENOMEM);
2517 if (rw == OBD_BRW_WRITE && async)
2518 brw_flags |= OBD_BRW_ASYNC;
2520 obdo_to_ioobj(oa, &ioo);
2524 for (; tot_pages; tot_pages -= npages) {
2527 if (tot_pages < npages)
2530 for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
2531 rnb[i].offset = off;
2532 rnb[i].len = PAGE_CACHE_SIZE;
2533 rnb[i].flags = brw_flags;
2536 ioo.ioo_bufcnt = npages;
2539 ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
2543 LASSERT(lpages == npages);
2545 for (i = 0; i < lpages; i++) {
2546 struct page *page = lnb[i].page;
2548 /* read past eof? */
2549 if (page == NULL && lnb[i].rc == 0)
2553 lnb[i].flags |= OBD_BRW_ASYNC;
2555 if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
2556 (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
2557 (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
2560 if (rw == OBD_BRW_WRITE)
2561 echo_client_page_debug_setup(lsm, page, rw,
2562 ostid_id(&oa->o_oi),
2566 echo_client_page_debug_check(lsm, page,
2567 ostid_id(&oa->o_oi),
2572 ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
2573 rnb, npages, lnb, oti, ret);
2577 /* Reset oti otherwise it would confuse ldiskfs. */
2578 memset(oti, 0, sizeof(*oti));
2580 /* Reuse env context. */
2581 lu_context_exit((struct lu_context *)&env->le_ctx);
2582 lu_context_enter((struct lu_context *)&env->le_ctx);
2587 OBD_FREE(lnb, npages * sizeof(struct niobuf_local));
2589 OBD_FREE(rnb, npages * sizeof(struct niobuf_remote));
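/**
 * Handle an OBD_IOC_BRW_READ/WRITE ioctl.  The test mode passed in
 * data->ioc_pbuf1 selects the I/O engine: mode 3 drives the target through
 * echo_client_prep_commit(), while the other modes use the client-side
 * echo_client_kbrw() path.  When the echo client sits directly on an
 * OFD/obdfilter device (ed_next == NULL) only prep/commit is usable.
 * The batch size in data->ioc_plen1 is capped at PTLRPC_MAX_BRW_SIZE.
 */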
2593 static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
2594 struct obd_export *exp,
2595 struct obd_ioctl_data *data,
2596 struct obd_trans_info *dummy_oti)
2598 struct obd_device *obd = class_exp2obd(exp);
2599 struct echo_device *ed = obd2echo_dev(obd);
2600 struct echo_client_obd *ec = ed->ed_ec;
2601 struct obdo *oa = &data->ioc_obdo1;
2602 struct echo_object *eco;
2608 LASSERT(oa->o_valid & OBD_MD_FLGROUP);
2610 rc = echo_get_object(&eco, ed, oa);
2614 oa->o_valid &= ~OBD_MD_FLHANDLE;
2616 /* OFD/obdfilter works only via prep/commit */
2617 test_mode = (long)data->ioc_pbuf1;
2621 if (ed->ed_next == NULL && test_mode != 3) {
2623 data->ioc_plen1 = data->ioc_count;
2626 /* Truncate batch size to maximum */
2627 if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE)
2628 data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
2630 switch (test_mode) {
2634 rc = echo_client_kbrw(ed, rw, oa,
2635 eco, data->ioc_offset,
2636 data->ioc_count, async, dummy_oti);
2639 rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa,
2640 eco, data->ioc_offset,
2641 data->ioc_count, data->ioc_plen1,
2647 echo_put_object(eco);
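/**
 * Main ioctl entry point for the echo client.  The object id in ioc_obdo1
 * is validated (ostid_to_fid()), a lu_env is set up (plus a server session
 * when built with HAVE_SERVER_SUPPORT), and the request is dispatched by
 * \a cmd: object create/destroy, getattr/setattr, bulk read/write, and the
 * server-only metadata commands OBD_IOC_ECHO_MD and OBD_IOC_ECHO_ALLOC_SEQ.
 * Commands that modify state require CFS_CAP_SYS_ADMIN.
 *
 * These ioctls are typically driven by lctl's echo-client test commands
 * (e.g. test_brw), though any caller that packs a struct obd_ioctl_data
 * can issue them directly.
 */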
2652 echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2653 void *karg, void *uarg)
2655 #ifdef HAVE_SERVER_SUPPORT
2656 struct tgt_session_info *tsi;
2658 struct obd_device *obd = exp->exp_obd;
2659 struct echo_device *ed = obd2echo_dev(obd);
2660 struct echo_client_obd *ec = ed->ed_ec;
2661 struct echo_object *eco;
2662 struct obd_ioctl_data *data = karg;
2663 struct obd_trans_info dummy_oti;
2665 struct oti_req_ack_lock *ack_lock;
2668 int rw = OBD_BRW_READ;
2671 #ifdef HAVE_SERVER_SUPPORT
2672 struct lu_context echo_session;
2676 memset(&dummy_oti, 0, sizeof(dummy_oti));
2678 oa = &data->ioc_obdo1;
2679 if (!(oa->o_valid & OBD_MD_FLGROUP)) {
2680 oa->o_valid |= OBD_MD_FLGROUP;
2681 ostid_set_seq_echo(&oa->o_oi);
2684 /* This FID is unpacked just for validation at this point */
2685 rc = ostid_to_fid(&fid, &oa->o_oi, 0);
2693 rc = lu_env_init(env, LCT_DT_THREAD);
2695 GOTO(out_alloc, rc = -ENOMEM);
2697 #ifdef HAVE_SERVER_SUPPORT
2698 env->le_ses = &echo_session;
2699 rc = lu_context_init(env->le_ses, LCT_SERVER_SESSION | LCT_NOREF);
2700 if (unlikely(rc < 0))
2702 lu_context_enter(env->le_ses);
2704 tsi = tgt_ses_info(env);
2705 tsi->tsi_exp = ec->ec_exp;
2706 tsi->tsi_jobid = NULL;
2709 case OBD_IOC_CREATE: /* may create echo object */
2710 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2711 GOTO(out, rc = -EPERM);
2713 rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
2714 data->ioc_plen1, &dummy_oti);
2717 #ifdef HAVE_SERVER_SUPPORT
2718 case OBD_IOC_ECHO_MD: {
2725 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2726 GOTO(out, rc = -EPERM);
2728 count = data->ioc_count;
2729 cmd = data->ioc_command;
2731 id = data->ioc_obdo2.o_oi.oi.oi_id;
2732 dirlen = data->ioc_plen1;
2733 OBD_ALLOC(dir, dirlen + 1);
2735 GOTO(out, rc = -ENOMEM);
2737 if (copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
2738 OBD_FREE(dir, data->ioc_plen1 + 1);
2739 GOTO(out, rc = -EFAULT);
2742 rc = echo_md_handler(ed, cmd, dir, dirlen, id, count, data);
2743 OBD_FREE(dir, dirlen + 1);
2746 case OBD_IOC_ECHO_ALLOC_SEQ: {
2747 struct lu_env *cl_env;
2752 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2753 GOTO(out, rc = -EPERM);
2755 cl_env = cl_env_get(&refcheck);
2757 GOTO(out, rc = PTR_ERR(cl_env));
2759 rc = lu_env_refill_by_tags(cl_env, ECHO_MD_CTX_TAG,
2762 cl_env_put(cl_env, &refcheck);
2766 rc = seq_client_get_seq(cl_env, ed->ed_cl_seq, &seq);
2767 cl_env_put(cl_env, &refcheck);
2769 CERROR("%s: cannot allocate seq: rc = %d\n",
2774 if (copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
2777 max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
2778 if (copy_to_user(data->ioc_pbuf2, &max_count,
2783 #endif /* HAVE_SERVER_SUPPORT */
2784 case OBD_IOC_DESTROY:
2785 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2786 GOTO(out, rc = -EPERM);
2788 rc = echo_get_object(&eco, ed, oa);
2790 rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm,
2791 &dummy_oti, NULL, NULL);
2793 eco->eo_deleted = 1;
2794 echo_put_object(eco);
2798 case OBD_IOC_GETATTR:
2799 rc = echo_get_object(&eco, ed, oa);
2801 struct obd_info oinfo = { { { 0 } } };
2802 oinfo.oi_md = eco->eo_lsm;
2804 rc = obd_getattr(env, ec->ec_exp, &oinfo);
2805 echo_put_object(eco);
2809 case OBD_IOC_SETATTR:
2810 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2811 GOTO(out, rc = -EPERM);
2813 rc = echo_get_object(&eco, ed, oa);
2815 struct obd_info oinfo = { { { 0 } } };
2817 oinfo.oi_md = eco->eo_lsm;
2819 rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
2820 echo_put_object(eco);
2824 case OBD_IOC_BRW_WRITE:
2825 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2826 GOTO(out, rc = -EPERM);
2830 case OBD_IOC_BRW_READ:
2831 rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
2835 CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
2836 GOTO(out, rc = -ENOTTY);
2841 #ifdef HAVE_SERVER_SUPPORT
2842 lu_context_exit(env->le_ses);
2843 lu_context_fini(env->le_ses);
2850 /* XXX this should be in a helper also called by target_send_reply */
2851 for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
2853 if (!ack_lock->mode)
2855 ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
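/**
 * Set up the echo client on top of the target OBD named in \a lcfg buffer 1,
 * which must already be attached and set up.  For an MDT target the metadata
 * context/session tags are updated (server builds only); otherwise the client
 * connects to the target with connect flags advertising BRW size, grant,
 * 64-bit hashes and LVB types, and drops the resulting export from the pinger
 * list since it talks to the target obd directly.
 */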
2861 static int echo_client_setup(const struct lu_env *env,
2862 struct obd_device *obddev, struct lustre_cfg *lcfg)
2864 struct echo_client_obd *ec = &obddev->u.echo_client;
2865 struct obd_device *tgt;
2866 struct obd_uuid echo_uuid = { "ECHO_UUID" };
2867 struct obd_connect_data *ocd = NULL;
2871 if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
2872 CERROR("requires a TARGET OBD name\n");
2876 tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
2877 if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
2878 CERROR("device not attached or not set up (%s)\n",
2879 lustre_cfg_string(lcfg, 1));
2883 spin_lock_init(&ec->ec_lock);
2884 INIT_LIST_HEAD(&ec->ec_objects);
2885 INIT_LIST_HEAD(&ec->ec_locks);
2887 ec->ec_nstripes = 0;
2889 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
2890 #ifdef HAVE_SERVER_SUPPORT
2891 lu_context_tags_update(ECHO_MD_CTX_TAG);
2892 lu_session_tags_update(ECHO_MD_SES_TAG);
2894 CERROR("Local operations are not supported on the client side; "
2895 "the metadata echo client must be run on the "
2896 "server side.\n");
2901 OBD_ALLOC(ocd, sizeof(*ocd));
2903 CERROR("Cannot allocate ocd to connect to %s\n",
2904 lustre_cfg_string(lcfg, 1));
2908 ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
2909 OBD_CONNECT_BRW_SIZE |
2910 OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
2911 OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE |
2913 ocd->ocd_brw_size = DT_MAX_BRW_SIZE;
2914 ocd->ocd_version = LUSTRE_VERSION_CODE;
2915 ocd->ocd_group = FID_SEQ_ECHO;
2917 rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
2919 /* Turn off the pinger because this export connects to the tgt obd directly. */
2920 spin_lock(&tgt->obd_dev_lock);
2921 list_del_init(&ec->ec_exp->exp_obd_chain_timed);
2922 spin_unlock(&tgt->obd_dev_lock);
2925 OBD_FREE(ocd, sizeof(*ocd));
2928 CERROR("failed to connect to device %s\n",
2929 lustre_cfg_string(lcfg, 1));
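/**
 * Tear down the echo client.  For the metadata echo client only the
 * context/session tags are cleared (server builds only); for the data case
 * the export to the target is disconnected, provided no other exports are
 * still attached to the device.
 */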
2936 static int echo_client_cleanup(struct obd_device *obddev)
2938 struct echo_device *ed = obd2echo_dev(obddev);
2939 struct echo_client_obd *ec = &obddev->u.echo_client;
2943 /* Do nothing for the metadata echo client */
2947 if (ed->ed_next_ismd) {
2948 #ifdef HAVE_SERVER_SUPPORT
2949 lu_context_tags_clear(ECHO_MD_CTX_TAG);
2950 lu_session_tags_clear(ECHO_MD_SES_TAG);
2952 CERROR("This is a client-side-only module; the metadata echo "
2953 "client is not supported.\n");
2958 if (!list_empty(&obddev->obd_exports)) {
2959 CERROR("still has clients!\n");
2963 LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
2964 rc = obd_disconnect(ec->ec_exp);
2966 CERROR("failed to disconnect device: %d\n", rc);
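/**
 * Create an export to the echo client device itself (as opposed to the
 * export towards the target set up in echo_client_setup()).
 */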
2971 static int echo_client_connect(const struct lu_env *env,
2972 struct obd_export **exp,
2973 struct obd_device *src, struct obd_uuid *cluuid,
2974 struct obd_connect_data *data, void *localdata)
2977 struct lustre_handle conn = { 0 };
2980 rc = class_connect(&conn, src, cluuid);
2982 *exp = class_conn2export(&conn);
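/**
 * Drop an export previously obtained through echo_client_connect().
 */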
2988 static int echo_client_disconnect(struct obd_export *exp)
2994 GOTO(out, rc = -EINVAL);
2996 rc = class_disconnect(exp);
3002 static struct obd_ops echo_client_obd_ops = {
3003 .o_owner = THIS_MODULE,
3004 .o_iocontrol = echo_client_iocontrol,
3005 .o_connect = echo_client_connect,
3006 .o_disconnect = echo_client_disconnect
3009 int echo_client_init(void)
3013 rc = lu_kmem_init(echo_caches);
3015 rc = class_register_type(&echo_client_obd_ops, NULL, true, NULL,
3016 #ifndef HAVE_ONLY_PROCFS_SEQ
3019 LUSTRE_ECHO_CLIENT_NAME,
3022 lu_kmem_fini(echo_caches);
3027 void echo_client_exit(void)
3029 class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
3030 lu_kmem_fini(echo_caches);
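/**
 * Module entry point: announce the driver, set up the persistent pages and
 * register the server-side echo OBD type (server builds only), then register
 * the echo client type through echo_client_init().
 */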
3034 static int __init obdecho_init(void)
3039 LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
3041 LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
3043 # ifdef HAVE_SERVER_SUPPORT
3044 rc = echo_persistent_pages_init();
3048 rc = class_register_type(&echo_obd_ops, NULL, true, NULL,
3049 #ifndef HAVE_ONLY_PROCFS_SEQ
3052 LUSTRE_ECHO_NAME, NULL);
3057 rc = echo_client_init();
3059 # ifdef HAVE_SERVER_SUPPORT
3063 class_unregister_type(LUSTRE_ECHO_NAME);
3065 echo_persistent_pages_fini();
3071 static void /*__exit*/ obdecho_exit(void)
3075 # ifdef HAVE_SERVER_SUPPORT
3076 class_unregister_type(LUSTRE_ECHO_NAME);
3077 echo_persistent_pages_fini();
3081 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3082 MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
3083 MODULE_LICENSE("GPL");
3085 cfs_module(obdecho, LUSTRE_VERSION_STRING, obdecho_init, obdecho_exit);
3086 #endif /* __KERNEL__ */
3088 /** @} echo_client */