/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
33 #define DEBUG_SUBSYSTEM S_ECHO
35 #include <linux/user_namespace.h>
36 #include <linux/uidgid.h>
38 #include <libcfs/libcfs.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lprocfs_status.h>
43 #include <cl_object.h>
44 #include <lustre_fid.h>
45 #include <lustre_lmv.h>
46 #include <lustre_acl.h>
47 #include <uapi/linux/lustre/lustre_ioctl.h>
48 #include <lustre_net.h>
49 #ifdef HAVE_SERVER_SUPPORT
50 # include <md_object.h>
52 #define ETI_NAME_LEN 20
54 #endif /* HAVE_SERVER_SUPPORT */
56 #include "echo_internal.h"
58 /** \defgroup echo_client Echo Client
62 /* echo thread key have a CL_THREAD flag, which set cl_env function directly */
63 #define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
64 #define ECHO_DT_CTX_TAG (LCT_REMEMBER | LCT_DT_THREAD)
65 #define ECHO_SES_TAG (LCT_REMEMBER | LCT_SESSION | LCT_SERVER_SESSION)
68 struct cl_device ed_cl;
69 struct echo_client_obd *ed_ec;
71 struct cl_site ed_site_myself;
72 struct lu_site *ed_site;
73 struct lu_device *ed_next;
75 struct lu_client_seq *ed_cl_seq;
76 #ifdef HAVE_SERVER_SUPPORT
77 struct local_oid_storage *ed_los;
78 struct lu_fid ed_root_fid;
79 #endif /* HAVE_SERVER_SUPPORT */
83 struct cl_object eo_cl;
84 struct cl_object_header eo_hdr;
85 struct echo_device *eo_dev;
86 struct list_head eo_obj_chain;
87 struct lov_oinfo *eo_oinfo;
92 struct echo_object_conf {
93 struct cl_object_conf eoc_cl;
94 struct lov_oinfo **eoc_oinfo;
98 struct cl_page_slice ep_cl;
99 unsigned long ep_lock;
103 struct cl_lock_slice el_cl;
104 struct list_head el_chain;
105 struct echo_object *el_object;
107 atomic_t el_refcount;
110 #ifdef HAVE_SERVER_SUPPORT
111 static const char echo_md_root_dir_name[] = "ROOT_ECHO";
114 * In order to use the values of members in struct mdd_device,
115 * we define an alias structure here.
117 struct echo_md_device {
118 struct md_device emd_md_dev;
119 struct obd_export *emd_child_exp;
120 struct dt_device *emd_child;
121 struct dt_device *emd_bottom;
122 struct lu_fid emd_root_fid;
123 struct lu_fid emd_local_root_fid;
125 #endif /* HAVE_SERVER_SUPPORT */
127 static int echo_client_setup(const struct lu_env *env,
128 struct obd_device *obd,
129 struct lustre_cfg *lcfg);
130 static int echo_client_cleanup(struct obd_device *obd);
132 /** \defgroup echo_helpers Helper functions
135 static inline struct echo_device *cl2echo_dev(const struct cl_device *dev)
137 return container_of_safe(dev, struct echo_device, ed_cl);
140 static inline struct cl_device *echo_dev2cl(struct echo_device *d)
145 static inline struct echo_device *obd2echo_dev(const struct obd_device *obd)
147 return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev));
150 static inline struct cl_object *echo_obj2cl(struct echo_object *eco)
155 static inline struct echo_object *cl2echo_obj(const struct cl_object *o)
157 return container_of(o, struct echo_object, eo_cl);
160 static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s)
162 return container_of(s, struct echo_page, ep_cl);
165 static inline struct echo_lock *cl2echo_lock(const struct cl_lock_slice *s)
167 return container_of(s, struct echo_lock, el_cl);
170 static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl)
172 return ecl->el_cl.cls_lock;
175 static struct lu_context_key echo_thread_key;
177 static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
179 struct echo_thread_info *info;
181 info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
182 LASSERT(info != NULL);
187 struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
189 return container_of(c, struct echo_object_conf, eoc_cl);
192 #ifdef HAVE_SERVER_SUPPORT
193 static inline struct echo_md_device *lu2emd_dev(struct lu_device *d)
195 return container_of_safe(d, struct echo_md_device,
196 emd_md_dev.md_lu_dev);
199 static inline struct lu_device *emd2lu_dev(struct echo_md_device *d)
201 return &d->emd_md_dev.md_lu_dev;
204 static inline struct seq_server_site *echo_md_seq_site(struct echo_md_device *d)
206 return emd2lu_dev(d)->ld_site->ld_seq_site;
209 static inline struct obd_device *emd2obd_dev(struct echo_md_device *d)
211 return d->emd_md_dev.md_lu_dev.ld_obd;
213 #endif /* HAVE_SERVER_SUPPORT */
215 /** @} echo_helpers */
217 static int cl_echo_object_put(struct echo_object *eco);
218 static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
219 struct page **pages, int npages, int async);
221 struct echo_thread_info {
222 struct echo_object_conf eti_conf;
223 struct lustre_md eti_md;
224 struct cl_2queue eti_queue;
226 struct cl_lock eti_lock;
227 struct lu_fid eti_fid;
228 struct lu_fid eti_fid2;
229 #ifdef HAVE_SERVER_SUPPORT
230 struct md_op_spec eti_spec;
231 struct lov_mds_md_v3 eti_lmm;
232 struct lov_user_md_v3 eti_lum;
233 struct md_attr eti_ma;
234 struct lu_name eti_lname;
235 /* per-thread values, can be re-used */
236 void *eti_big_lmm; /* may be vmalloc'd */
238 char eti_name[ETI_NAME_LEN];
239 struct lu_buf eti_buf;
240 /* If we want to test large ACL, then need to enlarge the buffer. */
241 char eti_xattr_buf[LUSTRE_POSIX_ACL_MAX_SIZE_OLD];
245 /* No session used right now */
246 struct echo_session_info {
250 static struct kmem_cache *echo_lock_kmem;
251 static struct kmem_cache *echo_object_kmem;
252 static struct kmem_cache *echo_thread_kmem;
253 static struct kmem_cache *echo_session_kmem;
254 /* static struct kmem_cache *echo_req_kmem; */
256 static struct lu_kmem_descr echo_caches[] = {
258 .ckd_cache = &echo_lock_kmem,
259 .ckd_name = "echo_lock_kmem",
260 .ckd_size = sizeof(struct echo_lock)
263 .ckd_cache = &echo_object_kmem,
264 .ckd_name = "echo_object_kmem",
265 .ckd_size = sizeof(struct echo_object)
268 .ckd_cache = &echo_thread_kmem,
269 .ckd_name = "echo_thread_kmem",
270 .ckd_size = sizeof(struct echo_thread_info)
273 .ckd_cache = &echo_session_kmem,
274 .ckd_name = "echo_session_kmem",
275 .ckd_size = sizeof(struct echo_session_info)
282 /** \defgroup echo_page Page operations
284 * Echo page operations.
288 static int echo_page_own(const struct lu_env *env,
289 const struct cl_page_slice *slice,
290 struct cl_io *io, int nonblock)
292 struct echo_page *ep = cl2echo_page(slice);
295 if (test_and_set_bit(0, &ep->ep_lock))
298 while (test_and_set_bit(0, &ep->ep_lock))
299 wait_on_bit(&ep->ep_lock, 0, TASK_UNINTERRUPTIBLE);
304 static void echo_page_disown(const struct lu_env *env,
305 const struct cl_page_slice *slice,
308 struct echo_page *ep = cl2echo_page(slice);
310 LASSERT(test_bit(0, &ep->ep_lock));
311 clear_and_wake_up_bit(0, &ep->ep_lock);
314 static void echo_page_discard(const struct lu_env *env,
315 const struct cl_page_slice *slice,
316 struct cl_io *unused)
318 cl_page_delete(env, slice->cpl_page);
321 static int echo_page_is_vmlocked(const struct lu_env *env,
322 const struct cl_page_slice *slice)
324 if (test_bit(0, &cl2echo_page(slice)->ep_lock))
329 static void echo_page_completion(const struct lu_env *env,
330 const struct cl_page_slice *slice,
333 LASSERT(slice->cpl_page->cp_sync_io != NULL);
336 static void echo_page_fini(const struct lu_env *env,
337 struct cl_page_slice *slice,
338 struct pagevec *pvec)
340 struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
343 atomic_dec(&eco->eo_npages);
344 put_page(slice->cpl_page->cp_vmpage);
/* Nothing to prepare for echo pages; transfer can start immediately. */
static int echo_page_prep(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  struct cl_io *unused)
{
	return 0;
}
355 static int echo_page_print(const struct lu_env *env,
356 const struct cl_page_slice *slice,
357 void *cookie, lu_printer_t printer)
359 struct echo_page *ep = cl2echo_page(slice);
361 (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
362 ep, test_bit(0, &ep->ep_lock),
363 slice->cpl_page->cp_vmpage);
367 static const struct cl_page_operations echo_page_ops = {
368 .cpo_own = echo_page_own,
369 .cpo_disown = echo_page_disown,
370 .cpo_discard = echo_page_discard,
371 .cpo_fini = echo_page_fini,
372 .cpo_print = echo_page_print,
373 .cpo_is_vmlocked = echo_page_is_vmlocked,
376 .cpo_prep = echo_page_prep,
377 .cpo_completion = echo_page_completion,
380 .cpo_prep = echo_page_prep,
381 .cpo_completion = echo_page_completion,
388 /** \defgroup echo_lock Locking
390 * echo lock operations
394 static void echo_lock_fini(const struct lu_env *env,
395 struct cl_lock_slice *slice)
397 struct echo_lock *ecl = cl2echo_lock(slice);
399 LASSERT(list_empty(&ecl->el_chain));
400 OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
403 static struct cl_lock_operations echo_lock_ops = {
404 .clo_fini = echo_lock_fini,
409 /** \defgroup echo_cl_ops cl_object operations
411 * operations for cl_object
415 static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
416 struct cl_page *page, pgoff_t index)
418 struct echo_page *ep = cl_object_page_slice(obj, page);
419 struct echo_object *eco = cl2echo_obj(obj);
422 get_page(page->cp_vmpage);
424 * ep_lock is similar to the lock_page() lock, and
425 * cannot usefully be monitored by lockdep.
426 * So just use a bit in an "unsigned long" and use the
427 * wait_on_bit() interface to wait for the bit to be clear.
430 cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
431 atomic_inc(&eco->eo_npages);
435 static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
441 static int echo_lock_init(const struct lu_env *env,
442 struct cl_object *obj, struct cl_lock *lock,
443 const struct cl_io *unused)
445 struct echo_lock *el;
448 OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, GFP_NOFS);
450 cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
451 el->el_object = cl2echo_obj(obj);
452 INIT_LIST_HEAD(&el->el_chain);
453 atomic_set(&el->el_refcount, 0);
455 RETURN(el ? 0 : -ENOMEM);
/* Layout/configuration changes are a no-op for echo objects. */
static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
			 const struct cl_object_conf *conf)
{
	return 0;
}
464 static const struct cl_object_operations echo_cl_obj_ops = {
465 .coo_page_init = echo_page_init,
466 .coo_lock_init = echo_lock_init,
467 .coo_io_init = echo_io_init,
468 .coo_conf_set = echo_conf_set
470 /** @} echo_cl_ops */
472 /** \defgroup echo_lu_ops lu_object operations
474 * operations for echo lu object.
478 static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
479 const struct lu_object_conf *conf)
481 struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
482 struct echo_client_obd *ec = ed->ed_ec;
483 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
487 struct lu_object *below;
488 struct lu_device *under;
491 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
495 lu_object_add(obj, below);
498 if (!ed->ed_next_ismd) {
499 const struct cl_object_conf *cconf = lu2cl_conf(conf);
500 struct echo_object_conf *econf = cl2echo_conf(cconf);
502 LASSERT(econf->eoc_oinfo != NULL);
505 * Transfer the oinfo pointer to eco that it won't be
508 eco->eo_oinfo = *econf->eoc_oinfo;
509 *econf->eoc_oinfo = NULL;
511 eco->eo_oinfo = NULL;
515 atomic_set(&eco->eo_npages, 0);
516 cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
518 spin_lock(&ec->ec_lock);
519 list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
520 spin_unlock(&ec->ec_lock);
525 static void echo_object_delete(const struct lu_env *env, struct lu_object *obj)
527 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
528 struct echo_client_obd *ec;
532 /* object delete called unconditolally - layer init or not */
533 if (eco->eo_dev == NULL)
536 ec = eco->eo_dev->ed_ec;
538 LASSERT(atomic_read(&eco->eo_npages) == 0);
540 spin_lock(&ec->ec_lock);
541 list_del_init(&eco->eo_obj_chain);
542 spin_unlock(&ec->ec_lock);
545 OBD_FREE_PTR(eco->eo_oinfo);
548 static void echo_object_free_rcu(struct rcu_head *head)
550 struct echo_object *eco = container_of(head, struct echo_object,
551 eo_hdr.coh_lu.loh_rcu);
553 kmem_cache_free(echo_object_kmem, eco);
556 static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
558 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
563 lu_object_header_fini(obj->lo_header);
565 OBD_FREE_PRE(eco, sizeof(*eco), "slab-freed");
566 call_rcu(&eco->eo_hdr.coh_lu.loh_rcu, echo_object_free_rcu);
570 static int echo_object_print(const struct lu_env *env, void *cookie,
571 lu_printer_t p, const struct lu_object *o)
573 struct echo_object *obj = cl2echo_obj(lu2cl(o));
575 return (*p)(env, cookie, "echoclient-object@%p", obj);
578 static const struct lu_object_operations echo_lu_obj_ops = {
579 .loo_object_init = echo_object_init,
580 .loo_object_delete = echo_object_delete,
581 .loo_object_release = NULL,
582 .loo_object_free = echo_object_free,
583 .loo_object_print = echo_object_print,
584 .loo_object_invariant = NULL
586 /** @} echo_lu_ops */
588 /** \defgroup echo_lu_dev_ops lu_device operations
590 * Operations for echo lu device.
594 static struct lu_object *echo_object_alloc(const struct lu_env *env,
595 const struct lu_object_header *hdr,
596 struct lu_device *dev)
598 struct echo_object *eco;
599 struct lu_object *obj = NULL;
602 /* we're the top dev. */
603 LASSERT(hdr == NULL);
604 OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, GFP_NOFS);
606 struct cl_object_header *hdr = &eco->eo_hdr;
608 obj = &echo_obj2cl(eco)->co_lu;
609 cl_object_header_init(hdr);
610 hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
612 lu_object_init(obj, &hdr->coh_lu, dev);
613 lu_object_add_top(&hdr->coh_lu, obj);
615 eco->eo_cl.co_ops = &echo_cl_obj_ops;
616 obj->lo_ops = &echo_lu_obj_ops;
621 static struct lu_device_operations echo_device_lu_ops = {
622 .ldo_object_alloc = echo_object_alloc,
625 /** @} echo_lu_dev_ops */
627 /** \defgroup echo_init Setup and teardown
629 * Init and fini functions for echo client.
633 static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
635 struct cl_site *site = &ed->ed_site_myself;
638 /* initialize site */
639 rc = cl_site_init(site, &ed->ed_cl);
641 CERROR("Cannot initialize site for echo client(%d)\n", rc);
645 rc = lu_site_init_finish(&site->cs_lu);
651 ed->ed_site = &site->cs_lu;
655 static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
658 if (!ed->ed_next_ismd)
659 lu_site_fini(ed->ed_site);
664 static void *echo_thread_key_init(const struct lu_context *ctx,
665 struct lu_context_key *key)
667 struct echo_thread_info *info;
669 OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, GFP_NOFS);
671 info = ERR_PTR(-ENOMEM);
675 static void echo_thread_key_fini(const struct lu_context *ctx,
676 struct lu_context_key *key, void *data)
678 struct echo_thread_info *info = data;
680 OBD_SLAB_FREE_PTR(info, echo_thread_kmem);
683 static struct lu_context_key echo_thread_key = {
684 .lct_tags = LCT_CL_THREAD,
685 .lct_init = echo_thread_key_init,
686 .lct_fini = echo_thread_key_fini,
689 static void *echo_session_key_init(const struct lu_context *ctx,
690 struct lu_context_key *key)
692 struct echo_session_info *session;
694 OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, GFP_NOFS);
696 session = ERR_PTR(-ENOMEM);
700 static void echo_session_key_fini(const struct lu_context *ctx,
701 struct lu_context_key *key, void *data)
703 struct echo_session_info *session = data;
705 OBD_SLAB_FREE_PTR(session, echo_session_kmem);
708 static struct lu_context_key echo_session_key = {
709 .lct_tags = LCT_SESSION,
710 .lct_init = echo_session_key_init,
711 .lct_fini = echo_session_key_fini,
714 LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
716 #ifdef HAVE_SERVER_SUPPORT
717 # define ECHO_SEQ_WIDTH 0xffffffff
718 static int echo_fid_init(struct echo_device *ed, char *obd_name,
719 struct seq_server_site *ss)
725 OBD_ALLOC_PTR(ed->ed_cl_seq);
729 OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
731 GOTO(out_free_seq, rc = -ENOMEM);
733 snprintf(prefix, MAX_OBD_NAME + 5, "srv-%s", obd_name);
735 /* Init client side sequence-manager */
736 seq_client_init(ed->ed_cl_seq, NULL,
738 prefix, ss->ss_server_seq);
739 ed->ed_cl_seq->lcs_width = ECHO_SEQ_WIDTH;
740 OBD_FREE(prefix, MAX_OBD_NAME + 5);
745 OBD_FREE_PTR(ed->ed_cl_seq);
746 ed->ed_cl_seq = NULL;
750 static int echo_fid_fini(struct obd_device *obd)
752 struct echo_device *ed = obd2echo_dev(obd);
756 seq_client_fini(ed->ed_cl_seq);
757 OBD_FREE_PTR(ed->ed_cl_seq);
758 ed->ed_cl_seq = NULL;
764 static void echo_ed_los_fini(const struct lu_env *env, struct echo_device *ed)
767 if (ed != NULL && ed->ed_next_ismd && ed->ed_los != NULL) {
768 local_oid_storage_fini(env, ed->ed_los);
774 echo_md_local_file_create(const struct lu_env *env, struct echo_md_device *emd,
775 struct local_oid_storage *los,
776 const struct lu_fid *pfid, const char *name,
777 __u32 mode, struct lu_fid *fid)
779 struct dt_object *parent = NULL;
780 struct dt_object *dto = NULL;
784 LASSERT(!fid_is_zero(pfid));
785 parent = dt_locate(env, emd->emd_bottom, pfid);
786 if (unlikely(IS_ERR(parent)))
787 RETURN(PTR_ERR(parent));
789 /* create local file with @fid */
790 dto = local_file_find_or_create_with_fid(env, emd->emd_bottom, fid,
793 GOTO(out_put, rc = PTR_ERR(dto));
795 *fid = *lu_object_fid(&dto->do_lu);
797 * since stack is not fully set up the local_storage uses own stack
798 * and we should drop its object from cache
800 dt_object_put_nocache(env, dto);
804 dt_object_put(env, parent);
809 echo_md_root_get(const struct lu_env *env, struct echo_md_device *emd,
810 struct echo_device *ed)
816 /* Setup local dirs */
817 fid.f_seq = FID_SEQ_LOCAL_NAME;
820 rc = local_oid_storage_init(env, emd->emd_bottom, &fid, &ed->ed_los);
824 lu_echo_root_fid(&fid);
825 if (echo_md_seq_site(emd)->ss_node_id == 0) {
826 rc = echo_md_local_file_create(env, emd, ed->ed_los,
827 &emd->emd_local_root_fid,
828 echo_md_root_dir_name, S_IFDIR |
829 S_IRUGO | S_IWUSR | S_IXUGO,
832 CERROR("%s: create md echo root fid failed: rc = %d\n",
833 emd2obd_dev(emd)->obd_name, rc);
837 ed->ed_root_fid = fid;
841 echo_ed_los_fini(env, ed);
845 #endif /* HAVE_SERVER_SUPPORT */
847 static struct lu_device *echo_device_alloc(const struct lu_env *env,
848 struct lu_device_type *t,
849 struct lustre_cfg *cfg)
851 struct lu_device *next;
852 struct echo_device *ed;
853 struct cl_device *cd;
854 struct obd_device *obd = NULL; /* to keep compiler happy */
855 struct obd_device *tgt;
856 const char *tgt_type_name;
863 GOTO(out, rc = -ENOMEM);
867 rc = cl_device_init(cd, t);
871 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
874 obd = class_name2obd(lustre_cfg_string(cfg, 0));
875 LASSERT(obd != NULL);
876 LASSERT(env != NULL);
878 tgt = class_name2obd(lustre_cfg_string(cfg, 1));
880 CERROR("Can not find tgt device %s\n",
881 lustre_cfg_string(cfg, 1));
882 GOTO(out, rc = -ENODEV);
885 next = tgt->obd_lu_dev;
887 if (strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME) == 0) {
888 ed->ed_next_ismd = 1;
889 } else if (strcmp(tgt->obd_type->typ_name, LUSTRE_OST_NAME) == 0 ||
890 strcmp(tgt->obd_type->typ_name, LUSTRE_OSC_NAME) == 0) {
891 ed->ed_next_ismd = 0;
892 rc = echo_site_init(env, ed);
896 GOTO(out, rc = -EINVAL);
901 rc = echo_client_setup(env, obd, cfg);
905 ed->ed_ec = &obd->u.echo_client;
908 if (ed->ed_next_ismd) {
909 #ifdef HAVE_SERVER_SUPPORT
910 /* Suppose to connect to some Metadata layer */
911 struct lu_site *ls = NULL;
912 struct lu_device *ld = NULL;
913 struct md_device *md = NULL;
914 struct echo_md_device *emd = NULL;
918 CERROR("%s is not lu device type!\n",
919 lustre_cfg_string(cfg, 1));
920 GOTO(out, rc = -EINVAL);
923 tgt_type_name = lustre_cfg_string(cfg, 2);
924 if (!tgt_type_name) {
925 CERROR("%s no type name for echo %s setup\n",
926 lustre_cfg_string(cfg, 1),
927 tgt->obd_type->typ_name);
928 GOTO(out, rc = -EINVAL);
933 spin_lock(&ls->ls_ld_lock);
934 list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
935 if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
940 spin_unlock(&ls->ls_ld_lock);
943 CERROR("%s is not lu device type!\n",
944 lustre_cfg_string(cfg, 1));
945 GOTO(out, rc = -EINVAL);
949 /* For MD echo client, it will use the site in MDS stack */
951 ed->ed_cl.cd_lu_dev.ld_site = ls;
952 rc = echo_fid_init(ed, obd->obd_name, lu_site2seq(ls));
954 CERROR("echo fid init error %d\n", rc);
958 md = lu2md_dev(next);
959 emd = lu2emd_dev(&md->md_lu_dev);
960 rc = echo_md_root_get(env, emd, ed);
962 CERROR("%s: get root error: rc = %d\n",
963 emd2obd_dev(emd)->obd_name, rc);
966 #else /* !HAVE_SERVER_SUPPORT */
968 "Local operations are NOT supported on client side. Only remote operations are supported. Metadata client must be run on server side.\n");
969 GOTO(out, rc = -EOPNOTSUPP);
970 #endif /* HAVE_SERVER_SUPPORT */
973 * if echo client is to be stacked upon ost device, the next is
974 * NULL since ost is not a clio device so far
976 if (next != NULL && !lu_device_is_cl(next))
979 tgt_type_name = tgt->obd_type->typ_name;
981 LASSERT(next != NULL);
983 GOTO(out, rc = -EBUSY);
985 next->ld_site = ed->ed_site;
986 rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
987 next->ld_type->ldt_name,
992 LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
997 RETURN(&cd->cd_lu_dev);
1003 rc2 = echo_client_cleanup(obd);
1005 CERROR("Cleanup obd device %s error(%d)\n",
1006 obd->obd_name, rc2);
1011 echo_site_fini(env, ed);
1014 cl_device_fini(&ed->ed_cl);
1026 static int echo_device_init(const struct lu_env *env, struct lu_device *d,
1027 const char *name, struct lu_device *next)
1033 static struct lu_device *echo_device_fini(const struct lu_env *env,
1034 struct lu_device *d)
1036 struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
1037 struct lu_device *next = ed->ed_next;
1039 while (next && !ed->ed_next_ismd)
1040 next = next->ld_type->ldt_ops->ldto_device_fini(env, next);
/* Release a previously enqueued echo lock. */
static void echo_lock_release(const struct lu_env *env,
			      struct echo_lock *ecl,
			      int still_used)
{
	struct cl_lock *clk = echo_lock2cl(ecl);

	cl_lock_release(env, clk);
}
1053 static struct lu_device *echo_device_free(const struct lu_env *env,
1054 struct lu_device *d)
1056 struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
1057 struct echo_client_obd *ec = ed->ed_ec;
1058 struct echo_object *eco;
1059 struct lu_device *next = ed->ed_next;
1061 CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
1064 lu_site_purge(env, ed->ed_site, -1);
1067 * check if there are objects still alive.
1068 * It shouldn't have any object because lu_site_purge would cleanup
1069 * all of cached objects. Anyway, probably the echo device is being
1070 * parallelly accessed.
1072 spin_lock(&ec->ec_lock);
1073 list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
1074 eco->eo_deleted = 1;
1075 spin_unlock(&ec->ec_lock);
1078 lu_site_purge(env, ed->ed_site, -1);
1081 "Waiting for the reference of echo object to be dropped\n");
1083 /* Wait for the last reference to be dropped. */
1084 spin_lock(&ec->ec_lock);
1085 while (!list_empty(&ec->ec_objects)) {
1086 spin_unlock(&ec->ec_lock);
1088 "echo_client still has objects at cleanup time, wait for 1 second\n");
1089 schedule_timeout_uninterruptible(cfs_time_seconds(1));
1090 lu_site_purge(env, ed->ed_site, -1);
1091 spin_lock(&ec->ec_lock);
1093 spin_unlock(&ec->ec_lock);
1095 LASSERT(list_empty(&ec->ec_locks));
1097 CDEBUG(D_INFO, "No object exists, exiting...\n");
1099 echo_client_cleanup(d->ld_obd);
1100 #ifdef HAVE_SERVER_SUPPORT
1101 echo_fid_fini(d->ld_obd);
1102 echo_ed_los_fini(env, ed);
1104 while (next && !ed->ed_next_ismd)
1105 next = next->ld_type->ldt_ops->ldto_device_free(env, next);
1107 LASSERT(ed->ed_site == d->ld_site);
1108 echo_site_fini(env, ed);
1109 cl_device_fini(&ed->ed_cl);
1112 cl_env_cache_purge(~0);
1117 static const struct lu_device_type_operations echo_device_type_ops = {
1118 .ldto_init = echo_type_init,
1119 .ldto_fini = echo_type_fini,
1121 .ldto_start = echo_type_start,
1122 .ldto_stop = echo_type_stop,
1124 .ldto_device_alloc = echo_device_alloc,
1125 .ldto_device_free = echo_device_free,
1126 .ldto_device_init = echo_device_init,
1127 .ldto_device_fini = echo_device_fini
1130 static struct lu_device_type echo_device_type = {
1131 .ldt_tags = LU_DEVICE_CL,
1132 .ldt_name = LUSTRE_ECHO_CLIENT_NAME,
1133 .ldt_ops = &echo_device_type_ops,
1134 .ldt_ctx_tags = LCT_CL_THREAD | LCT_MD_THREAD | LCT_DT_THREAD,
1138 /** \defgroup echo_exports Exported operations
1140 * exporting functions to echo client
1145 /* Interfaces to echo client obd device */
1146 static struct echo_object *
1147 cl_echo_object_find(struct echo_device *d, const struct ost_id *oi)
1150 struct echo_thread_info *info;
1151 struct echo_object_conf *conf;
1152 struct echo_object *eco;
1153 struct cl_object *obj;
1154 struct lov_oinfo *oinfo = NULL;
1160 LASSERTF(ostid_id(oi) != 0, DOSTID"\n", POSTID(oi));
1161 LASSERTF(ostid_seq(oi) == FID_SEQ_ECHO, DOSTID"\n", POSTID(oi));
1163 /* Never return an object if the obd is to be freed. */
1164 if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
1165 RETURN(ERR_PTR(-ENODEV));
1167 env = cl_env_get(&refcheck);
1169 RETURN((void *)env);
1171 info = echo_env_info(env);
1172 conf = &info->eti_conf;
1174 OBD_ALLOC_PTR(oinfo);
1176 GOTO(out, eco = ERR_PTR(-ENOMEM));
1178 oinfo->loi_oi = *oi;
1179 conf->eoc_cl.u.coc_oinfo = oinfo;
1183 * If echo_object_init() is successful then ownership of oinfo
1184 * is transferred to the object.
1186 conf->eoc_oinfo = &oinfo;
1188 fid = &info->eti_fid;
1189 rc = ostid_to_fid(fid, oi, 0);
1191 GOTO(out, eco = ERR_PTR(rc));
1194 * In the function below, .hs_keycmp resolves to
1195 * lu_obj_hop_keycmp()
1197 /* coverity[overrun-buffer-val] */
1198 obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
1200 GOTO(out, eco = (void *)obj);
1202 eco = cl2echo_obj(obj);
1203 if (eco->eo_deleted) {
1204 cl_object_put(env, obj);
1205 eco = ERR_PTR(-EAGAIN);
1210 OBD_FREE_PTR(oinfo);
1212 cl_env_put(env, &refcheck);
1216 static int cl_echo_object_put(struct echo_object *eco)
1219 struct cl_object *obj = echo_obj2cl(eco);
1223 env = cl_env_get(&refcheck);
1225 RETURN(PTR_ERR(env));
1227 /* an external function to kill an object? */
1228 if (eco->eo_deleted) {
1229 struct lu_object_header *loh = obj->co_lu.lo_header;
1231 LASSERT(&eco->eo_hdr == luh2coh(loh));
1232 set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
1235 cl_object_put(env, obj);
1236 cl_env_put(env, &refcheck);
1240 static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
1241 u64 start, u64 end, int mode,
1242 __u64 *cookie, __u32 enqflags)
1245 struct cl_lock *lck;
1246 struct cl_object *obj;
1247 struct cl_lock_descr *descr;
1248 struct echo_thread_info *info;
1252 info = echo_env_info(env);
1254 lck = &info->eti_lock;
1255 obj = echo_obj2cl(eco);
1257 memset(lck, 0, sizeof(*lck));
1258 descr = &lck->cll_descr;
1259 descr->cld_obj = obj;
1260 descr->cld_start = cl_index(obj, start);
1261 descr->cld_end = cl_index(obj, end);
1262 descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
1263 descr->cld_enq_flags = enqflags;
1266 rc = cl_lock_request(env, io, lck);
1268 struct echo_client_obd *ec = eco->eo_dev->ed_ec;
1269 struct echo_lock *el;
1271 el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
1272 spin_lock(&ec->ec_lock);
1273 if (list_empty(&el->el_chain)) {
1274 list_add(&el->el_chain, &ec->ec_locks);
1275 el->el_cookie = ++ec->ec_unique;
1277 atomic_inc(&el->el_refcount);
1278 *cookie = el->el_cookie;
1279 spin_unlock(&ec->ec_lock);
1284 static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
1287 struct echo_client_obd *ec = ed->ed_ec;
1288 struct echo_lock *ecl = NULL;
1289 struct list_head *el;
1290 int found = 0, still_used = 0;
1293 LASSERT(ec != NULL);
1294 spin_lock(&ec->ec_lock);
1295 list_for_each(el, &ec->ec_locks) {
1296 ecl = list_entry(el, struct echo_lock, el_chain);
1297 CDEBUG(D_INFO, "ecl: %p, cookie: %#llx\n", ecl, ecl->el_cookie);
1298 found = (ecl->el_cookie == cookie);
1300 if (atomic_dec_and_test(&ecl->el_refcount))
1301 list_del_init(&ecl->el_chain);
1307 spin_unlock(&ec->ec_lock);
1312 echo_lock_release(env, ecl, still_used);
1316 static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
1317 struct pagevec *pvec)
1319 struct echo_thread_info *info;
1320 struct cl_2queue *queue;
1323 info = echo_env_info(env);
1324 LASSERT(io == &info->eti_io);
1326 queue = &info->eti_queue;
1328 for (i = 0; i < pagevec_count(pvec); i++) {
1329 struct page *vmpage = pvec->pages[i];
1330 struct cl_page *page = (struct cl_page *)vmpage->private;
1332 cl_page_list_add(&queue->c2_qout, page);
1336 static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
1337 struct page **pages, int npages, int async)
1340 struct echo_thread_info *info;
1341 struct cl_object *obj = echo_obj2cl(eco);
1342 struct echo_device *ed = eco->eo_dev;
1343 struct cl_2queue *queue;
1345 struct cl_page *clp;
1346 struct lustre_handle lh = { 0 };
1347 int page_size = cl_page_size(obj);
1353 LASSERT((offset & ~PAGE_MASK) == 0);
1354 LASSERT(ed->ed_next != NULL);
1355 env = cl_env_get(&refcheck);
1357 RETURN(PTR_ERR(env));
1359 info = echo_env_info(env);
1361 queue = &info->eti_queue;
1363 cl_2queue_init(queue);
1365 io->ci_ignore_layout = 1;
1366 rc = cl_io_init(env, io, CIT_MISC, obj);
1371 rc = cl_echo_enqueue0(env, eco, offset,
1372 offset + npages * PAGE_SIZE - 1,
1373 rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
1376 GOTO(error_lock, rc);
1378 for (i = 0; i < npages; i++) {
1380 clp = cl_page_find(env, obj, cl_index(obj, offset),
1381 pages[i], CPT_TRANSIENT);
1386 LASSERT(clp->cp_type == CPT_TRANSIENT);
1388 rc = cl_page_own(env, io, clp);
1390 LASSERT(clp->cp_state == CPS_FREEING);
1391 cl_page_put(env, clp);
1395 cl_2queue_add(queue, clp);
1398 * drop the reference count for cl_page_find, so that the page
1399 * will be freed in cl_2queue_fini.
1401 cl_page_put(env, clp);
1402 cl_page_clip(env, clp, 0, page_size);
1404 offset += page_size;
1408 enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE;
1410 async = async && (typ == CRT_WRITE);
1412 rc = cl_io_commit_async(env, io, &queue->c2_qin,
1414 echo_commit_callback);
1416 rc = cl_io_submit_sync(env, io, typ, queue, 0);
1417 CDEBUG(D_INFO, "echo_client %s write returns %d\n",
1418 async ? "async" : "sync", rc);
1421 cl_echo_cancel0(env, ed, lh.cookie);
1424 cl_2queue_discard(env, io, queue);
1425 cl_2queue_disown(env, io, queue);
1426 cl_2queue_fini(env, queue);
1427 cl_io_fini(env, io);
1429 cl_env_put(env, &refcheck);
1432 /** @} echo_exports */
1434 static u64 last_object_id;
1436 #ifdef HAVE_SERVER_SUPPORT
1437 static inline void echo_md_build_name(struct lu_name *lname, char *name,
1440 snprintf(name, ETI_NAME_LEN, "%llu", id);
1441 lname->ln_name = name;
1442 lname->ln_namelen = strlen(name);
1445 /* similar to mdt_attr_get_complex */
/*
 * Fetch the LOV or LMV xattr of @o into the per-thread big-lmm buffer.
 * First probes the required size with LU_BUF_NULL, grows eti_big_lmm to a
 * power-of-two-rounded size if it is too small, then re-reads the xattr for
 * real.  On success ma->ma_lmm points at the (thread-local, reused) buffer,
 * ma->ma_lmm_size is the actual xattr size and MA_LOV/MA_LMV is set in
 * ma->ma_valid.  Exactly one of MA_LOV/MA_LMV must be requested.
 */
1446 static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
1449 struct echo_thread_info *info = echo_env_info(env);
1454 LASSERT(ma->ma_lmm_size > 0);
1456 LASSERT(ma->ma_need & (MA_LOV | MA_LMV));
1457 if (ma->ma_need & MA_LOV)
1458 rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LOV);
1460 rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LMV);
1465 /* big_lmm may need to be grown */
1466 if (info->eti_big_lmmsize < rc) {
/* round up to limit reallocation churn on repeated growth */
1467 int size = size_roundup_power2(rc);
1469 if (info->eti_big_lmmsize > 0) {
1470 /* free old buffer */
1471 LASSERT(info->eti_big_lmm);
1472 OBD_FREE_LARGE(info->eti_big_lmm,
1473 info->eti_big_lmmsize);
1474 info->eti_big_lmm = NULL;
1475 info->eti_big_lmmsize = 0;
1478 OBD_ALLOC_LARGE(info->eti_big_lmm, size);
1479 if (!info->eti_big_lmm)
1481 info->eti_big_lmmsize = size;
1483 LASSERT(info->eti_big_lmmsize >= rc);
/* second pass: read the xattr into the (now large enough) buffer */
1485 info->eti_buf.lb_buf = info->eti_big_lmm;
1486 info->eti_buf.lb_len = info->eti_big_lmmsize;
1487 if (ma->ma_need & MA_LOV)
1488 rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LOV);
1490 rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LMV);
1494 if (ma->ma_need & MA_LOV)
1495 ma->ma_valid |= MA_LOV;
1497 ma->ma_valid |= MA_LMV;
/* hand the caller the thread-local buffer; rc is the xattr size here */
1499 ma->ma_lmm = info->eti_big_lmm;
1500 ma->ma_lmm_size = rc;
/*
 * Gather the attribute set requested in ma->ma_need for @next: plain inode
 * attributes (MA_INODE), striping xattrs (MA_LOV for regular files and
 * directories, MA_LMV for directories) and the default POSIX ACL
 * (MA_ACL_DEF, directories only, when CONFIG_LUSTRE_FS_POSIX_ACL).
 * For the striping xattrs a -ENODATA answer is treated as "no striping"
 * (size reset to 0) and -ERANGE falls back to echo_big_lmm_get() with the
 * growable thread-local buffer; other errors abort via GOTO(out, ...).
 * Each fetched item is flagged in ma->ma_valid.
 */
1505 static int echo_attr_get_complex(const struct lu_env *env,
1506 struct md_object *next,
1509 struct echo_thread_info *info = echo_env_info(env);
1510 struct lu_buf *buf = &info->eti_buf;
1511 umode_t mode = lu_object_attr(&next->mo_lu);
1518 if (ma->ma_need & MA_INODE) {
1519 rc = mo_attr_get(env, next, ma);
1522 ma->ma_valid |= MA_INODE;
1525 if ((ma->ma_need & MA_LOV) && (S_ISREG(mode) || S_ISDIR(mode))) {
1526 LASSERT(ma->ma_lmm_size > 0);
1527 buf->lb_buf = ma->ma_lmm;
1528 buf->lb_len = ma->ma_lmm_size;
1529 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LOV);
1531 ma->ma_lmm_size = rc2;
1532 ma->ma_valid |= MA_LOV;
1533 } else if (rc2 == -ENODATA) {
/* no LOV EA on this object — not an error */
1535 ma->ma_lmm_size = 0;
1536 } else if (rc2 == -ERANGE) {
/* caller's buffer too small — retry via the growable big-lmm path */
1537 rc2 = echo_big_lmm_get(env, next, ma);
1539 GOTO(out, rc = rc2);
1541 GOTO(out, rc = rc2);
1545 if ((ma->ma_need & MA_LMV) && S_ISDIR(mode)) {
1546 LASSERT(ma->ma_lmm_size > 0);
1547 buf->lb_buf = ma->ma_lmm;
1548 buf->lb_len = ma->ma_lmm_size;
1549 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
1551 ma->ma_lmm_size = rc2;
1552 ma->ma_valid |= MA_LMV;
1553 } else if (rc2 == -ENODATA) {
1555 ma->ma_lmm_size = 0;
1556 } else if (rc2 == -ERANGE) {
1557 rc2 = echo_big_lmm_get(env, next, ma);
1559 GOTO(out, rc = rc2);
1561 GOTO(out, rc = rc2);
1565 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
1566 if ((ma->ma_need & MA_ACL_DEF) && S_ISDIR(mode)) {
1567 buf->lb_buf = ma->ma_acl;
1568 buf->lb_len = ma->ma_acl_size;
1569 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
1571 ma->ma_acl_size = rc2;
1572 ma->ma_valid |= MA_ACL_DEF;
1573 } else if (rc2 == -ENODATA) {
1575 ma->ma_acl_size = 0;
1577 GOTO(out, rc = rc2);
1582 CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = %#llx ma_lmm=%p\n",
1583 rc, ma->ma_valid, ma->ma_lmm);
/*
 * Create one child object named @lname under @parent with fid @fid.
 * First verifies via mdo_lookup() that the name does not already exist
 * (only -ENOENT lets creation proceed), allocates the child lu_object with
 * LOC_F_NEW, locates the MD layer slice, and calls mdo_create() with
 * sp_cr_lookup cleared since the lookup was already done here.
 * The ec_child reference is dropped on exit (out_put path).
 */
1588 echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
1589 struct md_object *parent, struct lu_fid *fid,
1590 struct lu_name *lname, struct md_op_spec *spec,
1593 struct lu_object *ec_child, *child;
1594 struct lu_device *ld = ed->ed_next;
1595 struct echo_thread_info *info = echo_env_info(env);
1596 struct lu_fid *fid2 = &info->eti_fid2;
1597 struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
/* name must not exist yet: only -ENOENT allows us to continue */
1602 rc = mdo_lookup(env, parent, lname, fid2, spec);
1605 else if (rc != -ENOENT)
1608 ec_child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev,
1610 if (IS_ERR(ec_child)) {
1611 CERROR("Can not find the child "DFID": rc = %ld\n", PFID(fid),
1613 RETURN(PTR_ERR(ec_child));
1616 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1618 CERROR("Can not locate the child "DFID"\n", PFID(fid));
1619 GOTO(out_put, rc = -EINVAL);
1622 CDEBUG(D_RPCTRACE, "Start creating object "DFID" %s %p\n",
1623 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
1626 * Do not perform lookup sanity check. We know that name does not exist.
1628 spec->sp_cr_lookup = 0;
1629 rc = mdo_create(env, parent, lname, lu2md(child), spec, ma);
1631 CERROR("Can not create child "DFID": rc = %d\n", PFID(fid), rc);
1634 CDEBUG(D_RPCTRACE, "End creating object "DFID" %s %p rc = %d\n",
1635 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent, rc);
1638 lu_object_put(env, ec_child);
/*
 * Point ma->ma_lmm at a scratch buffer for striping xattrs: the small
 * fixed eti_lmm when the next device is not MDD, otherwise the
 * preallocated eti_big_lmm (which must already exist — see the allocation
 * in echo_md_handler()).
 */
1642 static int echo_set_lmm_size(const struct lu_env *env, struct lu_device *ld,
1645 struct echo_thread_info *info = echo_env_info(env);
1647 if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
1648 ma->ma_lmm = (void *)&info->eti_lmm;
1649 ma->ma_lmm_size = sizeof(info->eti_lmm);
1651 LASSERT(info->eti_big_lmmsize);
1652 ma->ma_lmm = info->eti_big_lmm;
1653 ma->ma_lmm_size = info->eti_big_lmmsize;
/*
 * For a (possibly striped) directory @obj, pick the stripe that a child
 * name hashes to and return it in *new_parent.  Reads the directory's LMV
 * xattr via echo_attr_get_complex(); if the directory is not striped
 * (MA_LMV not valid) the code path for that case is elided in this
 * extract — presumably *new_parent falls back to @obj itself (TODO
 * confirm).  The name is either the caller-supplied (@name, @namelen)
 * pair or, when absent, one synthesized from @id by echo_md_build_name().
 * The chosen stripe object is looked up by fid and located at the next
 * device layer.
 */
1660 echo_md_dir_stripe_choose(const struct lu_env *env, struct echo_device *ed,
1661 struct lu_object *obj, const char *name,
1662 unsigned int namelen, __u64 id,
1663 struct lu_object **new_parent)
1665 struct echo_thread_info *info = echo_env_info(env);
1666 struct md_attr *ma = &info->eti_ma;
1667 struct lmv_mds_md_v1 *lmv;
1668 struct lu_device *ld = ed->ed_next;
1670 struct lu_name tmp_ln_name;
1671 struct lu_fid stripe_fid;
1672 struct lu_object *stripe_obj;
1675 LASSERT(obj != NULL);
1676 LASSERT(S_ISDIR(obj->lo_header->loh_attr));
1678 memset(ma, 0, sizeof(*ma));
1679 echo_set_lmm_size(env, ld, ma);
1680 ma->ma_need = MA_LMV;
1681 rc = echo_attr_get_complex(env, lu2md(obj), ma);
1683 CERROR("Can not getattr child "DFID": rc = %d\n",
1684 PFID(lu_object_fid(obj)), rc);
1688 if (!(ma->ma_valid & MA_LMV)) {
1693 lmv = (struct lmv_mds_md_v1 *)ma->ma_lmm;
1694 if (!lmv_is_sane(lmv)) {
1696 CERROR("Invalid mds md magic %x "DFID": rc = %d\n",
1697 le32_to_cpu(lmv->lmv_magic), PFID(lu_object_fid(obj)),
/* use the caller's name when given, else build one from the id */
1703 tmp_ln_name.ln_name = name;
1704 tmp_ln_name.ln_namelen = namelen;
1707 echo_md_build_name(&tmp_ln_name, info->eti_name, id);
/* hash the name to its stripe index, then resolve the stripe fid */
1710 idx = lmv_name_to_stripe_index(lmv, tmp_ln_name.ln_name,
1711 tmp_ln_name.ln_namelen);
1713 LASSERT(idx < le32_to_cpu(lmv->lmv_stripe_count));
1714 fid_le_to_cpu(&stripe_fid, &lmv->lmv_stripe_fids[idx]);
1716 stripe_obj = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, &stripe_fid,
1718 if (IS_ERR(stripe_obj)) {
1719 rc = PTR_ERR(stripe_obj);
1720 CERROR("Can not find the parent "DFID": rc = %d\n",
1721 PFID(&stripe_fid), rc);
1725 *new_parent = lu_object_locate(stripe_obj->lo_header, ld->ld_type);
1727 lu_object_put(env, stripe_obj);
/*
 * ECHO_MD_CREATE/ECHO_MD_MKDIR worker: create either one named child (when
 * @name is given) or @count children with names generated from sequential
 * @id values, under the stripe of @ec_parent chosen by
 * echo_md_dir_stripe_choose().  @stripe_count/@stripe_offset describe the
 * striping requested for the new object(s): an LMV user md for directories
 * or a LOV V3 user md for files, attached via spec->u.sp_ea with
 * MDS_OPEN_HAS_EA.  stripe_count == -1 means "default striping" (no EA
 * attached); stripe_count == 0 attaches no write-mode EA at all.
 */
1734 static int echo_create_md_object(const struct lu_env *env,
1735 struct echo_device *ed,
1736 struct lu_object *ec_parent,
1738 char *name, int namelen,
1739 __u64 id, __u32 mode, int count,
1740 int stripe_count, int stripe_offset)
1742 struct lu_object *parent;
1743 struct lu_object *new_parent;
1744 struct echo_thread_info *info = echo_env_info(env);
1745 struct lu_name *lname = &info->eti_lname;
1746 struct md_op_spec *spec = &info->eti_spec;
1747 struct md_attr *ma = &info->eti_ma;
1748 struct lu_device *ld = ed->ed_next;
1756 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1760 rc = echo_md_dir_stripe_choose(env, ed, parent, name, namelen,
1765 LASSERT(new_parent != NULL);
1766 memset(ma, 0, sizeof(*ma));
1767 memset(spec, 0, sizeof(*spec));
1768 echo_set_lmm_size(env, ld, ma);
1769 if (stripe_count != 0) {
1770 spec->sp_cr_flags |= MDS_FMODE_WRITE;
/* -1 means default striping: send no explicit EA */
1771 if (stripe_count != -1) {
1772 if (S_ISDIR(mode)) {
1773 struct lmv_user_md *lmu;
1775 lmu = (struct lmv_user_md *)&info->eti_lum;
1776 lmu->lum_magic = LMV_USER_MAGIC;
1777 lmu->lum_stripe_offset = stripe_offset;
1778 lmu->lum_stripe_count = stripe_count;
1779 lmu->lum_hash_type = LMV_HASH_TYPE_FNV_1A_64;
1780 spec->u.sp_ea.eadata = lmu;
1781 spec->u.sp_ea.eadatalen = sizeof(*lmu);
1783 struct lov_user_md_v3 *lum = &info->eti_lum;
1785 lum->lmm_magic = LOV_USER_MAGIC_V3;
1786 lum->lmm_stripe_count = stripe_count;
1787 lum->lmm_stripe_offset = stripe_offset;
1788 lum->lmm_pattern = LOV_PATTERN_NONE;
1789 spec->u.sp_ea.eadata = lum;
1790 spec->u.sp_ea.eadatalen = sizeof(*lum);
1792 spec->sp_cr_flags |= MDS_OPEN_HAS_EA;
1796 ma->ma_attr.la_mode = mode;
1797 ma->ma_attr.la_valid = LA_CTIME | LA_MODE;
1798 ma->ma_attr.la_ctime = ktime_get_real_seconds();
1801 lname->ln_name = name;
1802 lname->ln_namelen = namelen;
1803 /* If name is specified, only create one object by name */
1804 rc = echo_md_create_internal(env, ed, lu2md(new_parent), fid,
1809 /* Create multiple object sequenced by id */
1810 for (i = 0; i < count; i++) {
1811 char *tmp_name = info->eti_name;
1813 echo_md_build_name(lname, tmp_name, id);
1815 rc = echo_md_create_internal(env, ed, lu2md(new_parent),
1816 fid, lname, spec, ma);
1818 CERROR("Can not create child %s: rc = %d\n", tmp_name,
/* drop the stripe reference only if we switched parents */
1827 if (new_parent != parent)
1828 lu_object_put(env, new_parent);
/*
 * Look up @lname under @parent via mdo_lookup() and return a referenced
 * lu_object for the resulting fid (found at the echo device top layer),
 * or an ERR_PTR on lookup failure.  Caller must lu_object_put() the
 * returned object.
 */
1833 static struct lu_object *echo_md_lookup(const struct lu_env *env,
1834 struct echo_device *ed,
1835 struct md_object *parent,
1836 struct lu_name *lname)
1838 struct echo_thread_info *info = echo_env_info(env);
1839 struct lu_fid *fid = &info->eti_fid;
1840 struct lu_object *child;
1844 CDEBUG(D_INFO, "lookup %s in parent "DFID" %p\n", lname->ln_name,
1847 rc = mdo_lookup(env, parent, lname, fid, NULL);
1849 CERROR("lookup %s: rc = %d\n", lname->ln_name, rc);
1850 RETURN(ERR_PTR(rc));
1854 * In the function below, .hs_keycmp resolves to
1855 * lu_obj_hop_keycmp()
1857 /* coverity[overrun-buffer-val] */
1858 child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
/*
 * ECHO_MD_SETATTR worker: for @count ids starting at @id, look each child
 * up by generated name under the appropriate stripe of @ec_parent and set
 * a "user.test1" xattr on it from the thread-local eti_xattr_buf.
 * NOTE(review): the same eti_name buffer backs both the generated child
 * name and the later sprintf of the xattr name — the child name is
 * consumed by the lookup before the buffer is reused.
 */
1863 static int echo_setattr_object(const struct lu_env *env,
1864 struct echo_device *ed,
1865 struct lu_object *ec_parent,
1866 __u64 id, int count)
1868 struct lu_object *parent;
1869 struct lu_object *new_parent;
1870 struct echo_thread_info *info = echo_env_info(env);
1871 struct lu_name *lname = &info->eti_lname;
1872 char *name = info->eti_name;
1873 struct lu_device *ld = ed->ed_next;
1874 struct lu_buf *buf = &info->eti_buf;
1882 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1886 rc = echo_md_dir_stripe_choose(env, ed, parent, NULL, 0, id,
1891 for (i = 0; i < count; i++) {
1892 struct lu_object *ec_child, *child;
1894 echo_md_build_name(lname, name, id);
1896 ec_child = echo_md_lookup(env, ed, lu2md(new_parent), lname);
1897 if (IS_ERR(ec_child)) {
1898 rc = PTR_ERR(ec_child);
1899 CERROR("Can't find child %s: rc = %d\n",
1900 lname->ln_name, rc);
1904 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1906 CERROR("Can not locate the child %s\n", lname->ln_name);
1907 lu_object_put(env, ec_child);
1912 CDEBUG(D_RPCTRACE, "Start setattr object "DFID"\n",
1913 PFID(lu_object_fid(child)));
1915 buf->lb_buf = info->eti_xattr_buf;
1916 buf->lb_len = sizeof(info->eti_xattr_buf);
1918 sprintf(name, "%s.test1", XATTR_USER_PREFIX);
1919 rc = mo_xattr_set(env, lu2md(child), buf, name,
1922 CERROR("Can not setattr child "DFID": rc = %d\n",
1923 PFID(lu_object_fid(child)), rc);
1924 lu_object_put(env, ec_child);
1927 CDEBUG(D_RPCTRACE, "End setattr object "DFID"\n",
1928 PFID(lu_object_fid(child)));
1930 lu_object_put(env, ec_child);
1933 if (new_parent != parent)
1934 lu_object_put(env, new_parent);
/*
 * ECHO_MD_GETATTR worker: for @count ids starting at @id, look each child
 * up by generated name and fetch its full attribute set (inode, LOV, PFID,
 * HSM, default ACL) via echo_attr_get_complex().  The ACL buffer is the
 * thread-local eti_xattr_buf; the lmm buffer is reset each iteration
 * because echo_big_lmm_get() may have redirected ma->ma_lmm.
 */
1939 static int echo_getattr_object(const struct lu_env *env,
1940 struct echo_device *ed,
1941 struct lu_object *ec_parent,
1942 __u64 id, int count)
1944 struct lu_object *parent;
1945 struct lu_object *new_parent;
1946 struct echo_thread_info *info = echo_env_info(env);
1947 struct lu_name *lname = &info->eti_lname;
1948 char *name = info->eti_name;
1949 struct md_attr *ma = &info->eti_ma;
1950 struct lu_device *ld = ed->ed_next;
1958 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1962 rc = echo_md_dir_stripe_choose(env, ed, parent, NULL, 0, id,
1967 memset(ma, 0, sizeof(*ma));
1968 ma->ma_need |= MA_INODE | MA_LOV | MA_PFID | MA_HSM | MA_ACL_DEF;
1969 ma->ma_acl = info->eti_xattr_buf;
1970 ma->ma_acl_size = sizeof(info->eti_xattr_buf);
1972 for (i = 0; i < count; i++) {
1973 struct lu_object *ec_child, *child;
1976 echo_md_build_name(lname, name, id);
/* re-point ma_lmm every pass: big-lmm fallback may have moved it */
1977 echo_set_lmm_size(env, ld, ma);
1979 ec_child = echo_md_lookup(env, ed, lu2md(new_parent), lname);
1980 if (IS_ERR(ec_child)) {
1981 CERROR("Can't find child %s: rc = %ld\n",
1982 lname->ln_name, PTR_ERR(ec_child));
1983 RETURN(PTR_ERR(ec_child));
1986 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1988 CERROR("Can not locate the child %s\n", lname->ln_name);
1989 lu_object_put(env, ec_child);
1993 CDEBUG(D_RPCTRACE, "Start getattr object "DFID"\n",
1994 PFID(lu_object_fid(child)));
1995 rc = echo_attr_get_complex(env, lu2md(child), ma);
1997 CERROR("Can not getattr child "DFID": rc = %d\n",
1998 PFID(lu_object_fid(child)), rc);
1999 lu_object_put(env, ec_child);
2002 CDEBUG(D_RPCTRACE, "End getattr object "DFID"\n",
2003 PFID(lu_object_fid(child)));
2005 lu_object_put(env, ec_child);
2008 if (new_parent != parent)
2009 lu_object_put(env, new_parent);
/*
 * ECHO_MD_LOOKUP worker: issue @count mdo_lookup() calls for names
 * generated from sequential ids starting at @id, under the stripe of
 * @ec_parent selected for @id.  Pure lookup benchmark — the resulting
 * fids are discarded.
 */
2014 static int echo_lookup_object(const struct lu_env *env,
2015 struct echo_device *ed,
2016 struct lu_object *ec_parent,
2017 __u64 id, int count)
2019 struct lu_object *parent;
2020 struct lu_object *new_parent;
2021 struct echo_thread_info *info = echo_env_info(env);
2022 struct lu_name *lname = &info->eti_lname;
2023 char *name = info->eti_name;
2024 struct lu_fid *fid = &info->eti_fid;
2025 struct lu_device *ld = ed->ed_next;
2031 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
2035 rc = echo_md_dir_stripe_choose(env, ed, parent, NULL, 0, id,
2040 /*prepare the requests*/
2041 for (i = 0; i < count; i++) {
2042 echo_md_build_name(lname, name, id);
2044 CDEBUG(D_RPCTRACE, "Start lookup object "DFID" %s %p\n",
2045 PFID(lu_object_fid(new_parent)), lname->ln_name,
2048 rc = mdo_lookup(env, lu2md(new_parent), lname, fid, NULL);
2050 CERROR("Can not lookup child %s: rc = %d\n", name, rc);
2054 CDEBUG(D_RPCTRACE, "End lookup object "DFID" %s %p\n",
2055 PFID(lu_object_fid(new_parent)), lname->ln_name,
2061 if (new_parent != parent)
2062 lu_object_put(env, new_parent);
/*
 * Unlink one child named @lname from @parent: look it up, locate the MD
 * layer slice, refuse remote objects (-EPERM — cross-MDT unlink is not
 * supported here), then mdo_unlink().  The lookup reference is dropped on
 * the out_put path.
 */
2067 static int echo_md_destroy_internal(const struct lu_env *env,
2068 struct echo_device *ed,
2069 struct md_object *parent,
2070 struct lu_name *lname,
2073 struct lu_device *ld = ed->ed_next;
2074 struct lu_object *ec_child;
2075 struct lu_object *child;
2080 ec_child = echo_md_lookup(env, ed, parent, lname);
2081 if (IS_ERR(ec_child)) {
2082 CERROR("Can't find child %s: rc = %ld\n", lname->ln_name,
2084 RETURN(PTR_ERR(ec_child));
2087 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
2089 CERROR("Can not locate the child %s\n", lname->ln_name);
2090 GOTO(out_put, rc = -EINVAL);
2093 if (lu_object_remote(child)) {
2094 CERROR("Can not destroy remote object %s: rc = %d\n",
2095 lname->ln_name, -EPERM);
2096 GOTO(out_put, rc = -EPERM);
2098 CDEBUG(D_RPCTRACE, "Start destroy object "DFID" %s %p\n",
2099 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
2101 rc = mdo_unlink(env, parent, lu2md(child), lname, ma, 0);
2103 CERROR("Can not unlink child %s: rc = %d\n",
2104 lname->ln_name, rc);
2107 CDEBUG(D_RPCTRACE, "End destroy object "DFID" %s %p\n",
2108 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
2110 lu_object_put(env, ec_child);
/*
 * ECHO_MD_DESTROY/ECHO_MD_RMDIR worker: unlink either one named child
 * (when @name is given) or @count children named from sequential ids,
 * under the stripe of @ec_parent chosen for the name/id.  @mode is placed
 * in ma so the unlink knows the victim type; ctime is refreshed.
 */
2114 static int echo_destroy_object(const struct lu_env *env,
2115 struct echo_device *ed,
2116 struct lu_object *ec_parent,
2117 char *name, int namelen,
2118 __u64 id, __u32 mode,
2121 struct echo_thread_info *info = echo_env_info(env);
2122 struct lu_name *lname = &info->eti_lname;
2123 struct md_attr *ma = &info->eti_ma;
2124 struct lu_device *ld = ed->ed_next;
2125 struct lu_object *parent;
2126 struct lu_object *new_parent;
2131 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
2135 rc = echo_md_dir_stripe_choose(env, ed, parent, name, namelen,
2140 memset(ma, 0, sizeof(*ma));
2141 ma->ma_attr.la_mode = mode;
2142 ma->ma_attr.la_valid = LA_CTIME;
2143 ma->ma_attr.la_ctime = ktime_get_real_seconds();
2144 ma->ma_need = MA_INODE;
2148 lname->ln_name = name;
2149 lname->ln_namelen = namelen;
2150 rc = echo_md_destroy_internal(env, ed, lu2md(new_parent), lname,
2155 /*prepare the requests*/
2156 for (i = 0; i < count; i++) {
2157 char *tmp_name = info->eti_name;
2160 echo_md_build_name(lname, tmp_name, id);
2162 rc = echo_md_destroy_internal(env, ed, lu2md(new_parent), lname,
2165 CERROR("Can not unlink child %s: rc = %d\n", name, rc);
2172 if (new_parent != parent)
2173 lu_object_put(env, new_parent);
/*
 * Walk a '/'-separated @path component by component starting from the
 * device root fid (ed_root_fid), using echo_md_lookup() at each step and
 * releasing the previous component's reference as it descends.  Returns a
 * referenced lu_object for the final component, or ERR_PTR(rc).
 * NOTE(review): loop structure and termination are partially elided here.
 */
2178 static struct lu_object *echo_resolve_path(const struct lu_env *env,
2179 struct echo_device *ed, char *path,
2182 struct lu_device *ld = ed->ed_next;
2183 struct echo_thread_info *info = echo_env_info(env);
2184 struct lu_fid *fid = &info->eti_fid;
2185 struct lu_name *lname = &info->eti_lname;
2186 struct lu_object *parent = NULL;
2187 struct lu_object *child = NULL;
/* start the walk at the echo device's root directory */
2191 *fid = ed->ed_root_fid;
2194 * In the function below, .hs_keycmp resolves to
2195 * lu_obj_hop_keycmp()
2197 /* coverity[overrun-buffer-val] */
2198 parent = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
2199 if (IS_ERR(parent)) {
2200 CERROR("Can not find the parent "DFID": rc = %ld\n",
2201 PFID(fid), PTR_ERR(parent));
2206 struct lu_object *ld_parent;
2209 e = strsep(&path, "/");
2214 if (!path || path[0] == '\0')
2220 lname->ln_namelen = strlen(e);
2222 ld_parent = lu_object_locate(parent->lo_header, ld->ld_type);
2224 lu_object_put(env, parent);
2229 child = echo_md_lookup(env, ed, lu2md(ld_parent), lname);
/* drop the parent ref before descending into the child */
2230 lu_object_put(env, parent);
2231 if (IS_ERR(child)) {
2232 rc = (int)PTR_ERR(child);
2233 CERROR("lookup %s under parent "DFID": rc = %d\n",
2234 lname->ln_name, PFID(lu_object_fid(ld_parent)),
2241 RETURN(ERR_PTR(rc));
/*
 * Populate the env's lu_ucred from the current task's credentials
 * (uid/gid/fsuid/fsgid and capabilities), mirroring what an incoming RPC
 * would provide.  Non-root fsuid loses the filesystem capability mask.
 * Marked UCRED_INVALID while being filled, UCRED_NEW when complete.
 */
2246 static void echo_ucred_init(struct lu_env *env)
2248 struct lu_ucred *ucred = lu_ucred(env);
2250 ucred->uc_valid = UCRED_INVALID;
2252 ucred->uc_suppgids[0] = -1;
2253 ucred->uc_suppgids[1] = -1;
2255 ucred->uc_uid = ucred->uc_o_uid =
2256 from_kuid(&init_user_ns, current_uid());
2257 ucred->uc_gid = ucred->uc_o_gid =
2258 from_kgid(&init_user_ns, current_gid());
2259 ucred->uc_fsuid = ucred->uc_o_fsuid =
2260 from_kuid(&init_user_ns, current_fsuid());
2261 ucred->uc_fsgid = ucred->uc_o_fsgid =
2262 from_kgid(&init_user_ns, current_fsgid());
2263 ucred->uc_cap = cfs_curproc_cap_pack();
2265 /* remove fs privilege for non-root user. */
2266 if (ucred->uc_fsuid)
2267 ucred->uc_cap &= ~CFS_CAP_FS_MASK;
2268 ucred->uc_valid = UCRED_NEW;
/* Reset the env's credential back to its pristine UCRED_INIT state. */
2271 static void echo_ucred_fini(struct lu_env *env)
2273 struct lu_ucred *ucred = lu_ucred(env);
2275 ucred->uc_valid = UCRED_INIT;
/*
 * Dispatcher for OBD_IOC_ECHO_MD sub-commands (create/mkdir, destroy/
 * rmdir, lookup, getattr, setattr).  Requires the next layer to be MDD.
 * Sets up the MD-tagged env context, allocates the thread-local big-lmm
 * buffer (MIN_MD_SIZE), resolves @path to the parent object, copies the
 * optional child name from userspace (ioc_pbuf2/ioc_plen2), and runs the
 * command with fake local credentials (echo_ucred_init/fini).  All
 * resources are unwound through the out_name/out_put/out_free/out_env
 * labels in reverse order of acquisition.
 */
2278 static int echo_md_handler(struct echo_device *ed, int command,
2279 char *path, int path_len, __u64 id, int count,
2280 struct obd_ioctl_data *data)
2282 struct echo_thread_info *info;
2283 struct lu_device *ld = ed->ed_next;
2286 struct lu_object *parent;
2288 int namelen = data->ioc_plen2;
2293 CERROR("MD echo client is not being initialized properly\n");
2297 if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
2298 CERROR("Only support MDD layer right now!\n");
2302 env = cl_env_get(&refcheck);
2304 RETURN(PTR_ERR(env));
2306 rc = lu_env_refill_by_tags(env, ECHO_MD_CTX_TAG, ECHO_SES_TAG);
2310 /* init big_lmm buffer */
2311 info = echo_env_info(env);
2312 LASSERT(info->eti_big_lmm == NULL);
2313 OBD_ALLOC_LARGE(info->eti_big_lmm, MIN_MD_SIZE);
2314 if (!info->eti_big_lmm)
2315 GOTO(out_env, rc = -ENOMEM);
2316 info->eti_big_lmmsize = MIN_MD_SIZE;
2318 parent = echo_resolve_path(env, ed, path, path_len);
2319 if (IS_ERR(parent)) {
2320 CERROR("Can not resolve the path %s: rc = %ld\n", path,
2322 GOTO(out_free, rc = PTR_ERR(parent));
2326 OBD_ALLOC(name, namelen + 1);
2328 GOTO(out_put, rc = -ENOMEM);
2329 if (copy_from_user(name, data->ioc_pbuf2, namelen))
2330 GOTO(out_name, rc = -EFAULT);
2333 echo_ucred_init(env);
2336 case ECHO_MD_CREATE:
2337 case ECHO_MD_MKDIR: {
2338 struct echo_thread_info *info = echo_env_info(env);
2339 __u32 mode = data->ioc_obdo2.o_mode;
2340 struct lu_fid *fid = &info->eti_fid;
2341 int stripe_count = (int)data->ioc_obdo2.o_misc;
2342 int stripe_index = (int)data->ioc_obdo2.o_stripe_idx;
2344 rc = ostid_to_fid(fid, &data->ioc_obdo1.o_oi, 0);
2349 * In the function below, .hs_keycmp resolves to
2350 * lu_obj_hop_keycmp()
2352 /* coverity[overrun-buffer-val] */
2353 rc = echo_create_md_object(env, ed, parent, fid, name, namelen,
2354 id, mode, count, stripe_count,
2358 case ECHO_MD_DESTROY:
2359 case ECHO_MD_RMDIR: {
2360 __u32 mode = data->ioc_obdo2.o_mode;
2362 rc = echo_destroy_object(env, ed, parent, name, namelen,
2366 case ECHO_MD_LOOKUP:
2367 rc = echo_lookup_object(env, ed, parent, id, count);
2369 case ECHO_MD_GETATTR:
2370 rc = echo_getattr_object(env, ed, parent, id, count);
2372 case ECHO_MD_SETATTR:
2373 rc = echo_setattr_object(env, ed, parent, id, count);
2376 CERROR("unknown command %d\n", command);
2380 echo_ucred_fini(env);
2384 OBD_FREE(name, namelen + 1);
2386 lu_object_put(env, parent);
2388 LASSERT(info->eti_big_lmm);
2389 OBD_FREE_LARGE(info->eti_big_lmm, info->eti_big_lmmsize);
2390 info->eti_big_lmm = NULL;
2391 info->eti_big_lmmsize = 0;
2393 cl_env_put(env, &refcheck);
2396 #endif /* HAVE_SERVER_SUPPORT */
/*
 * OBD_IOC_CREATE worker: create one data object through the next-layer
 * obd_create().  The caller must supply a valid oa with FLID+FLGROUP in
 * the echo fid sequence; id 0 is auto-assigned from last_object_id.
 * After creation the object is probed with cl_echo_object_find() (and the
 * reference released immediately); on that failure the just-created
 * object is destroyed again so nothing leaks.
 */
2398 static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
2401 struct echo_object *eco;
2402 struct echo_client_obd *ec = ed->ed_ec;
2407 if (!(oa->o_valid & OBD_MD_FLID) ||
2408 !(oa->o_valid & OBD_MD_FLGROUP) ||
2409 !fid_seq_is_echo(ostid_seq(&oa->o_oi))) {
2410 CERROR("invalid oid "DOSTID"\n", POSTID(&oa->o_oi));
2414 if (ostid_id(&oa->o_oi) == 0) {
2415 rc = ostid_set_id(&oa->o_oi, ++last_object_id);
2420 rc = obd_create(env, ec->ec_exp, oa);
2422 CERROR("Cannot create objects: rc = %d\n", rc);
2428 oa->o_valid |= OBD_MD_FLID;
2430 eco = cl_echo_object_find(ed, &oa->o_oi);
2432 GOTO(failed, rc = PTR_ERR(eco));
2433 cl_echo_object_put(eco);
2435 CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
/* roll back the create if the post-create lookup failed */
2439 if (created && rc != 0)
2440 obd_destroy(env, ec->ec_exp, oa);
2443 CERROR("create object failed with: rc = %d\n", rc);
/*
 * Validate @oa (needs FLID, FLGROUP, non-zero id) and take a reference on
 * the corresponding echo_object via cl_echo_object_find(), returned in
 * *ecop.  Balance with echo_put_object().
 */
2448 static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
2451 struct echo_object *eco;
2455 if (!(oa->o_valid & OBD_MD_FLID) ||
2456 !(oa->o_valid & OBD_MD_FLGROUP) ||
2457 ostid_id(&oa->o_oi) == 0) {
2458 CERROR("invalid oid "DOSTID"\n", POSTID(&oa->o_oi));
2463 eco = cl_echo_object_find(ed, &oa->o_oi);
/* Drop an echo_object reference taken by echo_get_object(); failure to
 * release is only logged — there is nothing else the caller can do. */
2472 static void echo_put_object(struct echo_object *eco)
2476 rc = cl_echo_object_put(eco);
2478 CERROR("%s: echo client drop an object failed: rc = %d\n",
2479 eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc);
/*
 * Stamp a verification pattern into every OBD_ECHO_BLOCK_SIZE block of
 * @page.  Writes encode the object id and the block's file offset so a
 * later read can be checked; reads pre-fill with 0xdeadbeef00c0ffee
 * poison so stale data is detectable.  Only whole pages are handled.
 */
2482 static void echo_client_page_debug_setup(struct page *page, int rw, u64 id,
2483 u64 offset, u64 count)
2490 /* no partial pages on the client */
2491 LASSERT(count == PAGE_SIZE);
2495 for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
2496 if (rw == OBD_BRW_WRITE) {
2497 stripe_off = offset + delta;
2500 stripe_off = 0xdeadbeef00c0ffeeULL;
2501 stripe_id = 0xdeadbeef00c0ffeeULL;
2503 block_debug_setup(addr + delta, OBD_ECHO_BLOCK_SIZE,
2504 stripe_off, stripe_id);
/*
 * Verify the per-block pattern written by echo_client_page_debug_setup()
 * for every block of @page; returns non-zero (the first block_debug_check
 * failure) if any block does not match its expected id/offset stamp.
 */
2511 echo_client_page_debug_check(struct page *page, u64 id, u64 offset, u64 count)
2520 /* no partial pages on the client */
2521 LASSERT(count == PAGE_SIZE);
2525 for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
2526 stripe_off = offset + delta;
2529 rc2 = block_debug_check("test_brw",
2530 addr + delta, OBD_ECHO_BLOCK_SIZE,
2531 stripe_off, stripe_id);
2533 CERROR("Error in echo object %#llx\n", id);
/*
 * Kernel-buffer bulk read/write: allocate @count/PAGE_SIZE pages (count
 * must be page-aligned), optionally stamp them with the debug pattern,
 * drive the I/O through cl_echo_object_brw() on the next layer (client
 * mode only — ed_next is asserted), then on read verify the pattern.
 * GFP_KERNEL vs GFP_HIGHUSER alternates on object id bit 1 to exercise
 * both allocation classes.  Pages get a fake mapping
 * (ECHO_MAPPING_UNENCRYPTED) so they are not treated as encrypted.
 */
2542 static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
2543 struct echo_object *eco, u64 offset,
2544 u64 count, int async)
2547 struct brw_page *pga;
2548 struct brw_page *pgp;
2549 struct page **pages;
/* verify patterns only for non-persistent objects with DEBUG_CHECK set */
2558 verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
2559 (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
2560 (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
2562 gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER;
2564 LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
2566 if ((count & (~PAGE_MASK)) != 0)
2569 /* XXX think again with misaligned I/O */
2570 npages = count >> PAGE_SHIFT;
2572 if (rw == OBD_BRW_WRITE)
2573 brw_flags = OBD_BRW_ASYNC;
2575 OBD_ALLOC_PTR_ARRAY(pga, npages);
2579 OBD_ALLOC_PTR_ARRAY(pages, npages);
2581 OBD_FREE_PTR_ARRAY(pga, npages);
2585 for (i = 0, pgp = pga, off = offset;
2587 i++, pgp++, off += PAGE_SIZE) {
2589 LASSERT(pgp->pg == NULL); /* for cleanup */
2592 pgp->pg = alloc_page(gfp_mask);
2596 /* set mapping so page is not considered encrypted */
2597 pgp->pg->mapping = ECHO_MAPPING_UNENCRYPTED;
2599 pgp->count = PAGE_SIZE;
2601 pgp->flag = brw_flags;
2604 echo_client_page_debug_setup(pgp->pg, rw,
2605 ostid_id(&oa->o_oi), off,
2609 /* brw mode can only be used at client */
2610 LASSERT(ed->ed_next != NULL);
2611 rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
2614 if (rc != 0 || rw != OBD_BRW_READ)
/* on successful read, check every page against the expected pattern */
2617 for (i = 0, pgp = pga; i < npages; i++, pgp++) {
2624 vrc = echo_client_page_debug_check(pgp->pg,
2625 ostid_id(&oa->o_oi),
/* report the first verification failure but keep freeing pages */
2628 if (vrc != 0 && rc == 0)
2631 __free_page(pgp->pg);
2633 OBD_FREE_PTR_ARRAY(pga, npages);
2634 OBD_FREE_PTR_ARRAY(pages, npages);
/*
 * Server-side bulk path: drive @count bytes of I/O through the target's
 * obd_preprw()/obd_commitrw() pair directly, in chunks of @batch bytes
 * (apc pages of niobuf_local are allocated once and reused per chunk).
 * @count and @batch are page-aligned; @count must be > 0.  When the
 * object id has DEBUG_CHECK semantics, each local page is stamped (write)
 * or verified (read) between prep and commit.  The env context is
 * exited/re-entered after each chunk so long runs don't pin it.
 */
2638 static int echo_client_prep_commit(const struct lu_env *env,
2639 struct obd_export *exp, int rw,
2640 struct obdo *oa, struct echo_object *eco,
2641 u64 offset, u64 count,
2642 u64 batch, int async)
2644 struct obd_ioobj ioo;
2645 struct niobuf_local *lnb;
2646 struct niobuf_remote rnb;
2648 u64 npages, tot_pages, apc;
2649 int i, ret = 0, brw_flags = 0;
2652 if (count <= 0 || (count & ~PAGE_MASK) != 0)
2655 apc = npages = batch >> PAGE_SHIFT;
2656 tot_pages = count >> PAGE_SHIFT;
2658 OBD_ALLOC_PTR_ARRAY_LARGE(lnb, apc);
2662 if (rw == OBD_BRW_WRITE && async)
2663 brw_flags |= OBD_BRW_ASYNC;
2665 obdo_to_ioobj(oa, &ioo);
2669 for (; tot_pages > 0; tot_pages -= npages) {
/* last chunk may be shorter than the batch size */
2672 if (tot_pages < npages)
2675 rnb.rnb_offset = off;
2676 rnb.rnb_len = npages * PAGE_SIZE;
2677 rnb.rnb_flags = brw_flags;
2679 off += npages * PAGE_SIZE;
2682 ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
2686 for (i = 0; i < lpages; i++) {
2687 struct page *page = lnb[i].lnb_page;
2689 /* read past eof? */
2690 if (!page && lnb[i].lnb_rc == 0)
2694 lnb[i].lnb_flags |= OBD_BRW_ASYNC;
/* skip debug stamping unless DEBUG_CHECK was requested */
2696 if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
2697 (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
2698 (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
2701 if (rw == OBD_BRW_WRITE)
2702 echo_client_page_debug_setup(page, rw,
2703 ostid_id(&oa->o_oi),
2704 lnb[i].lnb_file_offset,
2707 echo_client_page_debug_check(page,
2708 ostid_id(&oa->o_oi),
2709 lnb[i].lnb_file_offset,
2713 ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb,
2718 /* Reuse env context. */
2719 lu_context_exit((struct lu_context *)&env->le_ctx);
2720 lu_context_enter((struct lu_context *)&env->le_ctx);
2724 OBD_FREE_PTR_ARRAY_LARGE(lnb, apc);
/*
 * OBD_IOC_BRW_READ/WRITE dispatcher: validate the target object, then
 * route to echo_client_kbrw() (client-side cl_io path) or
 * echo_client_prep_commit() (direct prep/commit) according to the test
 * mode passed in ioc_pbuf1.  Without a next layer (OFD/obdfilter target)
 * only prep/commit (mode 3) works, so other modes are coerced to it with
 * batch = full count.  The batch size (ioc_plen1) is clamped to
 * PTLRPC_MAX_BRW_SIZE.
 */
2729 static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
2730 struct obd_export *exp,
2731 struct obd_ioctl_data *data)
2733 struct obd_device *obd = class_exp2obd(exp);
2734 struct echo_device *ed = obd2echo_dev(obd);
2735 struct echo_client_obd *ec = ed->ed_ec;
2736 struct obdo *oa = &data->ioc_obdo1;
2737 struct echo_object *eco;
2743 LASSERT(oa->o_valid & OBD_MD_FLGROUP);
2745 rc = echo_get_object(&eco, ed, oa);
2749 oa->o_valid &= ~OBD_MD_FLHANDLE;
2751 /* OFD/obdfilter works only via prep/commit */
2752 test_mode = (long)data->ioc_pbuf1;
2753 if (!ed->ed_next && test_mode != 3) {
2755 data->ioc_plen1 = data->ioc_count;
2761 /* Truncate batch size to maximum */
2762 if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE)
2763 data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
2765 switch (test_mode) {
2769 rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
2770 data->ioc_count, async);
2773 rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
2774 data->ioc_offset, data->ioc_count,
2775 data->ioc_plen1, async);
2781 echo_put_object(eco);
/*
 * Top-level ioctl entry for the echo client obd.  Normalizes the obdo's
 * group to the echo sequence, validates it as a FID, acquires a cl_env
 * refilled with MD or DT context tags depending on the command, clears
 * the target session info (local operation), then dispatches:
 * create/destroy/getattr/setattr on data objects, BRW read/write, and
 * (server builds) ECHO_MD metadata batches and sequence allocation.
 * Mutating commands require CAP_SYS_ADMIN.
 */
2787 echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2788 void *karg, void __user *uarg)
2790 #ifdef HAVE_SERVER_SUPPORT
2791 struct tgt_session_info *tsi;
2793 struct obd_device *obd = exp->exp_obd;
2794 struct echo_device *ed = obd2echo_dev(obd);
2795 struct echo_client_obd *ec = ed->ed_ec;
2796 struct echo_object *eco;
2797 struct obd_ioctl_data *data = karg;
2799 unsigned long env_tags = 0;
2803 int rw = OBD_BRW_READ;
2807 oa = &data->ioc_obdo1;
/* default the group to the echo sequence if the caller left it unset */
2808 if (!(oa->o_valid & OBD_MD_FLGROUP)) {
2809 oa->o_valid |= OBD_MD_FLGROUP;
2810 ostid_set_seq_echo(&oa->o_oi);
2813 /* This FID is unpacked just for validation at this point */
2814 rc = ostid_to_fid(&fid, &oa->o_oi, 0);
2818 env = cl_env_get(&refcheck);
2820 RETURN(PTR_ERR(env));
2824 #ifdef HAVE_SERVER_SUPPORT
2825 if (cmd == OBD_IOC_ECHO_MD || cmd == OBD_IOC_ECHO_ALLOC_SEQ)
2826 env_tags = ECHO_MD_CTX_TAG;
2829 env_tags = ECHO_DT_CTX_TAG;
2831 rc = lu_env_refill_by_tags(env, env_tags, ECHO_SES_TAG);
2835 #ifdef HAVE_SERVER_SUPPORT
2836 tsi = tgt_ses_info(env);
2837 /* treat as local operation */
2838 tsi->tsi_exp = NULL;
2839 tsi->tsi_jobid = NULL;
2843 case OBD_IOC_CREATE: /* may create echo object */
2844 if (!cfs_capable(CAP_SYS_ADMIN))
2845 GOTO(out, rc = -EPERM);
2847 rc = echo_create_object(env, ed, oa);
2850 #ifdef HAVE_SERVER_SUPPORT
2851 case OBD_IOC_ECHO_MD: {
2858 if (!cfs_capable(CAP_SYS_ADMIN))
2859 GOTO(out, rc = -EPERM);
2861 count = data->ioc_count;
2862 cmd = data->ioc_command;
2864 id = data->ioc_obdo2.o_oi.oi.oi_id;
2865 dirlen = data->ioc_plen1;
/* copy the NUL-terminated directory path in from userspace */
2866 OBD_ALLOC(dir, dirlen + 1);
2868 GOTO(out, rc = -ENOMEM);
2870 if (copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
2871 OBD_FREE(dir, data->ioc_plen1 + 1);
2872 GOTO(out, rc = -EFAULT);
2875 rc = echo_md_handler(ed, cmd, dir, dirlen, id, count, data);
2876 OBD_FREE(dir, dirlen + 1);
2879 case OBD_IOC_ECHO_ALLOC_SEQ: {
2883 if (!cfs_capable(CAP_SYS_ADMIN))
2884 GOTO(out, rc = -EPERM);
2886 rc = seq_client_get_seq(env, ed->ed_cl_seq, &seq);
2888 CERROR("%s: Can not alloc seq: rc = %d\n",
/* return the new sequence and its max width to userspace */
2893 if (copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
2896 max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
2897 if (copy_to_user(data->ioc_pbuf2, &max_count,
2902 #endif /* HAVE_SERVER_SUPPORT */
2903 case OBD_IOC_DESTROY:
2904 if (!cfs_capable(CAP_SYS_ADMIN))
2905 GOTO(out, rc = -EPERM);
2907 rc = echo_get_object(&eco, ed, oa);
2909 rc = obd_destroy(env, ec->ec_exp, oa);
2911 eco->eo_deleted = 1;
2912 echo_put_object(eco);
2916 case OBD_IOC_GETATTR:
2917 rc = echo_get_object(&eco, ed, oa);
2919 rc = obd_getattr(env, ec->ec_exp, oa);
2920 echo_put_object(eco);
2924 case OBD_IOC_SETATTR:
2925 if (!cfs_capable(CAP_SYS_ADMIN))
2926 GOTO(out, rc = -EPERM);
2928 rc = echo_get_object(&eco, ed, oa);
2930 rc = obd_setattr(env, ec->ec_exp, oa);
2931 echo_put_object(eco);
2935 case OBD_IOC_BRW_WRITE:
2936 if (!cfs_capable(CAP_SYS_ADMIN))
2937 GOTO(out, rc = -EPERM);
/* fall through to the common BRW path with rw switched to write */
2941 case OBD_IOC_BRW_READ:
2942 rc = echo_client_brw_ioctl(env, rw, exp, data);
2946 CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
2947 GOTO(out, rc = -ENOTTY);
2953 cl_env_put(env, &refcheck);
/*
 * obd setup method: attach the echo client to the target obd named in
 * lcfg buffer 1.  Registers the DT (and, for MDT targets on server
 * builds, MD) context tags, then connects to the target with an
 * obd_connect_data advertising BRW size, grant, 64-bit hash etc.  The
 * export is removed from the timed chain so the pinger leaves this
 * direct connection alone.  The ocd is temporary and freed after connect.
 * MDT targets are rejected on client-only builds.
 */
2958 static int echo_client_setup(const struct lu_env *env,
2959 struct obd_device *obd, struct lustre_cfg *lcfg)
2961 struct echo_client_obd *ec = &obd->u.echo_client;
2962 struct obd_device *tgt;
2963 struct obd_uuid echo_uuid = { "ECHO_UUID" };
2964 struct obd_connect_data *ocd = NULL;
2968 if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
2969 CERROR("requires a TARGET OBD name\n");
2973 tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
2974 if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
2975 CERROR("device not attached or not set up (%s)\n",
2976 lustre_cfg_string(lcfg, 1));
2980 spin_lock_init(&ec->ec_lock);
2981 INIT_LIST_HEAD(&ec->ec_objects);
2982 INIT_LIST_HEAD(&ec->ec_locks);
2985 lu_context_tags_update(ECHO_DT_CTX_TAG);
2986 lu_session_tags_update(ECHO_SES_TAG);
2988 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
2989 #ifdef HAVE_SERVER_SUPPORT
2990 lu_context_tags_update(ECHO_MD_CTX_TAG);
2993 "Local operations are NOT supported on client side. Only remote operations are supported. Metadata client must be run on server side.\n");
2998 OBD_ALLOC(ocd, sizeof(*ocd));
3000 CERROR("Can't alloc ocd connecting to %s\n",
3001 lustre_cfg_string(lcfg, 1));
3005 ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
3006 OBD_CONNECT_BRW_SIZE |
3007 OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
3008 OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE |
3010 ocd->ocd_brw_size = DT_MAX_BRW_SIZE;
3011 ocd->ocd_version = LUSTRE_VERSION_CODE;
3012 ocd->ocd_group = FID_SEQ_ECHO;
3014 rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
3016 /* Turn off pinger because it connects to tgt obd directly. */
3017 spin_lock(&tgt->obd_dev_lock);
3018 list_del_init(&ec->ec_exp->exp_obd_chain_timed);
3019 spin_unlock(&tgt->obd_dev_lock);
3022 OBD_FREE(ocd, sizeof(*ocd));
3025 CERROR("fail to connect to device %s\n",
3026 lustre_cfg_string(lcfg, 1));
/*
 * echo_client_cleanup(): tear down an echo client device — clear the
 * lu context/session tags set in echo_client_setup() and disconnect
 * the export to the target OBD.  Refuses to proceed while other
 * exports remain on the device.
 *
 * NOTE(review): some lines (braces, returns, guard conditions) are
 * elided in this excerpt.
 */
3033 static int echo_client_cleanup(struct obd_device *obd)
3035 struct echo_device *ed = obd2echo_dev(obd);
3036 struct echo_client_obd *ec = &obd->u.echo_client;
3040 /*Do nothing for Metadata echo client*/
/* Undo the tag updates from setup; LCT_SESSION is masked out of the
 * session tags here — presumably still needed elsewhere, TODO confirm. */
3044 lu_session_tags_clear(ECHO_SES_TAG & ~LCT_SESSION);
3045 lu_context_tags_clear(ECHO_DT_CTX_TAG);
/* The MD context tag only exists in server-support builds. */
3046 if (ed->ed_next_ismd) {
3047 #ifdef HAVE_SERVER_SUPPORT
3048 lu_context_tags_clear(ECHO_MD_CTX_TAG);
3051 "This is client-side only module, does not support metadata echo client.\n");
/* Cannot clean up while clients still hold exports on this device. */
3056 if (!list_empty(&obd->obd_exports)) {
3057 CERROR("still has clients!\n");
/* The export taken at setup must still be referenced before we drop it. */
3061 LASSERT(refcount_read(&ec->ec_exp->exp_handle.h_ref) > 0);
3062 rc = obd_disconnect(ec->ec_exp);
3064 CERROR("fail to disconnect device: %d\n", rc);
/*
 * echo_client_connect(): create an export for a client connecting to
 * this echo device using the generic class_connect() /
 * class_conn2export() pair.  On success *exp holds the new export.
 *
 * NOTE(review): the success-check line between class_connect() and
 * class_conn2export() is elided in this excerpt.
 */
3069 static int echo_client_connect(const struct lu_env *env,
3070 struct obd_export **exp,
3071 struct obd_device *src, struct obd_uuid *cluuid,
3072 struct obd_connect_data *data, void *localdata)
3075 struct lustre_handle conn = { 0 };
3078 rc = class_connect(&conn, src, cluuid);
3080 *exp = class_conn2export(&conn);
/*
 * echo_client_disconnect(): drop an export previously obtained via
 * echo_client_connect(), delegating to the generic class_disconnect().
 *
 * NOTE(review): the guarding condition for the -EINVAL error path is
 * elided in this excerpt — presumably a NULL-export check; confirm
 * against the full source.
 */
3085 static int echo_client_disconnect(struct obd_export *exp)
3091 GOTO(out, rc = -EINVAL);
3093 rc = class_disconnect(exp);
/*
 * Method table for the echo client OBD type: ioctl entry point plus
 * connect/disconnect handlers, registered with class_register_type()
 * in obdecho_init().
 */
3099 static const struct obd_ops echo_client_obd_ops = {
3100 .o_owner = THIS_MODULE,
3101 .o_iocontrol = echo_client_iocontrol,
3102 .o_connect = echo_client_connect,
3103 .o_disconnect = echo_client_disconnect
/*
 * obdecho_init(): module entry point.  In server-support builds it
 * first initializes the persistent pages and registers the echo
 * server OBD type; it then sets up the cl_object kmem caches and
 * registers the echo client OBD type.  If client-type registration
 * fails, the caches are torn down again.
 *
 * NOTE(review): error-path lines (rc checks, labels, RETURNs) are
 * elided in this excerpt.
 */
3106 static int __init obdecho_init(void)
3111 LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
/* The echo block size must evenly divide the page size. */
3113 LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
3115 # ifdef HAVE_SERVER_SUPPORT
3116 rc = echo_persistent_pages_init();
3120 rc = class_register_type(&echo_obd_ops, NULL, true,
3121 LUSTRE_ECHO_NAME, &echo_srv_type);
3126 rc = lu_kmem_init(echo_caches);
3128 rc = class_register_type(&echo_client_obd_ops, NULL, false,
3129 LUSTRE_ECHO_CLIENT_NAME,
/* Roll back the cache setup if client-type registration failed. */
3132 lu_kmem_fini(echo_caches);
3135 # ifdef HAVE_SERVER_SUPPORT
/* Server-side unwind: unregister the server type and free the
 * persistent pages on the failure path. */
3139 class_unregister_type(LUSTRE_ECHO_NAME);
3141 echo_persistent_pages_fini();
/*
 * obdecho_exit(): module exit — unregister the echo client OBD type
 * and free the kmem caches; in server-support builds also unregister
 * the echo server type and release the persistent pages.  Mirrors
 * obdecho_init() in reverse order.
 */
3147 static void __exit obdecho_exit(void)
3149 class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
3150 lu_kmem_fini(echo_caches);
3152 #ifdef HAVE_SERVER_SUPPORT
3153 class_unregister_type(LUSTRE_ECHO_NAME);
3154 echo_persistent_pages_fini();
/* Standard kernel module metadata and entry/exit registration. */
3158 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3159 MODULE_DESCRIPTION("Lustre Echo Client test driver");
3160 MODULE_VERSION(LUSTRE_VERSION_STRING);
3161 MODULE_LICENSE("GPL");
3163 module_init(obdecho_init);
3164 module_exit(obdecho_exit);
3166 /** @} echo_client */