4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_ECHO
35 #include <linux/user_namespace.h>
36 #include <linux/uidgid.h>
38 #include <libcfs/libcfs.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_debug.h>
43 #include <lprocfs_status.h>
44 #include <cl_object.h>
45 #include <lustre_fid.h>
46 #include <lustre_lmv.h>
47 #include <lustre_acl.h>
48 #include <uapi/linux/lustre/lustre_ioctl.h>
49 #include <lustre_net.h>
50 #ifdef HAVE_SERVER_SUPPORT
51 # include <md_object.h>
53 #define ETI_NAME_LEN 20
55 #endif /* HAVE_SERVER_SUPPORT */
57 #include "echo_internal.h"
59 /** \defgroup echo_client Echo Client
63 /* echo thread key have a CL_THREAD flag, which set cl_env function directly */
64 #define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
65 #define ECHO_DT_CTX_TAG (LCT_REMEMBER | LCT_DT_THREAD)
66 #define ECHO_SES_TAG (LCT_REMEMBER | LCT_SESSION | LCT_SERVER_SESSION)
69 struct cl_device ed_cl;
70 struct echo_client_obd *ed_ec;
72 struct cl_site ed_site_myself;
73 struct lu_site *ed_site;
74 struct lu_device *ed_next;
76 struct lu_client_seq *ed_cl_seq;
77 #ifdef HAVE_SERVER_SUPPORT
78 struct local_oid_storage *ed_los;
79 struct lu_fid ed_root_fid;
80 #endif /* HAVE_SERVER_SUPPORT */
84 struct cl_object eo_cl;
85 struct cl_object_header eo_hdr;
86 struct echo_device *eo_dev;
87 struct list_head eo_obj_chain;
88 struct lov_oinfo *eo_oinfo;
93 struct echo_object_conf {
94 struct cl_object_conf eoc_cl;
95 struct lov_oinfo **eoc_oinfo;
99 struct cl_page_slice ep_cl;
100 unsigned long ep_lock;
104 struct cl_lock_slice el_cl;
105 struct list_head el_chain;
106 struct echo_object *el_object;
108 atomic_t el_refcount;
111 #ifdef HAVE_SERVER_SUPPORT
112 static const char echo_md_root_dir_name[] = "ROOT_ECHO";
115 * In order to use the values of members in struct mdd_device,
116 * we define an alias structure here.
118 struct echo_md_device {
119 struct md_device emd_md_dev;
120 struct obd_export *emd_child_exp;
121 struct dt_device *emd_child;
122 struct dt_device *emd_bottom;
123 struct lu_fid emd_root_fid;
124 struct lu_fid emd_local_root_fid;
126 #endif /* HAVE_SERVER_SUPPORT */
128 static int echo_client_setup(const struct lu_env *env,
129 struct obd_device *obddev,
130 struct lustre_cfg *lcfg);
131 static int echo_client_cleanup(struct obd_device *obddev);
133 /** \defgroup echo_helpers Helper functions
136 static inline struct echo_device *cl2echo_dev(const struct cl_device *dev)
138 return container_of0(dev, struct echo_device, ed_cl);
141 static inline struct cl_device *echo_dev2cl(struct echo_device *d)
146 static inline struct echo_device *obd2echo_dev(const struct obd_device *obd)
148 return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev));
151 static inline struct cl_object *echo_obj2cl(struct echo_object *eco)
156 static inline struct echo_object *cl2echo_obj(const struct cl_object *o)
158 return container_of(o, struct echo_object, eo_cl);
161 static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s)
163 return container_of(s, struct echo_page, ep_cl);
166 static inline struct echo_lock *cl2echo_lock(const struct cl_lock_slice *s)
168 return container_of(s, struct echo_lock, el_cl);
171 static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl)
173 return ecl->el_cl.cls_lock;
176 static struct lu_context_key echo_thread_key;
178 static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
180 struct echo_thread_info *info;
182 info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
183 LASSERT(info != NULL);
188 struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
190 return container_of(c, struct echo_object_conf, eoc_cl);
193 #ifdef HAVE_SERVER_SUPPORT
194 static inline struct echo_md_device *lu2emd_dev(struct lu_device *d)
196 return container_of0(d, struct echo_md_device, emd_md_dev.md_lu_dev);
199 static inline struct lu_device *emd2lu_dev(struct echo_md_device *d)
201 return &d->emd_md_dev.md_lu_dev;
204 static inline struct seq_server_site *echo_md_seq_site(struct echo_md_device *d)
206 return emd2lu_dev(d)->ld_site->ld_seq_site;
209 static inline struct obd_device *emd2obd_dev(struct echo_md_device *d)
211 return d->emd_md_dev.md_lu_dev.ld_obd;
213 #endif /* HAVE_SERVER_SUPPORT */
215 /** @} echo_helpers */
217 static int cl_echo_object_put(struct echo_object *eco);
218 static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
219 struct page **pages, int npages, int async);
221 struct echo_thread_info {
222 struct echo_object_conf eti_conf;
223 struct lustre_md eti_md;
224 struct cl_2queue eti_queue;
226 struct cl_lock eti_lock;
227 struct lu_fid eti_fid;
228 struct lu_fid eti_fid2;
229 #ifdef HAVE_SERVER_SUPPORT
230 struct md_op_spec eti_spec;
231 struct lov_mds_md_v3 eti_lmm;
232 struct lov_user_md_v3 eti_lum;
233 struct md_attr eti_ma;
234 struct lu_name eti_lname;
235 /* per-thread values, can be re-used */
236 void *eti_big_lmm; /* may be vmalloc'd */
238 char eti_name[ETI_NAME_LEN];
239 struct lu_buf eti_buf;
240 /* If we want to test large ACL, then need to enlarge the buffer. */
241 char eti_xattr_buf[LUSTRE_POSIX_ACL_MAX_SIZE_OLD];
245 /* No session used right now */
246 struct echo_session_info {
250 static struct kmem_cache *echo_lock_kmem;
251 static struct kmem_cache *echo_object_kmem;
252 static struct kmem_cache *echo_thread_kmem;
253 static struct kmem_cache *echo_session_kmem;
254 /* static struct kmem_cache *echo_req_kmem; */
256 static struct lu_kmem_descr echo_caches[] = {
258 .ckd_cache = &echo_lock_kmem,
259 .ckd_name = "echo_lock_kmem",
260 .ckd_size = sizeof(struct echo_lock)
263 .ckd_cache = &echo_object_kmem,
264 .ckd_name = "echo_object_kmem",
265 .ckd_size = sizeof(struct echo_object)
268 .ckd_cache = &echo_thread_kmem,
269 .ckd_name = "echo_thread_kmem",
270 .ckd_size = sizeof(struct echo_thread_info)
273 .ckd_cache = &echo_session_kmem,
274 .ckd_name = "echo_session_kmem",
275 .ckd_size = sizeof(struct echo_session_info)
282 /** \defgroup echo_page Page operations
284 * Echo page operations.
288 static int echo_page_own(const struct lu_env *env,
289 const struct cl_page_slice *slice,
290 struct cl_io *io, int nonblock)
292 struct echo_page *ep = cl2echo_page(slice);
295 if (test_and_set_bit(0, &ep->ep_lock))
298 while (test_and_set_bit(0, &ep->ep_lock))
299 wait_on_bit(&ep->ep_lock, 0, TASK_UNINTERRUPTIBLE);
304 static void echo_page_disown(const struct lu_env *env,
305 const struct cl_page_slice *slice,
308 struct echo_page *ep = cl2echo_page(slice);
310 LASSERT(test_bit(0, &ep->ep_lock));
311 clear_and_wake_up_bit(0, &ep->ep_lock);
314 static void echo_page_discard(const struct lu_env *env,
315 const struct cl_page_slice *slice,
316 struct cl_io *unused)
318 cl_page_delete(env, slice->cpl_page);
321 static int echo_page_is_vmlocked(const struct lu_env *env,
322 const struct cl_page_slice *slice)
324 if (test_bit(0, &cl2echo_page(slice)->ep_lock))
329 static void echo_page_completion(const struct lu_env *env,
330 const struct cl_page_slice *slice,
333 LASSERT(slice->cpl_page->cp_sync_io != NULL);
336 static void echo_page_fini(const struct lu_env *env,
337 struct cl_page_slice *slice,
338 struct pagevec *pvec)
340 struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
343 atomic_dec(&eco->eo_npages);
344 put_page(slice->cpl_page->cp_vmpage);
348 static int echo_page_prep(const struct lu_env *env,
349 const struct cl_page_slice *slice,
350 struct cl_io *unused)
355 static int echo_page_print(const struct lu_env *env,
356 const struct cl_page_slice *slice,
357 void *cookie, lu_printer_t printer)
359 struct echo_page *ep = cl2echo_page(slice);
361 (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
362 ep, test_bit(0, &ep->ep_lock),
363 slice->cpl_page->cp_vmpage);
367 static const struct cl_page_operations echo_page_ops = {
368 .cpo_own = echo_page_own,
369 .cpo_disown = echo_page_disown,
370 .cpo_discard = echo_page_discard,
371 .cpo_fini = echo_page_fini,
372 .cpo_print = echo_page_print,
373 .cpo_is_vmlocked = echo_page_is_vmlocked,
376 .cpo_prep = echo_page_prep,
377 .cpo_completion = echo_page_completion,
380 .cpo_prep = echo_page_prep,
381 .cpo_completion = echo_page_completion,
388 /** \defgroup echo_lock Locking
390 * echo lock operations
394 static void echo_lock_fini(const struct lu_env *env,
395 struct cl_lock_slice *slice)
397 struct echo_lock *ecl = cl2echo_lock(slice);
399 LASSERT(list_empty(&ecl->el_chain));
400 OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
403 static struct cl_lock_operations echo_lock_ops = {
404 .clo_fini = echo_lock_fini,
409 /** \defgroup echo_cl_ops cl_object operations
411 * operations for cl_object
415 static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
416 struct cl_page *page, pgoff_t index)
418 struct echo_page *ep = cl_object_page_slice(obj, page);
419 struct echo_object *eco = cl2echo_obj(obj);
422 get_page(page->cp_vmpage);
424 * ep_lock is similar to the lock_page() lock, and
425 * cannot usefully be monitored by lockdep.
426 * So just use a bit in an "unsigned long" and use the
427 * wait_on_bit() interface to wait for the bit to be clear.
430 cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
431 atomic_inc(&eco->eo_npages);
435 static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
441 static int echo_lock_init(const struct lu_env *env,
442 struct cl_object *obj, struct cl_lock *lock,
443 const struct cl_io *unused)
445 struct echo_lock *el;
448 OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, GFP_NOFS);
450 cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
451 el->el_object = cl2echo_obj(obj);
452 INIT_LIST_HEAD(&el->el_chain);
453 atomic_set(&el->el_refcount, 0);
455 RETURN(el ? 0 : -ENOMEM);
458 static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
459 const struct cl_object_conf *conf)
464 static const struct cl_object_operations echo_cl_obj_ops = {
465 .coo_page_init = echo_page_init,
466 .coo_lock_init = echo_lock_init,
467 .coo_io_init = echo_io_init,
468 .coo_conf_set = echo_conf_set
470 /** @} echo_cl_ops */
472 /** \defgroup echo_lu_ops lu_object operations
474 * operations for echo lu object.
478 static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
479 const struct lu_object_conf *conf)
481 struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
482 struct echo_client_obd *ec = ed->ed_ec;
483 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
487 struct lu_object *below;
488 struct lu_device *under;
491 below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
495 lu_object_add(obj, below);
498 if (!ed->ed_next_ismd) {
499 const struct cl_object_conf *cconf = lu2cl_conf(conf);
500 struct echo_object_conf *econf = cl2echo_conf(cconf);
502 LASSERT(econf->eoc_oinfo != NULL);
505 * Transfer the oinfo pointer to eco that it won't be
508 eco->eo_oinfo = *econf->eoc_oinfo;
509 *econf->eoc_oinfo = NULL;
511 eco->eo_oinfo = NULL;
515 atomic_set(&eco->eo_npages, 0);
516 cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
518 spin_lock(&ec->ec_lock);
519 list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
520 spin_unlock(&ec->ec_lock);
525 static void echo_object_delete(const struct lu_env *env, struct lu_object *obj)
527 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
528 struct echo_client_obd *ec;
532 /* object delete called unconditolally - layer init or not */
533 if (eco->eo_dev == NULL)
536 ec = eco->eo_dev->ed_ec;
538 LASSERT(atomic_read(&eco->eo_npages) == 0);
540 spin_lock(&ec->ec_lock);
541 list_del_init(&eco->eo_obj_chain);
542 spin_unlock(&ec->ec_lock);
545 OBD_FREE_PTR(eco->eo_oinfo);
548 static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
550 struct echo_object *eco = cl2echo_obj(lu2cl(obj));
555 lu_object_header_fini(obj->lo_header);
557 OBD_SLAB_FREE_PTR(eco, echo_object_kmem);
561 static int echo_object_print(const struct lu_env *env, void *cookie,
562 lu_printer_t p, const struct lu_object *o)
564 struct echo_object *obj = cl2echo_obj(lu2cl(o));
566 return (*p)(env, cookie, "echoclient-object@%p", obj);
569 static const struct lu_object_operations echo_lu_obj_ops = {
570 .loo_object_init = echo_object_init,
571 .loo_object_delete = echo_object_delete,
572 .loo_object_release = NULL,
573 .loo_object_free = echo_object_free,
574 .loo_object_print = echo_object_print,
575 .loo_object_invariant = NULL
577 /** @} echo_lu_ops */
579 /** \defgroup echo_lu_dev_ops lu_device operations
581 * Operations for echo lu device.
585 static struct lu_object *echo_object_alloc(const struct lu_env *env,
586 const struct lu_object_header *hdr,
587 struct lu_device *dev)
589 struct echo_object *eco;
590 struct lu_object *obj = NULL;
593 /* we're the top dev. */
594 LASSERT(hdr == NULL);
595 OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, GFP_NOFS);
597 struct cl_object_header *hdr = &eco->eo_hdr;
599 obj = &echo_obj2cl(eco)->co_lu;
600 cl_object_header_init(hdr);
601 hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
603 lu_object_init(obj, &hdr->coh_lu, dev);
604 lu_object_add_top(&hdr->coh_lu, obj);
606 eco->eo_cl.co_ops = &echo_cl_obj_ops;
607 obj->lo_ops = &echo_lu_obj_ops;
612 static struct lu_device_operations echo_device_lu_ops = {
613 .ldo_object_alloc = echo_object_alloc,
616 /** @} echo_lu_dev_ops */
618 /** \defgroup echo_init Setup and teardown
620 * Init and fini functions for echo client.
624 static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
626 struct cl_site *site = &ed->ed_site_myself;
629 /* initialize site */
630 rc = cl_site_init(site, &ed->ed_cl);
632 CERROR("Cannot initialize site for echo client(%d)\n", rc);
636 rc = lu_site_init_finish(&site->cs_lu);
642 ed->ed_site = &site->cs_lu;
646 static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
649 if (!ed->ed_next_ismd)
650 lu_site_fini(ed->ed_site);
655 static void *echo_thread_key_init(const struct lu_context *ctx,
656 struct lu_context_key *key)
658 struct echo_thread_info *info;
660 OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, GFP_NOFS);
662 info = ERR_PTR(-ENOMEM);
666 static void echo_thread_key_fini(const struct lu_context *ctx,
667 struct lu_context_key *key, void *data)
669 struct echo_thread_info *info = data;
671 OBD_SLAB_FREE_PTR(info, echo_thread_kmem);
674 static struct lu_context_key echo_thread_key = {
675 .lct_tags = LCT_CL_THREAD,
676 .lct_init = echo_thread_key_init,
677 .lct_fini = echo_thread_key_fini,
680 static void *echo_session_key_init(const struct lu_context *ctx,
681 struct lu_context_key *key)
683 struct echo_session_info *session;
685 OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, GFP_NOFS);
687 session = ERR_PTR(-ENOMEM);
691 static void echo_session_key_fini(const struct lu_context *ctx,
692 struct lu_context_key *key, void *data)
694 struct echo_session_info *session = data;
696 OBD_SLAB_FREE_PTR(session, echo_session_kmem);
699 static struct lu_context_key echo_session_key = {
700 .lct_tags = LCT_SESSION,
701 .lct_init = echo_session_key_init,
702 .lct_fini = echo_session_key_fini,
705 LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
707 #ifdef HAVE_SERVER_SUPPORT
708 # define ECHO_SEQ_WIDTH 0xffffffff
709 static int echo_fid_init(struct echo_device *ed, char *obd_name,
710 struct seq_server_site *ss)
716 OBD_ALLOC_PTR(ed->ed_cl_seq);
720 OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
722 GOTO(out_free_seq, rc = -ENOMEM);
724 snprintf(prefix, MAX_OBD_NAME + 5, "srv-%s", obd_name);
726 /* Init client side sequence-manager */
727 rc = seq_client_init(ed->ed_cl_seq, NULL,
729 prefix, ss->ss_server_seq);
730 ed->ed_cl_seq->lcs_width = ECHO_SEQ_WIDTH;
731 OBD_FREE(prefix, MAX_OBD_NAME + 5);
733 GOTO(out_free_seq, rc);
738 OBD_FREE_PTR(ed->ed_cl_seq);
739 ed->ed_cl_seq = NULL;
743 static int echo_fid_fini(struct obd_device *obddev)
745 struct echo_device *ed = obd2echo_dev(obddev);
749 seq_client_fini(ed->ed_cl_seq);
750 OBD_FREE_PTR(ed->ed_cl_seq);
751 ed->ed_cl_seq = NULL;
757 static void echo_ed_los_fini(const struct lu_env *env, struct echo_device *ed)
760 if (ed != NULL && ed->ed_next_ismd && ed->ed_los != NULL) {
761 local_oid_storage_fini(env, ed->ed_los);
767 echo_md_local_file_create(const struct lu_env *env, struct echo_md_device *emd,
768 struct local_oid_storage *los,
769 const struct lu_fid *pfid, const char *name,
770 __u32 mode, struct lu_fid *fid)
772 struct dt_object *parent = NULL;
773 struct dt_object *dto = NULL;
777 LASSERT(!fid_is_zero(pfid));
778 parent = dt_locate(env, emd->emd_bottom, pfid);
779 if (unlikely(IS_ERR(parent)))
780 RETURN(PTR_ERR(parent));
782 /* create local file with @fid */
783 dto = local_file_find_or_create_with_fid(env, emd->emd_bottom, fid,
786 GOTO(out_put, rc = PTR_ERR(dto));
788 *fid = *lu_object_fid(&dto->do_lu);
790 * since stack is not fully set up the local_storage uses own stack
791 * and we should drop its object from cache
793 dt_object_put_nocache(env, dto);
797 dt_object_put(env, parent);
802 echo_md_root_get(const struct lu_env *env, struct echo_md_device *emd,
803 struct echo_device *ed)
809 /* Setup local dirs */
810 fid.f_seq = FID_SEQ_LOCAL_NAME;
813 rc = local_oid_storage_init(env, emd->emd_bottom, &fid, &ed->ed_los);
817 lu_echo_root_fid(&fid);
818 if (echo_md_seq_site(emd)->ss_node_id == 0) {
819 rc = echo_md_local_file_create(env, emd, ed->ed_los,
820 &emd->emd_local_root_fid,
821 echo_md_root_dir_name, S_IFDIR |
822 S_IRUGO | S_IWUSR | S_IXUGO,
825 CERROR("%s: create md echo root fid failed: rc = %d\n",
826 emd2obd_dev(emd)->obd_name, rc);
830 ed->ed_root_fid = fid;
834 echo_ed_los_fini(env, ed);
838 #endif /* HAVE_SERVER_SUPPORT */
840 static struct lu_device *echo_device_alloc(const struct lu_env *env,
841 struct lu_device_type *t,
842 struct lustre_cfg *cfg)
844 struct lu_device *next;
845 struct echo_device *ed;
846 struct cl_device *cd;
847 struct obd_device *obd = NULL; /* to keep compiler happy */
848 struct obd_device *tgt;
849 const char *tgt_type_name;
856 GOTO(out, rc = -ENOMEM);
860 rc = cl_device_init(cd, t);
864 cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
867 obd = class_name2obd(lustre_cfg_string(cfg, 0));
868 LASSERT(obd != NULL);
869 LASSERT(env != NULL);
871 tgt = class_name2obd(lustre_cfg_string(cfg, 1));
873 CERROR("Can not find tgt device %s\n",
874 lustre_cfg_string(cfg, 1));
875 GOTO(out, rc = -ENODEV);
878 next = tgt->obd_lu_dev;
880 if (strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME) == 0) {
881 ed->ed_next_ismd = 1;
882 } else if (strcmp(tgt->obd_type->typ_name, LUSTRE_OST_NAME) == 0 ||
883 strcmp(tgt->obd_type->typ_name, LUSTRE_OSC_NAME) == 0) {
884 ed->ed_next_ismd = 0;
885 rc = echo_site_init(env, ed);
889 GOTO(out, rc = -EINVAL);
894 rc = echo_client_setup(env, obd, cfg);
898 ed->ed_ec = &obd->u.echo_client;
901 if (ed->ed_next_ismd) {
902 #ifdef HAVE_SERVER_SUPPORT
903 /* Suppose to connect to some Metadata layer */
904 struct lu_site *ls = NULL;
905 struct lu_device *ld = NULL;
906 struct md_device *md = NULL;
907 struct echo_md_device *emd = NULL;
911 CERROR("%s is not lu device type!\n",
912 lustre_cfg_string(cfg, 1));
913 GOTO(out, rc = -EINVAL);
916 tgt_type_name = lustre_cfg_string(cfg, 2);
917 if (!tgt_type_name) {
918 CERROR("%s no type name for echo %s setup\n",
919 lustre_cfg_string(cfg, 1),
920 tgt->obd_type->typ_name);
921 GOTO(out, rc = -EINVAL);
926 spin_lock(&ls->ls_ld_lock);
927 list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
928 if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
933 spin_unlock(&ls->ls_ld_lock);
936 CERROR("%s is not lu device type!\n",
937 lustre_cfg_string(cfg, 1));
938 GOTO(out, rc = -EINVAL);
942 /* For MD echo client, it will use the site in MDS stack */
944 ed->ed_cl.cd_lu_dev.ld_site = ls;
945 rc = echo_fid_init(ed, obd->obd_name, lu_site2seq(ls));
947 CERROR("echo fid init error %d\n", rc);
951 md = lu2md_dev(next);
952 emd = lu2emd_dev(&md->md_lu_dev);
953 rc = echo_md_root_get(env, emd, ed);
955 CERROR("%s: get root error: rc = %d\n",
956 emd2obd_dev(emd)->obd_name, rc);
959 #else /* !HAVE_SERVER_SUPPORT */
961 "Local operations are NOT supported on client side. Only remote operations are supported. Metadata client must be run on server side.\n");
962 GOTO(out, rc = -EOPNOTSUPP);
963 #endif /* HAVE_SERVER_SUPPORT */
966 * if echo client is to be stacked upon ost device, the next is
967 * NULL since ost is not a clio device so far
969 if (next != NULL && !lu_device_is_cl(next))
972 tgt_type_name = tgt->obd_type->typ_name;
974 LASSERT(next != NULL);
976 GOTO(out, rc = -EBUSY);
978 next->ld_site = ed->ed_site;
979 rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
980 next->ld_type->ldt_name,
985 LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
990 RETURN(&cd->cd_lu_dev);
996 rc2 = echo_client_cleanup(obd);
998 CERROR("Cleanup obd device %s error(%d)\n",
1004 echo_site_fini(env, ed);
1007 cl_device_fini(&ed->ed_cl);
1019 static int echo_device_init(const struct lu_env *env, struct lu_device *d,
1020 const char *name, struct lu_device *next)
1026 static struct lu_device *echo_device_fini(const struct lu_env *env,
1027 struct lu_device *d)
1029 struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
1030 struct lu_device *next = ed->ed_next;
1032 while (next && !ed->ed_next_ismd)
1033 next = next->ld_type->ldt_ops->ldto_device_fini(env, next);
1037 static void echo_lock_release(const struct lu_env *env,
1038 struct echo_lock *ecl,
1041 struct cl_lock *clk = echo_lock2cl(ecl);
1043 cl_lock_release(env, clk);
1046 static struct lu_device *echo_device_free(const struct lu_env *env,
1047 struct lu_device *d)
1049 struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
1050 struct echo_client_obd *ec = ed->ed_ec;
1051 struct echo_object *eco;
1052 struct lu_device *next = ed->ed_next;
1054 CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
1057 lu_site_purge(env, ed->ed_site, -1);
1060 * check if there are objects still alive.
1061 * It shouldn't have any object because lu_site_purge would cleanup
1062 * all of cached objects. Anyway, probably the echo device is being
1063 * parallelly accessed.
1065 spin_lock(&ec->ec_lock);
1066 list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
1067 eco->eo_deleted = 1;
1068 spin_unlock(&ec->ec_lock);
1071 lu_site_purge(env, ed->ed_site, -1);
1074 "Waiting for the reference of echo object to be dropped\n");
1076 /* Wait for the last reference to be dropped. */
1077 spin_lock(&ec->ec_lock);
1078 while (!list_empty(&ec->ec_objects)) {
1079 spin_unlock(&ec->ec_lock);
1081 "echo_client still has objects at cleanup time, wait for 1 second\n");
1082 schedule_timeout_uninterruptible(cfs_time_seconds(1));
1083 lu_site_purge(env, ed->ed_site, -1);
1084 spin_lock(&ec->ec_lock);
1086 spin_unlock(&ec->ec_lock);
1088 LASSERT(list_empty(&ec->ec_locks));
1090 CDEBUG(D_INFO, "No object exists, exiting...\n");
1092 echo_client_cleanup(d->ld_obd);
1093 #ifdef HAVE_SERVER_SUPPORT
1094 echo_fid_fini(d->ld_obd);
1095 echo_ed_los_fini(env, ed);
1097 while (next && !ed->ed_next_ismd)
1098 next = next->ld_type->ldt_ops->ldto_device_free(env, next);
1100 LASSERT(ed->ed_site == d->ld_site);
1101 echo_site_fini(env, ed);
1102 cl_device_fini(&ed->ed_cl);
1105 cl_env_cache_purge(~0);
1110 static const struct lu_device_type_operations echo_device_type_ops = {
1111 .ldto_init = echo_type_init,
1112 .ldto_fini = echo_type_fini,
1114 .ldto_start = echo_type_start,
1115 .ldto_stop = echo_type_stop,
1117 .ldto_device_alloc = echo_device_alloc,
1118 .ldto_device_free = echo_device_free,
1119 .ldto_device_init = echo_device_init,
1120 .ldto_device_fini = echo_device_fini
1123 static struct lu_device_type echo_device_type = {
1124 .ldt_tags = LU_DEVICE_CL,
1125 .ldt_name = LUSTRE_ECHO_CLIENT_NAME,
1126 .ldt_ops = &echo_device_type_ops,
1127 .ldt_ctx_tags = LCT_CL_THREAD | LCT_MD_THREAD | LCT_DT_THREAD,
1131 /** \defgroup echo_exports Exported operations
1133 * exporting functions to echo client
1138 /* Interfaces to echo client obd device */
1139 static struct echo_object *
1140 cl_echo_object_find(struct echo_device *d, const struct ost_id *oi)
1143 struct echo_thread_info *info;
1144 struct echo_object_conf *conf;
1145 struct echo_object *eco;
1146 struct cl_object *obj;
1147 struct lov_oinfo *oinfo = NULL;
1153 LASSERTF(ostid_id(oi) != 0, DOSTID"\n", POSTID(oi));
1154 LASSERTF(ostid_seq(oi) == FID_SEQ_ECHO, DOSTID"\n", POSTID(oi));
1156 /* Never return an object if the obd is to be freed. */
1157 if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
1158 RETURN(ERR_PTR(-ENODEV));
1160 env = cl_env_get(&refcheck);
1162 RETURN((void *)env);
1164 info = echo_env_info(env);
1165 conf = &info->eti_conf;
1167 OBD_ALLOC_PTR(oinfo);
1169 GOTO(out, eco = ERR_PTR(-ENOMEM));
1171 oinfo->loi_oi = *oi;
1172 conf->eoc_cl.u.coc_oinfo = oinfo;
1176 * If echo_object_init() is successful then ownership of oinfo
1177 * is transferred to the object.
1179 conf->eoc_oinfo = &oinfo;
1181 fid = &info->eti_fid;
1182 rc = ostid_to_fid(fid, oi, 0);
1184 GOTO(out, eco = ERR_PTR(rc));
1187 * In the function below, .hs_keycmp resolves to
1188 * lu_obj_hop_keycmp()
1190 /* coverity[overrun-buffer-val] */
1191 obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
1193 GOTO(out, eco = (void *)obj);
1195 eco = cl2echo_obj(obj);
1196 if (eco->eo_deleted) {
1197 cl_object_put(env, obj);
1198 eco = ERR_PTR(-EAGAIN);
1203 OBD_FREE_PTR(oinfo);
1205 cl_env_put(env, &refcheck);
1209 static int cl_echo_object_put(struct echo_object *eco)
1212 struct cl_object *obj = echo_obj2cl(eco);
1216 env = cl_env_get(&refcheck);
1218 RETURN(PTR_ERR(env));
1220 /* an external function to kill an object? */
1221 if (eco->eo_deleted) {
1222 struct lu_object_header *loh = obj->co_lu.lo_header;
1224 LASSERT(&eco->eo_hdr == luh2coh(loh));
1225 set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
1228 cl_object_put(env, obj);
1229 cl_env_put(env, &refcheck);
1233 static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
1234 u64 start, u64 end, int mode,
1235 __u64 *cookie, __u32 enqflags)
1238 struct cl_lock *lck;
1239 struct cl_object *obj;
1240 struct cl_lock_descr *descr;
1241 struct echo_thread_info *info;
1245 info = echo_env_info(env);
1247 lck = &info->eti_lock;
1248 obj = echo_obj2cl(eco);
1250 memset(lck, 0, sizeof(*lck));
1251 descr = &lck->cll_descr;
1252 descr->cld_obj = obj;
1253 descr->cld_start = cl_index(obj, start);
1254 descr->cld_end = cl_index(obj, end);
1255 descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
1256 descr->cld_enq_flags = enqflags;
1259 rc = cl_lock_request(env, io, lck);
1261 struct echo_client_obd *ec = eco->eo_dev->ed_ec;
1262 struct echo_lock *el;
1264 el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
1265 spin_lock(&ec->ec_lock);
1266 if (list_empty(&el->el_chain)) {
1267 list_add(&el->el_chain, &ec->ec_locks);
1268 el->el_cookie = ++ec->ec_unique;
1270 atomic_inc(&el->el_refcount);
1271 *cookie = el->el_cookie;
1272 spin_unlock(&ec->ec_lock);
1277 static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
1280 struct echo_client_obd *ec = ed->ed_ec;
1281 struct echo_lock *ecl = NULL;
1282 struct list_head *el;
1283 int found = 0, still_used = 0;
1286 LASSERT(ec != NULL);
1287 spin_lock(&ec->ec_lock);
1288 list_for_each(el, &ec->ec_locks) {
1289 ecl = list_entry(el, struct echo_lock, el_chain);
1290 CDEBUG(D_INFO, "ecl: %p, cookie: %#llx\n", ecl, ecl->el_cookie);
1291 found = (ecl->el_cookie == cookie);
1293 if (atomic_dec_and_test(&ecl->el_refcount))
1294 list_del_init(&ecl->el_chain);
1300 spin_unlock(&ec->ec_lock);
1305 echo_lock_release(env, ecl, still_used);
1309 static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
1310 struct pagevec *pvec)
1312 struct echo_thread_info *info;
1313 struct cl_2queue *queue;
1316 info = echo_env_info(env);
1317 LASSERT(io == &info->eti_io);
1319 queue = &info->eti_queue;
1321 for (i = 0; i < pagevec_count(pvec); i++) {
1322 struct page *vmpage = pvec->pages[i];
1323 struct cl_page *page = (struct cl_page *)vmpage->private;
1325 cl_page_list_add(&queue->c2_qout, page);
1329 static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
1330 struct page **pages, int npages, int async)
1333 struct echo_thread_info *info;
1334 struct cl_object *obj = echo_obj2cl(eco);
1335 struct echo_device *ed = eco->eo_dev;
1336 struct cl_2queue *queue;
1338 struct cl_page *clp;
1339 struct lustre_handle lh = { 0 };
1340 int page_size = cl_page_size(obj);
1346 LASSERT((offset & ~PAGE_MASK) == 0);
1347 LASSERT(ed->ed_next != NULL);
1348 env = cl_env_get(&refcheck);
1350 RETURN(PTR_ERR(env));
1352 info = echo_env_info(env);
1354 queue = &info->eti_queue;
1356 cl_2queue_init(queue);
1358 io->ci_ignore_layout = 1;
1359 rc = cl_io_init(env, io, CIT_MISC, obj);
1364 rc = cl_echo_enqueue0(env, eco, offset,
1365 offset + npages * PAGE_SIZE - 1,
1366 rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
1369 GOTO(error_lock, rc);
1371 for (i = 0; i < npages; i++) {
1373 clp = cl_page_find(env, obj, cl_index(obj, offset),
1374 pages[i], CPT_TRANSIENT);
1379 LASSERT(clp->cp_type == CPT_TRANSIENT);
1381 rc = cl_page_own(env, io, clp);
1383 LASSERT(clp->cp_state == CPS_FREEING);
1384 cl_page_put(env, clp);
1388 cl_2queue_add(queue, clp);
1391 * drop the reference count for cl_page_find, so that the page
1392 * will be freed in cl_2queue_fini.
1394 cl_page_put(env, clp);
1395 cl_page_clip(env, clp, 0, page_size);
1397 offset += page_size;
1401 enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE;
1403 async = async && (typ == CRT_WRITE);
1405 rc = cl_io_commit_async(env, io, &queue->c2_qin,
1407 echo_commit_callback);
1409 rc = cl_io_submit_sync(env, io, typ, queue, 0);
1410 CDEBUG(D_INFO, "echo_client %s write returns %d\n",
1411 async ? "async" : "sync", rc);
1414 cl_echo_cancel0(env, ed, lh.cookie);
1417 cl_2queue_discard(env, io, queue);
1418 cl_2queue_disown(env, io, queue);
1419 cl_2queue_fini(env, queue);
1420 cl_io_fini(env, io);
1422 cl_env_put(env, &refcheck);
1425 /** @} echo_exports */
1427 static u64 last_object_id;
1429 #ifdef HAVE_SERVER_SUPPORT
1430 static inline void echo_md_build_name(struct lu_name *lname, char *name,
1433 snprintf(name, ETI_NAME_LEN, "%llu", id);
1434 lname->ln_name = name;
1435 lname->ln_namelen = strlen(name);
1438 /* similar to mdt_attr_get_complex */
1439 static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
1442 struct echo_thread_info *info = echo_env_info(env);
1447 LASSERT(ma->ma_lmm_size > 0);
1449 LASSERT(ma->ma_need & (MA_LOV | MA_LMV));
1450 if (ma->ma_need & MA_LOV)
1451 rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LOV);
1453 rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LMV);
1458 /* big_lmm may need to be grown */
1459 if (info->eti_big_lmmsize < rc) {
1460 int size = size_roundup_power2(rc);
1462 if (info->eti_big_lmmsize > 0) {
1463 /* free old buffer */
1464 LASSERT(info->eti_big_lmm);
1465 OBD_FREE_LARGE(info->eti_big_lmm,
1466 info->eti_big_lmmsize);
1467 info->eti_big_lmm = NULL;
1468 info->eti_big_lmmsize = 0;
1471 OBD_ALLOC_LARGE(info->eti_big_lmm, size);
1472 if (!info->eti_big_lmm)
1474 info->eti_big_lmmsize = size;
1476 LASSERT(info->eti_big_lmmsize >= rc);
1478 info->eti_buf.lb_buf = info->eti_big_lmm;
1479 info->eti_buf.lb_len = info->eti_big_lmmsize;
1480 if (ma->ma_need & MA_LOV)
1481 rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LOV);
1483 rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LMV);
1487 if (ma->ma_need & MA_LOV)
1488 ma->ma_valid |= MA_LOV;
1490 ma->ma_valid |= MA_LMV;
1492 ma->ma_lmm = info->eti_big_lmm;
1493 ma->ma_lmm_size = rc;
1498 static int echo_attr_get_complex(const struct lu_env *env,
1499 struct md_object *next,
1502 struct echo_thread_info *info = echo_env_info(env);
1503 struct lu_buf *buf = &info->eti_buf;
1504 umode_t mode = lu_object_attr(&next->mo_lu);
1511 if (ma->ma_need & MA_INODE) {
1512 rc = mo_attr_get(env, next, ma);
1515 ma->ma_valid |= MA_INODE;
1518 if ((ma->ma_need & MA_LOV) && (S_ISREG(mode) || S_ISDIR(mode))) {
1519 LASSERT(ma->ma_lmm_size > 0);
1520 buf->lb_buf = ma->ma_lmm;
1521 buf->lb_len = ma->ma_lmm_size;
1522 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LOV);
1524 ma->ma_lmm_size = rc2;
1525 ma->ma_valid |= MA_LOV;
1526 } else if (rc2 == -ENODATA) {
1528 ma->ma_lmm_size = 0;
1529 } else if (rc2 == -ERANGE) {
1530 rc2 = echo_big_lmm_get(env, next, ma);
1532 GOTO(out, rc = rc2);
1534 GOTO(out, rc = rc2);
1538 if ((ma->ma_need & MA_LMV) && S_ISDIR(mode)) {
1539 LASSERT(ma->ma_lmm_size > 0);
1540 buf->lb_buf = ma->ma_lmm;
1541 buf->lb_len = ma->ma_lmm_size;
1542 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LMV);
1544 ma->ma_lmm_size = rc2;
1545 ma->ma_valid |= MA_LMV;
1546 } else if (rc2 == -ENODATA) {
1548 ma->ma_lmm_size = 0;
1549 } else if (rc2 == -ERANGE) {
1550 rc2 = echo_big_lmm_get(env, next, ma);
1552 GOTO(out, rc = rc2);
1554 GOTO(out, rc = rc2);
1558 #ifdef CONFIG_LUSTRE_FS_POSIX_ACL
1559 if ((ma->ma_need & MA_ACL_DEF) && S_ISDIR(mode)) {
1560 buf->lb_buf = ma->ma_acl;
1561 buf->lb_len = ma->ma_acl_size;
1562 rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
1564 ma->ma_acl_size = rc2;
1565 ma->ma_valid |= MA_ACL_DEF;
1566 } else if (rc2 == -ENODATA) {
1568 ma->ma_acl_size = 0;
1570 GOTO(out, rc = rc2);
1575 CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = %#llx ma_lmm=%p\n",
1576 rc, ma->ma_valid, ma->ma_lmm);
1581 echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
1582 struct md_object *parent, struct lu_fid *fid,
1583 struct lu_name *lname, struct md_op_spec *spec,
1586 struct lu_object *ec_child, *child;
1587 struct lu_device *ld = ed->ed_next;
1588 struct echo_thread_info *info = echo_env_info(env);
1589 struct lu_fid *fid2 = &info->eti_fid2;
1590 struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
1595 rc = mdo_lookup(env, parent, lname, fid2, spec);
1598 else if (rc != -ENOENT)
1601 ec_child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev,
1603 if (IS_ERR(ec_child)) {
1604 CERROR("Can not find the child "DFID": rc = %ld\n", PFID(fid),
1606 RETURN(PTR_ERR(ec_child));
1609 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1611 CERROR("Can not locate the child "DFID"\n", PFID(fid));
1612 GOTO(out_put, rc = -EINVAL);
1615 CDEBUG(D_RPCTRACE, "Start creating object "DFID" %s %p\n",
1616 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
1619 * Do not perform lookup sanity check. We know that name does not exist.
1621 spec->sp_cr_lookup = 0;
1622 rc = mdo_create(env, parent, lname, lu2md(child), spec, ma);
1624 CERROR("Can not create child "DFID": rc = %d\n", PFID(fid), rc);
1627 CDEBUG(D_RPCTRACE, "End creating object "DFID" %s %p rc = %d\n",
1628 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent, rc);
1631 lu_object_put(env, ec_child);
1635 static int echo_set_lmm_size(const struct lu_env *env, struct lu_device *ld,
1638 struct echo_thread_info *info = echo_env_info(env);
1640 if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
1641 ma->ma_lmm = (void *)&info->eti_lmm;
1642 ma->ma_lmm_size = sizeof(info->eti_lmm);
1644 LASSERT(info->eti_big_lmmsize);
1645 ma->ma_lmm = info->eti_big_lmm;
1646 ma->ma_lmm_size = info->eti_big_lmmsize;
1653 echo_md_dir_stripe_choose(const struct lu_env *env, struct echo_device *ed,
1654 struct lu_object *obj, const char *name,
1655 unsigned int namelen, __u64 id,
1656 struct lu_object **new_parent)
1658 struct echo_thread_info *info = echo_env_info(env);
1659 struct md_attr *ma = &info->eti_ma;
1660 struct lmv_mds_md_v1 *lmv;
1661 struct lu_device *ld = ed->ed_next;
1663 struct lu_name tmp_ln_name;
1664 struct lu_fid stripe_fid;
1665 struct lu_object *stripe_obj;
1668 LASSERT(obj != NULL);
1669 LASSERT(S_ISDIR(obj->lo_header->loh_attr));
1671 memset(ma, 0, sizeof(*ma));
1672 echo_set_lmm_size(env, ld, ma);
1673 ma->ma_need = MA_LMV;
1674 rc = echo_attr_get_complex(env, lu2md(obj), ma);
1676 CERROR("Can not getattr child "DFID": rc = %d\n",
1677 PFID(lu_object_fid(obj)), rc);
1681 if (!(ma->ma_valid & MA_LMV)) {
1686 lmv = (struct lmv_mds_md_v1 *)ma->ma_lmm;
1687 if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1) {
1689 CERROR("Invalid mds md magic %x "DFID": rc = %d\n",
1690 le32_to_cpu(lmv->lmv_magic), PFID(lu_object_fid(obj)),
1696 tmp_ln_name.ln_name = name;
1697 tmp_ln_name.ln_namelen = namelen;
1700 echo_md_build_name(&tmp_ln_name, info->eti_name, id);
1703 idx = lmv_name_to_stripe_index(LMV_HASH_TYPE_FNV_1A_64,
1704 le32_to_cpu(lmv->lmv_stripe_count),
1705 tmp_ln_name.ln_name, tmp_ln_name.ln_namelen);
1707 LASSERT(idx < le32_to_cpu(lmv->lmv_stripe_count));
1708 fid_le_to_cpu(&stripe_fid, &lmv->lmv_stripe_fids[idx]);
1710 stripe_obj = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, &stripe_fid,
1712 if (IS_ERR(stripe_obj)) {
1713 rc = PTR_ERR(stripe_obj);
1714 CERROR("Can not find the parent "DFID": rc = %d\n",
1715 PFID(&stripe_fid), rc);
1719 *new_parent = lu_object_locate(stripe_obj->lo_header, ld->ld_type);
1721 lu_object_put(env, stripe_obj);
1728 static int echo_create_md_object(const struct lu_env *env,
1729 struct echo_device *ed,
1730 struct lu_object *ec_parent,
1732 char *name, int namelen,
1733 __u64 id, __u32 mode, int count,
1734 int stripe_count, int stripe_offset)
1736 struct lu_object *parent;
1737 struct lu_object *new_parent;
1738 struct echo_thread_info *info = echo_env_info(env);
1739 struct lu_name *lname = &info->eti_lname;
1740 struct md_op_spec *spec = &info->eti_spec;
1741 struct md_attr *ma = &info->eti_ma;
1742 struct lu_device *ld = ed->ed_next;
1750 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1754 rc = echo_md_dir_stripe_choose(env, ed, parent, name, namelen,
1759 LASSERT(new_parent != NULL);
1760 memset(ma, 0, sizeof(*ma));
1761 memset(spec, 0, sizeof(*spec));
1762 echo_set_lmm_size(env, ld, ma);
1763 if (stripe_count != 0) {
1764 spec->sp_cr_flags |= MDS_FMODE_WRITE;
1765 if (stripe_count != -1) {
1766 if (S_ISDIR(mode)) {
1767 struct lmv_user_md *lmu;
1769 lmu = (struct lmv_user_md *)&info->eti_lum;
1770 lmu->lum_magic = LMV_USER_MAGIC;
1771 lmu->lum_stripe_offset = stripe_offset;
1772 lmu->lum_stripe_count = stripe_count;
1773 lmu->lum_hash_type = LMV_HASH_TYPE_FNV_1A_64;
1774 spec->u.sp_ea.eadata = lmu;
1775 spec->u.sp_ea.eadatalen = sizeof(*lmu);
1777 struct lov_user_md_v3 *lum = &info->eti_lum;
1779 lum->lmm_magic = LOV_USER_MAGIC_V3;
1780 lum->lmm_stripe_count = stripe_count;
1781 lum->lmm_stripe_offset = stripe_offset;
1782 lum->lmm_pattern = LOV_PATTERN_NONE;
1783 spec->u.sp_ea.eadata = lum;
1784 spec->u.sp_ea.eadatalen = sizeof(*lum);
1786 spec->sp_cr_flags |= MDS_OPEN_HAS_EA;
1790 ma->ma_attr.la_mode = mode;
1791 ma->ma_attr.la_valid = LA_CTIME | LA_MODE;
1792 ma->ma_attr.la_ctime = ktime_get_real_seconds();
1795 lname->ln_name = name;
1796 lname->ln_namelen = namelen;
1797 /* If name is specified, only create one object by name */
1798 rc = echo_md_create_internal(env, ed, lu2md(new_parent), fid,
1803 /* Create multiple object sequenced by id */
1804 for (i = 0; i < count; i++) {
1805 char *tmp_name = info->eti_name;
1807 echo_md_build_name(lname, tmp_name, id);
1809 rc = echo_md_create_internal(env, ed, lu2md(new_parent),
1810 fid, lname, spec, ma);
1812 CERROR("Can not create child %s: rc = %d\n", tmp_name,
1821 if (new_parent != parent)
1822 lu_object_put(env, new_parent);
1827 static struct lu_object *echo_md_lookup(const struct lu_env *env,
1828 struct echo_device *ed,
1829 struct md_object *parent,
1830 struct lu_name *lname)
1832 struct echo_thread_info *info = echo_env_info(env);
1833 struct lu_fid *fid = &info->eti_fid;
1834 struct lu_object *child;
1838 CDEBUG(D_INFO, "lookup %s in parent "DFID" %p\n", lname->ln_name,
1841 rc = mdo_lookup(env, parent, lname, fid, NULL);
1843 CERROR("lookup %s: rc = %d\n", lname->ln_name, rc);
1844 RETURN(ERR_PTR(rc));
1848 * In the function below, .hs_keycmp resolves to
1849 * lu_obj_hop_keycmp()
1851 /* coverity[overrun-buffer-val] */
1852 child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
1857 static int echo_setattr_object(const struct lu_env *env,
1858 struct echo_device *ed,
1859 struct lu_object *ec_parent,
1860 __u64 id, int count)
1862 struct lu_object *parent;
1863 struct lu_object *new_parent;
1864 struct echo_thread_info *info = echo_env_info(env);
1865 struct lu_name *lname = &info->eti_lname;
1866 char *name = info->eti_name;
1867 struct lu_device *ld = ed->ed_next;
1868 struct lu_buf *buf = &info->eti_buf;
1876 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1880 rc = echo_md_dir_stripe_choose(env, ed, parent, NULL, 0, id,
1885 for (i = 0; i < count; i++) {
1886 struct lu_object *ec_child, *child;
1888 echo_md_build_name(lname, name, id);
1890 ec_child = echo_md_lookup(env, ed, lu2md(new_parent), lname);
1891 if (IS_ERR(ec_child)) {
1892 rc = PTR_ERR(ec_child);
1893 CERROR("Can't find child %s: rc = %d\n",
1894 lname->ln_name, rc);
1898 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1900 CERROR("Can not locate the child %s\n", lname->ln_name);
1901 lu_object_put(env, ec_child);
1906 CDEBUG(D_RPCTRACE, "Start setattr object "DFID"\n",
1907 PFID(lu_object_fid(child)));
1909 buf->lb_buf = info->eti_xattr_buf;
1910 buf->lb_len = sizeof(info->eti_xattr_buf);
1912 sprintf(name, "%s.test1", XATTR_USER_PREFIX);
1913 rc = mo_xattr_set(env, lu2md(child), buf, name,
1916 CERROR("Can not setattr child "DFID": rc = %d\n",
1917 PFID(lu_object_fid(child)), rc);
1918 lu_object_put(env, ec_child);
1921 CDEBUG(D_RPCTRACE, "End setattr object "DFID"\n",
1922 PFID(lu_object_fid(child)));
1924 lu_object_put(env, ec_child);
1927 if (new_parent != parent)
1928 lu_object_put(env, new_parent);
1933 static int echo_getattr_object(const struct lu_env *env,
1934 struct echo_device *ed,
1935 struct lu_object *ec_parent,
1936 __u64 id, int count)
1938 struct lu_object *parent;
1939 struct lu_object *new_parent;
1940 struct echo_thread_info *info = echo_env_info(env);
1941 struct lu_name *lname = &info->eti_lname;
1942 char *name = info->eti_name;
1943 struct md_attr *ma = &info->eti_ma;
1944 struct lu_device *ld = ed->ed_next;
1952 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
1956 rc = echo_md_dir_stripe_choose(env, ed, parent, NULL, 0, id,
1961 memset(ma, 0, sizeof(*ma));
1962 ma->ma_need |= MA_INODE | MA_LOV | MA_PFID | MA_HSM | MA_ACL_DEF;
1963 ma->ma_acl = info->eti_xattr_buf;
1964 ma->ma_acl_size = sizeof(info->eti_xattr_buf);
1966 for (i = 0; i < count; i++) {
1967 struct lu_object *ec_child, *child;
1970 echo_md_build_name(lname, name, id);
1971 echo_set_lmm_size(env, ld, ma);
1973 ec_child = echo_md_lookup(env, ed, lu2md(new_parent), lname);
1974 if (IS_ERR(ec_child)) {
1975 CERROR("Can't find child %s: rc = %ld\n",
1976 lname->ln_name, PTR_ERR(ec_child));
1977 RETURN(PTR_ERR(ec_child));
1980 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
1982 CERROR("Can not locate the child %s\n", lname->ln_name);
1983 lu_object_put(env, ec_child);
1987 CDEBUG(D_RPCTRACE, "Start getattr object "DFID"\n",
1988 PFID(lu_object_fid(child)));
1989 rc = echo_attr_get_complex(env, lu2md(child), ma);
1991 CERROR("Can not getattr child "DFID": rc = %d\n",
1992 PFID(lu_object_fid(child)), rc);
1993 lu_object_put(env, ec_child);
1996 CDEBUG(D_RPCTRACE, "End getattr object "DFID"\n",
1997 PFID(lu_object_fid(child)));
1999 lu_object_put(env, ec_child);
2002 if (new_parent != parent)
2003 lu_object_put(env, new_parent);
2008 static int echo_lookup_object(const struct lu_env *env,
2009 struct echo_device *ed,
2010 struct lu_object *ec_parent,
2011 __u64 id, int count)
2013 struct lu_object *parent;
2014 struct lu_object *new_parent;
2015 struct echo_thread_info *info = echo_env_info(env);
2016 struct lu_name *lname = &info->eti_lname;
2017 char *name = info->eti_name;
2018 struct lu_fid *fid = &info->eti_fid;
2019 struct lu_device *ld = ed->ed_next;
2025 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
2029 rc = echo_md_dir_stripe_choose(env, ed, parent, NULL, 0, id,
2034 /*prepare the requests*/
2035 for (i = 0; i < count; i++) {
2036 echo_md_build_name(lname, name, id);
2038 CDEBUG(D_RPCTRACE, "Start lookup object "DFID" %s %p\n",
2039 PFID(lu_object_fid(new_parent)), lname->ln_name,
2042 rc = mdo_lookup(env, lu2md(new_parent), lname, fid, NULL);
2044 CERROR("Can not lookup child %s: rc = %d\n", name, rc);
2048 CDEBUG(D_RPCTRACE, "End lookup object "DFID" %s %p\n",
2049 PFID(lu_object_fid(new_parent)), lname->ln_name,
2055 if (new_parent != parent)
2056 lu_object_put(env, new_parent);
2061 static int echo_md_destroy_internal(const struct lu_env *env,
2062 struct echo_device *ed,
2063 struct md_object *parent,
2064 struct lu_name *lname,
2067 struct lu_device *ld = ed->ed_next;
2068 struct lu_object *ec_child;
2069 struct lu_object *child;
2074 ec_child = echo_md_lookup(env, ed, parent, lname);
2075 if (IS_ERR(ec_child)) {
2076 CERROR("Can't find child %s: rc = %ld\n", lname->ln_name,
2078 RETURN(PTR_ERR(ec_child));
2081 child = lu_object_locate(ec_child->lo_header, ld->ld_type);
2083 CERROR("Can not locate the child %s\n", lname->ln_name);
2084 GOTO(out_put, rc = -EINVAL);
2087 if (lu_object_remote(child)) {
2088 CERROR("Can not destroy remote object %s: rc = %d\n",
2089 lname->ln_name, -EPERM);
2090 GOTO(out_put, rc = -EPERM);
2092 CDEBUG(D_RPCTRACE, "Start destroy object "DFID" %s %p\n",
2093 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
2095 rc = mdo_unlink(env, parent, lu2md(child), lname, ma, 0);
2097 CERROR("Can not unlink child %s: rc = %d\n",
2098 lname->ln_name, rc);
2101 CDEBUG(D_RPCTRACE, "End destroy object "DFID" %s %p\n",
2102 PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
2104 lu_object_put(env, ec_child);
2108 static int echo_destroy_object(const struct lu_env *env,
2109 struct echo_device *ed,
2110 struct lu_object *ec_parent,
2111 char *name, int namelen,
2112 __u64 id, __u32 mode,
2115 struct echo_thread_info *info = echo_env_info(env);
2116 struct lu_name *lname = &info->eti_lname;
2117 struct md_attr *ma = &info->eti_ma;
2118 struct lu_device *ld = ed->ed_next;
2119 struct lu_object *parent;
2120 struct lu_object *new_parent;
2125 parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
2129 rc = echo_md_dir_stripe_choose(env, ed, parent, name, namelen,
2134 memset(ma, 0, sizeof(*ma));
2135 ma->ma_attr.la_mode = mode;
2136 ma->ma_attr.la_valid = LA_CTIME;
2137 ma->ma_attr.la_ctime = ktime_get_real_seconds();
2138 ma->ma_need = MA_INODE;
2142 lname->ln_name = name;
2143 lname->ln_namelen = namelen;
2144 rc = echo_md_destroy_internal(env, ed, lu2md(new_parent), lname,
2149 /*prepare the requests*/
2150 for (i = 0; i < count; i++) {
2151 char *tmp_name = info->eti_name;
2154 echo_md_build_name(lname, tmp_name, id);
2156 rc = echo_md_destroy_internal(env, ed, lu2md(new_parent), lname,
2159 CERROR("Can not unlink child %s: rc = %d\n", name, rc);
2166 if (new_parent != parent)
2167 lu_object_put(env, new_parent);
2172 static struct lu_object *echo_resolve_path(const struct lu_env *env,
2173 struct echo_device *ed, char *path,
2176 struct lu_device *ld = ed->ed_next;
2177 struct echo_thread_info *info = echo_env_info(env);
2178 struct lu_fid *fid = &info->eti_fid;
2179 struct lu_name *lname = &info->eti_lname;
2180 struct lu_object *parent = NULL;
2181 struct lu_object *child = NULL;
2185 *fid = ed->ed_root_fid;
2188 * In the function below, .hs_keycmp resolves to
2189 * lu_obj_hop_keycmp()
2191 /* coverity[overrun-buffer-val] */
2192 parent = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
2193 if (IS_ERR(parent)) {
2194 CERROR("Can not find the parent "DFID": rc = %ld\n",
2195 PFID(fid), PTR_ERR(parent));
2200 struct lu_object *ld_parent;
2203 e = strsep(&path, "/");
2208 if (!path || path[0] == '\0')
2214 lname->ln_namelen = strlen(e);
2216 ld_parent = lu_object_locate(parent->lo_header, ld->ld_type);
2218 lu_object_put(env, parent);
2223 child = echo_md_lookup(env, ed, lu2md(ld_parent), lname);
2224 lu_object_put(env, parent);
2225 if (IS_ERR(child)) {
2226 rc = (int)PTR_ERR(child);
2227 CERROR("lookup %s under parent "DFID": rc = %d\n",
2228 lname->ln_name, PFID(lu_object_fid(ld_parent)),
2235 RETURN(ERR_PTR(rc));
2240 static void echo_ucred_init(struct lu_env *env)
2242 struct lu_ucred *ucred = lu_ucred(env);
2244 ucred->uc_valid = UCRED_INVALID;
2246 ucred->uc_suppgids[0] = -1;
2247 ucred->uc_suppgids[1] = -1;
2249 ucred->uc_uid = ucred->uc_o_uid =
2250 from_kuid(&init_user_ns, current_uid());
2251 ucred->uc_gid = ucred->uc_o_gid =
2252 from_kgid(&init_user_ns, current_gid());
2253 ucred->uc_fsuid = ucred->uc_o_fsuid =
2254 from_kuid(&init_user_ns, current_fsuid());
2255 ucred->uc_fsgid = ucred->uc_o_fsgid =
2256 from_kgid(&init_user_ns, current_fsgid());
2257 ucred->uc_cap = cfs_curproc_cap_pack();
2259 /* remove fs privilege for non-root user. */
2260 if (ucred->uc_fsuid)
2261 ucred->uc_cap &= ~CFS_CAP_FS_MASK;
2262 ucred->uc_valid = UCRED_NEW;
2265 static void echo_ucred_fini(struct lu_env *env)
2267 struct lu_ucred *ucred = lu_ucred(env);
2269 ucred->uc_valid = UCRED_INIT;
2272 static int echo_md_handler(struct echo_device *ed, int command,
2273 char *path, int path_len, __u64 id, int count,
2274 struct obd_ioctl_data *data)
2276 struct echo_thread_info *info;
2277 struct lu_device *ld = ed->ed_next;
2280 struct lu_object *parent;
2282 int namelen = data->ioc_plen2;
2287 CERROR("MD echo client is not being initialized properly\n");
2291 if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
2292 CERROR("Only support MDD layer right now!\n");
2296 env = cl_env_get(&refcheck);
2298 RETURN(PTR_ERR(env));
2300 rc = lu_env_refill_by_tags(env, ECHO_MD_CTX_TAG, ECHO_SES_TAG);
2304 /* init big_lmm buffer */
2305 info = echo_env_info(env);
2306 LASSERT(info->eti_big_lmm == NULL);
2307 OBD_ALLOC_LARGE(info->eti_big_lmm, MIN_MD_SIZE);
2308 if (!info->eti_big_lmm)
2309 GOTO(out_env, rc = -ENOMEM);
2310 info->eti_big_lmmsize = MIN_MD_SIZE;
2312 parent = echo_resolve_path(env, ed, path, path_len);
2313 if (IS_ERR(parent)) {
2314 CERROR("Can not resolve the path %s: rc = %ld\n", path,
2316 GOTO(out_free, rc = PTR_ERR(parent));
2320 OBD_ALLOC(name, namelen + 1);
2322 GOTO(out_put, rc = -ENOMEM);
2323 if (copy_from_user(name, data->ioc_pbuf2, namelen))
2324 GOTO(out_name, rc = -EFAULT);
2327 echo_ucred_init(env);
2330 case ECHO_MD_CREATE:
2331 case ECHO_MD_MKDIR: {
2332 struct echo_thread_info *info = echo_env_info(env);
2333 __u32 mode = data->ioc_obdo2.o_mode;
2334 struct lu_fid *fid = &info->eti_fid;
2335 int stripe_count = (int)data->ioc_obdo2.o_misc;
2336 int stripe_index = (int)data->ioc_obdo2.o_stripe_idx;
2338 rc = ostid_to_fid(fid, &data->ioc_obdo1.o_oi, 0);
2343 * In the function below, .hs_keycmp resolves to
2344 * lu_obj_hop_keycmp()
2346 /* coverity[overrun-buffer-val] */
2347 rc = echo_create_md_object(env, ed, parent, fid, name, namelen,
2348 id, mode, count, stripe_count,
2352 case ECHO_MD_DESTROY:
2353 case ECHO_MD_RMDIR: {
2354 __u32 mode = data->ioc_obdo2.o_mode;
2356 rc = echo_destroy_object(env, ed, parent, name, namelen,
2360 case ECHO_MD_LOOKUP:
2361 rc = echo_lookup_object(env, ed, parent, id, count);
2363 case ECHO_MD_GETATTR:
2364 rc = echo_getattr_object(env, ed, parent, id, count);
2366 case ECHO_MD_SETATTR:
2367 rc = echo_setattr_object(env, ed, parent, id, count);
2370 CERROR("unknown command %d\n", command);
2374 echo_ucred_fini(env);
2378 OBD_FREE(name, namelen + 1);
2380 lu_object_put(env, parent);
2382 LASSERT(info->eti_big_lmm);
2383 OBD_FREE_LARGE(info->eti_big_lmm, info->eti_big_lmmsize);
2384 info->eti_big_lmm = NULL;
2385 info->eti_big_lmmsize = 0;
2387 cl_env_put(env, &refcheck);
2390 #endif /* HAVE_SERVER_SUPPORT */
2392 static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
2395 struct echo_object *eco;
2396 struct echo_client_obd *ec = ed->ed_ec;
2401 if (!(oa->o_valid & OBD_MD_FLID) ||
2402 !(oa->o_valid & OBD_MD_FLGROUP) ||
2403 !fid_seq_is_echo(ostid_seq(&oa->o_oi))) {
2404 CERROR("invalid oid "DOSTID"\n", POSTID(&oa->o_oi));
2408 if (ostid_id(&oa->o_oi) == 0) {
2409 rc = ostid_set_id(&oa->o_oi, ++last_object_id);
2414 rc = obd_create(env, ec->ec_exp, oa);
2416 CERROR("Cannot create objects: rc = %d\n", rc);
2422 oa->o_valid |= OBD_MD_FLID;
2424 eco = cl_echo_object_find(ed, &oa->o_oi);
2426 GOTO(failed, rc = PTR_ERR(eco));
2427 cl_echo_object_put(eco);
2429 CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
2433 if (created && rc != 0)
2434 obd_destroy(env, ec->ec_exp, oa);
2437 CERROR("create object failed with: rc = %d\n", rc);
2442 static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
2445 struct echo_object *eco;
2449 if (!(oa->o_valid & OBD_MD_FLID) ||
2450 !(oa->o_valid & OBD_MD_FLGROUP) ||
2451 ostid_id(&oa->o_oi) == 0) {
2452 CERROR("invalid oid "DOSTID"\n", POSTID(&oa->o_oi));
2457 eco = cl_echo_object_find(ed, &oa->o_oi);
2466 static void echo_put_object(struct echo_object *eco)
2470 rc = cl_echo_object_put(eco);
2472 CERROR("%s: echo client drop an object failed: rc = %d\n",
2473 eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc);
2476 static void echo_client_page_debug_setup(struct page *page, int rw, u64 id,
2477 u64 offset, u64 count)
2484 /* no partial pages on the client */
2485 LASSERT(count == PAGE_SIZE);
2489 for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
2490 if (rw == OBD_BRW_WRITE) {
2491 stripe_off = offset + delta;
2494 stripe_off = 0xdeadbeef00c0ffeeULL;
2495 stripe_id = 0xdeadbeef00c0ffeeULL;
2497 block_debug_setup(addr + delta, OBD_ECHO_BLOCK_SIZE,
2498 stripe_off, stripe_id);
2505 echo_client_page_debug_check(struct page *page, u64 id, u64 offset, u64 count)
2514 /* no partial pages on the client */
2515 LASSERT(count == PAGE_SIZE);
2519 for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
2520 stripe_off = offset + delta;
2523 rc2 = block_debug_check("test_brw",
2524 addr + delta, OBD_ECHO_BLOCK_SIZE,
2525 stripe_off, stripe_id);
2527 CERROR("Error in echo object %#llx\n", id);
2536 static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
2537 struct echo_object *eco, u64 offset,
2538 u64 count, int async)
2541 struct brw_page *pga;
2542 struct brw_page *pgp;
2543 struct page **pages;
2552 verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
2553 (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
2554 (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
2556 gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER;
2558 LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
2560 if ((count & (~PAGE_MASK)) != 0)
2563 /* XXX think again with misaligned I/O */
2564 npages = count >> PAGE_SHIFT;
2566 if (rw == OBD_BRW_WRITE)
2567 brw_flags = OBD_BRW_ASYNC;
2569 OBD_ALLOC(pga, npages * sizeof(*pga));
2573 OBD_ALLOC(pages, npages * sizeof(*pages));
2575 OBD_FREE(pga, npages * sizeof(*pga));
2579 for (i = 0, pgp = pga, off = offset;
2581 i++, pgp++, off += PAGE_SIZE) {
2583 LASSERT(pgp->pg == NULL); /* for cleanup */
2586 pgp->pg = alloc_page(gfp_mask);
2591 pgp->count = PAGE_SIZE;
2593 pgp->flag = brw_flags;
2596 echo_client_page_debug_setup(pgp->pg, rw,
2597 ostid_id(&oa->o_oi), off,
2601 /* brw mode can only be used at client */
2602 LASSERT(ed->ed_next != NULL);
2603 rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
2606 if (rc != 0 || rw != OBD_BRW_READ)
2609 for (i = 0, pgp = pga; i < npages; i++, pgp++) {
2616 vrc = echo_client_page_debug_check(pgp->pg,
2617 ostid_id(&oa->o_oi),
2620 if (vrc != 0 && rc == 0)
2623 __free_page(pgp->pg);
2625 OBD_FREE(pga, npages * sizeof(*pga));
2626 OBD_FREE(pages, npages * sizeof(*pages));
2630 static int echo_client_prep_commit(const struct lu_env *env,
2631 struct obd_export *exp, int rw,
2632 struct obdo *oa, struct echo_object *eco,
2633 u64 offset, u64 count,
2634 u64 batch, int async)
2636 struct obd_ioobj ioo;
2637 struct niobuf_local *lnb;
2638 struct niobuf_remote rnb;
2640 u64 npages, tot_pages, apc;
2641 int i, ret = 0, brw_flags = 0;
2644 if (count <= 0 || (count & ~PAGE_MASK) != 0)
2647 apc = npages = batch >> PAGE_SHIFT;
2648 tot_pages = count >> PAGE_SHIFT;
2650 OBD_ALLOC_LARGE(lnb, apc * sizeof(*lnb));
2654 if (rw == OBD_BRW_WRITE && async)
2655 brw_flags |= OBD_BRW_ASYNC;
2657 obdo_to_ioobj(oa, &ioo);
2661 for (; tot_pages > 0; tot_pages -= npages) {
2664 if (tot_pages < npages)
2667 rnb.rnb_offset = off;
2668 rnb.rnb_len = npages * PAGE_SIZE;
2669 rnb.rnb_flags = brw_flags;
2671 off += npages * PAGE_SIZE;
2674 ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
2678 for (i = 0; i < lpages; i++) {
2679 struct page *page = lnb[i].lnb_page;
2681 /* read past eof? */
2682 if (!page && lnb[i].lnb_rc == 0)
2686 lnb[i].lnb_flags |= OBD_BRW_ASYNC;
2688 if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
2689 (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
2690 (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
2693 if (rw == OBD_BRW_WRITE)
2694 echo_client_page_debug_setup(page, rw,
2695 ostid_id(&oa->o_oi),
2696 lnb[i].lnb_file_offset,
2699 echo_client_page_debug_check(page,
2700 ostid_id(&oa->o_oi),
2701 lnb[i].lnb_file_offset,
2705 ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb,
2710 /* Reuse env context. */
2711 lu_context_exit((struct lu_context *)&env->le_ctx);
2712 lu_context_enter((struct lu_context *)&env->le_ctx);
2716 OBD_FREE_LARGE(lnb, apc * sizeof(*lnb));
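/*
 * Summary of the prep/commit path above: this variant talks to a local
 * OFD/obdfilter export directly instead of going through the client
 * stack.  The request is carved into batches of at most
 * batch >> PAGE_SHIFT pages; each batch is described by a single
 * niobuf_remote, mapped to local pages with obd_preprw(), optionally
 * stamped/verified with the debug pattern, and then completed with
 * obd_commitrw().  For example, with a 1 MiB batch (ioc_plen1) and 4 KiB
 * pages, apc is 256 local niobufs per cycle, so a 64 MiB request takes
 * 64 prep/commit cycles.  The lu_context exit/enter at the end of each
 * cycle re-initialises the per-thread context keys while re-using the
 * same env for the whole request.
 */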
2721 static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
2722 struct obd_export *exp,
2723 struct obd_ioctl_data *data)
2725 struct obd_device *obd = class_exp2obd(exp);
2726 struct echo_device *ed = obd2echo_dev(obd);
2727 struct echo_client_obd *ec = ed->ed_ec;
2728 struct obdo *oa = &data->ioc_obdo1;
2729 struct echo_object *eco;
2735 LASSERT(oa->o_valid & OBD_MD_FLGROUP);
2737 rc = echo_get_object(&eco, ed, oa);
2741 oa->o_valid &= ~OBD_MD_FLHANDLE;
2743 /* OFD/obdfilter works only via prep/commit */
2744 test_mode = (long)data->ioc_pbuf1;
2745 if (!ed->ed_next && test_mode != 3) {
2747 data->ioc_plen1 = data->ioc_count;
2753 /* Truncate batch size to maximum */
2754 if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE)
2755 data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
2757 switch (test_mode) {
2761 rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
2762 data->ioc_count, async);
2765 rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
2766 data->ioc_offset, data->ioc_count,
2767 data->ioc_plen1, async);
2773 echo_put_object(eco);
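/*
 * Dispatch note for the ioctl above: data->ioc_pbuf1 carries the test
 * mode.  Modes 1 and 2 exercise the client-side path (echo_client_kbrw),
 * while mode 3 drives the target directly through
 * echo_client_prep_commit() with data->ioc_plen1 as the per-cycle batch
 * size, clamped to PTLRPC_MAX_BRW_SIZE.  When there is no lower client
 * device (ed->ed_next == NULL) only the prep/commit mode makes sense, so
 * the mode is forced to 3 with the whole count as the batch.  These
 * ioctls are normally issued by test tools such as lctl's test_brw or
 * obdfilter-survey rather than by regular clients.
 */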
2779 echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2780 void *karg, void __user *uarg)
2782 #ifdef HAVE_SERVER_SUPPORT
2783 struct tgt_session_info *tsi;
2785 struct obd_device *obd = exp->exp_obd;
2786 struct echo_device *ed = obd2echo_dev(obd);
2787 struct echo_client_obd *ec = ed->ed_ec;
2788 struct echo_object *eco;
2789 struct obd_ioctl_data *data = karg;
2791 unsigned long env_tags = 0;
2795 int rw = OBD_BRW_READ;
2799 oa = &data->ioc_obdo1;
2800 if (!(oa->o_valid & OBD_MD_FLGROUP)) {
2801 oa->o_valid |= OBD_MD_FLGROUP;
2802 ostid_set_seq_echo(&oa->o_oi);
2805 /* This FID is unpacked just for validation at this point */
2806 rc = ostid_to_fid(&fid, &oa->o_oi, 0);
2810 env = cl_env_get(&refcheck);
2812 RETURN(PTR_ERR(env));
2816 #ifdef HAVE_SERVER_SUPPORT
2817 if (cmd == OBD_IOC_ECHO_MD || cmd == OBD_IOC_ECHO_ALLOC_SEQ)
2818 env_tags = ECHO_MD_CTX_TAG;
2821 env_tags = ECHO_DT_CTX_TAG;
2823 rc = lu_env_refill_by_tags(env, env_tags, ECHO_SES_TAG);
2827 #ifdef HAVE_SERVER_SUPPORT
2828 tsi = tgt_ses_info(env);
2829 /* treat as local operation */
2830 tsi->tsi_exp = NULL;
2831 tsi->tsi_jobid = NULL;
2835 case OBD_IOC_CREATE: /* may create echo object */
2836 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2837 GOTO(out, rc = -EPERM);
2839 rc = echo_create_object(env, ed, oa);
2842 #ifdef HAVE_SERVER_SUPPORT
2843 case OBD_IOC_ECHO_MD: {
2850 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2851 GOTO(out, rc = -EPERM);
2853 count = data->ioc_count;
2854 cmd = data->ioc_command;
2856 id = data->ioc_obdo2.o_oi.oi.oi_id;
2857 dirlen = data->ioc_plen1;
2858 OBD_ALLOC(dir, dirlen + 1);
2860 GOTO(out, rc = -ENOMEM);
2862 if (copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
2863 OBD_FREE(dir, dirlen + 1);
2864 GOTO(out, rc = -EFAULT);
2867 rc = echo_md_handler(ed, cmd, dir, dirlen, id, count, data);
2868 OBD_FREE(dir, dirlen + 1);
2871 case OBD_IOC_ECHO_ALLOC_SEQ: {
2875 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2876 GOTO(out, rc = -EPERM);
2878 rc = seq_client_get_seq(env, ed->ed_cl_seq, &seq);
2880 CERROR("%s: cannot allocate seq: rc = %d\n",
2885 if (copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
2888 max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
2889 if (copy_to_user(data->ioc_pbuf2, &max_count,
2894 #endif /* HAVE_SERVER_SUPPORT */
2895 case OBD_IOC_DESTROY:
2896 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2897 GOTO(out, rc = -EPERM);
2899 rc = echo_get_object(&eco, ed, oa);
2901 rc = obd_destroy(env, ec->ec_exp, oa);
2903 eco->eo_deleted = 1;
2904 echo_put_object(eco);
2908 case OBD_IOC_GETATTR:
2909 rc = echo_get_object(&eco, ed, oa);
2911 rc = obd_getattr(env, ec->ec_exp, oa);
2912 echo_put_object(eco);
2916 case OBD_IOC_SETATTR:
2917 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2918 GOTO(out, rc = -EPERM);
2920 rc = echo_get_object(&eco, ed, oa);
2922 rc = obd_setattr(env, ec->ec_exp, oa);
2923 echo_put_object(eco);
2927 case OBD_IOC_BRW_WRITE:
2928 if (!cfs_capable(CFS_CAP_SYS_ADMIN))
2929 GOTO(out, rc = -EPERM);
2933 case OBD_IOC_BRW_READ:
2934 rc = echo_client_brw_ioctl(env, rw, exp, data);
2938 CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
2939 GOTO(out, rc = -ENOTTY);
2945 cl_env_put(env, &refcheck);
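/*
 * Common structure of the ioctl handler above: a cl_env is taken and
 * refilled with either the MD or the DT context tags, depending on
 * whether the command needs the metadata path (OBD_IOC_ECHO_MD,
 * OBD_IOC_ECHO_ALLOC_SEQ) or the data path, so the lower layers find the
 * per-thread contexts they expect; on server builds the session info is
 * cleared to mark the operation as local.  Commands that create, destroy
 * or modify objects are gated on CFS_CAP_SYS_ADMIN, unknown commands
 * return -ENOTTY, and the env is dropped on the way out.
 */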
2950 static int echo_client_setup(const struct lu_env *env,
2951 struct obd_device *obddev, struct lustre_cfg *lcfg)
2953 struct echo_client_obd *ec = &obddev->u.echo_client;
2954 struct obd_device *tgt;
2955 struct obd_uuid echo_uuid = { "ECHO_UUID" };
2956 struct obd_connect_data *ocd = NULL;
2960 if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
2961 CERROR("requires a TARGET OBD name\n");
2965 tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
2966 if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
2967 CERROR("device not attached or not set up (%s)\n",
2968 lustre_cfg_string(lcfg, 1));
2972 spin_lock_init(&ec->ec_lock);
2973 INIT_LIST_HEAD(&ec->ec_objects);
2974 INIT_LIST_HEAD(&ec->ec_locks);
2977 lu_context_tags_update(ECHO_DT_CTX_TAG);
2978 lu_session_tags_update(ECHO_SES_TAG);
2980 if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
2981 #ifdef HAVE_SERVER_SUPPORT
2982 lu_context_tags_update(ECHO_MD_CTX_TAG);
2985 "Local operations are NOT supported on client side. Only remote operations are supported. Metadata client must be run on server side.\n");
2990 OBD_ALLOC(ocd, sizeof(*ocd));
2992 CERROR("Can't allocate ocd for connection to %s\n",
2993 lustre_cfg_string(lcfg, 1));
2997 ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
2998 OBD_CONNECT_BRW_SIZE |
2999 OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
3000 OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE |
3002 ocd->ocd_brw_size = DT_MAX_BRW_SIZE;
3003 ocd->ocd_version = LUSTRE_VERSION_CODE;
3004 ocd->ocd_group = FID_SEQ_ECHO;
3006 rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
3008 /* Turn off pinger because it connects to tgt obd directly. */
3009 spin_lock(&tgt->obd_dev_lock);
3010 list_del_init(&ec->ec_exp->exp_obd_chain_timed);
3011 spin_unlock(&tgt->obd_dev_lock);
3014 OBD_FREE(ocd, sizeof(*ocd));
3017 CERROR("failed to connect to device %s\n",
3018 lustre_cfg_string(lcfg, 1));
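/*
 * Setup summary: the echo client is configured with the name of an
 * already attached and set-up target OBD in lcfg buffer 1 and connects
 * straight to it, advertising the connect flags listed above together
 * with DT_MAX_BRW_SIZE and the FID_SEQ_ECHO group; its export is then
 * taken off the pinger list since there is no real client above it.  For
 * reference, an echo client has historically been attached from user
 * space with something like the following (exact syntax varies between
 * releases; <target> is an existing osc/OST device):
 *
 *     lctl <<EOF
 *     attach echo_client echo_test echo_test_UUID
 *     setup <target>
 *     EOF
 */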
3025 static int echo_client_cleanup(struct obd_device *obddev)
3027 struct echo_device *ed = obd2echo_dev(obddev);
3028 struct echo_client_obd *ec = &obddev->u.echo_client;
3032 /* Do nothing for metadata echo client */
3036 lu_session_tags_clear(ECHO_SES_TAG & ~LCT_SESSION);
3037 lu_context_tags_clear(ECHO_DT_CTX_TAG);
3038 if (ed->ed_next_ismd) {
3039 #ifdef HAVE_SERVER_SUPPORT
3040 lu_context_tags_clear(ECHO_MD_CTX_TAG);
3043 "This is client-side only module, does not support metadata echo client.\n");
3048 if (!list_empty(&obddev->obd_exports)) {
3049 CERROR("still has clients!\n");
3053 LASSERT(refcount_read(&ec->ec_exp->exp_handle.h_ref) > 0);
3054 rc = obd_disconnect(ec->ec_exp);
3056 CERROR("failed to disconnect device: rc = %d\n", rc);
3061 static int echo_client_connect(const struct lu_env *env,
3062 struct obd_export **exp,
3063 struct obd_device *src, struct obd_uuid *cluuid,
3064 struct obd_connect_data *data, void *localdata)
3067 struct lustre_handle conn = { 0 };
3070 rc = class_connect(&conn, src, cluuid);
3072 *exp = class_conn2export(&conn);
3077 static int echo_client_disconnect(struct obd_export *exp)
3083 GOTO(out, rc = -EINVAL);
3085 rc = class_disconnect(exp);
3091 static const struct obd_ops echo_client_obd_ops = {
3092 .o_owner = THIS_MODULE,
3093 .o_iocontrol = echo_client_iocontrol,
3094 .o_connect = echo_client_connect,
3095 .o_disconnect = echo_client_disconnect
3098 static int __init obdecho_init(void)
3103 LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
3105 LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
3107 # ifdef HAVE_SERVER_SUPPORT
3108 rc = echo_persistent_pages_init();
3112 rc = class_register_type(&echo_obd_ops, NULL, true, NULL,
3113 LUSTRE_ECHO_NAME, &echo_srv_type);
3118 rc = lu_kmem_init(echo_caches);
3120 rc = class_register_type(&echo_client_obd_ops, NULL, false,
3121 NULL, LUSTRE_ECHO_CLIENT_NAME,
3124 lu_kmem_fini(echo_caches);
3127 # ifdef HAVE_SERVER_SUPPORT
3131 class_unregister_type(LUSTRE_ECHO_NAME);
3133 echo_persistent_pages_fini();
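/*
 * Registration order in obdecho_init(): on server builds the persistent
 * echo pages are set up and the server-side LUSTRE_ECHO_NAME type is
 * registered first, then the lu caches and the LUSTRE_ECHO_CLIENT_NAME
 * type.  The error paths unwind in the opposite order, mirroring
 * obdecho_exit() below.
 */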
3139 static void __exit obdecho_exit(void)
3141 class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
3142 lu_kmem_fini(echo_caches);
3144 #ifdef HAVE_SERVER_SUPPORT
3145 class_unregister_type(LUSTRE_ECHO_NAME);
3146 echo_persistent_pages_fini();
3150 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3151 MODULE_DESCRIPTION("Lustre Echo Client test driver");
3152 MODULE_VERSION(LUSTRE_VERSION_STRING);
3153 MODULE_LICENSE("GPL");
3155 module_init(obdecho_init);
3156 module_exit(obdecho_exit);
3158 /** @} echo_client */